author		Linus Torvalds <torvalds@linux-foundation.org>	2012-05-24 15:42:54 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-05-24 15:42:54 -0400
commit		f2fde3a65e88330017b816faf2ef75f141d21375 (patch)
tree		57152ab5756e7ed1c58742e7e16f13a45ff11f21 /drivers
parent		28f3d717618156c0dcd2f497d791b578a7931d87 (diff)
parent		8c914028f5ddaa417b7d0f4b7fdc24caceaa8043 (diff)
Merge branch 'drm-core-next' of git://people.freedesktop.org/~airlied/linux
Pull main drm updates from Dave Airlie:
 "This is the main merge window request for the drm. It's big, but jam
  packed with lots of features and of course 0 regressions. (okay maybe
  there'll be one).

  Highlights:

  - new KMS drivers for server GPU chipsets: ast, mgag200 and cirrus
    (qemu only). These drivers use the generic modesetting drivers.

  - initial prime/dma-buf support for i915, nouveau, radeon, udl and
    exynos

  - switcheroo audio support: so GPUs with HDMI can turn off the sound
    driver without crashing stuff.

  - There are some patches drifting outside drivers/gpu into x86 and
    EFI for better handling of multiple video adapters in Apple Macs,
    they've got correct acks except one trivial fixup.

  - Core: edid parser has better DMT and reduced blanking support,
    crtc properties, plane properties

  - Drivers:
    exynos: add 2D core accel support, prime support, hdmi features
    intel: more Haswell support, initial Valleyview support, more hdmi
      infoframe fixes, update MAINTAINERS for Daniel, lots of cleanups
      and fixes
    radeon: more HDMI audio support, improved GPU lockup recovery
      support, remove nested mutexes, less memory copying on PCIE, fix
      bus master enable race (kexec), improved fence handling
    gma500: cleanups, 1080p support, acpi fixes
    nouveau: better nva3 memory reclocking, kepler accel (needs
      external firmware rip), async buffer moves on nv84+ hw.

  I've some more dma-buf patches that rely on the dma-buf merge for
  vmap stuff, and I've a few fixes building up, but I'd decided I'd
  better get rid of the main pull sooner rather than later, so the
  audio guys are also unblocked."

Fix up trivial conflict due to some duplicated changes in
drivers/gpu/drm/i915/intel_ringbuffer.c

* 'drm-core-next' of git://people.freedesktop.org/~airlied/linux: (605 commits)
  drm/nouveau/nvd9: Fix GPIO initialisation sequence.
  drm/nouveau: Unregister switcheroo client on exit
  drm/nouveau: Check dsm on switcheroo unregister
  drm/nouveau: fix a minor annoyance in an output string
  drm/nouveau: turn a BUG into a WARN
  drm/nv50: decode PGRAPH DATA_ERROR = 0x24
  drm/nouveau/disp: fix dithering not being enabled on some eDP macbooks
  drm/nvd9/copy: initialise copy engine, seems to work like nvc0
  drm/nvc0/ttm: use copy engines for async buffer moves
  drm/nva3/ttm: use copy engine for async buffer moves
  drm/nv98/ttm: add in a (disabled) crypto engine buffer copy method
  drm/nv84/ttm: use crypto engine for async buffer copies
  drm/nouveau/ttm: untangle code to support accelerated buffer moves
  drm/nouveau/fbcon: use fence for sync, rather than notifier
  drm/nv98/crypt: non-stub implementation of the engine hooks
  drm/nouveau/fifo: turn all fifo modules into engine modules
  drm/nv50/graph: remove ability to do interrupt-driven context switching
  drm/nv50: remove manual context unload on context destruction
  drm/nv50: remove execution engine context saves on suspend
  drm/nv50/fifo: use hardware channel kickoff functionality
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/char/agp/generic.c  4
-rw-r--r--  drivers/char/agp/intel-agp.c  5
-rw-r--r--  drivers/char/agp/intel-agp.h  14
-rw-r--r--  drivers/char/agp/intel-gtt.c  45
-rw-r--r--  drivers/char/agp/sgi-agp.c  1
-rw-r--r--  drivers/gpu/drm/Kconfig  6
-rw-r--r--  drivers/gpu/drm/Makefile  3
-rw-r--r--  drivers/gpu/drm/ast/Kconfig  16
-rw-r--r--  drivers/gpu/drm/ast/Makefile  9
-rw-r--r--  drivers/gpu/drm/ast/ast_dram_tables.h  144
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.c  244
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.h  356
-rw-r--r--  drivers/gpu/drm/ast/ast_fb.c  341
-rw-r--r--  drivers/gpu/drm/ast/ast_main.c  527
-rw-r--r--  drivers/gpu/drm/ast/ast_mode.c  1160
-rw-r--r--  drivers/gpu/drm/ast/ast_post.c  1780
-rw-r--r--  drivers/gpu/drm/ast/ast_tables.h  265
-rw-r--r--  drivers/gpu/drm/ast/ast_ttm.c  453
-rw-r--r--  drivers/gpu/drm/cirrus/Kconfig  12
-rw-r--r--  drivers/gpu/drm/cirrus/Makefile  5
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_drv.c  108
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_drv.h  246
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_fbdev.c  307
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_main.c  335
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_mode.c  629
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_ttm.c  453
-rw-r--r--  drivers/gpu/drm/drm_cache.c  23
-rw-r--r--  drivers/gpu/drm/drm_context.c  9
-rw-r--r--  drivers/gpu/drm/drm_crtc.c  583
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c  35
-rw-r--r--  drivers/gpu/drm/drm_drv.c  4
-rw-r--r--  drivers/gpu/drm/drm_edid.c  222
-rw-r--r--  drivers/gpu/drm/drm_edid_load.c  12
-rw-r--r--  drivers/gpu/drm/drm_edid_modes.h  292
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c  10
-rw-r--r--  drivers/gpu/drm/drm_gem.c  35
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c  4
-rw-r--r--  drivers/gpu/drm/drm_irq.c  23
-rw-r--r--  drivers/gpu/drm/drm_lock.c  2
-rw-r--r--  drivers/gpu/drm/drm_prime.c  48
-rw-r--r--  drivers/gpu/drm/drm_stub.c  7
-rw-r--r--  drivers/gpu/drm/drm_sysfs.c  10
-rw-r--r--  drivers/gpu/drm/drm_vm.c  18
-rw-r--r--  drivers/gpu/drm/exynos/Kconfig  12
-rw-r--r--  drivers/gpu/drm/exynos/Makefile  2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_buf.c  12
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_crtc.c  4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_crtc.h  4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dmabuf.c  272
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dmabuf.h  39
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c  43
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.h  17
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fb.c  2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_g2d.c  937
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_g2d.h  36
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.c  89
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.h  12
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_hdmi.c  77
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_hdmi.h  6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_plane.c  10
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c  429
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.c  401
-rw-r--r--  drivers/gpu/drm/exynos/regs-hdmi.h  6
-rw-r--r--  drivers/gpu/drm/gma500/Makefile  5
-rw-r--r--  drivers/gpu/drm/gma500/cdv_device.c  231
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_crt.c  30
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_display.c  697
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_hdmi.c  7
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_lvds.c  76
-rw-r--r--  drivers/gpu/drm/gma500/framebuffer.c  57
-rw-r--r--  drivers/gpu/drm/gma500/gem.c  2
-rw-r--r--  drivers/gpu/drm/gma500/gtt.c  32
-rw-r--r--  drivers/gpu/drm/gma500/intel_bios.c  274
-rw-r--r--  drivers/gpu/drm/gma500/intel_bios.h  161
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_device.c  452
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_dsi_dpi.c  1
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c  24
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_intel_display.c  330
-rw-r--r--  drivers/gpu/drm/gma500/mid_bios.c  295
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail.h  25
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_crtc.c  134
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_device.c  138
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_hdmi.c  66
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c  2
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_lvds.c  4
-rw-r--r--  drivers/gpu/drm/gma500/opregion.c  344
-rw-r--r--  drivers/gpu/drm/gma500/opregion.h (renamed from drivers/gpu/drm/gma500/intel_opregion.c)  64
-rw-r--r--  drivers/gpu/drm/gma500/psb_device.c  75
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.c  66
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.h  208
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_display.c  348
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_drv.h  9
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_reg.h  35
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_sdvo.c  9
-rw-r--r--  drivers/gpu/drm/gma500/psb_irq.c  36
-rw-r--r--  drivers/gpu/drm/gma500/psb_lid.c  14
-rw-r--r--  drivers/gpu/drm/i915/Makefile  7
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c  383
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c  1160
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c  277
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h  245
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c  1916
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_debug.c  16
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_dmabuf.c  171
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c  38
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c  200
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c  96
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c  202
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c  18
-rw-r--r--  drivers/gpu/drm/i915/i915_ioc32.c  5
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c  1732
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h  488
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c  18
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c  111
-rw-r--r--  drivers/gpu/drm/i915/i915_trace_points.c  2
-rw-r--r--  drivers/gpu/drm/i915/intel_acpi.c  3
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c  45
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c  76
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c  755
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c  4338
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c  46
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h  105
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c  6
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c  2
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c  310
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c  384
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c  11
-rw-r--r--  drivers/gpu/drm/i915/intel_modes.c  3
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c  71
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c  209
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c  29
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c  3796
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c  725
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h  23
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c  107
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c  102
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c  16
-rw-r--r--  drivers/gpu/drm/mgag200/Kconfig  15
-rw-r--r--  drivers/gpu/drm/mgag200/Makefile  5
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.c  116
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.h  276
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_fb.c  294
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_i2c.c  156
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_main.c  388
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_mode.c  1533
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_reg.h  661
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_ttm.c  452
-rw-r--r--  drivers/gpu/drm/nouveau/Makefile  11
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.c  3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c  51
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c  385
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_channel.c  88
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c  8
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_debugfs.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c  40
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dma.h  35
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dp.c  21
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.c  23
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h  176
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c  34
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c  578
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.h  52
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fifo.h  32
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c  11
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gpio.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_grctx.h  4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_hw.c  5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c  19
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_object.c  215
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_perf.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_pm.h  4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_prime.c  163
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sgdma.c  9
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_software.h  69
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c  270
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_vm.c  57
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_vm.h  6
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_crtc.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_display.c  11
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fbcon.c  48
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fence.c  140
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fifo.c  419
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_graph.c  39
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_instmem.c  23
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_software.c  147
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_fence.c  214
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_fifo.c  278
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_graph.c  13
-rw-r--r--  drivers/gpu/drm/nouveau/nv17_fifo.c  177
-rw-r--r--  drivers/gpu/drm/nouveau/nv20_graph.c  8
-rw-r--r--  drivers/gpu/drm/nouveau/nv31_mpeg.c  4
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_fifo.c  351
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_graph.c  37
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_grctx.c  32
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_pm.c  1
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_crtc.c  102
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_cursor.c  12
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_dac.c  6
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c  75
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.h  1
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_evo.c  4
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fb.c  4
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fbcon.c  59
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fifo.c  596
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_graph.c  229
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_grctx.c  33
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_instmem.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_mpeg.c  19
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_software.c  214
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_sor.c  6
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_vm.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nv84_fence.c  177
-rw-r--r--  drivers/gpu/drm/nouveau/nv84_fifo.c  241
-rw-r--r--  drivers/gpu/drm/nouveau/nv98_crypt.c  166
-rw-r--r--  drivers/gpu/drm/nouveau/nv98_crypt.fuc  698
-rw-r--r--  drivers/gpu/drm/nouveau/nv98_crypt.fuc.h  584
-rw-r--r--  drivers/gpu/drm/nouveau/nva3_copy.c  31
-rw-r--r--  drivers/gpu/drm/nouveau/nva3_pm.c  290
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_fbcon.c  54
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_fence.c  184
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_fifo.c  310
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_graph.c  4
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_pm.c  189
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_software.c  153
-rw-r--r--  drivers/gpu/drm/nouveau/nvd0_display.c  16
-rw-r--r--  drivers/gpu/drm/nouveau/nve0_fifo.c  423
-rw-r--r--  drivers/gpu/drm/nouveau/nve0_graph.c  831
-rw-r--r--  drivers/gpu/drm/nouveau/nve0_graph.h  89
-rw-r--r--  drivers/gpu/drm/nouveau/nve0_grctx.c  2777
-rw-r--r--  drivers/gpu/drm/radeon/Makefile  5
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c  7
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c  27
-rw-r--r--  drivers/gpu/drm/radeon/atombios_encoders.c  20
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c  155
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_blit_kms.c  3
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_cs.c  10
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_hdmi.c  216
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_reg.h  2
-rw-r--r--  drivers/gpu/drm/radeon/evergreend.h  220
-rw-r--r--  drivers/gpu/drm/radeon/ni.c  41
-rw-r--r--  drivers/gpu/drm/radeon/r100.c  121
-rw-r--r--  drivers/gpu/drm/radeon/r200.c  2
-rw-r--r--  drivers/gpu/drm/radeon/r300.c  34
-rw-r--r--  drivers/gpu/drm/radeon/r420.c  7
-rw-r--r--  drivers/gpu/drm/radeon/r520.c  8
-rw-r--r--  drivers/gpu/drm/radeon/r600.c  191
-rw-r--r--  drivers/gpu/drm/radeon/r600_audio.c  215
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit_kms.c  101
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c  22
-rw-r--r--  drivers/gpu/drm/radeon/r600_hdmi.c  464
-rw-r--r--  drivers/gpu/drm/radeon/r600_reg.h  45
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h  233
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h  235
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c  44
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h  27
-rw-r--r--  drivers/gpu/drm/radeon/radeon_benchmark.c  4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c  66
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c  56
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c  143
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c  42
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c  113
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c  18
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c  621
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c  26
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c  20
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq_kms.c  3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c  2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_encoders.c  1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h  15
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c  6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.h  30
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c  18
-rw-r--r--  drivers/gpu/drm/radeon/radeon_prime.c  176
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c  396
-rw-r--r--  drivers/gpu/drm/radeon/radeon_sa.c  325
-rw-r--r--  drivers/gpu/drm/radeon/radeon_semaphore.c  187
-rw-r--r--  drivers/gpu/drm/radeon/radeon_test.c  8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c  67
-rw-r--r--  drivers/gpu/drm/radeon/rs400.c  7
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c  48
-rw-r--r--  drivers/gpu/drm/radeon/rs600d.h  14
-rw-r--r--  drivers/gpu/drm/radeon/rs690.c  7
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c  8
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c  12
-rw-r--r--  drivers/gpu/drm/radeon/rv770d.h  191
-rw-r--r--  drivers/gpu/drm/radeon/si.c  30
-rw-r--r--  drivers/gpu/drm/savage/savage_bci.c  2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c  17
-rw-r--r--  drivers/gpu/drm/udl/udl_drv.c  8
-rw-r--r--  drivers/gpu/drm/udl/udl_drv.h  3
-rw-r--r--  drivers/gpu/drm/udl/udl_fb.c  9
-rw-r--r--  drivers/gpu/drm/udl/udl_gem.c  75
-rw-r--r--  drivers/gpu/drm/udl/udl_modeset.c  2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c  2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c  2
-rw-r--r--  drivers/gpu/vga/Kconfig  1
-rw-r--r--  drivers/gpu/vga/vga_switcheroo.c  284
-rw-r--r--  drivers/gpu/vga/vgaarb.c  9
-rw-r--r--  drivers/pci/pci-sysfs.c  5
-rw-r--r--  drivers/staging/omapdrm/omap_crtc.c  7
-rw-r--r--  drivers/staging/omapdrm/omap_drv.c  4
-rw-r--r--  drivers/video/efifb.c  79
302 files changed, 41423 insertions, 14710 deletions
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index 17e05d1076b3..a0df182f6f7d 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -958,7 +958,7 @@ int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
 	if (set_memory_uc((unsigned long)table, 1 << page_order))
 		printk(KERN_WARNING "Could not set GATT table memory to UC!\n");
 
-	bridge->gatt_table = (void *)table;
+	bridge->gatt_table = (u32 __iomem *)table;
 #else
 	bridge->gatt_table = ioremap_nocache(virt_to_phys(table),
 					(PAGE_SIZE * (1 << page_order)));
@@ -1010,7 +1010,6 @@ int agp_generic_free_gatt_table(struct agp_bridge_data *bridge)
 	case LVL2_APER_SIZE:
 		/* The generic routines can't deal with 2 level gatt's */
 		return -EINVAL;
-		break;
 	default:
 		page_order = 0;
 		break;
@@ -1077,7 +1076,6 @@ int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
 	case LVL2_APER_SIZE:
 		/* The generic routines can't deal with 2 level gatt's */
 		return -EINVAL;
-		break;
 	default:
 		num_entries = 0;
 		break;
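The two generic.c hunks above (and the matching sgi-agp.c hunk further down) are the same mechanical cleanup: a break statement placed immediately after an unconditional return can never execute, so it is simply deleted. A minimal standalone sketch of the pattern, with invented names and not taken from the kernel tree:

#include <errno.h>
#include <stdio.h>

enum aper_size { LVL2_APER_SIZE, LVL1_APER_SIZE };

static int check_aperture(enum aper_size s)
{
	switch (s) {
	case LVL2_APER_SIZE:
		return -EINVAL;
		/* a "break" here would be unreachable: control has
		 * already left the function, which is exactly the dead
		 * statement the cleanup above removes */
	default:
		return 0;
	}
}

int main(void)
{
	printf("%d\n", check_aperture(LVL2_APER_SIZE)); /* prints -22 */
	return 0;
}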
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 962e75dc4781..764f70c5e690 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -907,6 +907,11 @@ static struct pci_device_id agp_intel_pci_table[] = {
 	ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_HB),
 	ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_HB),
 	ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB),
+	ID(PCI_DEVICE_ID_INTEL_VALLEYVIEW_HB),
+	ID(PCI_DEVICE_ID_INTEL_HASWELL_HB),
+	ID(PCI_DEVICE_ID_INTEL_HASWELL_M_HB),
+	ID(PCI_DEVICE_ID_INTEL_HASWELL_S_HB),
+	ID(PCI_DEVICE_ID_INTEL_HASWELL_E_HB),
 	{ }
 };
 
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index 7ea18a5fe71c..c0091753a0d1 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -96,6 +96,7 @@
 #define G4x_GMCH_SIZE_VT_2M	(G4x_GMCH_SIZE_2M | G4x_GMCH_SIZE_VT_EN)
 
 #define GFX_FLSH_CNTL		0x2170 /* 915+ */
+#define GFX_FLSH_CNTL_VLV	0x101008
 
 #define I810_DRAM_CTL		0x3000
 #define I810_DRAM_ROW_0		0x00000001
@@ -235,6 +236,19 @@
 #define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB	0x0158	/* Server */
 #define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG	0x015A
 #define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG	0x016A
+#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_HB	0x0F00 /* VLV1 */
+#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG	0x0F30
+#define PCI_DEVICE_ID_INTEL_HASWELL_HB		0x0400 /* Desktop */
+#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG	0x0402
+#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG	0x0412
+#define PCI_DEVICE_ID_INTEL_HASWELL_M_HB	0x0404 /* Mobile */
+#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG	0x0406
+#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG	0x0416
+#define PCI_DEVICE_ID_INTEL_HASWELL_S_HB	0x0408 /* Server */
+#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG	0x040a
+#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG	0x041a
+#define PCI_DEVICE_ID_INTEL_HASWELL_SDV		0x0c16 /* SDV */
+#define PCI_DEVICE_ID_INTEL_HASWELL_E_HB	0x0c04
 
 int intel_gmch_probe(struct pci_dev *pdev,
 		     struct agp_bridge_data *bridge);
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 7f025fb620de..1237e7575c3f 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -1179,6 +1179,20 @@ static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
 	writel(addr | pte_flags, intel_private.gtt + entry);
 }
 
+static void valleyview_write_entry(dma_addr_t addr, unsigned int entry,
+				   unsigned int flags)
+{
+	u32 pte_flags;
+
+	pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
+
+	/* gen6 has bit11-4 for physical addr bit39-32 */
+	addr |= (addr >> 28) & 0xff0;
+	writel(addr | pte_flags, intel_private.gtt + entry);
+
+	writel(1, intel_private.registers + GFX_FLSH_CNTL_VLV);
+}
+
 static void gen6_cleanup(void)
 {
 }
@@ -1205,12 +1219,16 @@ static inline int needs_idle_maps(void)
 static int i9xx_setup(void)
 {
 	u32 reg_addr;
+	int size = KB(512);
 
 	pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr);
 
 	reg_addr &= 0xfff80000;
 
-	intel_private.registers = ioremap(reg_addr, 128 * 4096);
+	if (INTEL_GTT_GEN >= 7)
+		size = MB(2);
+
+	intel_private.registers = ioremap(reg_addr, size);
 	if (!intel_private.registers)
 		return -ENOMEM;
 
@@ -1354,6 +1372,15 @@ static const struct intel_gtt_driver sandybridge_gtt_driver = {
 	.check_flags = gen6_check_flags,
 	.chipset_flush = i9xx_chipset_flush,
 };
+static const struct intel_gtt_driver valleyview_gtt_driver = {
+	.gen = 7,
+	.setup = i9xx_setup,
+	.cleanup = gen6_cleanup,
+	.write_entry = valleyview_write_entry,
+	.dma_mask_size = 40,
+	.check_flags = gen6_check_flags,
+	.chipset_flush = i9xx_chipset_flush,
+};
 
 /* Table to describe Intel GMCH and AGP/PCIE GART drivers.  At least one of
  * driver and gmch_driver must be non-null, and find_gmch will determine
@@ -1460,6 +1487,22 @@ static const struct intel_gtt_driver_description {
 		"Ivybridge", &sandybridge_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG,
 		"Ivybridge", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG,
+		"ValleyView", &valleyview_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG,
+		"Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG,
+		"Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG,
+		"Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG,
+		"Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG,
+		"Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG,
+		"Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV,
+		"Haswell", &sandybridge_gtt_driver },
 	{ 0, NULL, NULL }
 };
 
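The interesting line in valleyview_write_entry() above is the address fold. A gen6-style page-table entry is only 32 bits wide, so bits 39:32 of the 40-bit DMA address are packed into PTE bits 11:4, which sit below the 4 KiB page offset and are otherwise unused; (addr >> 28) & 0xff0 is just ((addr >> 32) & 0xff) << 4. A worked standalone example of the packing (not kernel code):

#include <stdint.h>
#include <stdio.h>

static uint32_t pack_pte(uint64_t addr, uint32_t flags)
{
	/* fold physical address bits 39:32 into entry bits 11:4 */
	addr |= (addr >> 28) & 0xff0;
	return (uint32_t)addr | flags;
}

int main(void)
{
	/* page at physical 0x12_3456_7000: high byte 0x12 lands in bits 11:4 */
	printf("0x%08x\n", pack_pte(0x1234567000ULL, 0x1));
	/* prints 0x34567121: 0x120 folded in, valid bit set */
	return 0;
}

The trailing writel() to GFX_FLSH_CNTL_VLV then flushes the GTT write on ValleyView, which is why the chipset gets its own write_entry hook instead of reusing gen6_write_entry().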
diff --git a/drivers/char/agp/sgi-agp.c b/drivers/char/agp/sgi-agp.c
index ffa888cd1c88..192000377737 100644
--- a/drivers/char/agp/sgi-agp.c
+++ b/drivers/char/agp/sgi-agp.c
@@ -158,7 +158,6 @@ static int sgi_tioca_insert_memory(struct agp_memory *mem, off_t pg_start,
 		break;
 	case LVL2_APER_SIZE:
 		return -EINVAL;
-		break;
 	default:
 		num_entries = 0;
 		break;
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index e354bc0b052a..23120c00a881 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -186,3 +186,9 @@ source "drivers/gpu/drm/vmwgfx/Kconfig"
 source "drivers/gpu/drm/gma500/Kconfig"
 
 source "drivers/gpu/drm/udl/Kconfig"
+
+source "drivers/gpu/drm/ast/Kconfig"
+
+source "drivers/gpu/drm/mgag200/Kconfig"
+
+source "drivers/gpu/drm/cirrus/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index c20da5bda355..f65f65ed0ddf 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -34,6 +34,8 @@ obj-$(CONFIG_DRM_RADEON)+= radeon/
 obj-$(CONFIG_DRM_MGA)	+= mga/
 obj-$(CONFIG_DRM_I810)	+= i810/
 obj-$(CONFIG_DRM_I915)	+= i915/
+obj-$(CONFIG_DRM_MGAG200) += mgag200/
+obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus/
 obj-$(CONFIG_DRM_SIS)	+= sis/
 obj-$(CONFIG_DRM_SAVAGE)+= savage/
 obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/
@@ -42,4 +44,5 @@ obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/
 obj-$(CONFIG_DRM_EXYNOS) +=exynos/
 obj-$(CONFIG_DRM_GMA500) += gma500/
 obj-$(CONFIG_DRM_UDL) += udl/
+obj-$(CONFIG_DRM_AST) += ast/
 obj-y			+= i2c/
diff --git a/drivers/gpu/drm/ast/Kconfig b/drivers/gpu/drm/ast/Kconfig
new file mode 100644
index 000000000000..a277b1257888
--- /dev/null
+++ b/drivers/gpu/drm/ast/Kconfig
@@ -0,0 +1,16 @@
+config DRM_AST
+	tristate "AST server chips"
+	depends on DRM && PCI && EXPERIMENTAL
+	select DRM_TTM
+	select FB_SYS_COPYAREA
+	select FB_SYS_FILLRECT
+	select FB_SYS_IMAGEBLIT
+	select DRM_KMS_HELPER
+	select DRM_TTM
+	help
+	 Say yes for experimental AST GPU driver. Do not enable
+	 this driver without having a working -modesetting,
+	 and a version of AST that knows to fail if KMS
+	 is bound to the driver. These GPUs are commonly found
+	 in server chipsets.
+
diff --git a/drivers/gpu/drm/ast/Makefile b/drivers/gpu/drm/ast/Makefile
new file mode 100644
index 000000000000..8df4f284ee24
--- /dev/null
+++ b/drivers/gpu/drm/ast/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for the drm device driver.  This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm
+
+ast-y := ast_drv.o ast_main.o ast_mode.o ast_fb.o ast_ttm.o ast_post.o
+
+obj-$(CONFIG_DRM_AST) := ast.o
\ No newline at end of file
diff --git a/drivers/gpu/drm/ast/ast_dram_tables.h b/drivers/gpu/drm/ast/ast_dram_tables.h
new file mode 100644
index 000000000000..cc04539c0ff3
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_dram_tables.h
@@ -0,0 +1,144 @@
+#ifndef AST_DRAM_TABLES_H
+#define AST_DRAM_TABLES_H
+
+/* DRAM timing tables */
+struct ast_dramstruct {
+	u16 index;
+	u32 data;
+};
+
+static const struct ast_dramstruct ast2000_dram_table_data[] = {
+	{ 0x0108, 0x00000000 },
+	{ 0x0120, 0x00004a21 },
+	{ 0xFF00, 0x00000043 },
+	{ 0x0000, 0xFFFFFFFF },
+	{ 0x0004, 0x00000089 },
+	{ 0x0008, 0x22331353 },
+	{ 0x000C, 0x0d07000b },
+	{ 0x0010, 0x11113333 },
+	{ 0x0020, 0x00110350 },
+	{ 0x0028, 0x1e0828f0 },
+	{ 0x0024, 0x00000001 },
+	{ 0x001C, 0x00000000 },
+	{ 0x0014, 0x00000003 },
+	{ 0xFF00, 0x00000043 },
+	{ 0x0018, 0x00000131 },
+	{ 0x0014, 0x00000001 },
+	{ 0xFF00, 0x00000043 },
+	{ 0x0018, 0x00000031 },
+	{ 0x0014, 0x00000001 },
+	{ 0xFF00, 0x00000043 },
+	{ 0x0028, 0x1e0828f1 },
+	{ 0x0024, 0x00000003 },
+	{ 0x002C, 0x1f0f28fb },
+	{ 0x0030, 0xFFFFFE01 },
+	{ 0xFFFF, 0xFFFFFFFF }
+};
+
+static const struct ast_dramstruct ast1100_dram_table_data[] = {
+	{ 0x2000, 0x1688a8a8 },
+	{ 0x2020, 0x000041f0 },
+	{ 0xFF00, 0x00000043 },
+	{ 0x0000, 0xfc600309 },
+	{ 0x006C, 0x00909090 },
+	{ 0x0064, 0x00050000 },
+	{ 0x0004, 0x00000585 },
+	{ 0x0008, 0x0011030f },
+	{ 0x0010, 0x22201724 },
+	{ 0x0018, 0x1e29011a },
+	{ 0x0020, 0x00c82222 },
+	{ 0x0014, 0x01001523 },
+	{ 0x001C, 0x1024010d },
+	{ 0x0024, 0x00cb2522 },
+	{ 0x0038, 0xffffff82 },
+	{ 0x003C, 0x00000000 },
+	{ 0x0040, 0x00000000 },
+	{ 0x0044, 0x00000000 },
+	{ 0x0048, 0x00000000 },
+	{ 0x004C, 0x00000000 },
+	{ 0x0050, 0x00000000 },
+	{ 0x0054, 0x00000000 },
+	{ 0x0058, 0x00000000 },
+	{ 0x005C, 0x00000000 },
+	{ 0x0060, 0x032aa02a },
+	{ 0x0064, 0x002d3000 },
+	{ 0x0068, 0x00000000 },
+	{ 0x0070, 0x00000000 },
+	{ 0x0074, 0x00000000 },
+	{ 0x0078, 0x00000000 },
+	{ 0x007C, 0x00000000 },
+	{ 0x0034, 0x00000001 },
+	{ 0xFF00, 0x00000043 },
+	{ 0x002C, 0x00000732 },
+	{ 0x0030, 0x00000040 },
+	{ 0x0028, 0x00000005 },
+	{ 0x0028, 0x00000007 },
+	{ 0x0028, 0x00000003 },
+	{ 0x0028, 0x00000001 },
+	{ 0x000C, 0x00005a08 },
+	{ 0x002C, 0x00000632 },
+	{ 0x0028, 0x00000001 },
+	{ 0x0030, 0x000003c0 },
+	{ 0x0028, 0x00000003 },
+	{ 0x0030, 0x00000040 },
+	{ 0x0028, 0x00000003 },
+	{ 0x000C, 0x00005a21 },
+	{ 0x0034, 0x00007c03 },
+	{ 0x0120, 0x00004c41 },
+	{ 0xffff, 0xffffffff },
+};
+
+static const struct ast_dramstruct ast2100_dram_table_data[] = {
+	{ 0x2000, 0x1688a8a8 },
+	{ 0x2020, 0x00004120 },
+	{ 0xFF00, 0x00000043 },
+	{ 0x0000, 0xfc600309 },
+	{ 0x006C, 0x00909090 },
+	{ 0x0064, 0x00070000 },
+	{ 0x0004, 0x00000489 },
+	{ 0x0008, 0x0011030f },
+	{ 0x0010, 0x32302926 },
+	{ 0x0018, 0x274c0122 },
+	{ 0x0020, 0x00ce2222 },
+	{ 0x0014, 0x01001523 },
+	{ 0x001C, 0x1024010d },
+	{ 0x0024, 0x00cb2522 },
+	{ 0x0038, 0xffffff82 },
+	{ 0x003C, 0x00000000 },
+	{ 0x0040, 0x00000000 },
+	{ 0x0044, 0x00000000 },
+	{ 0x0048, 0x00000000 },
+	{ 0x004C, 0x00000000 },
+	{ 0x0050, 0x00000000 },
+	{ 0x0054, 0x00000000 },
+	{ 0x0058, 0x00000000 },
+	{ 0x005C, 0x00000000 },
+	{ 0x0060, 0x0f2aa02a },
+	{ 0x0064, 0x003f3005 },
+	{ 0x0068, 0x02020202 },
+	{ 0x0070, 0x00000000 },
+	{ 0x0074, 0x00000000 },
+	{ 0x0078, 0x00000000 },
+	{ 0x007C, 0x00000000 },
+	{ 0x0034, 0x00000001 },
+	{ 0xFF00, 0x00000043 },
+	{ 0x002C, 0x00000942 },
+	{ 0x0030, 0x00000040 },
+	{ 0x0028, 0x00000005 },
+	{ 0x0028, 0x00000007 },
+	{ 0x0028, 0x00000003 },
+	{ 0x0028, 0x00000001 },
+	{ 0x000C, 0x00005a08 },
+	{ 0x002C, 0x00000842 },
+	{ 0x0028, 0x00000001 },
+	{ 0x0030, 0x000003c0 },
+	{ 0x0028, 0x00000003 },
+	{ 0x0030, 0x00000040 },
+	{ 0x0028, 0x00000003 },
+	{ 0x000C, 0x00005a21 },
+	{ 0x0034, 0x00007c03 },
+	{ 0x0120, 0x00005061 },
+	{ 0xffff, 0xffffffff },
+};
+
+#endif
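These tables are flat index/data register scripts rather than code: each row targets a DRAM-controller register, the list ends at the { 0xFFFF, 0xFFFFFFFF } sentinel, and the recurring { 0xFF00, ... } rows appear to be in-stream wait markers rather than real register offsets. The actual walker lives in ast_post.c, which this excerpt does not include; a hypothetical, standalone consumer with the 0xFF00 semantics treated as an explicit assumption could look like:

#include <stdint.h>

struct dram_entry { uint16_t index; uint32_t data; };

/* reg_write/delay_us are stand-ins for the driver's MMIO write helper
 * and udelay(); both names are invented for this sketch. */
static void walk_dram_table(const struct dram_entry *t,
			    void (*reg_write)(uint16_t idx, uint32_t val),
			    void (*delay_us)(uint32_t us))
{
	for (; t->index != 0xffff; t++) {
		if (t->index == 0xff00) {
			/* assumed: a delay marker embedded in the script */
			delay_us(t->data & 0xff);
			continue;
		}
		reg_write(t->index, t->data);
	}
}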
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
new file mode 100644
index 000000000000..d0c4574ef49c
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -0,0 +1,244 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include <linux/module.h>
+#include <linux/console.h>
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc_helper.h"
+
+#include "ast_drv.h"
+
+int ast_modeset = -1;
+
+MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
+module_param_named(modeset, ast_modeset, int, 0400);
+
+#define PCI_VENDOR_ASPEED 0x1a03
+
+static struct drm_driver driver;
+
+#define AST_VGA_DEVICE(id, info) {		\
+	.class = PCI_BASE_CLASS_DISPLAY << 16,	\
+	.class_mask = 0xff0000,			\
+	.vendor = PCI_VENDOR_ASPEED,		\
+	.device = id,				\
+	.subvendor = PCI_ANY_ID,		\
+	.subdevice = PCI_ANY_ID,		\
+	.driver_data = (unsigned long) info }
+
+static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
+	AST_VGA_DEVICE(PCI_CHIP_AST2000, NULL),
+	AST_VGA_DEVICE(PCI_CHIP_AST2100, NULL),
+	/* AST_VGA_DEVICE(PCI_CHIP_AST1180, NULL), - don't bind to 1180 for now */
+	{0, 0, 0},
+};
+
+MODULE_DEVICE_TABLE(pci, pciidlist);
+
+static int __devinit
+ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	return drm_get_pci_dev(pdev, ent, &driver);
+}
+
+static void
+ast_pci_remove(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+
+	drm_put_dev(dev);
+}
+
+
+
+static int ast_drm_freeze(struct drm_device *dev)
+{
+	drm_kms_helper_poll_disable(dev);
+
+	pci_save_state(dev->pdev);
+
+	console_lock();
+	ast_fbdev_set_suspend(dev, 1);
+	console_unlock();
+	return 0;
+}
+
+static int ast_drm_thaw(struct drm_device *dev)
+{
+	int error = 0;
+
+	ast_post_gpu(dev);
+
+	drm_mode_config_reset(dev);
+	mutex_lock(&dev->mode_config.mutex);
+	drm_helper_resume_force_mode(dev);
+	mutex_unlock(&dev->mode_config.mutex);
+
+	console_lock();
+	ast_fbdev_set_suspend(dev, 0);
+	console_unlock();
+	return error;
+}
+
+static int ast_drm_resume(struct drm_device *dev)
+{
+	int ret;
+
+	if (pci_enable_device(dev->pdev))
+		return -EIO;
+
+	ret = ast_drm_thaw(dev);
+	if (ret)
+		return ret;
+
+	drm_kms_helper_poll_enable(dev);
+	return 0;
+}
+
+static int ast_pm_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *ddev = pci_get_drvdata(pdev);
+	int error;
+
+	error = ast_drm_freeze(ddev);
+	if (error)
+		return error;
+
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, PCI_D3hot);
+	return 0;
+}
+static int ast_pm_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *ddev = pci_get_drvdata(pdev);
+	return ast_drm_resume(ddev);
+}
+
+static int ast_pm_freeze(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *ddev = pci_get_drvdata(pdev);
+
+	if (!ddev || !ddev->dev_private)
+		return -ENODEV;
+	return ast_drm_freeze(ddev);
+
+}
+
+static int ast_pm_thaw(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *ddev = pci_get_drvdata(pdev);
+	return ast_drm_thaw(ddev);
+}
+
+static int ast_pm_poweroff(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *ddev = pci_get_drvdata(pdev);
+
+	return ast_drm_freeze(ddev);
+}
+
+static const struct dev_pm_ops ast_pm_ops = {
+	.suspend = ast_pm_suspend,
+	.resume = ast_pm_resume,
+	.freeze = ast_pm_freeze,
+	.thaw = ast_pm_thaw,
+	.poweroff = ast_pm_poweroff,
+	.restore = ast_pm_resume,
+};
+
+static struct pci_driver ast_pci_driver = {
+	.name = DRIVER_NAME,
+	.id_table = pciidlist,
+	.probe = ast_pci_probe,
+	.remove = ast_pci_remove,
+	.driver.pm = &ast_pm_ops,
+};
+
+static const struct file_operations ast_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_open,
+	.release = drm_release,
+	.unlocked_ioctl = drm_ioctl,
+	.mmap = ast_mmap,
+	.poll = drm_poll,
+	.fasync = drm_fasync,
+	.read = drm_read,
+};
+
+static struct drm_driver driver = {
+	.driver_features = DRIVER_USE_MTRR | DRIVER_MODESET | DRIVER_GEM,
+	.dev_priv_size = 0,
+
+	.load = ast_driver_load,
+	.unload = ast_driver_unload,
+
+	.fops = &ast_fops,
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+	.date = DRIVER_DATE,
+	.major = DRIVER_MAJOR,
+	.minor = DRIVER_MINOR,
+	.patchlevel = DRIVER_PATCHLEVEL,
+
+	.gem_init_object = ast_gem_init_object,
+	.gem_free_object = ast_gem_free_object,
+	.dumb_create = ast_dumb_create,
+	.dumb_map_offset = ast_dumb_mmap_offset,
+	.dumb_destroy = ast_dumb_destroy,
+
+};
+
+static int __init ast_init(void)
+{
+#ifdef CONFIG_VGA_CONSOLE
+	if (vgacon_text_force() && ast_modeset == -1)
+		return -EINVAL;
+#endif
+
+	if (ast_modeset == 0)
+		return -EINVAL;
+	return drm_pci_init(&driver, &ast_pci_driver);
+}
+static void __exit ast_exit(void)
+{
+	drm_pci_exit(&driver, &ast_pci_driver);
+}
+
+module_init(ast_init);
+module_exit(ast_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
+
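ast_init() above follows the usual KMS opt-out policy: vgacon_text_force() is true when the kernel was booted with nomodeset, in which case the driver refuses to load unless the modeset parameter was set explicitly, and modeset=0 always disables it. A standalone sketch of the decision table (not kernel code):

#include <stdbool.h>
#include <stdio.h>

/* modeset_param mirrors the ast.modeset module parameter:
 * -1 = auto (the default), 0 = disabled, 1 = forced on */
static bool should_bind(int modeset_param, bool nomodeset_on_cmdline)
{
	if (nomodeset_on_cmdline && modeset_param == -1)
		return false;	/* respect nomodeset unless overridden */
	if (modeset_param == 0)
		return false;	/* modesetting explicitly disabled */
	return true;
}

int main(void)
{
	printf("%d %d %d\n",
	       should_bind(-1, false),	/* 1: normal boot */
	       should_bind(-1, true),	/* 0: nomodeset boot */
	       should_bind(1, true));	/* 1: ast.modeset=1 wins */
	return 0;
}

In practice this means booting with ast.modeset=1 overrides nomodeset, and ast.modeset=0 keeps the driver from binding at all.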
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
new file mode 100644
index 000000000000..d4af9edcbb97
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -0,0 +1,356 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#ifndef __AST_DRV_H__
+#define __AST_DRV_H__
+
+#include "drm_fb_helper.h"
+
+#include "ttm/ttm_bo_api.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement.h"
+#include "ttm/ttm_memory.h"
+#include "ttm/ttm_module.h"
+
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+
+#define DRIVER_AUTHOR		"Dave Airlie"
+
+#define DRIVER_NAME		"ast"
+#define DRIVER_DESC		"AST"
+#define DRIVER_DATE		"20120228"
+
+#define DRIVER_MAJOR		0
+#define DRIVER_MINOR		1
+#define DRIVER_PATCHLEVEL	0
+
+#define PCI_CHIP_AST2000 0x2000
+#define PCI_CHIP_AST2100 0x2010
+#define PCI_CHIP_AST1180 0x1180
+
+
+enum ast_chip {
+	AST2000,
+	AST2100,
+	AST1100,
+	AST2200,
+	AST2150,
+	AST2300,
+	AST1180,
+};
+
+#define AST_DRAM_512Mx16 0
+#define AST_DRAM_1Gx16   1
+#define AST_DRAM_512Mx32 2
+#define AST_DRAM_1Gx32   3
+#define AST_DRAM_2Gx16   6
+#define AST_DRAM_4Gx16   7
+
+struct ast_fbdev;
+
+struct ast_private {
+	struct drm_device *dev;
+
+	void __iomem *regs;
+	void __iomem *ioregs;
+
+	enum ast_chip chip;
+	bool vga2_clone;
+	uint32_t dram_bus_width;
+	uint32_t dram_type;
+	uint32_t mclk;
+	uint32_t vram_size;
+
+	struct ast_fbdev *fbdev;
+
+	int fb_mtrr;
+
+	struct {
+		struct drm_global_reference mem_global_ref;
+		struct ttm_bo_global_ref bo_global_ref;
+		struct ttm_bo_device bdev;
+		atomic_t validate_sequence;
+	} ttm;
+
+	struct drm_gem_object *cursor_cache;
+	uint64_t cursor_cache_gpu_addr;
+	struct ttm_bo_kmap_obj cache_kmap;
+	int next_cursor;
+};
+
+int ast_driver_load(struct drm_device *dev, unsigned long flags);
+int ast_driver_unload(struct drm_device *dev);
+
+struct ast_gem_object;
+
+#define AST_IO_AR_PORT_WRITE		(0x40)
+#define AST_IO_MISC_PORT_WRITE		(0x42)
+#define AST_IO_SEQ_PORT			(0x44)
+#define AST_DAC_INDEX_READ		(0x3c7)
+#define AST_IO_DAC_INDEX_WRITE		(0x48)
+#define AST_IO_DAC_DATA		        (0x49)
+#define AST_IO_GR_PORT			(0x4E)
+#define AST_IO_CRTC_PORT		(0x54)
+#define AST_IO_INPUT_STATUS1_READ	(0x5A)
+#define AST_IO_MISC_PORT_READ		(0x4C)
+
+#define __ast_read(x) \
+static inline u##x ast_read##x(struct ast_private *ast, u32 reg) { \
+u##x val = 0;\
+val = ioread##x(ast->regs + reg); \
+return val;\
+}
+
+__ast_read(8);
+__ast_read(16);
+__ast_read(32)
+
+#define __ast_io_read(x) \
+static inline u##x ast_io_read##x(struct ast_private *ast, u32 reg) { \
+u##x val = 0;\
+val = ioread##x(ast->ioregs + reg); \
+return val;\
+}
+
+__ast_io_read(8);
+__ast_io_read(16);
+__ast_io_read(32);
+
+#define __ast_write(x) \
+static inline void ast_write##x(struct ast_private *ast, u32 reg, u##x val) {\
+	iowrite##x(val, ast->regs + reg);\
+	}
+
+__ast_write(8);
+__ast_write(16);
+__ast_write(32);
+
+#define __ast_io_write(x) \
+static inline void ast_io_write##x(struct ast_private *ast, u32 reg, u##x val) {\
+	iowrite##x(val, ast->ioregs + reg);\
+	}
+
+__ast_io_write(8);
+__ast_io_write(16);
+#undef __ast_io_write
+
+static inline void ast_set_index_reg(struct ast_private *ast,
+				     uint32_t base, uint8_t index,
+				     uint8_t val)
+{
+	ast_io_write16(ast, base, ((u16)val << 8) | index);
+}
+
+void ast_set_index_reg_mask(struct ast_private *ast,
+			    uint32_t base, uint8_t index,
+			    uint8_t mask, uint8_t val);
+uint8_t ast_get_index_reg(struct ast_private *ast,
+			  uint32_t base, uint8_t index);
+uint8_t ast_get_index_reg_mask(struct ast_private *ast,
+			       uint32_t base, uint8_t index, uint8_t mask);
+
+static inline void ast_open_key(struct ast_private *ast)
+{
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xA1, 0xFF, 0x04);
+}
+
+#define AST_VIDMEM_SIZE_8M    0x00800000
+#define AST_VIDMEM_SIZE_16M   0x01000000
+#define AST_VIDMEM_SIZE_32M   0x02000000
+#define AST_VIDMEM_SIZE_64M   0x04000000
+#define AST_VIDMEM_SIZE_128M  0x08000000
+
+#define AST_VIDMEM_DEFAULT_SIZE AST_VIDMEM_SIZE_8M
+
+#define AST_MAX_HWC_WIDTH 64
+#define AST_MAX_HWC_HEIGHT 64
+
+#define AST_HWC_SIZE                (AST_MAX_HWC_WIDTH*AST_MAX_HWC_HEIGHT*2)
+#define AST_HWC_SIGNATURE_SIZE      32
+
+#define AST_DEFAULT_HWC_NUM 2
+/* define for signature structure */
+#define AST_HWC_SIGNATURE_CHECKSUM  0x00
+#define AST_HWC_SIGNATURE_SizeX     0x04
+#define AST_HWC_SIGNATURE_SizeY     0x08
+#define AST_HWC_SIGNATURE_X         0x0C
+#define AST_HWC_SIGNATURE_Y         0x10
+#define AST_HWC_SIGNATURE_HOTSPOTX  0x14
+#define AST_HWC_SIGNATURE_HOTSPOTY  0x18
+
+
+struct ast_i2c_chan {
+	struct i2c_adapter adapter;
+	struct drm_device *dev;
+	struct i2c_algo_bit_data bit;
+};
+
+struct ast_connector {
+	struct drm_connector base;
+	struct ast_i2c_chan *i2c;
+};
+
+struct ast_crtc {
+	struct drm_crtc base;
+	u8 lut_r[256], lut_g[256], lut_b[256];
+	struct drm_gem_object *cursor_bo;
+	uint64_t cursor_addr;
+	int cursor_width, cursor_height;
+	u8 offset_x, offset_y;
+};
+
+struct ast_encoder {
+	struct drm_encoder base;
+};
+
+struct ast_framebuffer {
+	struct drm_framebuffer base;
+	struct drm_gem_object *obj;
+};
+
+struct ast_fbdev {
+	struct drm_fb_helper helper;
+	struct ast_framebuffer afb;
+	struct list_head fbdev_list;
+	void *sysram;
+	int size;
+	struct ttm_bo_kmap_obj mapping;
+};
+
+#define to_ast_crtc(x) container_of(x, struct ast_crtc, base)
+#define to_ast_connector(x) container_of(x, struct ast_connector, base)
+#define to_ast_encoder(x) container_of(x, struct ast_encoder, base)
+#define to_ast_framebuffer(x) container_of(x, struct ast_framebuffer, base)
+
+struct ast_vbios_stdtable {
+	u8 misc;
+	u8 seq[4];
+	u8 crtc[25];
+	u8 ar[20];
+	u8 gr[9];
+};
+
+struct ast_vbios_enhtable {
+	u32 ht;
+	u32 hde;
+	u32 hfp;
+	u32 hsync;
+	u32 vt;
+	u32 vde;
+	u32 vfp;
+	u32 vsync;
+	u32 dclk_index;
+	u32 flags;
+	u32 refresh_rate;
+	u32 refresh_rate_index;
+	u32 mode_id;
+};
+
+struct ast_vbios_dclk_info {
+	u8 param1;
+	u8 param2;
+	u8 param3;
+};
+
+struct ast_vbios_mode_info {
+	struct ast_vbios_stdtable *std_table;
+	struct ast_vbios_enhtable *enh_table;
+};
+
+extern int ast_mode_init(struct drm_device *dev);
+extern void ast_mode_fini(struct drm_device *dev);
+
+int ast_framebuffer_init(struct drm_device *dev,
+			 struct ast_framebuffer *ast_fb,
+			 struct drm_mode_fb_cmd2 *mode_cmd,
+			 struct drm_gem_object *obj);
+
+int ast_fbdev_init(struct drm_device *dev);
+void ast_fbdev_fini(struct drm_device *dev);
+void ast_fbdev_set_suspend(struct drm_device *dev, int state);
+
+struct ast_bo {
+	struct ttm_buffer_object bo;
+	struct ttm_placement placement;
+	struct ttm_bo_kmap_obj kmap;
+	struct drm_gem_object gem;
+	u32 placements[3];
+	int pin_count;
+};
+#define gem_to_ast_bo(gobj) container_of((gobj), struct ast_bo, gem)
+
+static inline struct ast_bo *
+ast_bo(struct ttm_buffer_object *bo)
+{
+	return container_of(bo, struct ast_bo, bo);
+}
+
+
+#define to_ast_obj(x) container_of(x, struct ast_gem_object, base)
+
+#define AST_MM_ALIGN_SHIFT 4
+#define AST_MM_ALIGN_MASK ((1 << AST_MM_ALIGN_SHIFT) - 1)
+
+extern int ast_dumb_create(struct drm_file *file,
+			   struct drm_device *dev,
+			   struct drm_mode_create_dumb *args);
+extern int ast_dumb_destroy(struct drm_file *file,
+			    struct drm_device *dev,
+			    uint32_t handle);
+
+extern int ast_gem_init_object(struct drm_gem_object *obj);
+extern void ast_gem_free_object(struct drm_gem_object *obj);
+extern int ast_dumb_mmap_offset(struct drm_file *file,
+				struct drm_device *dev,
+				uint32_t handle,
+				uint64_t *offset);
+
+#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
+
+int ast_mm_init(struct ast_private *ast);
+void ast_mm_fini(struct ast_private *ast);
+
+int ast_bo_create(struct drm_device *dev, int size, int align,
+		  uint32_t flags, struct ast_bo **pastbo);
+
+int ast_gem_create(struct drm_device *dev,
+		   u32 size, bool iskernel,
+		   struct drm_gem_object **obj);
+
+int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr);
+int ast_bo_unpin(struct ast_bo *bo);
+
+int ast_bo_reserve(struct ast_bo *bo, bool no_wait);
+void ast_bo_unreserve(struct ast_bo *bo);
+void ast_ttm_placement(struct ast_bo *bo, int domain);
+int ast_bo_push_sysram(struct ast_bo *bo);
+int ast_mmap(struct file *filp, struct vm_area_struct *vma);
+
+/* ast post */
+void ast_post_gpu(struct drm_device *dev);
+#endif
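The __ast_read/__ast_write macro families in the header above stamp out one inline MMIO accessor per access width by token-pasting the width into both the function name and the ioread/iowrite call. For example, __ast_read(8) expands to approximately:

static inline u8 ast_read8(struct ast_private *ast, u32 reg)
{
	u8 val = 0;
	val = ioread8(ast->regs + reg);
	return val;
}

ast_set_index_reg() then builds on the 16-bit I/O variant: writing ((val << 8) | index) as a single 16-bit cycle programs a VGA index register and its data register in one access.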
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
new file mode 100644
index 000000000000..2fc8e9e860b1
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -0,0 +1,341 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/sysrq.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+#include "drm_fb_helper.h"
+#include "ast_drv.h"
+
+static void ast_dirty_update(struct ast_fbdev *afbdev,
+			     int x, int y, int width, int height)
+{
+	int i;
+	struct drm_gem_object *obj;
+	struct ast_bo *bo;
+	int src_offset, dst_offset;
+	int bpp = (afbdev->afb.base.bits_per_pixel + 7)/8;
+	int ret;
+	bool unmap = false;
+
+	obj = afbdev->afb.obj;
+	bo = gem_to_ast_bo(obj);
+
+	ret = ast_bo_reserve(bo, true);
+	if (ret) {
+		DRM_ERROR("failed to reserve fb bo\n");
+		return;
+	}
+
+	if (!bo->kmap.virtual) {
+		ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+		if (ret) {
+			DRM_ERROR("failed to kmap fb updates\n");
+			ast_bo_unreserve(bo);
+			return;
+		}
+		unmap = true;
+	}
+	for (i = y; i < y + height; i++) {
+		/* assume equal stride for now */
+		src_offset = dst_offset = i * afbdev->afb.base.pitches[0] + (x * bpp);
+		memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, width * bpp);
+
+	}
+	if (unmap)
+		ttm_bo_kunmap(&bo->kmap);
+
+	ast_bo_unreserve(bo);
+}
+
+static void ast_fillrect(struct fb_info *info,
+			 const struct fb_fillrect *rect)
+{
+	struct ast_fbdev *afbdev = info->par;
+	sys_fillrect(info, rect);
+	ast_dirty_update(afbdev, rect->dx, rect->dy, rect->width,
+			 rect->height);
+}
+
+static void ast_copyarea(struct fb_info *info,
+			 const struct fb_copyarea *area)
+{
+	struct ast_fbdev *afbdev = info->par;
+	sys_copyarea(info, area);
+	ast_dirty_update(afbdev, area->dx, area->dy, area->width,
+			 area->height);
+}
+
+static void ast_imageblit(struct fb_info *info,
+			  const struct fb_image *image)
+{
+	struct ast_fbdev *afbdev = info->par;
+	sys_imageblit(info, image);
+	ast_dirty_update(afbdev, image->dx, image->dy, image->width,
+			 image->height);
+}
+
+static struct fb_ops astfb_ops = {
+	.owner = THIS_MODULE,
+	.fb_check_var = drm_fb_helper_check_var,
+	.fb_set_par = drm_fb_helper_set_par,
+	.fb_fillrect = ast_fillrect,
+	.fb_copyarea = ast_copyarea,
+	.fb_imageblit = ast_imageblit,
+	.fb_pan_display = drm_fb_helper_pan_display,
+	.fb_blank = drm_fb_helper_blank,
+	.fb_setcmap = drm_fb_helper_setcmap,
+	.fb_debug_enter = drm_fb_helper_debug_enter,
+	.fb_debug_leave = drm_fb_helper_debug_leave,
+};
+
+static int astfb_create_object(struct ast_fbdev *afbdev,
+			       struct drm_mode_fb_cmd2 *mode_cmd,
+			       struct drm_gem_object **gobj_p)
+{
+	struct drm_device *dev = afbdev->helper.dev;
+	u32 bpp, depth;
+	u32 size;
+	struct drm_gem_object *gobj;
+
+	int ret = 0;
+	drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+
+	size = mode_cmd->pitches[0] * mode_cmd->height;
+	ret = ast_gem_create(dev, size, true, &gobj);
+	if (ret)
+		return ret;
+
+	*gobj_p = gobj;
+	return ret;
+}
+
+static int astfb_create(struct ast_fbdev *afbdev,
+			struct drm_fb_helper_surface_size *sizes)
+{
+	struct drm_device *dev = afbdev->helper.dev;
+	struct drm_mode_fb_cmd2 mode_cmd;
+	struct drm_framebuffer *fb;
+	struct fb_info *info;
+	int size, ret;
+	struct device *device = &dev->pdev->dev;
+	void *sysram;
+	struct drm_gem_object *gobj = NULL;
+	struct ast_bo *bo = NULL;
+	mode_cmd.width = sizes->surface_width;
+	mode_cmd.height = sizes->surface_height;
+	mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7)/8);
+
+	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+							  sizes->surface_depth);
+
+	size = mode_cmd.pitches[0] * mode_cmd.height;
+
+	ret = astfb_create_object(afbdev, &mode_cmd, &gobj);
+	if (ret) {
+		DRM_ERROR("failed to create fbcon backing object %d\n", ret);
+		return ret;
+	}
+	bo = gem_to_ast_bo(gobj);
+
+	sysram = vmalloc(size);
+	if (!sysram)
+		return -ENOMEM;
+
+	info = framebuffer_alloc(0, device);
+	if (!info) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	info->par = afbdev;
+
+	ret = ast_framebuffer_init(dev, &afbdev->afb, &mode_cmd, gobj);
+	if (ret)
+		goto out;
+
+	afbdev->sysram = sysram;
+	afbdev->size = size;
+
+	fb = &afbdev->afb.base;
+	afbdev->helper.fb = fb;
+	afbdev->helper.fbdev = info;
+
+	strcpy(info->fix.id, "astdrmfb");
+
+	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
+	info->fbops = &astfb_ops;
+
+	ret = fb_alloc_cmap(&info->cmap, 256, 0);
+	if (ret) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	info->apertures = alloc_apertures(1);
+	if (!info->apertures) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	info->apertures->ranges[0].base = pci_resource_start(dev->pdev, 0);
+	info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
+
+	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+	drm_fb_helper_fill_var(info, &afbdev->helper, sizes->fb_width, sizes->fb_height);
+
+	info->screen_base = sysram;
+	info->screen_size = size;
+
+	info->pixmap.flags = FB_PIXMAP_SYSTEM;
+
+	DRM_DEBUG_KMS("allocated %dx%d\n",
+		      fb->width, fb->height);
+
+	return 0;
+out:
+	return ret;
+}
+
+static void ast_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+			     u16 blue, int regno)
+{
+	struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
+	ast_crtc->lut_r[regno] = red >> 8;
+	ast_crtc->lut_g[regno] = green >> 8;
+	ast_crtc->lut_b[regno] = blue >> 8;
+}
+
+static void ast_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+			     u16 *blue, int regno)
+{
+	struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
+	*red = ast_crtc->lut_r[regno] << 8;
+	*green = ast_crtc->lut_g[regno] << 8;
+	*blue = ast_crtc->lut_b[regno] << 8;
+}
+
+static int ast_find_or_create_single(struct drm_fb_helper *helper,
+				     struct drm_fb_helper_surface_size *sizes)
+{
+	struct ast_fbdev *afbdev = (struct ast_fbdev *)helper;
+	int new_fb = 0;
+	int ret;
+
+	if (!helper->fb) {
+		ret = astfb_create(afbdev, sizes);
+		if (ret)
+			return ret;
+		new_fb = 1;
+	}
+	return new_fb;
+}
+
+static struct drm_fb_helper_funcs ast_fb_helper_funcs = {
+	.gamma_set = ast_fb_gamma_set,
+	.gamma_get = ast_fb_gamma_get,
+	.fb_probe = ast_find_or_create_single,
+};
+
+static void ast_fbdev_destroy(struct drm_device *dev,
+			      struct ast_fbdev *afbdev)
+{
+	struct fb_info *info;
+	struct ast_framebuffer *afb = &afbdev->afb;
+	if (afbdev->helper.fbdev) {
+		info = afbdev->helper.fbdev;
+		unregister_framebuffer(info);
+		if (info->cmap.len)
+			fb_dealloc_cmap(&info->cmap);
+		framebuffer_release(info);
+	}
+
+	if (afb->obj) {
+		drm_gem_object_unreference_unlocked(afb->obj);
+		afb->obj = NULL;
+	}
+	drm_fb_helper_fini(&afbdev->helper);
+
+	vfree(afbdev->sysram);
+	drm_framebuffer_cleanup(&afb->base);
+}
+
+int ast_fbdev_init(struct drm_device *dev)
+{
+	struct ast_private *ast = dev->dev_private;
+	struct ast_fbdev *afbdev;
+	int ret;
+
+	afbdev = kzalloc(sizeof(struct ast_fbdev), GFP_KERNEL);
+	if (!afbdev)
+		return -ENOMEM;
+
+	ast->fbdev = afbdev;
+	afbdev->helper.funcs = &ast_fb_helper_funcs;
+	ret = drm_fb_helper_init(dev, &afbdev->helper,
+				 1, 1);
+	if (ret) {
+		kfree(afbdev);
+		return ret;
+	}
+
+	drm_fb_helper_single_add_all_connectors(&afbdev->helper);
+	drm_fb_helper_initial_config(&afbdev->helper, 32);
+	return 0;
+}
+
+void ast_fbdev_fini(struct drm_device *dev)
+{
+	struct ast_private *ast = dev->dev_private;
+
+	if (!ast->fbdev)
+		return;
+
+	ast_fbdev_destroy(dev, ast->fbdev);
+	kfree(ast->fbdev);
+	ast->fbdev = NULL;
+}
+
+void ast_fbdev_set_suspend(struct drm_device *dev, int state)
+{
+	struct ast_private *ast = dev->dev_private;
+
+	if (!ast->fbdev)
+		return;
+
+	fb_set_suspend(ast->fbdev->helper.fbdev, state);
+}
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
new file mode 100644
index 000000000000..95ae55b8214b
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -0,0 +1,527 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the
6 * "Software"), to deal in the Software without restriction, including
7 * without limitation the rights to use, copy, modify, merge, publish,
8 * distribute, sub license, and/or sell copies of the Software, and to
9 * permit persons to whom the Software is furnished to do so, subject to
10 * the following conditions:
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
13 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
15 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
16 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
17 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
18 * USE OR OTHER DEALINGS IN THE SOFTWARE.
19 *
20 * The above copyright notice and this permission notice (including the
21 * next paragraph) shall be included in all copies or substantial portions
22 * of the Software.
23 *
24 */
25/*
26 * Authors: Dave Airlie <airlied@redhat.com>
27 */
28#include "drmP.h"
29#include "ast_drv.h"
30
31
32#include "drm_fb_helper.h"
33#include "drm_crtc_helper.h"
34
35#include "ast_dram_tables.h"
36
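/*
 * VGA-style indexed register helpers: write the index byte to @base and
 * access the value at @base + 1.  The set _mask variant keeps the register
 * bits selected by @mask and ORs in @val; the get _mask variant returns
 * the register value masked by @mask.
 */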
37void ast_set_index_reg_mask(struct ast_private *ast,
38 uint32_t base, uint8_t index,
39 uint8_t mask, uint8_t val)
40{
41 u8 tmp;
42 ast_io_write8(ast, base, index);
43 tmp = (ast_io_read8(ast, base + 1) & mask) | val;
44 ast_set_index_reg(ast, base, index, tmp);
45}
46
47uint8_t ast_get_index_reg(struct ast_private *ast,
48 uint32_t base, uint8_t index)
49{
50 uint8_t ret;
51 ast_io_write8(ast, base, index);
52 ret = ast_io_read8(ast, base + 1);
53 return ret;
54}
55
56uint8_t ast_get_index_reg_mask(struct ast_private *ast,
57 uint32_t base, uint8_t index, uint8_t mask)
58{
59 uint8_t ret;
60 ast_io_write8(ast, base, index);
61 ret = ast_io_read8(ast, base + 1) & mask;
62 return ret;
63}
64
65
66static int ast_detect_chip(struct drm_device *dev)
67{
68 struct ast_private *ast = dev->dev_private;
69
70 if (dev->pdev->device == PCI_CHIP_AST1180) {
71 ast->chip = AST1180;
72 DRM_INFO("AST 1180 detected\n");
73 } else {
74 if (dev->pdev->revision >= 0x20) {
75 ast->chip = AST2300;
76 DRM_INFO("AST 2300 detected\n");
77 } else if (dev->pdev->revision >= 0x10) {
78 uint32_t data;
79 ast_write32(ast, 0xf004, 0x1e6e0000);
80 ast_write32(ast, 0xf000, 0x1);
81
82 data = ast_read32(ast, 0x1207c);
83 switch (data & 0x0300) {
84 case 0x0200:
85 ast->chip = AST1100;
86 DRM_INFO("AST 1100 detected\n");
87 break;
88 case 0x0100:
89 ast->chip = AST2200;
90 DRM_INFO("AST 2200 detected\n");
91 break;
92 case 0x0000:
93 ast->chip = AST2150;
94 DRM_INFO("AST 2150 detected\n");
95 break;
96 default:
97 ast->chip = AST2100;
98 DRM_INFO("AST 2100 detected\n");
99 break;
100 }
101 ast->vga2_clone = false;
102 } else {
103 ast->chip = AST2000;
104 DRM_INFO("AST 2000 detected\n");
105 }
106 }
107 return 0;
108}
109
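/*
 * Query the SOC memory controller through the 0xf004/0xf000 indirection
 * window: derive DRAM bus width and device type from the configuration
 * register, then reconstruct the memory clock from the PLL's reference
 * clock, numerator, denominator and post-divider fields.
 */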
110static int ast_get_dram_info(struct drm_device *dev)
111{
112 struct ast_private *ast = dev->dev_private;
113 uint32_t data, data2;
114 uint32_t denum, num, div, ref_pll;
115
116 ast_write32(ast, 0xf004, 0x1e6e0000);
117 ast_write32(ast, 0xf000, 0x1);
118
119
120 ast_write32(ast, 0x10000, 0xfc600309);
121
122 do {
123 ;
124 } while (ast_read32(ast, 0x10000) != 0x01);
125 data = ast_read32(ast, 0x10004);
126
127 if (data & 0x400)
128 ast->dram_bus_width = 16;
129 else
130 ast->dram_bus_width = 32;
131
132 if (ast->chip == AST2300) {
133 switch (data & 0x03) {
134 case 0:
135 ast->dram_type = AST_DRAM_512Mx16;
136 break;
137 default:
138 case 1:
139 ast->dram_type = AST_DRAM_1Gx16;
140 break;
141 case 2:
142 ast->dram_type = AST_DRAM_2Gx16;
143 break;
144 case 3:
145 ast->dram_type = AST_DRAM_4Gx16;
146 break;
147 }
148 } else {
149 switch (data & 0x0c) {
150 case 0:
151 case 4:
152 ast->dram_type = AST_DRAM_512Mx16;
153 break;
154 case 8:
155 if (data & 0x40)
156 ast->dram_type = AST_DRAM_1Gx16;
157 else
158 ast->dram_type = AST_DRAM_512Mx32;
159 break;
160 case 0xc:
161 ast->dram_type = AST_DRAM_1Gx32;
162 break;
163 }
164 }
165
166 data = ast_read32(ast, 0x10120);
167 data2 = ast_read32(ast, 0x10170);
168 if (data2 & 0x2000)
169 ref_pll = 14318;
170 else
171 ref_pll = 12000;
172
173 denum = data & 0x1f;
174 num = (data & 0x3fe0) >> 5;
175 data = (data & 0xc000) >> 14;
176 switch (data) {
177 case 3:
178 div = 0x4;
179 break;
180 case 2:
181 case 1:
182 div = 0x2;
183 break;
184 default:
185 div = 0x1;
186 break;
187 }
188 ast->mclk = ref_pll * (num + 2) / ((denum + 2) * (div * 1000));
189 return 0;
190}
191
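/*
 * Derive the maximum supportable pixel clock from memory bandwidth:
 * raw bandwidth is mclk * bus-width * 2 (DDR) / 8 bytes, derated by a
 * per-mille efficiency factor, then divided by the bytes per pixel;
 * the result is clamped to 200 MHz on the newer chips, 165 MHz otherwise.
 */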
192uint32_t ast_get_max_dclk(struct drm_device *dev, int bpp)
193{
194 struct ast_private *ast = dev->dev_private;
195 uint32_t dclk, jreg;
196 uint32_t dram_bus_width, mclk, dram_bandwidth, actual_dram_bandwidth, dram_efficiency = 500;
197
198 dram_bus_width = ast->dram_bus_width;
199 mclk = ast->mclk;
200
201 if (ast->chip == AST2100 ||
202 ast->chip == AST1100 ||
203 ast->chip == AST2200 ||
204 ast->chip == AST2150 ||
205 ast->dram_bus_width == 16)
206 dram_efficiency = 600;
207 else if (ast->chip == AST2300)
208 dram_efficiency = 400;
209
210 dram_bandwidth = mclk * dram_bus_width * 2 / 8;
211 actual_dram_bandwidth = dram_bandwidth * dram_efficiency / 1000;
212
213 if (ast->chip == AST1180)
214 dclk = actual_dram_bandwidth / ((bpp + 1) / 8);
215 else {
216 jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
217 if ((jreg & 0x08) && (ast->chip == AST2000))
218 dclk = actual_dram_bandwidth / ((bpp + 1 + 16) / 8);
219 else if ((jreg & 0x08) && (bpp == 8))
220 dclk = actual_dram_bandwidth / ((bpp + 1 + 24) / 8);
221 else
222 dclk = actual_dram_bandwidth / ((bpp + 1) / 8);
223 }
224
225 if (ast->chip == AST2100 ||
226 ast->chip == AST2200 ||
227 ast->chip == AST2300 ||
228 ast->chip == AST1180) {
229 if (dclk > 200)
230 dclk = 200;
231 } else {
232 if (dclk > 165)
233 dclk = 165;
234 }
235
236 return dclk;
237}
238
239static void ast_user_framebuffer_destroy(struct drm_framebuffer *fb)
240{
241 struct ast_framebuffer *ast_fb = to_ast_framebuffer(fb);
242 if (ast_fb->obj)
243 drm_gem_object_unreference_unlocked(ast_fb->obj);
244
245 drm_framebuffer_cleanup(fb);
246 kfree(fb);
247}
248
249static int ast_user_framebuffer_create_handle(struct drm_framebuffer *fb,
250 struct drm_file *file,
251 unsigned int *handle)
252{
253 return -EINVAL;
254}
255
256static const struct drm_framebuffer_funcs ast_fb_funcs = {
257 .destroy = ast_user_framebuffer_destroy,
258 .create_handle = ast_user_framebuffer_create_handle,
259};
260
261
262int ast_framebuffer_init(struct drm_device *dev,
263 struct ast_framebuffer *ast_fb,
264 struct drm_mode_fb_cmd2 *mode_cmd,
265 struct drm_gem_object *obj)
266{
267 int ret;
268
269 ret = drm_framebuffer_init(dev, &ast_fb->base, &ast_fb_funcs);
270 if (ret) {
271 DRM_ERROR("framebuffer init failed %d\n", ret);
272 return ret;
273 }
274 drm_helper_mode_fill_fb_struct(&ast_fb->base, mode_cmd);
275 ast_fb->obj = obj;
276 return 0;
277}
278
279static struct drm_framebuffer *
280ast_user_framebuffer_create(struct drm_device *dev,
281 struct drm_file *filp,
282 struct drm_mode_fb_cmd2 *mode_cmd)
283{
284 struct drm_gem_object *obj;
285 struct ast_framebuffer *ast_fb;
286 int ret;
287
288 obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
289 if (obj == NULL)
290 return ERR_PTR(-ENOENT);
291
292 ast_fb = kzalloc(sizeof(*ast_fb), GFP_KERNEL);
293 if (!ast_fb) {
294 drm_gem_object_unreference_unlocked(obj);
295 return ERR_PTR(-ENOMEM);
296 }
297
298 ret = ast_framebuffer_init(dev, ast_fb, mode_cmd, obj);
299 if (ret) {
300 drm_gem_object_unreference_unlocked(obj);
301 kfree(ast_fb);
302 return ERR_PTR(ret);
303 }
304 return &ast_fb->base;
305}
306
307static const struct drm_mode_config_funcs ast_mode_funcs = {
308 .fb_create = ast_user_framebuffer_create,
309};
310
311static u32 ast_get_vram_info(struct drm_device *dev)
312{
313 struct ast_private *ast = dev->dev_private;
314 u8 jreg;
315
316 ast_open_key(ast);
317
318 jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xaa, 0xff);
319 switch (jreg & 3) {
320 case 0: return AST_VIDMEM_SIZE_8M;
321 case 1: return AST_VIDMEM_SIZE_16M;
322 case 2: return AST_VIDMEM_SIZE_32M;
323 case 3: return AST_VIDMEM_SIZE_64M;
324 }
325 return AST_VIDMEM_DEFAULT_SIZE;
326}
327
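/*
 * Driver load: map the MMIO (BAR 1) and I/O-register (BAR 2) apertures,
 * identify the chip and its DRAM/VRAM configuration, bring up TTM memory
 * management, then register the modesetting objects and fbdev console.
 */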
328int ast_driver_load(struct drm_device *dev, unsigned long flags)
329{
330 struct ast_private *ast;
331 int ret = 0;
332
333 ast = kzalloc(sizeof(struct ast_private), GFP_KERNEL);
334 if (!ast)
335 return -ENOMEM;
336
337 dev->dev_private = ast;
338 ast->dev = dev;
339
340 ast->regs = pci_iomap(dev->pdev, 1, 0);
341 if (!ast->regs) {
342 ret = -EIO;
343 goto out_free;
344 }
345 ast->ioregs = pci_iomap(dev->pdev, 2, 0);
346 if (!ast->ioregs) {
347 ret = -EIO;
348 goto out_free;
349 }
350
351 ast_detect_chip(dev);
352
353 if (ast->chip != AST1180) {
354 ast_get_dram_info(dev);
355 ast->vram_size = ast_get_vram_info(dev);
356 DRM_INFO("dram %d %d %d %08x\n", ast->mclk, ast->dram_type, ast->dram_bus_width, ast->vram_size);
357 }
358
359 ret = ast_mm_init(ast);
360 if (ret)
361 goto out_free;
362
363 drm_mode_config_init(dev);
364
365 dev->mode_config.funcs = (void *)&ast_mode_funcs;
366 dev->mode_config.min_width = 0;
367 dev->mode_config.min_height = 0;
368 dev->mode_config.preferred_depth = 24;
369 dev->mode_config.prefer_shadow = 1;
370
371 if (ast->chip == AST2100 ||
372 ast->chip == AST2200 ||
373 ast->chip == AST2300 ||
374 ast->chip == AST1180) {
375 dev->mode_config.max_width = 1920;
376 dev->mode_config.max_height = 2048;
377 } else {
378 dev->mode_config.max_width = 1600;
379 dev->mode_config.max_height = 1200;
380 }
381
382 ret = ast_mode_init(dev);
383 if (ret)
384 goto out_free;
385
386 ret = ast_fbdev_init(dev);
387 if (ret)
388 goto out_free;
389
390 return 0;
391out_free:
392 kfree(ast);
393 dev->dev_private = NULL;
394 return ret;
395}
396
397int ast_driver_unload(struct drm_device *dev)
398{
399 struct ast_private *ast = dev->dev_private;
400
401 ast_mode_fini(dev);
402 ast_fbdev_fini(dev);
403 drm_mode_config_cleanup(dev);
404
405 ast_mm_fini(ast);
406 pci_iounmap(dev->pdev, ast->ioregs);
407 pci_iounmap(dev->pdev, ast->regs);
408 kfree(ast);
409 return 0;
410}
411
412int ast_gem_create(struct drm_device *dev,
413 u32 size, bool iskernel,
414 struct drm_gem_object **obj)
415{
416 struct ast_bo *astbo;
417 int ret;
418
419 *obj = NULL;
420
421 size = roundup(size, PAGE_SIZE);
422 if (size == 0)
423 return -EINVAL;
424
425 ret = ast_bo_create(dev, size, 0, 0, &astbo);
426 if (ret) {
427 if (ret != -ERESTARTSYS)
428 DRM_ERROR("failed to allocate GEM object\n");
429 return ret;
430 }
431 *obj = &astbo->gem;
432 return 0;
433}
434
435int ast_dumb_create(struct drm_file *file,
436 struct drm_device *dev,
437 struct drm_mode_create_dumb *args)
438{
439 int ret;
440 struct drm_gem_object *gobj;
441 u32 handle;
442
443 args->pitch = args->width * ((args->bpp + 7) / 8);
444 args->size = args->pitch * args->height;
445
446 ret = ast_gem_create(dev, args->size, false,
447 &gobj);
448 if (ret)
449 return ret;
450
451 ret = drm_gem_handle_create(file, gobj, &handle);
452 drm_gem_object_unreference_unlocked(gobj);
453 if (ret)
454 return ret;
455
456 args->handle = handle;
457 return 0;
458}
459
460int ast_dumb_destroy(struct drm_file *file,
461 struct drm_device *dev,
462 uint32_t handle)
463{
464 return drm_gem_handle_delete(file, handle);
465}
466
467int ast_gem_init_object(struct drm_gem_object *obj)
468{
469 BUG();
470 return 0;
471}
472
473void ast_bo_unref(struct ast_bo **bo)
474{
475 struct ttm_buffer_object *tbo;
476
477 if ((*bo) == NULL)
478 return;
479
480 tbo = &((*bo)->bo);
481 ttm_bo_unref(&tbo);
482 if (tbo == NULL)
483 *bo = NULL;
484}
485
486void ast_gem_free_object(struct drm_gem_object *obj)
487{
488 struct ast_bo *ast_bo = gem_to_ast_bo(obj);
489
490 if (!ast_bo)
491 return;
492 ast_bo_unref(&ast_bo);
493}
494
495
496static inline u64 ast_bo_mmap_offset(struct ast_bo *bo)
497{
498 return bo->bo.addr_space_offset;
499}
500int
501ast_dumb_mmap_offset(struct drm_file *file,
502 struct drm_device *dev,
503 uint32_t handle,
504 uint64_t *offset)
505{
506 struct drm_gem_object *obj;
507 int ret;
508 struct ast_bo *bo;
509
510 mutex_lock(&dev->struct_mutex);
511 obj = drm_gem_object_lookup(dev, file, handle);
512 if (obj == NULL) {
513 ret = -ENOENT;
514 goto out_unlock;
515 }
516
517 bo = gem_to_ast_bo(obj);
518 *offset = ast_bo_mmap_offset(bo);
519
520 drm_gem_object_unreference(obj);
521 ret = 0;
522out_unlock:
523 mutex_unlock(&dev->struct_mutex);
524 return ret;
525}
527
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
new file mode 100644
index 000000000000..65f9d231af14
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -0,0 +1,1160 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 * Parts based on xf86-video-ast
4 * Copyright (c) 2005 ASPEED Technology Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
18 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
19 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
20 * USE OR OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * The above copyright notice and this permission notice (including the
23 * next paragraph) shall be included in all copies or substantial portions
24 * of the Software.
25 *
26 */
27/*
28 * Authors: Dave Airlie <airlied@redhat.com>
29 */
30#include <linux/export.h>
31#include "drmP.h"
32#include "drm_crtc.h"
33#include "drm_crtc_helper.h"
34#include "ast_drv.h"
35
36#include "ast_tables.h"
37
38static struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev);
39static void ast_i2c_destroy(struct ast_i2c_chan *i2c);
40static int ast_cursor_set(struct drm_crtc *crtc,
41 struct drm_file *file_priv,
42 uint32_t handle,
43 uint32_t width,
44 uint32_t height);
45static int ast_cursor_move(struct drm_crtc *crtc,
46 int x, int y);
47
48static inline void ast_load_palette_index(struct ast_private *ast,
49 u8 index, u8 red, u8 green,
50 u8 blue)
51{
52 ast_io_write8(ast, AST_IO_DAC_INDEX_WRITE, index);
53 ast_io_read8(ast, AST_IO_SEQ_PORT);
54 ast_io_write8(ast, AST_IO_DAC_DATA, red);
55 ast_io_read8(ast, AST_IO_SEQ_PORT);
56 ast_io_write8(ast, AST_IO_DAC_DATA, green);
57 ast_io_read8(ast, AST_IO_SEQ_PORT);
58 ast_io_write8(ast, AST_IO_DAC_DATA, blue);
59 ast_io_read8(ast, AST_IO_SEQ_PORT);
60}
61
62static void ast_crtc_load_lut(struct drm_crtc *crtc)
63{
64 struct ast_private *ast = crtc->dev->dev_private;
65 struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
66 int i;
67
68 if (!crtc->enabled)
69 return;
70
71 for (i = 0; i < 256; i++)
72 ast_load_palette_index(ast, i, ast_crtc->lut_r[i],
73 ast_crtc->lut_g[i], ast_crtc->lut_b[i]);
74}
75
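/*
 * Match the requested mode against the tables inherited from the
 * xf86-video-ast DDX: pick a standard VGA table by depth and an enhanced
 * timing table by resolution, snap the refresh rate to the closest table
 * entry, and rewrite adjusted_mode's crtc_* timings from that entry.
 * The chosen mode is also mirrored into scratch registers 0x8c-0x97.
 */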
76static bool ast_get_vbios_mode_info(struct drm_crtc *crtc, struct drm_display_mode *mode,
77 struct drm_display_mode *adjusted_mode,
78 struct ast_vbios_mode_info *vbios_mode)
79{
80 struct ast_private *ast = crtc->dev->dev_private;
81 u32 refresh_rate_index = 0, mode_id, color_index, refresh_rate;
82 u32 hborder, vborder;
83
84 switch (crtc->fb->bits_per_pixel) {
85 case 8:
86 vbios_mode->std_table = &vbios_stdtable[VGAModeIndex];
87 color_index = VGAModeIndex - 1;
88 break;
89 case 16:
90 vbios_mode->std_table = &vbios_stdtable[HiCModeIndex];
91 color_index = HiCModeIndex;
92 break;
93 case 24:
94 case 32:
95 vbios_mode->std_table = &vbios_stdtable[TrueCModeIndex];
96 color_index = TrueCModeIndex;
97 break;
98 default:
99 return false;
100 }
101
102 switch (crtc->mode.crtc_hdisplay) {
103 case 640:
104 vbios_mode->enh_table = &res_640x480[refresh_rate_index];
105 break;
106 case 800:
107 vbios_mode->enh_table = &res_800x600[refresh_rate_index];
108 break;
109 case 1024:
110 vbios_mode->enh_table = &res_1024x768[refresh_rate_index];
111 break;
112 case 1280:
113 if (crtc->mode.crtc_vdisplay == 800)
114 vbios_mode->enh_table = &res_1280x800[refresh_rate_index];
115 else
116 vbios_mode->enh_table = &res_1280x1024[refresh_rate_index];
117 break;
118 case 1440:
119 vbios_mode->enh_table = &res_1440x900[refresh_rate_index];
120 break;
121 case 1600:
122 vbios_mode->enh_table = &res_1600x1200[refresh_rate_index];
123 break;
124 case 1680:
125 vbios_mode->enh_table = &res_1680x1050[refresh_rate_index];
126 break;
127 case 1920:
128 if (crtc->mode.crtc_vdisplay == 1080)
129 vbios_mode->enh_table = &res_1920x1080[refresh_rate_index];
130 else
131 vbios_mode->enh_table = &res_1920x1200[refresh_rate_index];
132 break;
133 default:
134 return false;
135 }
136
137 refresh_rate = drm_mode_vrefresh(mode);
138 while (vbios_mode->enh_table->refresh_rate < refresh_rate) {
139 vbios_mode->enh_table++;
140 if ((vbios_mode->enh_table->refresh_rate > refresh_rate) ||
141 (vbios_mode->enh_table->refresh_rate == 0xff)) {
142 vbios_mode->enh_table--;
143 break;
144 }
145 }
146
147 hborder = (vbios_mode->enh_table->flags & HBorder) ? 8 : 0;
148 vborder = (vbios_mode->enh_table->flags & VBorder) ? 8 : 0;
149
150 adjusted_mode->crtc_htotal = vbios_mode->enh_table->ht;
151 adjusted_mode->crtc_hblank_start = vbios_mode->enh_table->hde + hborder;
152 adjusted_mode->crtc_hblank_end = vbios_mode->enh_table->ht - hborder;
153 adjusted_mode->crtc_hsync_start = vbios_mode->enh_table->hde + hborder +
154 vbios_mode->enh_table->hfp;
155 adjusted_mode->crtc_hsync_end = (vbios_mode->enh_table->hde + hborder +
156 vbios_mode->enh_table->hfp +
157 vbios_mode->enh_table->hsync);
158
159 adjusted_mode->crtc_vtotal = vbios_mode->enh_table->vt;
160 adjusted_mode->crtc_vblank_start = vbios_mode->enh_table->vde + vborder;
161 adjusted_mode->crtc_vblank_end = vbios_mode->enh_table->vt - vborder;
162 adjusted_mode->crtc_vsync_start = vbios_mode->enh_table->vde + vborder +
163 vbios_mode->enh_table->vfp;
164 adjusted_mode->crtc_vsync_end = (vbios_mode->enh_table->vde + vborder +
165 vbios_mode->enh_table->vfp +
166 vbios_mode->enh_table->vsync);
167
168 refresh_rate_index = vbios_mode->enh_table->refresh_rate_index;
169 mode_id = vbios_mode->enh_table->mode_id;
170
171 if (ast->chip == AST1180) {
172 /* TODO 1180 */
173 } else {
174 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8c, (u8)((color_index & 0xf) << 4));
175 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8d, refresh_rate_index & 0xff);
176 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8e, mode_id & 0xff);
177
178 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0xa8);
179 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x92, crtc->fb->bits_per_pixel);
180 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x93, adjusted_mode->clock / 1000);
181 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x94, adjusted_mode->crtc_hdisplay);
182 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x95, adjusted_mode->crtc_hdisplay >> 8);
183
184 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x96, adjusted_mode->crtc_vdisplay);
185 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x97, adjusted_mode->crtc_vdisplay >> 8);
186 }
187
188 return true;
189}
190
191static void ast_set_std_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
193 struct ast_vbios_mode_info *vbios_mode)
194{
195 struct ast_private *ast = crtc->dev->dev_private;
196 struct ast_vbios_stdtable *stdtable;
197 u32 i;
198 u8 jreg;
199
200 stdtable = vbios_mode->std_table;
201
202 jreg = stdtable->misc;
203 ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, jreg);
204
205 /* Set SEQ */
206 ast_set_index_reg(ast, AST_IO_SEQ_PORT, 0x00, 0x03);
207 for (i = 0; i < 4; i++) {
208 jreg = stdtable->seq[i];
209 if (!i)
210 jreg |= 0x20;
211 ast_set_index_reg(ast, AST_IO_SEQ_PORT, (i + 1) , jreg);
212 }
213
214 /* Set CRTC */
215 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x7f, 0x00);
216 for (i = 0; i < 25; i++)
217 ast_set_index_reg(ast, AST_IO_CRTC_PORT, i, stdtable->crtc[i]);
218
219 /* set AR */
220 jreg = ast_io_read8(ast, AST_IO_INPUT_STATUS1_READ);
221 for (i = 0; i < 20; i++) {
222 jreg = stdtable->ar[i];
223 ast_io_write8(ast, AST_IO_AR_PORT_WRITE, (u8)i);
224 ast_io_write8(ast, AST_IO_AR_PORT_WRITE, jreg);
225 }
226 ast_io_write8(ast, AST_IO_AR_PORT_WRITE, 0x14);
227 ast_io_write8(ast, AST_IO_AR_PORT_WRITE, 0x00);
228
229 jreg = ast_io_read8(ast, AST_IO_INPUT_STATUS1_READ);
230 ast_io_write8(ast, AST_IO_AR_PORT_WRITE, 0x20);
231
232 /* Set GR */
233 for (i = 0; i < 9; i++)
234 ast_set_index_reg(ast, AST_IO_GR_PORT, i, stdtable->gr[i]);
235}
236
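/*
 * Program the standard CRTC timing registers.  Timing values wider than
 * the classic VGA fields have their overflow bits collected in
 * jregAC/jregAD/jregAE and written to the AST extended registers
 * 0xAC/0xAD/0xAE; CRTC 0x11 is unlocked first and re-locked (bit 7 set)
 * once the timing is in place.
 */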
237static void ast_set_crtc_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
238 struct ast_vbios_mode_info *vbios_mode)
239{
240 struct ast_private *ast = crtc->dev->dev_private;
241 u8 jreg05 = 0, jreg07 = 0, jreg09 = 0, jregAC = 0, jregAD = 0, jregAE = 0;
242 u16 temp;
243
244 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x7f, 0x00);
245
246 temp = (mode->crtc_htotal >> 3) - 5;
247 if (temp & 0x100)
248 jregAC |= 0x01; /* HT D[8] */
249 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x00, 0x00, temp);
250
251 temp = (mode->crtc_hdisplay >> 3) - 1;
252 if (temp & 0x100)
253 jregAC |= 0x04; /* HDE D[8] */
254 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x01, 0x00, temp);
255
256 temp = (mode->crtc_hblank_start >> 3) - 1;
257 if (temp & 0x100)
258 jregAC |= 0x10; /* HBS D[8] */
259 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x02, 0x00, temp);
260
261 temp = ((mode->crtc_hblank_end >> 3) - 1) & 0x7f;
262 if (temp & 0x20)
263 jreg05 |= 0x80; /* HBE D[5] */
264 if (temp & 0x40)
265 jregAD |= 0x01; /* HBE D[5] */
266 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x03, 0xE0, (temp & 0x1f));
267
268 temp = (mode->crtc_hsync_start >> 3) - 1;
269 if (temp & 0x100)
270 jregAC |= 0x40; /* HRS D[5] */
271 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x04, 0x00, temp);
272
273 temp = ((mode->crtc_hsync_end >> 3) - 1) & 0x3f;
274 if (temp & 0x20)
275 jregAD |= 0x04; /* HRE D[5] */
276 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x05, 0x60, (u8)((temp & 0x1f) | jreg05));
277
278 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xAC, 0x00, jregAC);
279 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xAD, 0x00, jregAD);
280
281 /* vert timings */
282 temp = (mode->crtc_vtotal) - 2;
283 if (temp & 0x100)
284 jreg07 |= 0x01;
285 if (temp & 0x200)
286 jreg07 |= 0x20;
287 if (temp & 0x400)
288 jregAE |= 0x01;
289 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x06, 0x00, temp);
290
291 temp = (mode->crtc_vsync_start) - 1;
292 if (temp & 0x100)
293 jreg07 |= 0x04;
294 if (temp & 0x200)
295 jreg07 |= 0x80;
296 if (temp & 0x400)
297 jregAE |= 0x08;
298 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x10, 0x00, temp);
299
300 temp = (mode->crtc_vsync_end - 1) & 0x3f;
301 if (temp & 0x10)
302 jregAE |= 0x20;
303 if (temp & 0x20)
304 jregAE |= 0x40;
305 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x70, temp & 0xf);
306
307 temp = mode->crtc_vdisplay - 1;
308 if (temp & 0x100)
309 jreg07 |= 0x02;
310 if (temp & 0x200)
311 jreg07 |= 0x40;
312 if (temp & 0x400)
313 jregAE |= 0x02;
314 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x12, 0x00, temp);
315
316 temp = mode->crtc_vblank_start - 1;
317 if (temp & 0x100)
318 jreg07 |= 0x08;
319 if (temp & 0x200)
320 jreg09 |= 0x20;
321 if (temp & 0x400)
322 jregAE |= 0x04;
323 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x15, 0x00, temp);
324
325 temp = mode->crtc_vblank_end - 1;
326 if (temp & 0x100)
327 jregAE |= 0x10;
328 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x16, 0x00, temp);
329
330 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x07, 0x00, jreg07);
331 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x09, 0xdf, jreg09);
332 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xAE, 0x00, (jregAE | 0x80));
333
334 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x7f, 0x80);
335}
336
337static void ast_set_offset_reg(struct drm_crtc *crtc)
338{
339 struct ast_private *ast = crtc->dev->dev_private;
340
341 u16 offset;
342
343 offset = crtc->fb->pitches[0] >> 3;
344 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x13, (offset & 0xff));
345 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xb0, (offset >> 8) & 0x3f);
346}
347
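/*
 * Program the video clock: the enhanced mode table supplies an index
 * into dclk_table, whose three PLL parameters land in CRTC 0xc0, 0xc1
 * and 0xbb.
 */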
348static void ast_set_dclk_reg(struct drm_device *dev, struct drm_display_mode *mode,
349 struct ast_vbios_mode_info *vbios_mode)
350{
351 struct ast_private *ast = dev->dev_private;
352 struct ast_vbios_dclk_info *clk_info;
353
354 clk_info = &dclk_table[vbios_mode->enh_table->dclk_index];
355
356 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xc0, 0x00, clk_info->param1);
357 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xc1, 0x00, clk_info->param2);
358 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xbb, 0x0f,
359 (clk_info->param3 & 0x80) | ((clk_info->param3 & 0x3) << 4));
360}
361
362static void ast_set_ext_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
363 struct ast_vbios_mode_info *vbios_mode)
364{
365 struct ast_private *ast = crtc->dev->dev_private;
366 u8 jregA0 = 0, jregA3 = 0, jregA8 = 0;
367
368 switch (crtc->fb->bits_per_pixel) {
369 case 8:
370 jregA0 = 0x70;
371 jregA3 = 0x01;
372 jregA8 = 0x00;
373 break;
374 case 15:
375 case 16:
376 jregA0 = 0x70;
377 jregA3 = 0x04;
378 jregA8 = 0x02;
379 break;
380 case 32:
381 jregA0 = 0x70;
382 jregA3 = 0x08;
383 jregA8 = 0x02;
384 break;
385 }
386
387 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa0, 0x8f, jregA0);
388 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xf0, jregA3);
389 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa8, 0xfd, jregA8);
390
391 /* Set Threshold */
392 if (ast->chip == AST2300) {
393 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0x78);
394 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0x60);
395 } else if (ast->chip == AST2100 ||
396 ast->chip == AST1100 ||
397 ast->chip == AST2200 ||
398 ast->chip == AST2150) {
399 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0x3f);
400 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0x2f);
401 } else {
402 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0x2f);
403 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0x1f);
404 }
405}
406
407void ast_set_sync_reg(struct drm_device *dev, struct drm_display_mode *mode,
408 struct ast_vbios_mode_info *vbios_mode)
409{
410 struct ast_private *ast = dev->dev_private;
411 u8 jreg;
412
413 jreg = ast_io_read8(ast, AST_IO_MISC_PORT_READ);
414 jreg |= (vbios_mode->enh_table->flags & SyncNN);
415 ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, jreg);
416}
417
418bool ast_set_dac_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
419 struct ast_vbios_mode_info *vbios_mode)
420{
421 switch (crtc->fb->bits_per_pixel) {
422 case 8:
423 break;
424 default:
425 return false;
426 }
427 return true;
428}
429
430void ast_set_start_address_crt1(struct drm_crtc *crtc, unsigned offset)
431{
432 struct ast_private *ast = crtc->dev->dev_private;
433 u32 addr;
434
435 addr = offset >> 2;
436 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x0d, (u8)(addr & 0xff));
437 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x0c, (u8)((addr >> 8) & 0xff));
438 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xaf, (u8)((addr >> 16) & 0xff));
439}
441
442static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
443{
444 struct ast_private *ast = crtc->dev->dev_private;
445
446 if (ast->chip == AST1180)
447 return;
448
449 switch (mode) {
450 case DRM_MODE_DPMS_ON:
451 case DRM_MODE_DPMS_STANDBY:
452 case DRM_MODE_DPMS_SUSPEND:
453 ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0);
454 ast_crtc_load_lut(crtc);
455 break;
456 case DRM_MODE_DPMS_OFF:
457 ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0x20);
458 break;
459 }
460}
461
462static bool ast_crtc_mode_fixup(struct drm_crtc *crtc,
463 struct drm_display_mode *mode,
464 struct drm_display_mode *adjusted_mode)
465{
466 return true;
467}
468
469/*
 * ast is different - we will force move buffers out of VRAM: the old
 * scanout bo is pushed back to system RAM before the new one is pinned,
 * presumably because VRAM on these server chips is too small to keep
 * every framebuffer resident at once.
 */
470static int ast_crtc_do_set_base(struct drm_crtc *crtc,
471 struct drm_framebuffer *fb,
472 int x, int y, int atomic)
473{
474 struct ast_private *ast = crtc->dev->dev_private;
475 struct drm_gem_object *obj;
476 struct ast_framebuffer *ast_fb;
477 struct ast_bo *bo;
478 int ret;
479 u64 gpu_addr;
480
481 /* push the previous fb to system ram */
482 if (!atomic && fb) {
483 ast_fb = to_ast_framebuffer(fb);
484 obj = ast_fb->obj;
485 bo = gem_to_ast_bo(obj);
486 ret = ast_bo_reserve(bo, false);
487 if (ret)
488 return ret;
489 ast_bo_push_sysram(bo);
490 ast_bo_unreserve(bo);
491 }
492
493 ast_fb = to_ast_framebuffer(crtc->fb);
494 obj = ast_fb->obj;
495 bo = gem_to_ast_bo(obj);
496
497 ret = ast_bo_reserve(bo, false);
498 if (ret)
499 return ret;
500
501 ret = ast_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
502 if (ret) {
503 ast_bo_unreserve(bo);
504 return ret;
505 }
506
507 if (&ast->fbdev->afb == ast_fb) {
508 /* if this is the fbdev console fb, keep it kmapped for shadow updates */
509 ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
510 if (ret)
511 DRM_ERROR("failed to kmap fbcon\n");
512 }
513 ast_bo_unreserve(bo);
514
515 ast_set_start_address_crt1(crtc, (u32)gpu_addr);
516
517 return 0;
518}
519
520static int ast_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
521 struct drm_framebuffer *old_fb)
522{
523 return ast_crtc_do_set_base(crtc, old_fb, x, y, 0);
524}
525
526static int ast_crtc_mode_set(struct drm_crtc *crtc,
527 struct drm_display_mode *mode,
528 struct drm_display_mode *adjusted_mode,
529 int x, int y,
530 struct drm_framebuffer *old_fb)
531{
532 struct drm_device *dev = crtc->dev;
533 struct ast_private *ast = crtc->dev->dev_private;
534 struct ast_vbios_mode_info vbios_mode;
535 bool ret;
536 if (ast->chip == AST1180) {
537 DRM_ERROR("AST 1180 modesetting not supported\n");
538 return -EINVAL;
539 }
540
541 ret = ast_get_vbios_mode_info(crtc, mode, adjusted_mode, &vbios_mode);
542 if (!ret)
543 return -EINVAL;
544 ast_open_key(ast);
545
546 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x04);
547
548 ast_set_std_reg(crtc, adjusted_mode, &vbios_mode);
549 ast_set_crtc_reg(crtc, adjusted_mode, &vbios_mode);
550 ast_set_offset_reg(crtc);
551 ast_set_dclk_reg(dev, adjusted_mode, &vbios_mode);
552 ast_set_ext_reg(crtc, adjusted_mode, &vbios_mode);
553 ast_set_sync_reg(dev, adjusted_mode, &vbios_mode);
554 ast_set_dac_reg(crtc, adjusted_mode, &vbios_mode);
555
556 ast_crtc_mode_set_base(crtc, x, y, old_fb);
557
558 return 0;
559}
560
561static void ast_crtc_disable(struct drm_crtc *crtc)
562{
563
564}
565
566static void ast_crtc_prepare(struct drm_crtc *crtc)
567{
568
569}
570
571static void ast_crtc_commit(struct drm_crtc *crtc)
572{
573 struct ast_private *ast = crtc->dev->dev_private;
574 ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0);
575}
576
577
578static const struct drm_crtc_helper_funcs ast_crtc_helper_funcs = {
579 .dpms = ast_crtc_dpms,
580 .mode_fixup = ast_crtc_mode_fixup,
581 .mode_set = ast_crtc_mode_set,
582 .mode_set_base = ast_crtc_mode_set_base,
583 .disable = ast_crtc_disable,
584 .load_lut = ast_crtc_load_lut,
585 .prepare = ast_crtc_prepare,
586 .commit = ast_crtc_commit,
587};
590
591static void ast_crtc_reset(struct drm_crtc *crtc)
592{
593
594}
595
596static void ast_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
597 u16 *blue, uint32_t start, uint32_t size)
598{
599 struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
600 int end = (start + size > 256) ? 256 : start + size, i;
601
602 /* userspace palettes are always correct as is */
603 for (i = start; i < end; i++) {
604 ast_crtc->lut_r[i] = red[i] >> 8;
605 ast_crtc->lut_g[i] = green[i] >> 8;
606 ast_crtc->lut_b[i] = blue[i] >> 8;
607 }
608 ast_crtc_load_lut(crtc);
609}
610
611
612static void ast_crtc_destroy(struct drm_crtc *crtc)
613{
614 drm_crtc_cleanup(crtc);
615 kfree(crtc);
616}
617
618static const struct drm_crtc_funcs ast_crtc_funcs = {
619 .cursor_set = ast_cursor_set,
620 .cursor_move = ast_cursor_move,
621 .reset = ast_crtc_reset,
622 .set_config = drm_crtc_helper_set_config,
623 .gamma_set = ast_crtc_gamma_set,
624 .destroy = ast_crtc_destroy,
625};
626
627int ast_crtc_init(struct drm_device *dev)
628{
629 struct ast_crtc *crtc;
630 int i;
631
632 crtc = kzalloc(sizeof(struct ast_crtc), GFP_KERNEL);
633 if (!crtc)
634 return -ENOMEM;
635
636 drm_crtc_init(dev, &crtc->base, &ast_crtc_funcs);
637 drm_mode_crtc_set_gamma_size(&crtc->base, 256);
638 drm_crtc_helper_add(&crtc->base, &ast_crtc_helper_funcs);
639
640 for (i = 0; i < 256; i++) {
641 crtc->lut_r[i] = i;
642 crtc->lut_g[i] = i;
643 crtc->lut_b[i] = i;
644 }
645 return 0;
646}
647
648static void ast_encoder_destroy(struct drm_encoder *encoder)
649{
650 drm_encoder_cleanup(encoder);
651 kfree(encoder);
652}
653
654
655static struct drm_encoder *ast_best_single_encoder(struct drm_connector *connector)
656{
657 int enc_id = connector->encoder_ids[0];
658 struct drm_mode_object *obj;
659 struct drm_encoder *encoder;
660
661 /* pick the encoder ids */
662 if (enc_id) {
663 obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
664 if (!obj)
665 return NULL;
666 encoder = obj_to_encoder(obj);
667 return encoder;
668 }
669 return NULL;
670}
671
672
673static const struct drm_encoder_funcs ast_enc_funcs = {
674 .destroy = ast_encoder_destroy,
675};
676
677static void ast_encoder_dpms(struct drm_encoder *encoder, int mode)
678{
679
680}
681
682static bool ast_mode_fixup(struct drm_encoder *encoder,
683 struct drm_display_mode *mode,
684 struct drm_display_mode *adjusted_mode)
685{
686 return true;
687}
688
689static void ast_encoder_mode_set(struct drm_encoder *encoder,
690 struct drm_display_mode *mode,
691 struct drm_display_mode *adjusted_mode)
692{
693}
694
695static void ast_encoder_prepare(struct drm_encoder *encoder)
696{
697
698}
699
700static void ast_encoder_commit(struct drm_encoder *encoder)
701{
702
703}
704
705
706static const struct drm_encoder_helper_funcs ast_enc_helper_funcs = {
707 .dpms = ast_encoder_dpms,
708 .mode_fixup = ast_mode_fixup,
709 .prepare = ast_encoder_prepare,
710 .commit = ast_encoder_commit,
711 .mode_set = ast_encoder_mode_set,
712};
713
714int ast_encoder_init(struct drm_device *dev)
715{
716 struct ast_encoder *ast_encoder;
717
718 ast_encoder = kzalloc(sizeof(struct ast_encoder), GFP_KERNEL);
719 if (!ast_encoder)
720 return -ENOMEM;
721
722 drm_encoder_init(dev, &ast_encoder->base, &ast_enc_funcs,
723 DRM_MODE_ENCODER_DAC);
724 drm_encoder_helper_add(&ast_encoder->base, &ast_enc_helper_funcs);
725
726 ast_encoder->base.possible_crtcs = 1;
727 return 0;
728}
729
730static int ast_get_modes(struct drm_connector *connector)
731{
732 struct ast_connector *ast_connector = to_ast_connector(connector);
733 struct edid *edid;
734 int ret;
735
736 edid = drm_get_edid(connector, &ast_connector->i2c->adapter);
737 if (edid) {
738 drm_mode_connector_update_edid_property(&ast_connector->base, edid);
739 ret = drm_add_edid_modes(connector, edid);
740 kfree(edid);
741 return ret;
742 }
743 drm_mode_connector_update_edid_property(&ast_connector->base, NULL);
744 return 0;
744}
745
746static int ast_mode_valid(struct drm_connector *connector,
747 struct drm_display_mode *mode)
748{
749 return MODE_OK;
750}
751
752static void ast_connector_destroy(struct drm_connector *connector)
753{
754 struct ast_connector *ast_connector = to_ast_connector(connector);
755 ast_i2c_destroy(ast_connector->i2c);
756 drm_sysfs_connector_remove(connector);
757 drm_connector_cleanup(connector);
758 kfree(connector);
759}
760
761static enum drm_connector_status
762ast_connector_detect(struct drm_connector *connector, bool force)
763{
764 return connector_status_connected;
765}
766
767static const struct drm_connector_helper_funcs ast_connector_helper_funcs = {
768 .mode_valid = ast_mode_valid,
769 .get_modes = ast_get_modes,
770 .best_encoder = ast_best_single_encoder,
771};
772
773static const struct drm_connector_funcs ast_connector_funcs = {
774 .dpms = drm_helper_connector_dpms,
775 .detect = ast_connector_detect,
776 .fill_modes = drm_helper_probe_single_connector_modes,
777 .destroy = ast_connector_destroy,
778};
779
780int ast_connector_init(struct drm_device *dev)
781{
782 struct ast_connector *ast_connector;
783 struct drm_connector *connector;
784 struct drm_encoder *encoder;
785
786 ast_connector = kzalloc(sizeof(struct ast_connector), GFP_KERNEL);
787 if (!ast_connector)
788 return -ENOMEM;
789
790 connector = &ast_connector->base;
791 drm_connector_init(dev, connector, &ast_connector_funcs, DRM_MODE_CONNECTOR_VGA);
792
793 drm_connector_helper_add(connector, &ast_connector_helper_funcs);
794
795 connector->interlace_allowed = 0;
796 connector->doublescan_allowed = 0;
797
798 drm_sysfs_connector_add(connector);
799
800 connector->polled = DRM_CONNECTOR_POLL_CONNECT;
801
802 encoder = list_first_entry(&dev->mode_config.encoder_list, struct drm_encoder, head);
803 drm_mode_connector_attach_encoder(connector, encoder);
804
805 ast_connector->i2c = ast_i2c_create(dev);
806 if (!ast_connector->i2c)
807 DRM_ERROR("failed to add ddc bus for connector\n");
808
809 return 0;
810}
811
812/* allocate cursor cache and pin at start of VRAM */
813int ast_cursor_init(struct drm_device *dev)
814{
815 struct ast_private *ast = dev->dev_private;
816 int size;
817 int ret;
818 struct drm_gem_object *obj;
819 struct ast_bo *bo;
820 uint64_t gpu_addr;
821
822 size = (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE) * AST_DEFAULT_HWC_NUM;
823
824 ret = ast_gem_create(dev, size, true, &obj);
825 if (ret)
826 return ret;
827 bo = gem_to_ast_bo(obj);
828 ret = ast_bo_reserve(bo, false);
829 if (unlikely(ret != 0))
830 goto fail;
831
832 ret = ast_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
833 ast_bo_unreserve(bo);
834 if (ret)
835 goto fail;
836
837 /* kmap the object */
838 ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &ast->cache_kmap);
839 if (ret)
840 goto fail;
841
842 ast->cursor_cache = obj;
843 ast->cursor_cache_gpu_addr = gpu_addr;
844 DRM_DEBUG_KMS("pinned cursor cache at %llx\n", ast->cursor_cache_gpu_addr);
845 return 0;
846fail:
847 return ret;
848}
849
850void ast_cursor_fini(struct drm_device *dev)
851{
852 struct ast_private *ast = dev->dev_private;
853 ttm_bo_kunmap(&ast->cache_kmap);
854 drm_gem_object_unreference_unlocked(ast->cursor_cache);
855}
856
857int ast_mode_init(struct drm_device *dev)
858{
859 ast_cursor_init(dev);
860 ast_crtc_init(dev);
861 ast_encoder_init(dev);
862 ast_connector_init(dev);
863 return 0;
864}
865
866void ast_mode_fini(struct drm_device *dev)
867{
868 ast_cursor_fini(dev);
869}
870
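/*
 * Bit-banged DDC over CRTC register 0xb7: SCL is read back on bit 4 and
 * SDA on bit 5, while the set_* callbacks drive bits 0 and 2 with
 * inverted polarity and poll until the readback matches the requested
 * line state.
 */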
871static int get_clock(void *i2c_priv)
872{
873 struct ast_i2c_chan *i2c = i2c_priv;
874 struct ast_private *ast = i2c->dev->dev_private;
875 uint32_t val;
876
877 val = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4;
878 return val & 1 ? 1 : 0;
879}
880
881static int get_data(void *i2c_priv)
882{
883 struct ast_i2c_chan *i2c = i2c_priv;
884 struct ast_private *ast = i2c->dev->dev_private;
885 uint32_t val;
886
887 val = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5;
888 return val & 1 ? 1 : 0;
889}
890
891static void set_clock(void *i2c_priv, int clock)
892{
893 struct ast_i2c_chan *i2c = i2c_priv;
894 struct ast_private *ast = i2c->dev->dev_private;
895 int i;
896 u8 ujcrb7, jtemp;
897
898 for (i = 0; i < 0x10000; i++) {
899 ujcrb7 = ((clock & 0x01) ? 0 : 1);
900 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xfe, ujcrb7);
901 jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x01);
902 if (ujcrb7 == jtemp)
903 break;
904 }
905}
906
907static void set_data(void *i2c_priv, int data)
908{
909 struct ast_i2c_chan *i2c = i2c_priv;
910 struct ast_private *ast = i2c->dev->dev_private;
911 int i;
912 u8 ujcrb7, jtemp;
913
914 for (i = 0; i < 0x10000; i++) {
915 ujcrb7 = ((data & 0x01) ? 0 : 1) << 2;
916 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xfb, ujcrb7);
917 jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x04);
918 if (ujcrb7 == jtemp)
919 break;
920 }
921}
922
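/*
 * Register the DDC bus as a standard i2c-algo-bit adapter (20us half
 * period) so the connector's EDID can be fetched with drm_get_edid().
 */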
923static struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev)
924{
925 struct ast_i2c_chan *i2c;
926 int ret;
927
928 i2c = kzalloc(sizeof(struct ast_i2c_chan), GFP_KERNEL);
929 if (!i2c)
930 return NULL;
931
932 i2c->adapter.owner = THIS_MODULE;
933 i2c->adapter.class = I2C_CLASS_DDC;
934 i2c->adapter.dev.parent = &dev->pdev->dev;
935 i2c->dev = dev;
936 i2c_set_adapdata(&i2c->adapter, i2c);
937 snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
938 "AST i2c bit bus");
939 i2c->adapter.algo_data = &i2c->bit;
940
941 i2c->bit.udelay = 20;
942 i2c->bit.timeout = 2;
943 i2c->bit.data = i2c;
944 i2c->bit.setsda = set_data;
945 i2c->bit.setscl = set_clock;
946 i2c->bit.getsda = get_data;
947 i2c->bit.getscl = get_clock;
948 ret = i2c_bit_add_bus(&i2c->adapter);
949 if (ret) {
950 DRM_ERROR("Failed to register bit i2c\n");
951 goto out_free;
952 }
953
954 return i2c;
955out_free:
956 kfree(i2c);
957 return NULL;
958}
959
960static void ast_i2c_destroy(struct ast_i2c_chan *i2c)
961{
962 if (!i2c)
963 return;
964 i2c_del_adapter(&i2c->adapter);
965 kfree(i2c);
966}
967
968void ast_show_cursor(struct drm_crtc *crtc)
969{
970 struct ast_private *ast = crtc->dev->dev_private;
971 u8 jreg;
972
973 jreg = 0x2;
974 /* enable ARGB cursor */
975 jreg |= 1;
976 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, jreg);
977}
978
979void ast_hide_cursor(struct drm_crtc *crtc)
980{
981 struct ast_private *ast = crtc->dev->dev_private;
982 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, 0x00);
983}
984
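/*
 * Convert a userspace ARGB8888 cursor image to the hardware's packed
 * 4:4:4:4 layout: each channel keeps its top nibble, two source pixels
 * are packed per 32-bit write, and the image is bottom/right justified
 * within the AST_MAX_HWC_WIDTH x AST_MAX_HWC_HEIGHT slot.  A checksum of
 * the emitted data is returned for the signature block.
 */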
985static u32 copy_cursor_image(u8 *src, u8 *dst, int width, int height)
986{
987 union {
988 u32 ul;
989 u8 b[4];
990 } srcdata32[2], data32;
991 union {
992 u16 us;
993 u8 b[2];
994 } data16;
995 u32 csum = 0;
996 s32 alpha_dst_delta, last_alpha_dst_delta;
997 u8 *srcxor, *dstxor;
998 int i, j;
999 u32 per_pixel_copy, two_pixel_copy;
1000
1001 alpha_dst_delta = AST_MAX_HWC_WIDTH << 1;
1002 last_alpha_dst_delta = alpha_dst_delta - (width << 1);
1003
1004 srcxor = src;
1005 dstxor = (u8 *)dst + last_alpha_dst_delta + (AST_MAX_HWC_HEIGHT - height) * alpha_dst_delta;
1006 per_pixel_copy = width & 1;
1007 two_pixel_copy = width >> 1;
1008
1009 for (j = 0; j < height; j++) {
1010 for (i = 0; i < two_pixel_copy; i++) {
1011 srcdata32[0].ul = *((u32 *)srcxor) & 0xf0f0f0f0;
1012 srcdata32[1].ul = *((u32 *)(srcxor + 4)) & 0xf0f0f0f0;
1013 data32.b[0] = srcdata32[0].b[1] | (srcdata32[0].b[0] >> 4);
1014 data32.b[1] = srcdata32[0].b[3] | (srcdata32[0].b[2] >> 4);
1015 data32.b[2] = srcdata32[1].b[1] | (srcdata32[1].b[0] >> 4);
1016 data32.b[3] = srcdata32[1].b[3] | (srcdata32[1].b[2] >> 4);
1017
1018 writel(data32.ul, dstxor);
1019 csum += data32.ul;
1020
1021 dstxor += 4;
1022 srcxor += 8;
1023
1024 }
1025
1026 for (i = 0; i < per_pixel_copy; i++) {
1027 srcdata32[0].ul = *((u32 *)srcxor) & 0xf0f0f0f0;
1028 data16.b[0] = srcdata32[0].b[1] | (srcdata32[0].b[0] >> 4);
1029 data16.b[1] = srcdata32[0].b[3] | (srcdata32[0].b[2] >> 4);
1030 writew(data16.us, dstxor);
1031 csum += (u32)data16.us;
1032
1033 dstxor += 2;
1034 srcxor += 4;
1035 }
1036 dstxor += last_alpha_dst_delta;
1037 }
1038 return csum;
1039}
1040
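/*
 * Cursor updates rotate through AST_DEFAULT_HWC_NUM slots of the pinned
 * cursor cache (allocated in ast_cursor_init): the converted image plus
 * its checksum/size signature are written into the next slot, then the
 * slot's VRAM offset (>> 3) is latched into CRTC 0xc8-0xca to point the
 * hardware cursor at the new image.
 */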
1041static int ast_cursor_set(struct drm_crtc *crtc,
1042 struct drm_file *file_priv,
1043 uint32_t handle,
1044 uint32_t width,
1045 uint32_t height)
1046{
1047 struct ast_private *ast = crtc->dev->dev_private;
1048 struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
1049 struct drm_gem_object *obj;
1050 struct ast_bo *bo;
1051 uint64_t gpu_addr;
1052 u32 csum;
1053 int ret;
1054 struct ttm_bo_kmap_obj uobj_map;
1055 u8 *src, *dst;
1056 bool src_isiomem, dst_isiomem;
1057 if (!handle) {
1058 ast_hide_cursor(crtc);
1059 return 0;
1060 }
1061
1062 if (width > AST_MAX_HWC_WIDTH || height > AST_MAX_HWC_HEIGHT)
1063 return -EINVAL;
1064
1065 obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
1066 if (!obj) {
1067 DRM_ERROR("Cannot find cursor object %x for crtc\n", handle);
1068 return -ENOENT;
1069 }
1070 bo = gem_to_ast_bo(obj);
1071
1072 ret = ast_bo_reserve(bo, false);
1073 if (ret)
1074 goto fail;
1075
1076 ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &uobj_map);
1077 if (ret) {
1078 ast_bo_unreserve(bo);
1079 goto fail;
1080 }
1081
1082 src = ttm_kmap_obj_virtual(&uobj_map, &src_isiomem);
1083 dst = ttm_kmap_obj_virtual(&ast->cache_kmap, &dst_isiomem);
1084
1085 if (src_isiomem)
1086 DRM_ERROR("src cursor bo should be in main memory\n");
1087 if (!dst_isiomem)
1088 DRM_ERROR("dst bo should be in VRAM\n");
1085
1086 dst += (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor;
1087
1088 /* do data transfer to cursor cache */
1089 csum = copy_cursor_image(src, dst, width, height);
1090
1091 /* write checksum + signature */
1092 ttm_bo_kunmap(&uobj_map);
1093 ast_bo_unreserve(bo);
1094 {
1095 u8 *sig = (u8 *)ast->cache_kmap.virtual + (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor + AST_HWC_SIZE;
1096 writel(csum, sig);
1097 writel(width, sig + AST_HWC_SIGNATURE_SizeX);
1098 writel(height, sig + AST_HWC_SIGNATURE_SizeY);
1099 writel(0, sig + AST_HWC_SIGNATURE_HOTSPOTX);
1100 writel(0, sig + AST_HWC_SIGNATURE_HOTSPOTY);
1101
1102 /* set pattern offset */
1103 gpu_addr = ast->cursor_cache_gpu_addr;
1104 gpu_addr += (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor;
1105 gpu_addr >>= 3;
1106 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc8, gpu_addr & 0xff);
1107 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc9, (gpu_addr >> 8) & 0xff);
1108 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xca, (gpu_addr >> 16) & 0xff);
1109 }
1110 ast_crtc->cursor_width = width;
1111 ast_crtc->cursor_height = height;
1112 ast_crtc->offset_x = AST_MAX_HWC_WIDTH - width;
1113 ast_crtc->offset_y = AST_MAX_HWC_HEIGHT - height;
1114
1115 ast->next_cursor = (ast->next_cursor + 1) % AST_DEFAULT_HWC_NUM;
1116
1117 ast_show_cursor(crtc);
1118
1119 drm_gem_object_unreference_unlocked(obj);
1120 return 0;
1121fail:
1122 drm_gem_object_unreference_unlocked(obj);
1123 return ret;
1124}
1125
1126static int ast_cursor_move(struct drm_crtc *crtc,
1127 int x, int y)
1128{
1129 struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
1130 struct ast_private *ast = crtc->dev->dev_private;
1131 int x_offset, y_offset;
1132 u8 *sig;
1133
1134 sig = (u8 *)ast->cache_kmap.virtual + (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor + AST_HWC_SIZE;
1135 writel(x, sig + AST_HWC_SIGNATURE_X);
1136 writel(y, sig + AST_HWC_SIGNATURE_Y);
1137
1138 x_offset = ast_crtc->offset_x;
1139 y_offset = ast_crtc->offset_y;
1140 if (x < 0) {
1141 x_offset = (-x) + ast_crtc->offset_x;
1142 x = 0;
1143 }
1144
1145 if (y < 0) {
1146 y_offset = (-y) + ast_crtc->offset_y;
1147 y = 0;
1148 }
1149 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc2, x_offset);
1150 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc3, y_offset);
1151 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc4, (x & 0xff));
1152 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc5, ((x >> 8) & 0x0f));
1153 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc6, (y & 0xff));
1154 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc7, ((y >> 8) & 0x07));
1155
1156 /* dummy write to fire HWC */
1157 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xCB, 0xFF, 0x00);
1158
1159 return 0;
1160}
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
new file mode 100644
index 000000000000..6edbee63b0cb
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -0,0 +1,1780 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the
6 * "Software"), to deal in the Software without restriction, including
7 * without limitation the rights to use, copy, modify, merge, publish,
8 * distribute, sub license, and/or sell copies of the Software, and to
9 * permit persons to whom the Software is furnished to do so, subject to
10 * the following conditions:
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
13 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
15 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
16 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
17 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
18 * USE OR OTHER DEALINGS IN THE SOFTWARE.
19 *
20 * The above copyright notice and this permission notice (including the
21 * next paragraph) shall be included in all copies or substantial portions
22 * of the Software.
23 *
24 */
25/*
26 * Authors: Dave Airlie <airlied@redhat.com>
27 */
28
29#include "drmP.h"
30#include "ast_drv.h"
31
32#include "ast_dram_tables.h"
33
34static void ast_init_dram_2300(struct drm_device *dev);
35
36static void
37ast_enable_vga(struct drm_device *dev)
38{
39 struct ast_private *ast = dev->dev_private;
40
41 ast_io_write8(ast, 0x43, 0x01);
42 ast_io_write8(ast, 0x42, 0x01);
43}
44
45#if 0 /* will use later */
46static bool
47ast_is_vga_enabled(struct drm_device *dev)
48{
49 struct ast_private *ast = dev->dev_private;
50 u8 ch;
51
52 if (ast->chip == AST1180) {
53 /* TODO 1180 */
54 } else {
55 ch = ast_io_read8(ast, 0x43);
56 if (ch) {
57 ast_open_key(ast);
58 ch = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xff);
59 return ch & 0x04;
60 }
61 }
62 return 0;
63}
64#endif
65
66static const u8 extreginfo[] = { 0x0f, 0x04, 0x1c, 0xff };
67static const u8 extreginfo_ast2300a0[] = { 0x0f, 0x04, 0x1c, 0xff };
68static const u8 extreginfo_ast2300[] = { 0x0f, 0x04, 0x1f, 0xff };
69
70static void
71ast_set_def_ext_reg(struct drm_device *dev)
72{
73 struct ast_private *ast = dev->dev_private;
74 u8 i, index, reg;
75 const u8 *ext_reg_info;
76
77 /* reset scratch */
78 for (i = 0x81; i <= 0x8f; i++)
79 ast_set_index_reg(ast, AST_IO_CRTC_PORT, i, 0x00);
80
81 if (ast->chip == AST2300) {
82 if (dev->pdev->revision >= 0x20)
83 ext_reg_info = extreginfo_ast2300;
84 else
85 ext_reg_info = extreginfo_ast2300a0;
86 } else
87 ext_reg_info = extreginfo;
88
89 index = 0xa0;
90 while (*ext_reg_info != 0xff) {
91 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, index, 0x00, *ext_reg_info);
92 index++;
93 ext_reg_info++;
94 }
95
96 /* disable standard IO/MEM decode if secondary */
97 /* ast_set_index_reg-mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x3); */
98
99 /* Set Ext. Default */
100 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x8c, 0x00, 0x01);
101 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x00, 0x00);
102
103 /* Enable RAMDAC for A1 */
104 reg = 0x04;
105 if (ast->chip == AST2300)
106 reg |= 0x20;
107 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xff, reg);
108}
109
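/*
 * mindwm/moutdwm access the SOC address space indirectly: 0xf004 selects
 * the 64k window base, 0xf000 bit 0 enables the window, and the data is
 * then read or written at 0x10000 plus the low 16 bits of the address.
 */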
110static inline u32 mindwm(struct ast_private *ast, u32 r)
111{
112 ast_write32(ast, 0xf004, r & 0xffff0000);
113 ast_write32(ast, 0xf000, 0x1);
114
115 return ast_read32(ast, 0x10000 + (r & 0x0000ffff));
116}
117
118static inline void moutdwm(struct ast_private *ast, u32 r, u32 v)
119{
120 ast_write32(ast, 0xf004, r & 0xffff0000);
121 ast_write32(ast, 0xf000, 0x1);
122 ast_write32(ast, 0x10000 + (r & 0x0000ffff), v);
123}
124
125/*
126 * AST2100/2150 DLL CBR Setting
127 */
128#define CBR_SIZE_AST2150 ((16 << 10) - 1)
129#define CBR_PASSNUM_AST2150 5
130#define CBR_THRESHOLD_AST2150 10
131#define CBR_THRESHOLD2_AST2150 10
132#define TIMEOUT_AST2150 5000000
133
134#define CBR_PATNUM_AST2150 8
135
136static const u32 pattern_AST2150[14] = {
137 0xFF00FF00,
138 0xCC33CC33,
139 0xAA55AA55,
140 0xFFFE0001,
141 0x683501FE,
142 0x0F1929B0,
143 0x2D0B4346,
144 0x60767F02,
145 0x6FBE36A6,
146 0x3A253035,
147 0x3019686D,
148 0x41C6167E,
149 0x620152BF,
150 0x20F050E0
151};
152
153static u32 mmctestburst2_ast2150(struct ast_private *ast, u32 datagen)
154{
155 u32 data, timeout;
156
157 moutdwm(ast, 0x1e6e0070, 0x00000000);
158 moutdwm(ast, 0x1e6e0070, 0x00000001 | (datagen << 3));
159 timeout = 0;
160 do {
161 data = mindwm(ast, 0x1e6e0070) & 0x40;
162 if (++timeout > TIMEOUT_AST2150) {
163 moutdwm(ast, 0x1e6e0070, 0x00000000);
164 return 0xffffffff;
165 }
166 } while (!data);
167 moutdwm(ast, 0x1e6e0070, 0x00000000);
168 moutdwm(ast, 0x1e6e0070, 0x00000003 | (datagen << 3));
169 timeout = 0;
170 do {
171 data = mindwm(ast, 0x1e6e0070) & 0x40;
172 if (++timeout > TIMEOUT_AST2150) {
173 moutdwm(ast, 0x1e6e0070, 0x00000000);
174 return 0xffffffff;
175 }
176 } while (!data);
177 data = (mindwm(ast, 0x1e6e0070) & 0x80) >> 7;
178 moutdwm(ast, 0x1e6e0070, 0x00000000);
179 return data;
180}
181
182#if 0 /* unused in DDX driver - here for completeness */
183static u32 mmctestsingle2_ast2150(struct ast_private *ast, u32 datagen)
184{
185 u32 data, timeout;
186
187 moutdwm(ast, 0x1e6e0070, 0x00000000);
188 moutdwm(ast, 0x1e6e0070, 0x00000005 | (datagen << 3));
189 timeout = 0;
190 do {
191 data = mindwm(ast, 0x1e6e0070) & 0x40;
192 if (++timeout > TIMEOUT_AST2150) {
193 moutdwm(ast, 0x1e6e0070, 0x00000000);
194 return 0xffffffff;
195 }
196 } while (!data);
197 data = (mindwm(ast, 0x1e6e0070) & 0x80) >> 7;
198 moutdwm(ast, 0x1e6e0070, 0x00000000);
199 return data;
200}
201#endif
202
203static int cbrtest_ast2150(struct ast_private *ast)
204{
205 int i;
206
207 for (i = 0; i < 8; i++)
208 if (mmctestburst2_ast2150(ast, i))
209 return 0;
210 return 1;
211}
212
213static int cbrscan_ast2150(struct ast_private *ast, int busw)
214{
215 u32 patcnt, loop;
216
217 for (patcnt = 0; patcnt < CBR_PATNUM_AST2150; patcnt++) {
218 moutdwm(ast, 0x1e6e007c, pattern_AST2150[patcnt]);
219 for (loop = 0; loop < CBR_PASSNUM_AST2150; loop++) {
220 if (cbrtest_ast2150(ast))
221 break;
222 }
223 if (loop == CBR_PASSNUM_AST2150)
224 return 0;
225 }
226 return 1;
227}
228
229
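/*
 * DLL calibration for AST2100/2150: sweep the read delay over 0..99,
 * run the pattern scan at each setting and record the passing window;
 * restart if no window of at least CBR_THRESHOLD_AST2150 settings is
 * found.  The final delay lands at 7/16 into the window and is
 * mirrored into all four bytes of register 0x1e6e0068.
 */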
230static void cbrdlli_ast2150(struct ast_private *ast, int busw)
231{
232 u32 dll_min[4], dll_max[4], dlli, data, passcnt;
233
234cbr_start:
235 dll_min[0] = dll_min[1] = dll_min[2] = dll_min[3] = 0xff;
236 dll_max[0] = dll_max[1] = dll_max[2] = dll_max[3] = 0x0;
237 passcnt = 0;
238
239 for (dlli = 0; dlli < 100; dlli++) {
240 moutdwm(ast, 0x1e6e0068, dlli | (dlli << 8) | (dlli << 16) | (dlli << 24));
241 data = cbrscan_ast2150(ast, busw);
242 if (data != 0) {
243 if (data & 0x1) {
244 if (dll_min[0] > dlli)
245 dll_min[0] = dlli;
246 if (dll_max[0] < dlli)
247 dll_max[0] = dlli;
248 }
249 passcnt++;
250 } else if (passcnt >= CBR_THRESHOLD_AST2150)
251 goto cbr_start;
252 }
253 if (dll_max[0] == 0 || (dll_max[0]-dll_min[0]) < CBR_THRESHOLD_AST2150)
254 goto cbr_start;
255
256 dlli = dll_min[0] + (((dll_max[0] - dll_min[0]) * 7) >> 4);
257 moutdwm(ast, 0x1e6e0068, dlli | (dlli << 8) | (dlli << 16) | (dlli << 24));
258}
259
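/*
 * Legacy (pre-AST2300) DRAM init: walk the per-chip register/value
 * table (0xff00 entries encode delays), apply the DRAM-type specific
 * override for index 0x04, run the DLL calibration above when a
 * 266MHz part is detected, then wait for the DRAM-ready bit in
 * scratch register CRTC 0xD0.
 */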
261
262static void ast_init_dram_reg(struct drm_device *dev)
263{
264 struct ast_private *ast = dev->dev_private;
265 u8 j;
266 u32 data, temp, i;
267 const struct ast_dramstruct *dram_reg_info;
268
269 j = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
270
271 if ((j & 0x80) == 0) { /* VGA only */
272 if (ast->chip == AST2000) {
273 dram_reg_info = ast2000_dram_table_data;
274 ast_write32(ast, 0xf004, 0x1e6e0000);
275 ast_write32(ast, 0xf000, 0x1);
276 ast_write32(ast, 0x10100, 0xa8);
277
278 do {
279 ;
280 } while (ast_read32(ast, 0x10100) != 0xa8);
281 } else {/* AST2100/1100 */
282		if (ast->chip == AST2100 || ast->chip == AST2200)
283 dram_reg_info = ast2100_dram_table_data;
284 else
285 dram_reg_info = ast1100_dram_table_data;
286
287 ast_write32(ast, 0xf004, 0x1e6e0000);
288 ast_write32(ast, 0xf000, 0x1);
289 ast_write32(ast, 0x12000, 0x1688A8A8);
290 do {
291 ;
292 } while (ast_read32(ast, 0x12000) != 0x01);
293
294 ast_write32(ast, 0x10000, 0xfc600309);
295 do {
296 ;
297 } while (ast_read32(ast, 0x10000) != 0x01);
298 }
299
300 while (dram_reg_info->index != 0xffff) {
301 if (dram_reg_info->index == 0xff00) {/* delay fn */
302 for (i = 0; i < 15; i++)
303 udelay(dram_reg_info->data);
304 } else if (dram_reg_info->index == 0x4 && ast->chip != AST2000) {
305 data = dram_reg_info->data;
306 if (ast->dram_type == AST_DRAM_1Gx16)
307 data = 0x00000d89;
308 else if (ast->dram_type == AST_DRAM_1Gx32)
309 data = 0x00000c8d;
310
311 temp = ast_read32(ast, 0x12070);
312 temp &= 0xc;
313 temp <<= 2;
314 ast_write32(ast, 0x10000 + dram_reg_info->index, data | temp);
315 } else
316 ast_write32(ast, 0x10000 + dram_reg_info->index, dram_reg_info->data);
317 dram_reg_info++;
318 }
319
320 /* AST 2100/2150 DRAM calibration */
321 data = ast_read32(ast, 0x10120);
322	if (data == 0x5061) { /* 266MHz */
323 data = ast_read32(ast, 0x10004);
324 if (data & 0x40)
325 cbrdlli_ast2150(ast, 16); /* 16 bits */
326 else
327 cbrdlli_ast2150(ast, 32); /* 32 bits */
328 }
329
330 switch (ast->chip) {
331 case AST2000:
332 temp = ast_read32(ast, 0x10140);
333 ast_write32(ast, 0x10140, temp | 0x40);
334 break;
335 case AST1100:
336 case AST2100:
337 case AST2200:
338 case AST2150:
339 temp = ast_read32(ast, 0x1200c);
340 ast_write32(ast, 0x1200c, temp & 0xfffffffd);
341 temp = ast_read32(ast, 0x12040);
342 ast_write32(ast, 0x12040, temp | 0x40);
343 break;
344 default:
345 break;
346 }
347 }
348
349 /* wait ready */
350 do {
351 j = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
352 } while ((j & 0x40) == 0);
353}
354
355void ast_post_gpu(struct drm_device *dev)
356{
357 u32 reg;
358 struct ast_private *ast = dev->dev_private;
359
360 pci_read_config_dword(ast->dev->pdev, 0x04, &reg);
361 reg |= 0x3;
362 pci_write_config_dword(ast->dev->pdev, 0x04, reg);
363
364 ast_enable_vga(dev);
365 ast_open_key(ast);
366 ast_set_def_ext_reg(dev);
367
368 if (ast->chip == AST2300)
369 ast_init_dram_2300(dev);
370 else
371 ast_init_dram_reg(dev);
372}
373
374/* AST 2300 DRAM settings */
375#define AST_DDR3 0
376#define AST_DDR2 1
377
378struct ast2300_dram_param {
379 u32 dram_type;
380 u32 dram_chipid;
381 u32 dram_freq;
382 u32 vram_size;
383 u32 odt;
384 u32 wodt;
385 u32 rodt;
386 u32 dram_config;
387 u32 reg_PERIOD;
388 u32 reg_MADJ;
389 u32 reg_SADJ;
390 u32 reg_MRS;
391 u32 reg_EMRS;
392 u32 reg_AC1;
393 u32 reg_AC2;
394 u32 reg_DQSIC;
395 u32 reg_DRV;
396 u32 reg_IOZ;
397 u32 reg_DQIDLY;
398 u32 reg_FREQ;
399 u32 madj_max;
400 u32 dll2_finetune_step;
401};
402
403/*
404 * DQSI DLL CBR Setting
405 */
406#define CBR_SIZE1 ((4 << 10) - 1)
407#define CBR_SIZE2 ((64 << 10) - 1)
408#define CBR_PASSNUM 5
409#define CBR_PASSNUM2 5
410#define CBR_THRESHOLD 10
411#define CBR_THRESHOLD2 10
412#define TIMEOUT 5000000
413#define CBR_PATNUM 8
414
415static const u32 pattern[8] = {
416 0xFF00FF00,
417 0xCC33CC33,
418 0xAA55AA55,
419 0x88778877,
420 0x92CC4D6E,
421 0x543D3CDE,
422 0xF1E843C7,
423 0x7C61D253
424};
425
426#if 0 /* unused in DDX, included for completeness */
427static int mmc_test_burst(struct ast_private *ast, u32 datagen)
428{
429 u32 data, timeout;
430
431 moutdwm(ast, 0x1e6e0070, 0x00000000);
432 moutdwm(ast, 0x1e6e0070, 0x000000c1 | (datagen << 3));
433 timeout = 0;
434 do {
435 data = mindwm(ast, 0x1e6e0070) & 0x3000;
436 if (data & 0x2000) {
437 return 0;
438 }
439 if (++timeout > TIMEOUT) {
440 moutdwm(ast, 0x1e6e0070, 0x00000000);
441 return 0;
442 }
443 } while (!data);
444 moutdwm(ast, 0x1e6e0070, 0x00000000);
445 return 1;
446}
447#endif
448
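/*
 * MMC test engine handshake used by the AST2300 calibration: write 0
 * to 0x1e6e0070 to idle the engine, then the command with the
 * data-generation mode in bits 5:3, poll bit 12 for completion, and
 * read back what appears to be a per-bit fail mask from 0x1e6e0078
 * (folded to 16 bits).  A timeout yields -1, so callers treat every
 * lane as failed.
 */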
449static int mmc_test_burst2(struct ast_private *ast, u32 datagen)
450{
451 u32 data, timeout;
452
453 moutdwm(ast, 0x1e6e0070, 0x00000000);
454 moutdwm(ast, 0x1e6e0070, 0x00000041 | (datagen << 3));
455 timeout = 0;
456 do {
457 data = mindwm(ast, 0x1e6e0070) & 0x1000;
458 if (++timeout > TIMEOUT) {
459 moutdwm(ast, 0x1e6e0070, 0x0);
460 return -1;
461 }
462 } while (!data);
463 data = mindwm(ast, 0x1e6e0078);
464 data = (data | (data >> 16)) & 0xffff;
465 moutdwm(ast, 0x1e6e0070, 0x0);
466 return data;
467}
468
469#if 0 /* unused in DDX - here for completeness */
470static int mmc_test_single(struct ast_private *ast, u32 datagen)
471{
472 u32 data, timeout;
473
474 moutdwm(ast, 0x1e6e0070, 0x00000000);
475 moutdwm(ast, 0x1e6e0070, 0x000000c5 | (datagen << 3));
476 timeout = 0;
477 do {
478 data = mindwm(ast, 0x1e6e0070) & 0x3000;
479 if (data & 0x2000)
480 return 0;
481 if (++timeout > TIMEOUT) {
482 moutdwm(ast, 0x1e6e0070, 0x0);
483 return 0;
484 }
485 } while (!data);
486 moutdwm(ast, 0x1e6e0070, 0x0);
487 return 1;
488}
489#endif
490
491static int mmc_test_single2(struct ast_private *ast, u32 datagen)
492{
493 u32 data, timeout;
494
495 moutdwm(ast, 0x1e6e0070, 0x00000000);
496 moutdwm(ast, 0x1e6e0070, 0x00000005 | (datagen << 3));
497 timeout = 0;
498 do {
499 data = mindwm(ast, 0x1e6e0070) & 0x1000;
500 if (++timeout > TIMEOUT) {
501 moutdwm(ast, 0x1e6e0070, 0x0);
502 return -1;
503 }
504 } while (!data);
505 data = mindwm(ast, 0x1e6e0078);
506 data = (data | (data >> 16)) & 0xffff;
507 moutdwm(ast, 0x1e6e0070, 0x0);
508 return data;
509}
510
511static int cbr_test(struct ast_private *ast)
512{
513 u32 data;
514 int i;
515 data = mmc_test_single2(ast, 0);
516 if ((data & 0xff) && (data & 0xff00))
517 return 0;
518 for (i = 0; i < 8; i++) {
519 data = mmc_test_burst2(ast, i);
520 if ((data & 0xff) && (data & 0xff00))
521 return 0;
522 }
523 if (!data)
524 return 3;
525 else if (data & 0xff)
526 return 2;
527 return 1;
528}
529
530static int cbr_scan(struct ast_private *ast)
531{
532 u32 data, data2, patcnt, loop;
533
534 data2 = 3;
535 for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) {
536 moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
537 for (loop = 0; loop < CBR_PASSNUM2; loop++) {
538 if ((data = cbr_test(ast)) != 0) {
539 data2 &= data;
540 if (!data2)
541 return 0;
542 break;
543 }
544 }
545 if (loop == CBR_PASSNUM2)
546 return 0;
547 }
548 return data2;
549}
550
551static u32 cbr_test2(struct ast_private *ast)
552{
553 u32 data;
554
555 data = mmc_test_burst2(ast, 0);
556 if (data == 0xffff)
557 return 0;
558 data |= mmc_test_single2(ast, 0);
559 if (data == 0xffff)
560 return 0;
561
562 return ~data & 0xffff;
563}
564
565static u32 cbr_scan2(struct ast_private *ast)
566{
567 u32 data, data2, patcnt, loop;
568
569 data2 = 0xffff;
570 for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) {
571 moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
572 for (loop = 0; loop < CBR_PASSNUM2; loop++) {
573 if ((data = cbr_test2(ast)) != 0) {
574 data2 &= data;
575				if (!data2)
576 return 0;
577 break;
578 }
579 }
580 if (loop == CBR_PASSNUM2)
581 return 0;
582 }
583 return data2;
584}
585
586#if 0 /* unused in DDX - added for completeness */
587static void finetuneDQI(struct ast_private *ast, struct ast2300_dram_param *param)
588{
589 u32 gold_sadj[2], dllmin[16], dllmax[16], dlli, data, cnt, mask, passcnt;
590
591 gold_sadj[0] = (mindwm(ast, 0x1E6E0024) >> 16) & 0xffff;
592 gold_sadj[1] = gold_sadj[0] >> 8;
593 gold_sadj[0] = gold_sadj[0] & 0xff;
594 gold_sadj[0] = (gold_sadj[0] + gold_sadj[1]) >> 1;
595 gold_sadj[1] = gold_sadj[0];
596
597 for (cnt = 0; cnt < 16; cnt++) {
598 dllmin[cnt] = 0xff;
599 dllmax[cnt] = 0x0;
600 }
601 passcnt = 0;
602 for (dlli = 0; dlli < 76; dlli++) {
603 moutdwm(ast, 0x1E6E0068, 0x00001400 | (dlli << 16) | (dlli << 24));
604 /* Wait DQSI latch phase calibration */
605 moutdwm(ast, 0x1E6E0074, 0x00000010);
606 moutdwm(ast, 0x1E6E0070, 0x00000003);
607 do {
608 data = mindwm(ast, 0x1E6E0070);
609 } while (!(data & 0x00001000));
610 moutdwm(ast, 0x1E6E0070, 0x00000000);
611
612 moutdwm(ast, 0x1E6E0074, CBR_SIZE1);
613 data = cbr_scan2(ast);
614 if (data != 0) {
615 mask = 0x00010001;
616 for (cnt = 0; cnt < 16; cnt++) {
617 if (data & mask) {
618 if (dllmin[cnt] > dlli) {
619 dllmin[cnt] = dlli;
620 }
621 if (dllmax[cnt] < dlli) {
622 dllmax[cnt] = dlli;
623 }
624 }
625 mask <<= 1;
626 }
627 passcnt++;
628 } else if (passcnt >= CBR_THRESHOLD) {
629 break;
630 }
631 }
632 data = 0;
633 for (cnt = 0; cnt < 8; cnt++) {
634 data >>= 3;
635 if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD)) {
636 dlli = (dllmin[cnt] + dllmax[cnt]) >> 1;
637 if (gold_sadj[0] >= dlli) {
638 dlli = (gold_sadj[0] - dlli) >> 1;
639 if (dlli > 3) {
640 dlli = 3;
641 }
642 } else {
643 dlli = (dlli - gold_sadj[0]) >> 1;
644 if (dlli > 4) {
645 dlli = 4;
646 }
647 dlli = (8 - dlli) & 0x7;
648 }
649 data |= dlli << 21;
650 }
651 }
652 moutdwm(ast, 0x1E6E0080, data);
653
654 data = 0;
655 for (cnt = 8; cnt < 16; cnt++) {
656 data >>= 3;
657 if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD)) {
658 dlli = (dllmin[cnt] + dllmax[cnt]) >> 1;
659 if (gold_sadj[1] >= dlli) {
660 dlli = (gold_sadj[1] - dlli) >> 1;
661 if (dlli > 3) {
662 dlli = 3;
663 } else {
664 dlli = (dlli - 1) & 0x7;
665 }
666 } else {
667 dlli = (dlli - gold_sadj[1]) >> 1;
668 dlli += 1;
669 if (dlli > 4) {
670 dlli = 4;
671 }
672 dlli = (8 - dlli) & 0x7;
673 }
674 data |= dlli << 21;
675 }
676 }
677 moutdwm(ast, 0x1E6E0084, data);
678
679} /* finetuneDQI */
680#endif
681
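/*
 * Per-lane DQ input fine-tuning: sweep the DQSI delay, collect the
 * passing window for each of the 16 lanes, derive a "golden" average
 * from the window minimums, then pack a 3-bit adjustment per lane
 * into 0x1E6E0080 (lanes 0-7) and 0x1E6E0084 (lanes 8-15), retrying
 * from FINETUNE_START until all 16 lanes show a usable window.
 */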
682static void finetuneDQI_L(struct ast_private *ast, struct ast2300_dram_param *param)
683{
684 u32 gold_sadj[2], dllmin[16], dllmax[16], dlli, data, cnt, mask, passcnt;
685
686FINETUNE_START:
687 for (cnt = 0; cnt < 16; cnt++) {
688 dllmin[cnt] = 0xff;
689 dllmax[cnt] = 0x0;
690 }
691 passcnt = 0;
692 for (dlli = 0; dlli < 76; dlli++) {
693 moutdwm(ast, 0x1E6E0068, 0x00001400 | (dlli << 16) | (dlli << 24));
694 /* Wait DQSI latch phase calibration */
695 moutdwm(ast, 0x1E6E0074, 0x00000010);
696 moutdwm(ast, 0x1E6E0070, 0x00000003);
697 do {
698 data = mindwm(ast, 0x1E6E0070);
699 } while (!(data & 0x00001000));
700 moutdwm(ast, 0x1E6E0070, 0x00000000);
701
702 moutdwm(ast, 0x1E6E0074, CBR_SIZE1);
703 data = cbr_scan2(ast);
704 if (data != 0) {
705 mask = 0x00010001;
706 for (cnt = 0; cnt < 16; cnt++) {
707 if (data & mask) {
708 if (dllmin[cnt] > dlli) {
709 dllmin[cnt] = dlli;
710 }
711 if (dllmax[cnt] < dlli) {
712 dllmax[cnt] = dlli;
713 }
714 }
715 mask <<= 1;
716 }
717 passcnt++;
718 } else if (passcnt >= CBR_THRESHOLD2) {
719 break;
720 }
721 }
722 gold_sadj[0] = 0x0;
723 passcnt = 0;
724 for (cnt = 0; cnt < 16; cnt++) {
725 if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
726 gold_sadj[0] += dllmin[cnt];
727 passcnt++;
728 }
729 }
730 if (passcnt != 16) {
731 goto FINETUNE_START;
732 }
733 gold_sadj[0] = gold_sadj[0] >> 4;
734 gold_sadj[1] = gold_sadj[0];
735
736 data = 0;
737 for (cnt = 0; cnt < 8; cnt++) {
738 data >>= 3;
739 if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
740 dlli = dllmin[cnt];
741 if (gold_sadj[0] >= dlli) {
742 dlli = ((gold_sadj[0] - dlli) * 19) >> 5;
743 if (dlli > 3) {
744 dlli = 3;
745 }
746 } else {
747 dlli = ((dlli - gold_sadj[0]) * 19) >> 5;
748 if (dlli > 4) {
749 dlli = 4;
750 }
751 dlli = (8 - dlli) & 0x7;
752 }
753 data |= dlli << 21;
754 }
755 }
756 moutdwm(ast, 0x1E6E0080, data);
757
758 data = 0;
759 for (cnt = 8; cnt < 16; cnt++) {
760 data >>= 3;
761 if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
762 dlli = dllmin[cnt];
763 if (gold_sadj[1] >= dlli) {
764 dlli = ((gold_sadj[1] - dlli) * 19) >> 5;
765 if (dlli > 3) {
766 dlli = 3;
767 } else {
768 dlli = (dlli - 1) & 0x7;
769 }
770 } else {
771 dlli = ((dlli - gold_sadj[1]) * 19) >> 5;
772 dlli += 1;
773 if (dlli > 4) {
774 dlli = 4;
775 }
776 dlli = (8 - dlli) & 0x7;
777 }
778 data |= dlli << 21;
779 }
780 }
781 moutdwm(ast, 0x1E6E0084, data);
782
783} /* finetuneDQI_L */
784
785static void finetuneDQI_L2(struct ast_private *ast, struct ast2300_dram_param *param)
786{
787 u32 gold_sadj[2], dllmin[16], dllmax[16], dlli, data, cnt, mask, passcnt, data2;
788
789 for (cnt = 0; cnt < 16; cnt++) {
790 dllmin[cnt] = 0xff;
791 dllmax[cnt] = 0x0;
792 }
793 passcnt = 0;
794 for (dlli = 0; dlli < 76; dlli++) {
795 moutdwm(ast, 0x1E6E0068, 0x00001400 | (dlli << 16) | (dlli << 24));
796 /* Wait DQSI latch phase calibration */
797 moutdwm(ast, 0x1E6E0074, 0x00000010);
798 moutdwm(ast, 0x1E6E0070, 0x00000003);
799 do {
800 data = mindwm(ast, 0x1E6E0070);
801 } while (!(data & 0x00001000));
802 moutdwm(ast, 0x1E6E0070, 0x00000000);
803
804 moutdwm(ast, 0x1E6E0074, CBR_SIZE2);
805 data = cbr_scan2(ast);
806 if (data != 0) {
807 mask = 0x00010001;
808 for (cnt = 0; cnt < 16; cnt++) {
809 if (data & mask) {
810 if (dllmin[cnt] > dlli) {
811 dllmin[cnt] = dlli;
812 }
813 if (dllmax[cnt] < dlli) {
814 dllmax[cnt] = dlli;
815 }
816 }
817 mask <<= 1;
818 }
819 passcnt++;
820 } else if (passcnt >= CBR_THRESHOLD2) {
821 break;
822 }
823 }
824 gold_sadj[0] = 0x0;
825 gold_sadj[1] = 0xFF;
826 for (cnt = 0; cnt < 8; cnt++) {
827 if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
828 if (gold_sadj[0] < dllmin[cnt]) {
829 gold_sadj[0] = dllmin[cnt];
830 }
831 if (gold_sadj[1] > dllmax[cnt]) {
832 gold_sadj[1] = dllmax[cnt];
833 }
834 }
835 }
836 gold_sadj[0] = (gold_sadj[1] + gold_sadj[0]) >> 1;
837 gold_sadj[1] = mindwm(ast, 0x1E6E0080);
838
839 data = 0;
840 for (cnt = 0; cnt < 8; cnt++) {
841 data >>= 3;
842 data2 = gold_sadj[1] & 0x7;
843 gold_sadj[1] >>= 3;
844 if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
845 dlli = (dllmin[cnt] + dllmax[cnt]) >> 1;
846 if (gold_sadj[0] >= dlli) {
847 dlli = (gold_sadj[0] - dlli) >> 1;
848 if (dlli > 0) {
849 dlli = 1;
850 }
851 if (data2 != 3) {
852 data2 = (data2 + dlli) & 0x7;
853 }
854 } else {
855 dlli = (dlli - gold_sadj[0]) >> 1;
856 if (dlli > 0) {
857 dlli = 1;
858 }
859 if (data2 != 4) {
860 data2 = (data2 - dlli) & 0x7;
861 }
862 }
863 }
864 data |= data2 << 21;
865 }
866 moutdwm(ast, 0x1E6E0080, data);
867
868 gold_sadj[0] = 0x0;
869 gold_sadj[1] = 0xFF;
870 for (cnt = 8; cnt < 16; cnt++) {
871 if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
872 if (gold_sadj[0] < dllmin[cnt]) {
873 gold_sadj[0] = dllmin[cnt];
874 }
875 if (gold_sadj[1] > dllmax[cnt]) {
876 gold_sadj[1] = dllmax[cnt];
877 }
878 }
879 }
880 gold_sadj[0] = (gold_sadj[1] + gold_sadj[0]) >> 1;
881 gold_sadj[1] = mindwm(ast, 0x1E6E0084);
882
883 data = 0;
884 for (cnt = 8; cnt < 16; cnt++) {
885 data >>= 3;
886 data2 = gold_sadj[1] & 0x7;
887 gold_sadj[1] >>= 3;
888 if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
889 dlli = (dllmin[cnt] + dllmax[cnt]) >> 1;
890 if (gold_sadj[0] >= dlli) {
891 dlli = (gold_sadj[0] - dlli) >> 1;
892 if (dlli > 0) {
893 dlli = 1;
894 }
895 if (data2 != 3) {
896 data2 = (data2 + dlli) & 0x7;
897 }
898 } else {
899 dlli = (dlli - gold_sadj[0]) >> 1;
900 if (dlli > 0) {
901 dlli = 1;
902 }
903 if (data2 != 4) {
904 data2 = (data2 - dlli) & 0x7;
905 }
906 }
907 }
908 data |= data2 << 21;
909 }
910 moutdwm(ast, 0x1E6E0084, data);
911
912} /* finetuneDQI_L2 */
913
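/*
 * Top-level DQSI delay calibration: fine-tune the per-lane delays,
 * then sweep the common DLL setting, requiring a window of at least
 * CBR_THRESHOLD on both halves of the bus before programming the
 * window midpoints and re-running the DQSI latch phase calibration.
 */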
914static void cbr_dll2(struct ast_private *ast, struct ast2300_dram_param *param)
915{
916 u32 dllmin[2], dllmax[2], dlli, data, data2, passcnt;
917
919 finetuneDQI_L(ast, param);
920 finetuneDQI_L2(ast, param);
921
922CBR_START2:
923 dllmin[0] = dllmin[1] = 0xff;
924 dllmax[0] = dllmax[1] = 0x0;
925 passcnt = 0;
926 for (dlli = 0; dlli < 76; dlli++) {
927 moutdwm(ast, 0x1E6E0068, 0x00001300 | (dlli << 16) | (dlli << 24));
928 /* Wait DQSI latch phase calibration */
929 moutdwm(ast, 0x1E6E0074, 0x00000010);
930 moutdwm(ast, 0x1E6E0070, 0x00000003);
931 do {
932 data = mindwm(ast, 0x1E6E0070);
933 } while (!(data & 0x00001000));
934 moutdwm(ast, 0x1E6E0070, 0x00000000);
935
936 moutdwm(ast, 0x1E6E0074, CBR_SIZE2);
937 data = cbr_scan(ast);
938 if (data != 0) {
939 if (data & 0x1) {
940 if (dllmin[0] > dlli) {
941 dllmin[0] = dlli;
942 }
943 if (dllmax[0] < dlli) {
944 dllmax[0] = dlli;
945 }
946 }
947 if (data & 0x2) {
948 if (dllmin[1] > dlli) {
949 dllmin[1] = dlli;
950 }
951 if (dllmax[1] < dlli) {
952 dllmax[1] = dlli;
953 }
954 }
955 passcnt++;
956 } else if (passcnt >= CBR_THRESHOLD) {
957 break;
958 }
959 }
960 if (dllmax[0] == 0 || (dllmax[0]-dllmin[0]) < CBR_THRESHOLD) {
961 goto CBR_START2;
962 }
963 if (dllmax[1] == 0 || (dllmax[1]-dllmin[1]) < CBR_THRESHOLD) {
964 goto CBR_START2;
965 }
966 dlli = (dllmin[1] + dllmax[1]) >> 1;
967 dlli <<= 8;
968 dlli += (dllmin[0] + dllmax[0]) >> 1;
969 moutdwm(ast, 0x1E6E0068, (mindwm(ast, 0x1E6E0068) & 0xFFFF) | (dlli << 16));
970
971 data = (mindwm(ast, 0x1E6E0080) >> 24) & 0x1F;
972 data2 = (mindwm(ast, 0x1E6E0018) & 0xff80ffff) | (data << 16);
973 moutdwm(ast, 0x1E6E0018, data2);
974 moutdwm(ast, 0x1E6E0024, 0x8001 | (data << 1) | (param->dll2_finetune_step << 8));
975
976 /* Wait DQSI latch phase calibration */
977 moutdwm(ast, 0x1E6E0074, 0x00000010);
978 moutdwm(ast, 0x1E6E0070, 0x00000003);
979 do {
980 data = mindwm(ast, 0x1E6E0070);
981 } while (!(data & 0x00001000));
982 moutdwm(ast, 0x1E6E0070, 0x00000000);
983 moutdwm(ast, 0x1E6E0070, 0x00000003);
984 do {
985 data = mindwm(ast, 0x1E6E0070);
986 } while (!(data & 0x00001000));
987 moutdwm(ast, 0x1E6E0070, 0x00000000);
988} /* CBRDLL2 */
989
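/*
 * Build the DDR3 parameter set: read the board strapping from
 * 0x1E6E2070 to derive the AC-timing and MRS trap adjustments, then
 * fill in the per-frequency register values plus the DRAM chip size
 * and VRAM size configuration field.
 */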
990static void get_ddr3_info(struct ast_private *ast, struct ast2300_dram_param *param)
991{
992 u32 trap, trap_AC2, trap_MRS;
993
994 moutdwm(ast, 0x1E6E2000, 0x1688A8A8);
995
996	/* Get trap info */
997 trap = (mindwm(ast, 0x1E6E2070) >> 25) & 0x3;
998 trap_AC2 = 0x00020000 + (trap << 16);
999 trap_AC2 |= 0x00300000 + ((trap & 0x2) << 19);
1000 trap_MRS = 0x00000010 + (trap << 4);
1001 trap_MRS |= ((trap & 0x2) << 18);
1002
1003 param->reg_MADJ = 0x00034C4C;
1004 param->reg_SADJ = 0x00001800;
1005 param->reg_DRV = 0x000000F0;
1006 param->reg_PERIOD = param->dram_freq;
1007 param->rodt = 0;
1008
1009 switch (param->dram_freq) {
1010 case 336:
1011 moutdwm(ast, 0x1E6E2020, 0x0190);
1012 param->wodt = 0;
1013 param->reg_AC1 = 0x22202725;
1014 param->reg_AC2 = 0xAA007613 | trap_AC2;
1015 param->reg_DQSIC = 0x000000BA;
1016 param->reg_MRS = 0x04001400 | trap_MRS;
1017 param->reg_EMRS = 0x00000000;
1018 param->reg_IOZ = 0x00000034;
1019 param->reg_DQIDLY = 0x00000074;
1020 param->reg_FREQ = 0x00004DC0;
1021 param->madj_max = 96;
1022 param->dll2_finetune_step = 3;
1023 break;
1024 default:
1025 case 396:
1026 moutdwm(ast, 0x1E6E2020, 0x03F1);
1027 param->wodt = 1;
1028 param->reg_AC1 = 0x33302825;
1029 param->reg_AC2 = 0xCC009617 | trap_AC2;
1030 param->reg_DQSIC = 0x000000E2;
1031 param->reg_MRS = 0x04001600 | trap_MRS;
1032 param->reg_EMRS = 0x00000000;
1033 param->reg_IOZ = 0x00000034;
1034 param->reg_DRV = 0x000000FA;
1035 param->reg_DQIDLY = 0x00000089;
1036 param->reg_FREQ = 0x000050C0;
1037 param->madj_max = 96;
1038 param->dll2_finetune_step = 4;
1039
1040 switch (param->dram_chipid) {
1041 default:
1042 case AST_DRAM_512Mx16:
1043 case AST_DRAM_1Gx16:
1044 param->reg_AC2 = 0xCC009617 | trap_AC2;
1045 break;
1046 case AST_DRAM_2Gx16:
1047 param->reg_AC2 = 0xCC009622 | trap_AC2;
1048 break;
1049 case AST_DRAM_4Gx16:
1050 param->reg_AC2 = 0xCC00963F | trap_AC2;
1051 break;
1052 }
1053 break;
1054
1055 case 408:
1056 moutdwm(ast, 0x1E6E2020, 0x01F0);
1057 param->wodt = 1;
1058 param->reg_AC1 = 0x33302825;
1059 param->reg_AC2 = 0xCC009617 | trap_AC2;
1060 param->reg_DQSIC = 0x000000E2;
1061 param->reg_MRS = 0x04001600 | trap_MRS;
1062 param->reg_EMRS = 0x00000000;
1063 param->reg_IOZ = 0x00000034;
1064 param->reg_DRV = 0x000000FA;
1065 param->reg_DQIDLY = 0x00000089;
1066 param->reg_FREQ = 0x000050C0;
1067 param->madj_max = 96;
1068 param->dll2_finetune_step = 4;
1069
1070 switch (param->dram_chipid) {
1071 default:
1072 case AST_DRAM_512Mx16:
1073 case AST_DRAM_1Gx16:
1074 param->reg_AC2 = 0xCC009617 | trap_AC2;
1075 break;
1076 case AST_DRAM_2Gx16:
1077 param->reg_AC2 = 0xCC009622 | trap_AC2;
1078 break;
1079 case AST_DRAM_4Gx16:
1080 param->reg_AC2 = 0xCC00963F | trap_AC2;
1081 break;
1082 }
1083
1084 break;
1085 case 456:
1086 moutdwm(ast, 0x1E6E2020, 0x0230);
1087 param->wodt = 0;
1088 param->reg_AC1 = 0x33302926;
1089 param->reg_AC2 = 0xCD44961A;
1090 param->reg_DQSIC = 0x000000FC;
1091 param->reg_MRS = 0x00081830;
1092 param->reg_EMRS = 0x00000000;
1093 param->reg_IOZ = 0x00000045;
1094 param->reg_DQIDLY = 0x00000097;
1095 param->reg_FREQ = 0x000052C0;
1096 param->madj_max = 88;
1097 param->dll2_finetune_step = 4;
1098 break;
1099 case 504:
1100 moutdwm(ast, 0x1E6E2020, 0x0270);
1101 param->wodt = 1;
1102 param->reg_AC1 = 0x33302926;
1103 param->reg_AC2 = 0xDE44A61D;
1104 param->reg_DQSIC = 0x00000117;
1105 param->reg_MRS = 0x00081A30;
1106 param->reg_EMRS = 0x00000000;
1107 param->reg_IOZ = 0x070000BB;
1108 param->reg_DQIDLY = 0x000000A0;
1109 param->reg_FREQ = 0x000054C0;
1110 param->madj_max = 79;
1111 param->dll2_finetune_step = 4;
1112 break;
1113 case 528:
1114 moutdwm(ast, 0x1E6E2020, 0x0290);
1115 param->wodt = 1;
1116 param->rodt = 1;
1117 param->reg_AC1 = 0x33302926;
1118 param->reg_AC2 = 0xEF44B61E;
1119 param->reg_DQSIC = 0x00000125;
1120 param->reg_MRS = 0x00081A30;
1121 param->reg_EMRS = 0x00000040;
1122 param->reg_DRV = 0x000000F5;
1123 param->reg_IOZ = 0x00000023;
1124 param->reg_DQIDLY = 0x00000088;
1125 param->reg_FREQ = 0x000055C0;
1126 param->madj_max = 76;
1127 param->dll2_finetune_step = 3;
1128 break;
1129 case 576:
1130 moutdwm(ast, 0x1E6E2020, 0x0140);
1131 param->reg_MADJ = 0x00136868;
1132 param->reg_SADJ = 0x00004534;
1133 param->wodt = 1;
1134 param->rodt = 1;
1135 param->reg_AC1 = 0x33302A37;
1136 param->reg_AC2 = 0xEF56B61E;
1137 param->reg_DQSIC = 0x0000013F;
1138 param->reg_MRS = 0x00101A50;
1139 param->reg_EMRS = 0x00000040;
1140 param->reg_DRV = 0x000000FA;
1141 param->reg_IOZ = 0x00000023;
1142 param->reg_DQIDLY = 0x00000078;
1143 param->reg_FREQ = 0x000057C0;
1144 param->madj_max = 136;
1145 param->dll2_finetune_step = 3;
1146 break;
1147 case 600:
1148 moutdwm(ast, 0x1E6E2020, 0x02E1);
1149 param->reg_MADJ = 0x00136868;
1150 param->reg_SADJ = 0x00004534;
1151 param->wodt = 1;
1152 param->rodt = 1;
1153 param->reg_AC1 = 0x32302A37;
1154 param->reg_AC2 = 0xDF56B61F;
1155 param->reg_DQSIC = 0x0000014D;
1156 param->reg_MRS = 0x00101A50;
1157 param->reg_EMRS = 0x00000004;
1158 param->reg_DRV = 0x000000F5;
1159 param->reg_IOZ = 0x00000023;
1160 param->reg_DQIDLY = 0x00000078;
1161 param->reg_FREQ = 0x000058C0;
1162 param->madj_max = 132;
1163 param->dll2_finetune_step = 3;
1164 break;
1165 case 624:
1166 moutdwm(ast, 0x1E6E2020, 0x0160);
1167 param->reg_MADJ = 0x00136868;
1168 param->reg_SADJ = 0x00004534;
1169 param->wodt = 1;
1170 param->rodt = 1;
1171 param->reg_AC1 = 0x32302A37;
1172 param->reg_AC2 = 0xEF56B621;
1173 param->reg_DQSIC = 0x0000015A;
1174 param->reg_MRS = 0x02101A50;
1175 param->reg_EMRS = 0x00000004;
1176 param->reg_DRV = 0x000000F5;
1177 param->reg_IOZ = 0x00000034;
1178 param->reg_DQIDLY = 0x00000078;
1179 param->reg_FREQ = 0x000059C0;
1180 param->madj_max = 128;
1181 param->dll2_finetune_step = 3;
1182 break;
1183 } /* switch freq */
1184
1185 switch (param->dram_chipid) {
1186 case AST_DRAM_512Mx16:
1187 param->dram_config = 0x130;
1188 break;
1189 default:
1190 case AST_DRAM_1Gx16:
1191 param->dram_config = 0x131;
1192 break;
1193 case AST_DRAM_2Gx16:
1194 param->dram_config = 0x132;
1195 break;
1196 case AST_DRAM_4Gx16:
1197 param->dram_config = 0x133;
1198 break;
1199	} /* switch size */
1200
1201 switch (param->vram_size) {
1202 default:
1203 case AST_VIDMEM_SIZE_8M:
1204 param->dram_config |= 0x00;
1205 break;
1206 case AST_VIDMEM_SIZE_16M:
1207 param->dram_config |= 0x04;
1208 break;
1209 case AST_VIDMEM_SIZE_32M:
1210 param->dram_config |= 0x08;
1211 break;
1212 case AST_VIDMEM_SIZE_64M:
1213 param->dram_config |= 0x0c;
1214 break;
1215 }
1216
1217}
1218
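/*
 * DDR3 controller bring-up: program the timing registers, wait for
 * MCLK2X to lock (stepping MADJ/SADJ in 0x1E6E0064/0x68 until the DLL
 * status reads back sane), issue the mode-register load sequence
 * (MRS/EMRS), enable write/read ODT as requested, and finally
 * calibrate the DQSI delays via cbr_dll2().
 */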
1219static void ddr3_init(struct ast_private *ast, struct ast2300_dram_param *param)
1220{
1221 u32 data, data2;
1222
1223 moutdwm(ast, 0x1E6E0000, 0xFC600309);
1224 moutdwm(ast, 0x1E6E0018, 0x00000100);
1225 moutdwm(ast, 0x1E6E0024, 0x00000000);
1226 moutdwm(ast, 0x1E6E0034, 0x00000000);
1227 udelay(10);
1228 moutdwm(ast, 0x1E6E0064, param->reg_MADJ);
1229 moutdwm(ast, 0x1E6E0068, param->reg_SADJ);
1230 udelay(10);
1231 moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000);
1232 udelay(10);
1233
1234 moutdwm(ast, 0x1E6E0004, param->dram_config);
1235 moutdwm(ast, 0x1E6E0008, 0x90040f);
1236 moutdwm(ast, 0x1E6E0010, param->reg_AC1);
1237 moutdwm(ast, 0x1E6E0014, param->reg_AC2);
1238 moutdwm(ast, 0x1E6E0020, param->reg_DQSIC);
1239 moutdwm(ast, 0x1E6E0080, 0x00000000);
1240 moutdwm(ast, 0x1E6E0084, 0x00000000);
1241 moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY);
1242 moutdwm(ast, 0x1E6E0018, 0x4040A170);
1243 moutdwm(ast, 0x1E6E0018, 0x20402370);
1244 moutdwm(ast, 0x1E6E0038, 0x00000000);
1245 moutdwm(ast, 0x1E6E0040, 0xFF444444);
1246 moutdwm(ast, 0x1E6E0044, 0x22222222);
1247 moutdwm(ast, 0x1E6E0048, 0x22222222);
1248 moutdwm(ast, 0x1E6E004C, 0x00000002);
1249 moutdwm(ast, 0x1E6E0050, 0x80000000);
1250 moutdwm(ast, 0x1E6E0050, 0x00000000);
1251 moutdwm(ast, 0x1E6E0054, 0);
1252 moutdwm(ast, 0x1E6E0060, param->reg_DRV);
1253 moutdwm(ast, 0x1E6E006C, param->reg_IOZ);
1254 moutdwm(ast, 0x1E6E0070, 0x00000000);
1255 moutdwm(ast, 0x1E6E0074, 0x00000000);
1256 moutdwm(ast, 0x1E6E0078, 0x00000000);
1257 moutdwm(ast, 0x1E6E007C, 0x00000000);
1258 /* Wait MCLK2X lock to MCLK */
1259 do {
1260 data = mindwm(ast, 0x1E6E001C);
1261 } while (!(data & 0x08000000));
1262 moutdwm(ast, 0x1E6E0034, 0x00000001);
1263 moutdwm(ast, 0x1E6E000C, 0x00005C04);
1264 udelay(10);
1265 moutdwm(ast, 0x1E6E000C, 0x00000000);
1266 moutdwm(ast, 0x1E6E0034, 0x00000000);
1267 data = mindwm(ast, 0x1E6E001C);
1268 data = (data >> 8) & 0xff;
1269 while ((data & 0x08) || ((data & 0x7) < 2) || (data < 4)) {
1270 data2 = (mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4;
1271 if ((data2 & 0xff) > param->madj_max) {
1272 break;
1273 }
1274 moutdwm(ast, 0x1E6E0064, data2);
1275 if (data2 & 0x00100000) {
1276 data2 = ((data2 & 0xff) >> 3) + 3;
1277 } else {
1278 data2 = ((data2 & 0xff) >> 2) + 5;
1279 }
1280 data = mindwm(ast, 0x1E6E0068) & 0xffff00ff;
1281 data2 += data & 0xff;
1282 data = data | (data2 << 8);
1283 moutdwm(ast, 0x1E6E0068, data);
1284 udelay(10);
1285 moutdwm(ast, 0x1E6E0064, mindwm(ast, 0x1E6E0064) | 0xC0000);
1286 udelay(10);
1287 data = mindwm(ast, 0x1E6E0018) & 0xfffff1ff;
1288 moutdwm(ast, 0x1E6E0018, data);
1289 data = data | 0x200;
1290 moutdwm(ast, 0x1E6E0018, data);
1291 do {
1292 data = mindwm(ast, 0x1E6E001C);
1293 } while (!(data & 0x08000000));
1294
1295 moutdwm(ast, 0x1E6E0034, 0x00000001);
1296 moutdwm(ast, 0x1E6E000C, 0x00005C04);
1297 udelay(10);
1298 moutdwm(ast, 0x1E6E000C, 0x00000000);
1299 moutdwm(ast, 0x1E6E0034, 0x00000000);
1300 data = mindwm(ast, 0x1E6E001C);
1301 data = (data >> 8) & 0xff;
1302 }
1303 data = mindwm(ast, 0x1E6E0018) | 0xC00;
1304 moutdwm(ast, 0x1E6E0018, data);
1305
1306 moutdwm(ast, 0x1E6E0034, 0x00000001);
1307 moutdwm(ast, 0x1E6E000C, 0x00000040);
1308 udelay(50);
1309 /* Mode Register Setting */
1310 moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100);
1311 moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
1312 moutdwm(ast, 0x1E6E0028, 0x00000005);
1313 moutdwm(ast, 0x1E6E0028, 0x00000007);
1314 moutdwm(ast, 0x1E6E0028, 0x00000003);
1315 moutdwm(ast, 0x1E6E0028, 0x00000001);
1316 moutdwm(ast, 0x1E6E002C, param->reg_MRS);
1317 moutdwm(ast, 0x1E6E000C, 0x00005C08);
1318 moutdwm(ast, 0x1E6E0028, 0x00000001);
1319
1320 moutdwm(ast, 0x1E6E000C, 0x7FFF5C01);
1321 data = 0;
1322 if (param->wodt) {
1323 data = 0x300;
1324 }
1325 if (param->rodt) {
1326 data = data | 0x3000 | ((param->reg_AC2 & 0x60000) >> 3);
1327 }
1328 moutdwm(ast, 0x1E6E0034, data | 0x3);
1329
1330 /* Wait DQI delay lock */
1331 do {
1332 data = mindwm(ast, 0x1E6E0080);
1333 } while (!(data & 0x40000000));
1334 /* Wait DQSI delay lock */
1335 do {
1336 data = mindwm(ast, 0x1E6E0020);
1337 } while (!(data & 0x00000800));
1338 /* Calibrate the DQSI delay */
1339 cbr_dll2(ast, param);
1340
1341 moutdwm(ast, 0x1E6E0120, param->reg_FREQ);
1342 /* ECC Memory Initialization */
1343#ifdef ECC
1344 moutdwm(ast, 0x1E6E007C, 0x00000000);
1345 moutdwm(ast, 0x1E6E0070, 0x221);
1346 do {
1347 data = mindwm(ast, 0x1E6E0070);
1348 } while (!(data & 0x00001000));
1349 moutdwm(ast, 0x1E6E0070, 0x00000000);
1350 moutdwm(ast, 0x1E6E0050, 0x80000000);
1351 moutdwm(ast, 0x1E6E0050, 0x00000000);
1352#endif
1353
1355}
1356
1357static void get_ddr2_info(struct ast_private *ast, struct ast2300_dram_param *param)
1358{
1359 u32 trap, trap_AC2, trap_MRS;
1360
1361 moutdwm(ast, 0x1E6E2000, 0x1688A8A8);
1362
1363	/* Get trap info */
1364 trap = (mindwm(ast, 0x1E6E2070) >> 25) & 0x3;
1365 trap_AC2 = (trap << 20) | (trap << 16);
1366 trap_AC2 += 0x00110000;
1367 trap_MRS = 0x00000040 | (trap << 4);
1368
1370 param->reg_MADJ = 0x00034C4C;
1371 param->reg_SADJ = 0x00001800;
1372 param->reg_DRV = 0x000000F0;
1373 param->reg_PERIOD = param->dram_freq;
1374 param->rodt = 0;
1375
1376 switch (param->dram_freq) {
1377 case 264:
1378 moutdwm(ast, 0x1E6E2020, 0x0130);
1379 param->wodt = 0;
1380 param->reg_AC1 = 0x11101513;
1381 param->reg_AC2 = 0x78117011;
1382 param->reg_DQSIC = 0x00000092;
1383 param->reg_MRS = 0x00000842;
1384 param->reg_EMRS = 0x00000000;
1385 param->reg_DRV = 0x000000F0;
1386 param->reg_IOZ = 0x00000034;
1387 param->reg_DQIDLY = 0x0000005A;
1388 param->reg_FREQ = 0x00004AC0;
1389 param->madj_max = 138;
1390 param->dll2_finetune_step = 3;
1391 break;
1392 case 336:
1393 moutdwm(ast, 0x1E6E2020, 0x0190);
1394 param->wodt = 1;
1395 param->reg_AC1 = 0x22202613;
1396 param->reg_AC2 = 0xAA009016 | trap_AC2;
1397 param->reg_DQSIC = 0x000000BA;
1398 param->reg_MRS = 0x00000A02 | trap_MRS;
1399 param->reg_EMRS = 0x00000040;
1400 param->reg_DRV = 0x000000FA;
1401 param->reg_IOZ = 0x00000034;
1402 param->reg_DQIDLY = 0x00000074;
1403 param->reg_FREQ = 0x00004DC0;
1404 param->madj_max = 96;
1405 param->dll2_finetune_step = 3;
1406 break;
1407 default:
1408 case 396:
1409 moutdwm(ast, 0x1E6E2020, 0x03F1);
1410 param->wodt = 1;
1411 param->rodt = 0;
1412 param->reg_AC1 = 0x33302714;
1413 param->reg_AC2 = 0xCC00B01B | trap_AC2;
1414 param->reg_DQSIC = 0x000000E2;
1415 param->reg_MRS = 0x00000C02 | trap_MRS;
1416 param->reg_EMRS = 0x00000040;
1417 param->reg_DRV = 0x000000FA;
1418 param->reg_IOZ = 0x00000034;
1419 param->reg_DQIDLY = 0x00000089;
1420 param->reg_FREQ = 0x000050C0;
1421 param->madj_max = 96;
1422 param->dll2_finetune_step = 4;
1423
1424 switch (param->dram_chipid) {
1425 case AST_DRAM_512Mx16:
1426 param->reg_AC2 = 0xCC00B016 | trap_AC2;
1427 break;
1428 default:
1429 case AST_DRAM_1Gx16:
1430 param->reg_AC2 = 0xCC00B01B | trap_AC2;
1431 break;
1432 case AST_DRAM_2Gx16:
1433 param->reg_AC2 = 0xCC00B02B | trap_AC2;
1434 break;
1435 case AST_DRAM_4Gx16:
1436 param->reg_AC2 = 0xCC00B03F | trap_AC2;
1437 break;
1438 }
1439
1440 break;
1441
1442 case 408:
1443 moutdwm(ast, 0x1E6E2020, 0x01F0);
1444 param->wodt = 1;
1445 param->rodt = 0;
1446 param->reg_AC1 = 0x33302714;
1447 param->reg_AC2 = 0xCC00B01B | trap_AC2;
1448 param->reg_DQSIC = 0x000000E2;
1449 param->reg_MRS = 0x00000C02 | trap_MRS;
1450 param->reg_EMRS = 0x00000040;
1451 param->reg_DRV = 0x000000FA;
1452 param->reg_IOZ = 0x00000034;
1453 param->reg_DQIDLY = 0x00000089;
1454 param->reg_FREQ = 0x000050C0;
1455 param->madj_max = 96;
1456 param->dll2_finetune_step = 4;
1457
1458 switch (param->dram_chipid) {
1459 case AST_DRAM_512Mx16:
1460 param->reg_AC2 = 0xCC00B016 | trap_AC2;
1461 break;
1462 default:
1463 case AST_DRAM_1Gx16:
1464 param->reg_AC2 = 0xCC00B01B | trap_AC2;
1465 break;
1466 case AST_DRAM_2Gx16:
1467 param->reg_AC2 = 0xCC00B02B | trap_AC2;
1468 break;
1469 case AST_DRAM_4Gx16:
1470 param->reg_AC2 = 0xCC00B03F | trap_AC2;
1471 break;
1472 }
1473
1474 break;
1475 case 456:
1476 moutdwm(ast, 0x1E6E2020, 0x0230);
1477 param->wodt = 0;
1478 param->reg_AC1 = 0x33302815;
1479 param->reg_AC2 = 0xCD44B01E;
1480 param->reg_DQSIC = 0x000000FC;
1481 param->reg_MRS = 0x00000E72;
1482 param->reg_EMRS = 0x00000000;
1483 param->reg_DRV = 0x00000000;
1484 param->reg_IOZ = 0x00000034;
1485 param->reg_DQIDLY = 0x00000097;
1486 param->reg_FREQ = 0x000052C0;
1487 param->madj_max = 88;
1488 param->dll2_finetune_step = 3;
1489 break;
1490 case 504:
1491 moutdwm(ast, 0x1E6E2020, 0x0261);
1492 param->wodt = 1;
1493 param->rodt = 1;
1494 param->reg_AC1 = 0x33302815;
1495 param->reg_AC2 = 0xDE44C022;
1496 param->reg_DQSIC = 0x00000117;
1497 param->reg_MRS = 0x00000E72;
1498 param->reg_EMRS = 0x00000040;
1499 param->reg_DRV = 0x0000000A;
1500 param->reg_IOZ = 0x00000045;
1501 param->reg_DQIDLY = 0x000000A0;
1502 param->reg_FREQ = 0x000054C0;
1503 param->madj_max = 79;
1504 param->dll2_finetune_step = 3;
1505 break;
1506 case 528:
1507 moutdwm(ast, 0x1E6E2020, 0x0120);
1508 param->wodt = 1;
1509 param->rodt = 1;
1510 param->reg_AC1 = 0x33302815;
1511 param->reg_AC2 = 0xEF44D024;
1512 param->reg_DQSIC = 0x00000125;
1513 param->reg_MRS = 0x00000E72;
1514 param->reg_EMRS = 0x00000004;
1515 param->reg_DRV = 0x000000F9;
1516 param->reg_IOZ = 0x00000045;
1517 param->reg_DQIDLY = 0x000000A7;
1518 param->reg_FREQ = 0x000055C0;
1519 param->madj_max = 76;
1520 param->dll2_finetune_step = 3;
1521 break;
1522 case 552:
1523 moutdwm(ast, 0x1E6E2020, 0x02A1);
1524 param->wodt = 1;
1525 param->rodt = 1;
1526 param->reg_AC1 = 0x43402915;
1527 param->reg_AC2 = 0xFF44E025;
1528 param->reg_DQSIC = 0x00000132;
1529 param->reg_MRS = 0x00000E72;
1530 param->reg_EMRS = 0x00000040;
1531 param->reg_DRV = 0x0000000A;
1532 param->reg_IOZ = 0x00000045;
1533 param->reg_DQIDLY = 0x000000AD;
1534 param->reg_FREQ = 0x000056C0;
1535 param->madj_max = 76;
1536 param->dll2_finetune_step = 3;
1537 break;
1538 case 576:
1539 moutdwm(ast, 0x1E6E2020, 0x0140);
1540 param->wodt = 1;
1541 param->rodt = 1;
1542 param->reg_AC1 = 0x43402915;
1543 param->reg_AC2 = 0xFF44E027;
1544 param->reg_DQSIC = 0x0000013F;
1545 param->reg_MRS = 0x00000E72;
1546 param->reg_EMRS = 0x00000004;
1547 param->reg_DRV = 0x000000F5;
1548 param->reg_IOZ = 0x00000045;
1549 param->reg_DQIDLY = 0x000000B3;
1550 param->reg_FREQ = 0x000057C0;
1551 param->madj_max = 76;
1552 param->dll2_finetune_step = 3;
1553 break;
1554 }
1555
1556 switch (param->dram_chipid) {
1557 case AST_DRAM_512Mx16:
1558 param->dram_config = 0x100;
1559 break;
1560 default:
1561 case AST_DRAM_1Gx16:
1562 param->dram_config = 0x121;
1563 break;
1564 case AST_DRAM_2Gx16:
1565 param->dram_config = 0x122;
1566 break;
1567 case AST_DRAM_4Gx16:
1568 param->dram_config = 0x123;
1569 break;
1570	} /* switch size */
1571
1572 switch (param->vram_size) {
1573 default:
1574 case AST_VIDMEM_SIZE_8M:
1575 param->dram_config |= 0x00;
1576 break;
1577 case AST_VIDMEM_SIZE_16M:
1578 param->dram_config |= 0x04;
1579 break;
1580 case AST_VIDMEM_SIZE_32M:
1581 param->dram_config |= 0x08;
1582 break;
1583 case AST_VIDMEM_SIZE_64M:
1584 param->dram_config |= 0x0c;
1585 break;
1586 }
1587}
1588
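/*
 * DDR2 variant of the bring-up above; differs mainly in the DLL setup
 * values written to 0x1E6E0018, the extra EMRS handshake (OCD
 * calibration default/exit) and the ODT encoding in 0x1E6E0034.
 */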
1589static void ddr2_init(struct ast_private *ast, struct ast2300_dram_param *param)
1590{
1591 u32 data, data2;
1592
1593 moutdwm(ast, 0x1E6E0000, 0xFC600309);
1594 moutdwm(ast, 0x1E6E0018, 0x00000100);
1595 moutdwm(ast, 0x1E6E0024, 0x00000000);
1596 moutdwm(ast, 0x1E6E0064, param->reg_MADJ);
1597 moutdwm(ast, 0x1E6E0068, param->reg_SADJ);
1598 udelay(10);
1599 moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000);
1600 udelay(10);
1601
1602 moutdwm(ast, 0x1E6E0004, param->dram_config);
1603 moutdwm(ast, 0x1E6E0008, 0x90040f);
1604 moutdwm(ast, 0x1E6E0010, param->reg_AC1);
1605 moutdwm(ast, 0x1E6E0014, param->reg_AC2);
1606 moutdwm(ast, 0x1E6E0020, param->reg_DQSIC);
1607 moutdwm(ast, 0x1E6E0080, 0x00000000);
1608 moutdwm(ast, 0x1E6E0084, 0x00000000);
1609 moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY);
1610 moutdwm(ast, 0x1E6E0018, 0x4040A130);
1611 moutdwm(ast, 0x1E6E0018, 0x20402330);
1612 moutdwm(ast, 0x1E6E0038, 0x00000000);
1613 moutdwm(ast, 0x1E6E0040, 0xFF808000);
1614 moutdwm(ast, 0x1E6E0044, 0x88848466);
1615 moutdwm(ast, 0x1E6E0048, 0x44440008);
1616 moutdwm(ast, 0x1E6E004C, 0x00000000);
1617 moutdwm(ast, 0x1E6E0050, 0x80000000);
1618 moutdwm(ast, 0x1E6E0050, 0x00000000);
1619 moutdwm(ast, 0x1E6E0054, 0);
1620 moutdwm(ast, 0x1E6E0060, param->reg_DRV);
1621 moutdwm(ast, 0x1E6E006C, param->reg_IOZ);
1622 moutdwm(ast, 0x1E6E0070, 0x00000000);
1623 moutdwm(ast, 0x1E6E0074, 0x00000000);
1624 moutdwm(ast, 0x1E6E0078, 0x00000000);
1625 moutdwm(ast, 0x1E6E007C, 0x00000000);
1626
1627 /* Wait MCLK2X lock to MCLK */
1628 do {
1629 data = mindwm(ast, 0x1E6E001C);
1630 } while (!(data & 0x08000000));
1631 moutdwm(ast, 0x1E6E0034, 0x00000001);
1632 moutdwm(ast, 0x1E6E000C, 0x00005C04);
1633 udelay(10);
1634 moutdwm(ast, 0x1E6E000C, 0x00000000);
1635 moutdwm(ast, 0x1E6E0034, 0x00000000);
1636 data = mindwm(ast, 0x1E6E001C);
1637 data = (data >> 8) & 0xff;
1638 while ((data & 0x08) || ((data & 0x7) < 2) || (data < 4)) {
1639 data2 = (mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4;
1640 if ((data2 & 0xff) > param->madj_max) {
1641 break;
1642 }
1643 moutdwm(ast, 0x1E6E0064, data2);
1644 if (data2 & 0x00100000) {
1645 data2 = ((data2 & 0xff) >> 3) + 3;
1646 } else {
1647 data2 = ((data2 & 0xff) >> 2) + 5;
1648 }
1649 data = mindwm(ast, 0x1E6E0068) & 0xffff00ff;
1650 data2 += data & 0xff;
1651 data = data | (data2 << 8);
1652 moutdwm(ast, 0x1E6E0068, data);
1653 udelay(10);
1654 moutdwm(ast, 0x1E6E0064, mindwm(ast, 0x1E6E0064) | 0xC0000);
1655 udelay(10);
1656 data = mindwm(ast, 0x1E6E0018) & 0xfffff1ff;
1657 moutdwm(ast, 0x1E6E0018, data);
1658 data = data | 0x200;
1659 moutdwm(ast, 0x1E6E0018, data);
1660 do {
1661 data = mindwm(ast, 0x1E6E001C);
1662 } while (!(data & 0x08000000));
1663
1664 moutdwm(ast, 0x1E6E0034, 0x00000001);
1665 moutdwm(ast, 0x1E6E000C, 0x00005C04);
1666 udelay(10);
1667 moutdwm(ast, 0x1E6E000C, 0x00000000);
1668 moutdwm(ast, 0x1E6E0034, 0x00000000);
1669 data = mindwm(ast, 0x1E6E001C);
1670 data = (data >> 8) & 0xff;
1671 }
1672 data = mindwm(ast, 0x1E6E0018) | 0xC00;
1673 moutdwm(ast, 0x1E6E0018, data);
1674
1675 moutdwm(ast, 0x1E6E0034, 0x00000001);
1676 moutdwm(ast, 0x1E6E000C, 0x00000000);
1677 udelay(50);
1678 /* Mode Register Setting */
1679 moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100);
1680 moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
1681 moutdwm(ast, 0x1E6E0028, 0x00000005);
1682 moutdwm(ast, 0x1E6E0028, 0x00000007);
1683 moutdwm(ast, 0x1E6E0028, 0x00000003);
1684 moutdwm(ast, 0x1E6E0028, 0x00000001);
1685
1686 moutdwm(ast, 0x1E6E000C, 0x00005C08);
1687 moutdwm(ast, 0x1E6E002C, param->reg_MRS);
1688 moutdwm(ast, 0x1E6E0028, 0x00000001);
1689 moutdwm(ast, 0x1E6E0030, param->reg_EMRS | 0x380);
1690 moutdwm(ast, 0x1E6E0028, 0x00000003);
1691 moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
1692 moutdwm(ast, 0x1E6E0028, 0x00000003);
1693
1694 moutdwm(ast, 0x1E6E000C, 0x7FFF5C01);
1695 data = 0;
1696 if (param->wodt) {
1697 data = 0x500;
1698 }
1699 if (param->rodt) {
1700 data = data | 0x3000 | ((param->reg_AC2 & 0x60000) >> 3);
1701 }
1702 moutdwm(ast, 0x1E6E0034, data | 0x3);
1703 moutdwm(ast, 0x1E6E0120, param->reg_FREQ);
1704
1705 /* Wait DQI delay lock */
1706 do {
1707 data = mindwm(ast, 0x1E6E0080);
1708 } while (!(data & 0x40000000));
1709 /* Wait DQSI delay lock */
1710 do {
1711 data = mindwm(ast, 0x1E6E0020);
1712 } while (!(data & 0x00000800));
1713 /* Calibrate the DQSI delay */
1714 cbr_dll2(ast, param);
1715
1716 /* ECC Memory Initialization */
1717#ifdef ECC
1718 moutdwm(ast, 0x1E6E007C, 0x00000000);
1719 moutdwm(ast, 0x1E6E0070, 0x221);
1720 do {
1721 data = mindwm(ast, 0x1E6E0070);
1722 } while (!(data & 0x00001000));
1723 moutdwm(ast, 0x1E6E0070, 0x00000000);
1724 moutdwm(ast, 0x1E6E0050, 0x80000000);
1725 moutdwm(ast, 0x1E6E0050, 0x00000000);
1726#endif
1727
1728}
1729
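/*
 * AST2300 POST entry point.  The full DRAM init only runs in
 * stand-alone VGA mode (bit 7 of scratch register CRTC 0xD0 clear);
 * otherwise the SOC firmware is assumed to have initialised the
 * controller already.  DDR2 vs DDR3 is chosen from the strap
 * readback, and the function then waits for the DRAM-ready bit.
 */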
1730static void ast_init_dram_2300(struct drm_device *dev)
1731{
1732 struct ast_private *ast = dev->dev_private;
1733 struct ast2300_dram_param param;
1734 u32 temp;
1735 u8 reg;
1736
1737 reg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
1738 if ((reg & 0x80) == 0) {/* vga only */
1739 ast_write32(ast, 0xf004, 0x1e6e0000);
1740 ast_write32(ast, 0xf000, 0x1);
1741 ast_write32(ast, 0x12000, 0x1688a8a8);
1742 do {
1743 ;
1744 } while (ast_read32(ast, 0x12000) != 0x1);
1745
1746 ast_write32(ast, 0x10000, 0xfc600309);
1747 do {
1748 ;
1749 } while (ast_read32(ast, 0x10000) != 0x1);
1750
1751 /* Slow down CPU/AHB CLK in VGA only mode */
1752 temp = ast_read32(ast, 0x12008);
1753 temp |= 0x73;
1754 ast_write32(ast, 0x12008, temp);
1755
1756 param.dram_type = AST_DDR3;
1757 if (temp & 0x01000000)
1758 param.dram_type = AST_DDR2;
1759 param.dram_chipid = ast->dram_type;
1760 param.dram_freq = ast->mclk;
1761 param.vram_size = ast->vram_size;
1762
1763 if (param.dram_type == AST_DDR3) {
1764 get_ddr3_info(ast, &param);
1765 ddr3_init(ast, &param);
1766 } else {
1767 get_ddr2_info(ast, &param);
1768 ddr2_init(ast, &param);
1769 }
1770
1771 temp = mindwm(ast, 0x1e6e2040);
1772 moutdwm(ast, 0x1e6e2040, temp | 0x40);
1773 }
1774
1775 /* wait ready */
1776 do {
1777 reg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
1778 } while ((reg & 0x40) == 0);
1779}
1780
diff --git a/drivers/gpu/drm/ast/ast_tables.h b/drivers/gpu/drm/ast/ast_tables.h
new file mode 100644
index 000000000000..95fa6aba26bc
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_tables.h
@@ -0,0 +1,265 @@
1/*
2 * Copyright (c) 2005 ASPEED Technology Inc.
3 *
4 * Permission to use, copy, modify, distribute, and sell this software and its
5 * documentation for any purpose is hereby granted without fee, provided that
6 * the above copyright notice appear in all copies and that both that
7 * copyright notice and this permission notice appear in supporting
8 * documentation, and that the name of the authors not be used in
9 * advertising or publicity pertaining to distribution of the software without
10 * specific, written prior permission. The authors makes no representations
11 * about the suitability of this software for any purpose. It is provided
12 * "as is" without express or implied warranty.
13 *
14 * THE AUTHORS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
15 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
16 * EVENT SHALL THE AUTHORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
17 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
18 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
19 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
20 * PERFORMANCE OF THIS SOFTWARE.
21 */
22/* Ported from xf86-video-ast driver */
23
24#ifndef AST_TABLES_H
25#define AST_TABLES_H
26
27/* Std. Table Index Definition */
28#define TextModeIndex 0
29#define EGAModeIndex 1
30#define VGAModeIndex 2
31#define HiCModeIndex 3
32#define TrueCModeIndex 4
33
34#define Charx8Dot 0x00000001
35#define HalfDCLK 0x00000002
36#define DoubleScanMode 0x00000004
37#define LineCompareOff 0x00000008
38#define SyncPP 0x00000000
39#define SyncPN 0x00000040
40#define SyncNP 0x00000080
41#define SyncNN 0x000000C0
42#define HBorder 0x00000020
43#define VBorder 0x00000010
44#define WideScreenMode 0x00000100
45
46
47/* DCLK Index */
48#define VCLK25_175 0x00
49#define VCLK28_322 0x01
50#define VCLK31_5 0x02
51#define VCLK36 0x03
52#define VCLK40 0x04
53#define VCLK49_5 0x05
54#define VCLK50 0x06
55#define VCLK56_25 0x07
56#define VCLK65 0x08
57#define VCLK75 0x09
58#define VCLK78_75 0x0A
59#define VCLK94_5 0x0B
60#define VCLK108 0x0C
61#define VCLK135 0x0D
62#define VCLK157_5 0x0E
63#define VCLK162 0x0F
64/* #define VCLK193_25 0x10 */
65#define VCLK154 0x10
66#define VCLK83_5 0x11
67#define VCLK106_5 0x12
68#define VCLK146_25 0x13
69#define VCLK148_5 0x14
70
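/*
 * PLL divider settings, one {param1, param2, param3} triple per DCLK
 * index above, consumed by the mode-setting code when programming the
 * video clock.
 */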
71static struct ast_vbios_dclk_info dclk_table[] = {
72 {0x2C, 0xE7, 0x03}, /* 00: VCLK25_175 */
73 {0x95, 0x62, 0x03}, /* 01: VCLK28_322 */
74 {0x67, 0x63, 0x01}, /* 02: VCLK31_5 */
75 {0x76, 0x63, 0x01}, /* 03: VCLK36 */
76 {0xEE, 0x67, 0x01}, /* 04: VCLK40 */
77 {0x82, 0x62, 0x01}, /* 05: VCLK49_5 */
78 {0xC6, 0x64, 0x01}, /* 06: VCLK50 */
79 {0x94, 0x62, 0x01}, /* 07: VCLK56_25 */
80 {0x80, 0x64, 0x00}, /* 08: VCLK65 */
81 {0x7B, 0x63, 0x00}, /* 09: VCLK75 */
82 {0x67, 0x62, 0x00}, /* 0A: VCLK78_75 */
83 {0x7C, 0x62, 0x00}, /* 0B: VCLK94_5 */
84 {0x8E, 0x62, 0x00}, /* 0C: VCLK108 */
85 {0x85, 0x24, 0x00}, /* 0D: VCLK135 */
86 {0x67, 0x22, 0x00}, /* 0E: VCLK157_5 */
87 {0x6A, 0x22, 0x00}, /* 0F: VCLK162 */
88 {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */
89 {0xa7, 0x78, 0x80}, /* 11: VCLK83.5 */
90 {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */
91 {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */
92 {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */
93};
94
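/*
 * Standard VGA register blocks, one set per colour-depth index: misc
 * output register, then the sequencer, CRTC, attribute controller and
 * graphics controller register values.
 */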
95static struct ast_vbios_stdtable vbios_stdtable[] = {
96 /* MD_2_3_400 */
97 {
98 0x67,
99 {0x00,0x03,0x00,0x02},
100 {0x5f,0x4f,0x50,0x82,0x55,0x81,0xbf,0x1f,
101 0x00,0x4f,0x0d,0x0e,0x00,0x00,0x00,0x00,
102 0x9c,0x8e,0x8f,0x28,0x1f,0x96,0xb9,0xa3,
103 0xff},
104 {0x00,0x01,0x02,0x03,0x04,0x05,0x14,0x07,
105 0x38,0x39,0x3a,0x3b,0x3c,0x3d,0x3e,0x3f,
106 0x0c,0x00,0x0f,0x08},
107 {0x00,0x00,0x00,0x00,0x00,0x10,0x0e,0x00,
108 0xff}
109 },
110 /* Mode12/ExtEGATable */
111 {
112 0xe3,
113 {0x01,0x0f,0x00,0x06},
114 {0x5f,0x4f,0x50,0x82,0x55,0x81,0x0b,0x3e,
115 0x00,0x40,0x00,0x00,0x00,0x00,0x00,0x00,
116 0xe9,0x8b,0xdf,0x28,0x00,0xe7,0x04,0xe3,
117 0xff},
118 {0x00,0x01,0x02,0x03,0x04,0x05,0x14,0x07,
119 0x38,0x39,0x3a,0x3b,0x3c,0x3d,0x3e,0x3f,
120 0x01,0x00,0x0f,0x00},
121 {0x00,0x00,0x00,0x00,0x00,0x00,0x05,0x0f,
122 0xff}
123 },
124 /* ExtVGATable */
125 {
126 0x2f,
127 {0x01,0x0f,0x00,0x0e},
128 {0x5f,0x4f,0x50,0x82,0x54,0x80,0x0b,0x3e,
129 0x00,0x40,0x00,0x00,0x00,0x00,0x00,0x00,
130 0xea,0x8c,0xdf,0x28,0x40,0xe7,0x04,0xa3,
131 0xff},
132 {0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,
133 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f,
134 0x01,0x00,0x00,0x00},
135 {0x00,0x00,0x00,0x00,0x00,0x40,0x05,0x0f,
136 0xff}
137 },
138 /* ExtHiCTable */
139 {
140 0x2f,
141 {0x01,0x0f,0x00,0x0e},
142 {0x5f,0x4f,0x50,0x82,0x54,0x80,0x0b,0x3e,
143 0x00,0x40,0x00,0x00,0x00,0x00,0x00,0x00,
144 0xea,0x8c,0xdf,0x28,0x40,0xe7,0x04,0xa3,
145 0xff},
146 {0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,
147 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f,
148 0x01,0x00,0x00,0x00},
149 {0x00,0x00,0x00,0x00,0x00,0x00,0x05,0x0f,
150 0xff}
151 },
152 /* ExtTrueCTable */
153 {
154 0x2f,
155 {0x01,0x0f,0x00,0x0e},
156 {0x5f,0x4f,0x50,0x82,0x54,0x80,0x0b,0x3e,
157 0x00,0x40,0x00,0x00,0x00,0x00,0x00,0x00,
158 0xea,0x8c,0xdf,0x28,0x40,0xe7,0x04,0xa3,
159 0xff},
160 {0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,
161 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f,
162 0x01,0x00,0x00,0x00},
163 {0x00,0x00,0x00,0x00,0x00,0x00,0x05,0x0f,
164 0xff}
165 },
166};
167
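/*
 * Enhanced mode tables.  Each row appears to encode horizontal total,
 * active, front porch and sync width, the same four vertical values,
 * the DCLK index, flags, refresh rate, refresh-rate index and the
 * mode ID; the trailing 0xFF-rate row acts as the default/terminator.
 */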
168static struct ast_vbios_enhtable res_640x480[] = {
169 { 800, 640, 8, 96, 525, 480, 2, 2, VCLK25_175, /* 60Hz */
170 (SyncNN | HBorder | VBorder | Charx8Dot), 60, 1, 0x2E },
171 { 832, 640, 16, 40, 520, 480, 1, 3, VCLK31_5, /* 72Hz */
172 (SyncNN | HBorder | VBorder | Charx8Dot), 72, 2, 0x2E },
173 { 840, 640, 16, 64, 500, 480, 1, 3, VCLK31_5, /* 75Hz */
174 (SyncNN | Charx8Dot) , 75, 3, 0x2E },
175 { 832, 640, 56, 56, 509, 480, 1, 3, VCLK36, /* 85Hz */
176 (SyncNN | Charx8Dot) , 85, 4, 0x2E },
177 { 832, 640, 56, 56, 509, 480, 1, 3, VCLK36, /* end */
178 (SyncNN | Charx8Dot) , 0xFF, 4, 0x2E },
179};
180
181static struct ast_vbios_enhtable res_800x600[] = {
182 {1024, 800, 24, 72, 625, 600, 1, 2, VCLK36, /* 56Hz */
183 (SyncPP | Charx8Dot), 56, 1, 0x30 },
184 {1056, 800, 40, 128, 628, 600, 1, 4, VCLK40, /* 60Hz */
185 (SyncPP | Charx8Dot), 60, 2, 0x30 },
186 {1040, 800, 56, 120, 666, 600, 37, 6, VCLK50, /* 72Hz */
187 (SyncPP | Charx8Dot), 72, 3, 0x30 },
188 {1056, 800, 16, 80, 625, 600, 1, 3, VCLK49_5, /* 75Hz */
189 (SyncPP | Charx8Dot), 75, 4, 0x30 },
190 {1048, 800, 32, 64, 631, 600, 1, 3, VCLK56_25, /* 85Hz */
191 (SyncPP | Charx8Dot), 84, 5, 0x30 },
192 {1048, 800, 32, 64, 631, 600, 1, 3, VCLK56_25, /* end */
193 (SyncPP | Charx8Dot), 0xFF, 5, 0x30 },
194};
195
196
197static struct ast_vbios_enhtable res_1024x768[] = {
198 {1344, 1024, 24, 136, 806, 768, 3, 6, VCLK65, /* 60Hz */
199 (SyncNN | Charx8Dot), 60, 1, 0x31 },
200 {1328, 1024, 24, 136, 806, 768, 3, 6, VCLK75, /* 70Hz */
201 (SyncNN | Charx8Dot), 70, 2, 0x31 },
202 {1312, 1024, 16, 96, 800, 768, 1, 3, VCLK78_75, /* 75Hz */
203 (SyncPP | Charx8Dot), 75, 3, 0x31 },
204 {1376, 1024, 48, 96, 808, 768, 1, 3, VCLK94_5, /* 85Hz */
205 (SyncPP | Charx8Dot), 84, 4, 0x31 },
206 {1376, 1024, 48, 96, 808, 768, 1, 3, VCLK94_5, /* end */
207 (SyncPP | Charx8Dot), 0xFF, 4, 0x31 },
208};
209
210static struct ast_vbios_enhtable res_1280x1024[] = {
211 {1688, 1280, 48, 112, 1066, 1024, 1, 3, VCLK108, /* 60Hz */
212 (SyncPP | Charx8Dot), 60, 1, 0x32 },
213 {1688, 1280, 16, 144, 1066, 1024, 1, 3, VCLK135, /* 75Hz */
214 (SyncPP | Charx8Dot), 75, 2, 0x32 },
215 {1728, 1280, 64, 160, 1072, 1024, 1, 3, VCLK157_5, /* 85Hz */
216 (SyncPP | Charx8Dot), 85, 3, 0x32 },
217 {1728, 1280, 64, 160, 1072, 1024, 1, 3, VCLK157_5, /* end */
218 (SyncPP | Charx8Dot), 0xFF, 3, 0x32 },
219};
220
221static struct ast_vbios_enhtable res_1600x1200[] = {
222 {2160, 1600, 64, 192, 1250, 1200, 1, 3, VCLK162, /* 60Hz */
223 (SyncPP | Charx8Dot), 60, 1, 0x33 },
224 {2160, 1600, 64, 192, 1250, 1200, 1, 3, VCLK162, /* end */
225 (SyncPP | Charx8Dot), 0xFF, 1, 0x33 },
226};
227
228static struct ast_vbios_enhtable res_1920x1200[] = {
229 {2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154, /* 60Hz */
230 (SyncNP | Charx8Dot), 60, 1, 0x34 },
231 {2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154, /* 60Hz */
232 (SyncNP | Charx8Dot), 0xFF, 1, 0x34 },
233};
234
235/* 16:10 */
236static struct ast_vbios_enhtable res_1280x800[] = {
237 {1680, 1280, 72,128, 831, 800, 3, 6, VCLK83_5, /* 60Hz */
238 (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 60, 1, 0x35 },
239 {1680, 1280, 72,128, 831, 800, 3, 6, VCLK83_5, /* 60Hz */
240 (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 0xFF, 1, 0x35 },
241
242};
243
244static struct ast_vbios_enhtable res_1440x900[] = {
245 {1904, 1440, 80,152, 934, 900, 3, 6, VCLK106_5, /* 60Hz */
246 (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 60, 1, 0x36 },
247 {1904, 1440, 80,152, 934, 900, 3, 6, VCLK106_5, /* 60Hz */
248 (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 0xFF, 1, 0x36 },
249};
250
251static struct ast_vbios_enhtable res_1680x1050[] = {
252 {2240, 1680,104,176, 1089, 1050, 3, 6, VCLK146_25, /* 60Hz */
253 (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 60, 1, 0x37 },
254 {2240, 1680,104,176, 1089, 1050, 3, 6, VCLK146_25, /* 60Hz */
255 (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 0xFF, 1, 0x37 },
256};
257
258/* HDTV */
259static struct ast_vbios_enhtable res_1920x1080[] = {
260 {2200, 1920, 88, 44, 1125, 1080, 4, 5, VCLK148_5, /* 60Hz */
261 (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode), 60, 1, 0x38 },
262 {2200, 1920, 88, 44, 1125, 1080, 4, 5, VCLK148_5, /* 60Hz */
263 (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode), 0xFF, 1, 0x38 },
264};
265#endif
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
new file mode 100644
index 000000000000..6cf2adea66bc
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -0,0 +1,453 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the
6 * "Software"), to deal in the Software without restriction, including
7 * without limitation the rights to use, copy, modify, merge, publish,
8 * distribute, sub license, and/or sell copies of the Software, and to
9 * permit persons to whom the Software is furnished to do so, subject to
10 * the following conditions:
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
13 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
15 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
16 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
17 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
18 * USE OR OTHER DEALINGS IN THE SOFTWARE.
19 *
20 * The above copyright notice and this permission notice (including the
21 * next paragraph) shall be included in all copies or substantial portions
22 * of the Software.
23 *
24 */
25/*
26 * Authors: Dave Airlie <airlied@redhat.com>
27 */
28#include "drmP.h"
29#include "ast_drv.h"
30#include <ttm/ttm_page_alloc.h>
31
32static inline struct ast_private *
33ast_bdev(struct ttm_bo_device *bd)
34{
35 return container_of(bd, struct ast_private, ttm.bdev);
36}
37
38static int
39ast_ttm_mem_global_init(struct drm_global_reference *ref)
40{
41 return ttm_mem_global_init(ref->object);
42}
43
44static void
45ast_ttm_mem_global_release(struct drm_global_reference *ref)
46{
47 ttm_mem_global_release(ref->object);
48}
49
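/*
 * The TTM memory-accounting and BO global state is refcounted across
 * drivers; take a reference on both items here and drop them again in
 * ast_ttm_global_release().
 */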
50static int ast_ttm_global_init(struct ast_private *ast)
51{
52 struct drm_global_reference *global_ref;
53 int r;
54
55 global_ref = &ast->ttm.mem_global_ref;
56 global_ref->global_type = DRM_GLOBAL_TTM_MEM;
57 global_ref->size = sizeof(struct ttm_mem_global);
58 global_ref->init = &ast_ttm_mem_global_init;
59 global_ref->release = &ast_ttm_mem_global_release;
60 r = drm_global_item_ref(global_ref);
61 if (r != 0) {
62 DRM_ERROR("Failed setting up TTM memory accounting "
63 "subsystem.\n");
64 return r;
65 }
66
67 ast->ttm.bo_global_ref.mem_glob =
68 ast->ttm.mem_global_ref.object;
69 global_ref = &ast->ttm.bo_global_ref.ref;
70 global_ref->global_type = DRM_GLOBAL_TTM_BO;
71 global_ref->size = sizeof(struct ttm_bo_global);
72 global_ref->init = &ttm_bo_global_init;
73 global_ref->release = &ttm_bo_global_release;
74 r = drm_global_item_ref(global_ref);
75 if (r != 0) {
76 DRM_ERROR("Failed setting up TTM BO subsystem.\n");
77 drm_global_item_unref(&ast->ttm.mem_global_ref);
78 return r;
79 }
80 return 0;
81}
82
83void
84ast_ttm_global_release(struct ast_private *ast)
85{
86 if (ast->ttm.mem_global_ref.release == NULL)
87 return;
88
89 drm_global_item_unref(&ast->ttm.bo_global_ref.ref);
90 drm_global_item_unref(&ast->ttm.mem_global_ref);
91 ast->ttm.mem_global_ref.release = NULL;
92}
93
94
95static void ast_bo_ttm_destroy(struct ttm_buffer_object *tbo)
96{
97 struct ast_bo *bo;
98
99 bo = container_of(tbo, struct ast_bo, bo);
100
101 drm_gem_object_release(&bo->gem);
102 kfree(bo);
103}
104
105bool ast_ttm_bo_is_ast_bo(struct ttm_buffer_object *bo)
106{
107 if (bo->destroy == &ast_bo_ttm_destroy)
108 return true;
109 return false;
110}
111
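/*
 * Two placement domains: cacheable system memory for evicted buffers,
 * and write-combined VRAM (PCI BAR 0) for scanout, handed to the
 * generic range manager.
 */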
112static int
113ast_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
114 struct ttm_mem_type_manager *man)
115{
116 switch (type) {
117 case TTM_PL_SYSTEM:
118 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
119 man->available_caching = TTM_PL_MASK_CACHING;
120 man->default_caching = TTM_PL_FLAG_CACHED;
121 break;
122 case TTM_PL_VRAM:
123 man->func = &ttm_bo_manager_func;
124 man->flags = TTM_MEMTYPE_FLAG_FIXED |
125 TTM_MEMTYPE_FLAG_MAPPABLE;
126 man->available_caching = TTM_PL_FLAG_UNCACHED |
127 TTM_PL_FLAG_WC;
128 man->default_caching = TTM_PL_FLAG_WC;
129 break;
130 default:
131 DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
132 return -EINVAL;
133 }
134 return 0;
135}
136
137static void
138ast_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
139{
140 struct ast_bo *astbo = ast_bo(bo);
141
142 if (!ast_ttm_bo_is_ast_bo(bo))
143 return;
144
145 ast_ttm_placement(astbo, TTM_PL_FLAG_SYSTEM);
146 *pl = astbo->placement;
147}
148
149static int ast_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
150{
151 return 0;
152}
153
154static int ast_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
155 struct ttm_mem_reg *mem)
156{
157 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
158 struct ast_private *ast = ast_bdev(bdev);
159
160 mem->bus.addr = NULL;
161 mem->bus.offset = 0;
162 mem->bus.size = mem->num_pages << PAGE_SHIFT;
163 mem->bus.base = 0;
164 mem->bus.is_iomem = false;
165 if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
166 return -EINVAL;
167 switch (mem->mem_type) {
168 case TTM_PL_SYSTEM:
169 /* system memory */
170 return 0;
171 case TTM_PL_VRAM:
172 mem->bus.offset = mem->start << PAGE_SHIFT;
173 mem->bus.base = pci_resource_start(ast->dev->pdev, 0);
174 mem->bus.is_iomem = true;
175 break;
176 default:
177 return -EINVAL;
178 break;
179 }
180 return 0;
181}
182
183static void ast_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
184{
185}
186
187static int ast_bo_move(struct ttm_buffer_object *bo,
188 bool evict, bool interruptible,
189 bool no_wait_reserve, bool no_wait_gpu,
190 struct ttm_mem_reg *new_mem)
191{
192 int r;
193 r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
194 return r;
195}
196
197
198static void ast_ttm_backend_destroy(struct ttm_tt *tt)
199{
200 ttm_tt_fini(tt);
201 kfree(tt);
202}
203
204static struct ttm_backend_func ast_tt_backend_func = {
205 .destroy = &ast_ttm_backend_destroy,
206};
207
208
209struct ttm_tt *ast_ttm_tt_create(struct ttm_bo_device *bdev,
210 unsigned long size, uint32_t page_flags,
211 struct page *dummy_read_page)
212{
213 struct ttm_tt *tt;
214
215 tt = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL);
216 if (tt == NULL)
217 return NULL;
218 tt->func = &ast_tt_backend_func;
219 if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
220 kfree(tt);
221 return NULL;
222 }
223 return tt;
224}
225
226static int ast_ttm_tt_populate(struct ttm_tt *ttm)
227{
228 return ttm_pool_populate(ttm);
229}
230
231static void ast_ttm_tt_unpopulate(struct ttm_tt *ttm)
232{
233 ttm_pool_unpopulate(ttm);
234}
235
236struct ttm_bo_driver ast_bo_driver = {
237 .ttm_tt_create = ast_ttm_tt_create,
238 .ttm_tt_populate = ast_ttm_tt_populate,
239 .ttm_tt_unpopulate = ast_ttm_tt_unpopulate,
240 .init_mem_type = ast_bo_init_mem_type,
241 .evict_flags = ast_bo_evict_flags,
242 .move = ast_bo_move,
243 .verify_access = ast_bo_verify_access,
244 .io_mem_reserve = &ast_ttm_io_mem_reserve,
245 .io_mem_free = &ast_ttm_io_mem_free,
246};
247
248int ast_mm_init(struct ast_private *ast)
249{
250 int ret;
251 struct drm_device *dev = ast->dev;
252 struct ttm_bo_device *bdev = &ast->ttm.bdev;
253
254 ret = ast_ttm_global_init(ast);
255 if (ret)
256 return ret;
257
258 ret = ttm_bo_device_init(&ast->ttm.bdev,
259 ast->ttm.bo_global_ref.ref.object,
260 &ast_bo_driver, DRM_FILE_PAGE_OFFSET,
261 true);
262 if (ret) {
263 DRM_ERROR("Error initialising bo driver; %d\n", ret);
264 return ret;
265 }
266
267 ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
268 ast->vram_size >> PAGE_SHIFT);
269 if (ret) {
270 DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
271 return ret;
272 }
273
274 ast->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0),
275 pci_resource_len(dev->pdev, 0),
276 DRM_MTRR_WC);
277
278 return 0;
279}
280
281void ast_mm_fini(struct ast_private *ast)
282{
283 struct drm_device *dev = ast->dev;
284 ttm_bo_device_release(&ast->ttm.bdev);
285
286 ast_ttm_global_release(ast);
287
288 if (ast->fb_mtrr >= 0) {
289 drm_mtrr_del(ast->fb_mtrr,
290 pci_resource_start(dev->pdev, 0),
291 pci_resource_len(dev->pdev, 0), DRM_MTRR_WC);
292 ast->fb_mtrr = -1;
293 }
294}
295
296void ast_ttm_placement(struct ast_bo *bo, int domain)
297{
298 u32 c = 0;
299 bo->placement.fpfn = 0;
300 bo->placement.lpfn = 0;
301 bo->placement.placement = bo->placements;
302 bo->placement.busy_placement = bo->placements;
303 if (domain & TTM_PL_FLAG_VRAM)
304 bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
305 if (domain & TTM_PL_FLAG_SYSTEM)
306 bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
307 if (!c)
308 bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
309 bo->placement.num_placement = c;
310 bo->placement.num_busy_placement = c;
311}
312
313int ast_bo_reserve(struct ast_bo *bo, bool no_wait)
314{
315 int ret;
316
317 ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
318 if (ret) {
319 if (ret != -ERESTARTSYS)
320 DRM_ERROR("reserve failed %p\n", bo);
321 return ret;
322 }
323 return 0;
324}
325
326void ast_bo_unreserve(struct ast_bo *bo)
327{
328 ttm_bo_unreserve(&bo->bo);
329}
330
331int ast_bo_create(struct drm_device *dev, int size, int align,
332 uint32_t flags, struct ast_bo **pastbo)
333{
334 struct ast_private *ast = dev->dev_private;
335 struct ast_bo *astbo;
336 size_t acc_size;
337 int ret;
338
339 astbo = kzalloc(sizeof(struct ast_bo), GFP_KERNEL);
340 if (!astbo)
341 return -ENOMEM;
342
343 ret = drm_gem_object_init(dev, &astbo->gem, size);
344 if (ret) {
345 kfree(astbo);
346 return ret;
347 }
348
349 astbo->gem.driver_private = NULL;
350 astbo->bo.bdev = &ast->ttm.bdev;
351
352 ast_ttm_placement(astbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
353
354 acc_size = ttm_bo_dma_acc_size(&ast->ttm.bdev, size,
355 sizeof(struct ast_bo));
356
357 ret = ttm_bo_init(&ast->ttm.bdev, &astbo->bo, size,
358 ttm_bo_type_device, &astbo->placement,
359 align >> PAGE_SHIFT, 0, false, NULL, acc_size,
360 NULL, ast_bo_ttm_destroy);
361 if (ret)
362 return ret;
363
364 *pastbo = astbo;
365 return 0;
366}
367
368static inline u64 ast_bo_gpu_offset(struct ast_bo *bo)
369{
370 return bo->bo.offset;
371}
372
373int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr)
374{
375 int i, ret;
376
377	if (bo->pin_count) {
378		bo->pin_count++;
379		if (gpu_addr)
380			*gpu_addr = ast_bo_gpu_offset(bo);
381		return 0;
382	}
383 ast_ttm_placement(bo, pl_flag);
384 for (i = 0; i < bo->placement.num_placement; i++)
385 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
386 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
387 if (ret)
388 return ret;
389
390 bo->pin_count = 1;
391 if (gpu_addr)
392 *gpu_addr = ast_bo_gpu_offset(bo);
393 return 0;
394}
395
396int ast_bo_unpin(struct ast_bo *bo)
397{
398 int i, ret;
399 if (!bo->pin_count) {
400 DRM_ERROR("unpin bad %p\n", bo);
401 return 0;
402 }
403 bo->pin_count--;
404 if (bo->pin_count)
405 return 0;
406
407 for (i = 0; i < bo->placement.num_placement ; i++)
408 bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
409 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
410 if (ret)
411 return ret;
412
413 return 0;
414}
415
416int ast_bo_push_sysram(struct ast_bo *bo)
417{
418 int i, ret;
419 if (!bo->pin_count) {
420 DRM_ERROR("unpin bad %p\n", bo);
421 return 0;
422 }
423 bo->pin_count--;
424 if (bo->pin_count)
425 return 0;
426
427 if (bo->kmap.virtual)
428 ttm_bo_kunmap(&bo->kmap);
429
430 ast_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
431 for (i = 0; i < bo->placement.num_placement ; i++)
432 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
433
434 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
435 if (ret) {
436 DRM_ERROR("pushing to VRAM failed\n");
437 return ret;
438 }
439 return 0;
440}
441
442int ast_mmap(struct file *filp, struct vm_area_struct *vma)
443{
444 struct drm_file *file_priv;
445 struct ast_private *ast;
446
447 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
448 return drm_mmap(filp, vma);
449
450 file_priv = filp->private_data;
451 ast = file_priv->minor->dev->dev_private;
452 return ttm_bo_mmap(filp, vma, &ast->ttm.bdev);
453}
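
The helpers above chain together in a fixed order: create the GEM-backed BO, reserve it, pin it into a placement domain, then unreserve. Below is a minimal sketch of a caller, using only the functions defined in this file; the helper name and the surrounding error handling are illustrative, not part of the driver.

	static int ast_alloc_scanout_bo_sketch(struct drm_device *dev, int size)
	{
		struct ast_bo *bo;
		u64 gpu_addr;
		int ret;

		/* create a GEM-backed TTM buffer object */
		ret = ast_bo_create(dev, size, PAGE_SIZE, 0, &bo);
		if (ret)
			return ret;

		/* TTM requires the BO to be reserved across validate/pin */
		ret = ast_bo_reserve(bo, false);
		if (ret)
			return ret;

		/* pin into VRAM and learn the scanout offset */
		ret = ast_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
		ast_bo_unreserve(bo);
		return ret;
	}
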
diff --git a/drivers/gpu/drm/cirrus/Kconfig b/drivers/gpu/drm/cirrus/Kconfig
new file mode 100644
index 000000000000..fc154dd75296
--- /dev/null
+++ b/drivers/gpu/drm/cirrus/Kconfig
@@ -0,0 +1,12 @@
1config DRM_CIRRUS_QEMU
2 tristate "Cirrus driver for QEMU emulated device"
3 depends on DRM && PCI && EXPERIMENTAL
4 select FB_SYS_FILLRECT
5 select FB_SYS_COPYAREA
6 select FB_SYS_IMAGEBLIT
7 select DRM_KMS_HELPER
8 select DRM_TTM
9 help
10	  This is a KMS driver for the Cirrus device emulated by qemu.
11	  It is *NOT* intended for real Cirrus hardware. It requires
12	  the modesetting userspace X.org driver.
diff --git a/drivers/gpu/drm/cirrus/Makefile b/drivers/gpu/drm/cirrus/Makefile
new file mode 100644
index 000000000000..69ffe7006d55
--- /dev/null
+++ b/drivers/gpu/drm/cirrus/Makefile
@@ -0,0 +1,5 @@
1ccflags-y := -Iinclude/drm
2cirrus-y := cirrus_main.o cirrus_mode.o \
3 cirrus_drv.o cirrus_fbdev.o cirrus_ttm.o
4
5obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus.o
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
new file mode 100644
index 000000000000..d7038230b71e
--- /dev/null
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -0,0 +1,108 @@
1/*
2 * Copyright 2012 Red Hat <mjg@redhat.com>
3 *
4 * This file is subject to the terms and conditions of the GNU General
5 * Public License version 2. See the file COPYING in the main
6 * directory of this archive for more details.
7 *
8 * Authors: Matthew Garrett
9 * Dave Airlie
10 */
11#include <linux/module.h>
12#include <linux/console.h>
13#include "drmP.h"
14#include "drm.h"
15
16#include "cirrus_drv.h"
17
18int cirrus_modeset = -1;
19
20MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
21module_param_named(modeset, cirrus_modeset, int, 0400);
22
23/*
24 * This is the generic driver code. This binds the driver to the drm core,
25 * which then performs further device association and calls our graphics init
26 * functions
27 */
28
29static struct drm_driver driver;
30
31/* only bind to the cirrus chip in qemu */
32static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
33 { PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5446, 0x1af4, 0x1100, 0,
34 0, 0 },
35 {0,}
36};
37
38static int __devinit
39cirrus_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
40{
41 return drm_get_pci_dev(pdev, ent, &driver);
42}
43
44static void cirrus_pci_remove(struct pci_dev *pdev)
45{
46 struct drm_device *dev = pci_get_drvdata(pdev);
47
48 drm_put_dev(dev);
49}
50
51static const struct file_operations cirrus_driver_fops = {
52 .owner = THIS_MODULE,
53 .open = drm_open,
54 .release = drm_release,
55 .unlocked_ioctl = drm_ioctl,
56 .mmap = cirrus_mmap,
57 .poll = drm_poll,
58 .fasync = drm_fasync,
59};
60static struct drm_driver driver = {
61 .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_USE_MTRR,
62 .load = cirrus_driver_load,
63 .unload = cirrus_driver_unload,
64 .fops = &cirrus_driver_fops,
65 .name = DRIVER_NAME,
66 .desc = DRIVER_DESC,
67 .date = DRIVER_DATE,
68 .major = DRIVER_MAJOR,
69 .minor = DRIVER_MINOR,
70 .patchlevel = DRIVER_PATCHLEVEL,
71 .gem_init_object = cirrus_gem_init_object,
72 .gem_free_object = cirrus_gem_free_object,
73 .dumb_create = cirrus_dumb_create,
74 .dumb_map_offset = cirrus_dumb_mmap_offset,
75 .dumb_destroy = cirrus_dumb_destroy,
76};
77
78static struct pci_driver cirrus_pci_driver = {
79 .name = DRIVER_NAME,
80 .id_table = pciidlist,
81 .probe = cirrus_pci_probe,
82 .remove = cirrus_pci_remove,
83};
84
85static int __init cirrus_init(void)
86{
87#ifdef CONFIG_VGA_CONSOLE
88 if (vgacon_text_force() && cirrus_modeset == -1)
89 return -EINVAL;
90#endif
91
92 if (cirrus_modeset == 0)
93 return -EINVAL;
94 return drm_pci_init(&driver, &cirrus_pci_driver);
95}
96
97static void __exit cirrus_exit(void)
98{
99 drm_pci_exit(&driver, &cirrus_pci_driver);
100}
101
102module_init(cirrus_init);
103module_exit(cirrus_exit);
104
105MODULE_DEVICE_TABLE(pci, pciidlist);
106MODULE_AUTHOR(DRIVER_AUTHOR);
107MODULE_DESCRIPTION(DRIVER_DESC);
108MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
new file mode 100644
index 000000000000..21bdfa8836f7
--- /dev/null
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -0,0 +1,246 @@
1/*
2 * Copyright 2012 Red Hat
3 *
4 * This file is subject to the terms and conditions of the GNU General
5 * Public License version 2. See the file COPYING in the main
6 * directory of this archive for more details.
7 *
8 * Authors: Matthew Garrett
9 * Dave Airlie
10 */
11#ifndef __CIRRUS_DRV_H__
12#define __CIRRUS_DRV_H__
13
14#include <video/vga.h>
15
16#include <drm/drm_fb_helper.h>
17
18#include "ttm/ttm_bo_api.h"
19#include "ttm/ttm_bo_driver.h"
20#include "ttm/ttm_placement.h"
21#include "ttm/ttm_memory.h"
22#include "ttm/ttm_module.h"
23
24#define DRIVER_AUTHOR "Matthew Garrett"
25
26#define DRIVER_NAME "cirrus"
27#define DRIVER_DESC "qemu Cirrus emulation"
28#define DRIVER_DATE "20110418"
29
30#define DRIVER_MAJOR 1
31#define DRIVER_MINOR 0
32#define DRIVER_PATCHLEVEL 0
33
34#define CIRRUSFB_CONN_LIMIT 1
35
36#define RREG8(reg) ioread8(((void __iomem *)cdev->rmmio) + (reg))
37#define WREG8(reg, v) iowrite8(v, ((void __iomem *)cdev->rmmio) + (reg))
38#define RREG32(reg) ioread32(((void __iomem *)cdev->rmmio) + (reg))
39#define WREG32(reg, v) iowrite32(v, ((void __iomem *)cdev->rmmio) + (reg))
40
41#define SEQ_INDEX 4
42#define SEQ_DATA 5
43
44#define WREG_SEQ(reg, v) \
45 do { \
46 WREG8(SEQ_INDEX, reg); \
47 WREG8(SEQ_DATA, v); \
48 } while (0) \
49
50#define CRT_INDEX 0x14
51#define CRT_DATA 0x15
52
53#define WREG_CRT(reg, v) \
54 do { \
55 WREG8(CRT_INDEX, reg); \
56 WREG8(CRT_DATA, v); \
57 } while (0) \
58
59#define GFX_INDEX 0xe
60#define GFX_DATA 0xf
61
62#define WREG_GFX(reg, v) \
63 do { \
64 WREG8(GFX_INDEX, reg); \
65 WREG8(GFX_DATA, v); \
66 } while (0) \
67
68/*
69 * Cirrus has a "hidden" DAC register that can be accessed by writing to
70 * the pixel mask register to reset the state, then reading from the register
71 * four times. The next write will then pass to the DAC
72 */
73#define VGA_DAC_MASK 0x6
74
75#define WREG_HDR(v) \
76 do { \
77 RREG8(VGA_DAC_MASK); \
78 RREG8(VGA_DAC_MASK); \
79 RREG8(VGA_DAC_MASK); \
80 RREG8(VGA_DAC_MASK); \
81 WREG8(VGA_DAC_MASK, v); \
82 } while (0) \
83
84
85#define CIRRUS_MAX_FB_HEIGHT 4096
86#define CIRRUS_MAX_FB_WIDTH 4096
87
88#define CIRRUS_DPMS_CLEARED (-1)
89
90#define to_cirrus_crtc(x) container_of(x, struct cirrus_crtc, base)
91#define to_cirrus_encoder(x) container_of(x, struct cirrus_encoder, base)
92#define to_cirrus_framebuffer(x) container_of(x, struct cirrus_framebuffer, base)
93
94struct cirrus_crtc {
95 struct drm_crtc base;
96 u8 lut_r[256], lut_g[256], lut_b[256];
97 int last_dpms;
98 bool enabled;
99};
100
101struct cirrus_fbdev;
102struct cirrus_mode_info {
103 bool mode_config_initialized;
104 struct cirrus_crtc *crtc;
105 /* pointer to fbdev info structure */
106 struct cirrus_fbdev *gfbdev;
107};
108
109struct cirrus_encoder {
110 struct drm_encoder base;
111 int last_dpms;
112};
113
114struct cirrus_connector {
115 struct drm_connector base;
116};
117
118struct cirrus_framebuffer {
119 struct drm_framebuffer base;
120 struct drm_gem_object *obj;
121};
122
123struct cirrus_mc {
124 resource_size_t vram_size;
125 resource_size_t vram_base;
126};
127
128struct cirrus_device {
129 struct drm_device *dev;
130 unsigned long flags;
131
132 resource_size_t rmmio_base;
133 resource_size_t rmmio_size;
134 void __iomem *rmmio;
135
136 struct cirrus_mc mc;
137 struct cirrus_mode_info mode_info;
138
139 int num_crtc;
140 int fb_mtrr;
141
142 struct {
143 struct drm_global_reference mem_global_ref;
144 struct ttm_bo_global_ref bo_global_ref;
145 struct ttm_bo_device bdev;
146 atomic_t validate_sequence;
147 } ttm;
148
149};
150
151
152struct cirrus_fbdev {
153 struct drm_fb_helper helper;
154 struct cirrus_framebuffer gfb;
155 struct list_head fbdev_list;
156 void *sysram;
157 int size;
158};
159
160struct cirrus_bo {
161 struct ttm_buffer_object bo;
162 struct ttm_placement placement;
163 struct ttm_bo_kmap_obj kmap;
164 struct drm_gem_object gem;
165 u32 placements[3];
166 int pin_count;
167};
168#define gem_to_cirrus_bo(gobj) container_of((gobj), struct cirrus_bo, gem)
169
170static inline struct cirrus_bo *
171cirrus_bo(struct ttm_buffer_object *bo)
172{
173 return container_of(bo, struct cirrus_bo, bo);
174}
175
176
177#define to_cirrus_obj(x) container_of(x, struct cirrus_gem_object, base)
178#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
179
180 /* cirrus_mode.c */
181void cirrus_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
182 u16 blue, int regno);
183void cirrus_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
184 u16 *blue, int regno);
185
186
187 /* cirrus_main.c */
188int cirrus_device_init(struct cirrus_device *cdev,
189 struct drm_device *ddev,
190 struct pci_dev *pdev,
191 uint32_t flags);
192void cirrus_device_fini(struct cirrus_device *cdev);
193int cirrus_gem_init_object(struct drm_gem_object *obj);
194void cirrus_gem_free_object(struct drm_gem_object *obj);
195int cirrus_dumb_mmap_offset(struct drm_file *file,
196 struct drm_device *dev,
197 uint32_t handle,
198 uint64_t *offset);
199int cirrus_gem_create(struct drm_device *dev,
200 u32 size, bool iskernel,
201 struct drm_gem_object **obj);
202int cirrus_dumb_create(struct drm_file *file,
203 struct drm_device *dev,
204 struct drm_mode_create_dumb *args);
205int cirrus_dumb_destroy(struct drm_file *file,
206 struct drm_device *dev,
207 uint32_t handle);
208
209int cirrus_framebuffer_init(struct drm_device *dev,
210 struct cirrus_framebuffer *gfb,
211 struct drm_mode_fb_cmd2 *mode_cmd,
212 struct drm_gem_object *obj);
213
214 /* cirrus_display.c */
215int cirrus_modeset_init(struct cirrus_device *cdev);
216void cirrus_modeset_fini(struct cirrus_device *cdev);
217
218 /* cirrus_fbdev.c */
219int cirrus_fbdev_init(struct cirrus_device *cdev);
220void cirrus_fbdev_fini(struct cirrus_device *cdev);
221
222
223
224 /* cirrus_irq.c */
225void cirrus_driver_irq_preinstall(struct drm_device *dev);
226int cirrus_driver_irq_postinstall(struct drm_device *dev);
227void cirrus_driver_irq_uninstall(struct drm_device *dev);
228irqreturn_t cirrus_driver_irq_handler(DRM_IRQ_ARGS);
229
230 /* cirrus_kms.c */
231int cirrus_driver_load(struct drm_device *dev, unsigned long flags);
232int cirrus_driver_unload(struct drm_device *dev);
233extern struct drm_ioctl_desc cirrus_ioctls[];
234extern int cirrus_max_ioctl;
235
236int cirrus_mm_init(struct cirrus_device *cirrus);
237void cirrus_mm_fini(struct cirrus_device *cirrus);
238void cirrus_ttm_placement(struct cirrus_bo *bo, int domain);
239int cirrus_bo_create(struct drm_device *dev, int size, int align,
240 uint32_t flags, struct cirrus_bo **pcirrusbo);
241int cirrus_mmap(struct file *filp, struct vm_area_struct *vma);
242int cirrus_bo_reserve(struct cirrus_bo *bo, bool no_wait);
243void cirrus_bo_unreserve(struct cirrus_bo *bo);
244int cirrus_bo_push_sysram(struct cirrus_bo *bo);
245int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr);
246#endif /* __CIRRUS_DRV_H__ */
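
The WREG*/RREG* helpers above are not ordinary functions: they expand against a local 'cdev' pointer that must already be in scope at the call site. A short sketch of the read-modify-write idiom they are built for follows; the helper name is hypothetical, and the SR07/HDR values match the 8 bpp case programmed later in cirrus_mode.c.

	static void cirrus_example_set_8bpp(struct cirrus_device *cdev)
	{
		u8 sr07;

		WREG8(SEQ_INDEX, 0x7);		/* select sequencer reg SR07 */
		sr07 = RREG8(SEQ_DATA);		/* read the current value */
		sr07 = (sr07 & 0xe0) | 0x11;	/* keep top bits, select 8 bpp */
		WREG_SEQ(0x7, sr07);		/* write back via the macro */
		WREG_HDR(0x00);			/* hidden DAC: plain palette mode */
	}
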
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
new file mode 100644
index 000000000000..9a276a536992
--- /dev/null
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -0,0 +1,307 @@
1/*
2 * Copyright 2012 Red Hat
3 *
4 * This file is subject to the terms and conditions of the GNU General
5 * Public License version 2. See the file COPYING in the main
6 * directory of this archive for more details.
7 *
8 * Authors: Matthew Garrett
9 * Dave Airlie
10 */
11#include <linux/module.h>
12#include "drmP.h"
13#include "drm.h"
14#include "drm_fb_helper.h"
15
16#include <linux/fb.h>
17
18#include "cirrus_drv.h"
19
20static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
21 int x, int y, int width, int height)
22{
23 int i;
24 struct drm_gem_object *obj;
25 struct cirrus_bo *bo;
26 int src_offset, dst_offset;
27 int bpp = (afbdev->gfb.base.bits_per_pixel + 7)/8;
28 int ret;
29 bool unmap = false;
30
31 obj = afbdev->gfb.obj;
32 bo = gem_to_cirrus_bo(obj);
33
34 ret = cirrus_bo_reserve(bo, true);
35 if (ret) {
36 DRM_ERROR("failed to reserve fb bo\n");
37 return;
38 }
39
40 if (!bo->kmap.virtual) {
41 ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
42 if (ret) {
43 DRM_ERROR("failed to kmap fb updates\n");
44 cirrus_bo_unreserve(bo);
45 return;
46 }
47 unmap = true;
48 }
49 for (i = y; i < y + height; i++) {
50 /* assume equal stride for now */
51 src_offset = dst_offset = i * afbdev->gfb.base.pitches[0] + (x * bpp);
52 memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, width * bpp);
53
54 }
55 if (unmap)
56 ttm_bo_kunmap(&bo->kmap);
57
58 cirrus_bo_unreserve(bo);
59}
60
61static void cirrus_fillrect(struct fb_info *info,
62 const struct fb_fillrect *rect)
63{
64 struct cirrus_fbdev *afbdev = info->par;
65 sys_fillrect(info, rect);
66 cirrus_dirty_update(afbdev, rect->dx, rect->dy, rect->width,
67 rect->height);
68}
69
70static void cirrus_copyarea(struct fb_info *info,
71 const struct fb_copyarea *area)
72{
73 struct cirrus_fbdev *afbdev = info->par;
74 sys_copyarea(info, area);
75 cirrus_dirty_update(afbdev, area->dx, area->dy, area->width,
76 area->height);
77}
78
79static void cirrus_imageblit(struct fb_info *info,
80 const struct fb_image *image)
81{
82 struct cirrus_fbdev *afbdev = info->par;
83 sys_imageblit(info, image);
84 cirrus_dirty_update(afbdev, image->dx, image->dy, image->width,
85 image->height);
86}
87
88
89static struct fb_ops cirrusfb_ops = {
90 .owner = THIS_MODULE,
91 .fb_check_var = drm_fb_helper_check_var,
92 .fb_set_par = drm_fb_helper_set_par,
93 .fb_fillrect = cirrus_fillrect,
94 .fb_copyarea = cirrus_copyarea,
95 .fb_imageblit = cirrus_imageblit,
96 .fb_pan_display = drm_fb_helper_pan_display,
97 .fb_blank = drm_fb_helper_blank,
98 .fb_setcmap = drm_fb_helper_setcmap,
99};
100
101static int cirrusfb_create_object(struct cirrus_fbdev *afbdev,
102 struct drm_mode_fb_cmd2 *mode_cmd,
103 struct drm_gem_object **gobj_p)
104{
105 struct drm_device *dev = afbdev->helper.dev;
106 u32 bpp, depth;
107 u32 size;
108 struct drm_gem_object *gobj;
109
110 int ret = 0;
111 drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
112
113 if (bpp > 24)
114 return -EINVAL;
115 size = mode_cmd->pitches[0] * mode_cmd->height;
116 ret = cirrus_gem_create(dev, size, true, &gobj);
117 if (ret)
118 return ret;
119
120 *gobj_p = gobj;
121 return ret;
122}
123
124static int cirrusfb_create(struct cirrus_fbdev *gfbdev,
125 struct drm_fb_helper_surface_size *sizes)
126{
127 struct drm_device *dev = gfbdev->helper.dev;
128 struct cirrus_device *cdev = gfbdev->helper.dev->dev_private;
129 struct fb_info *info;
130 struct drm_framebuffer *fb;
131 struct drm_mode_fb_cmd2 mode_cmd;
132 struct device *device = &dev->pdev->dev;
133 void *sysram;
134 struct drm_gem_object *gobj = NULL;
135 struct cirrus_bo *bo = NULL;
136 int size, ret;
137
138 mode_cmd.width = sizes->surface_width;
139 mode_cmd.height = sizes->surface_height;
140 mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
141 mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
142 sizes->surface_depth);
143 size = mode_cmd.pitches[0] * mode_cmd.height;
144
145 ret = cirrusfb_create_object(gfbdev, &mode_cmd, &gobj);
146 if (ret) {
147 DRM_ERROR("failed to create fbcon backing object %d\n", ret);
148 return ret;
149 }
150
151 bo = gem_to_cirrus_bo(gobj);
152
153 sysram = vmalloc(size);
154 if (!sysram)
155 return -ENOMEM;
156
157 info = framebuffer_alloc(0, device);
158 if (info == NULL)
159 return -ENOMEM;
160
161 info->par = gfbdev;
162
163 ret = cirrus_framebuffer_init(cdev->dev, &gfbdev->gfb, &mode_cmd, gobj);
164 if (ret)
165 return ret;
166
167 gfbdev->sysram = sysram;
168 gfbdev->size = size;
169
170 fb = &gfbdev->gfb.base;
171 if (!fb) {
172 DRM_INFO("fb is NULL\n");
173 return -EINVAL;
174 }
175
176 /* setup helper */
177 gfbdev->helper.fb = fb;
178 gfbdev->helper.fbdev = info;
179
180 strcpy(info->fix.id, "cirrusdrmfb");
181
182
183 info->flags = FBINFO_DEFAULT;
184 info->fbops = &cirrusfb_ops;
185
186 drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
187 drm_fb_helper_fill_var(info, &gfbdev->helper, sizes->fb_width,
188 sizes->fb_height);
189
190 /* setup aperture base/size for vesafb takeover */
191 info->apertures = alloc_apertures(1);
192 if (!info->apertures) {
193 ret = -ENOMEM;
194 goto out_iounmap;
195 }
196 info->apertures->ranges[0].base = cdev->dev->mode_config.fb_base;
197 info->apertures->ranges[0].size = cdev->mc.vram_size;
198
199 info->screen_base = sysram;
200 info->screen_size = size;
201
202 info->fix.mmio_start = 0;
203 info->fix.mmio_len = 0;
204
205 ret = fb_alloc_cmap(&info->cmap, 256, 0);
206 if (ret) {
207 DRM_ERROR("%s: can't allocate color map\n", info->fix.id);
208 ret = -ENOMEM;
209 goto out_iounmap;
210 }
211
212 DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
213 DRM_INFO("vram aper at 0x%lX\n", (unsigned long)info->fix.smem_start);
214 DRM_INFO("size %lu\n", (unsigned long)info->fix.smem_len);
215 DRM_INFO("fb depth is %d\n", fb->depth);
216 DRM_INFO(" pitch is %d\n", fb->pitches[0]);
217
218 return 0;
219out_iounmap:
220 return ret;
221}
222
223static int cirrus_fb_find_or_create_single(struct drm_fb_helper *helper,
224 struct drm_fb_helper_surface_size
225 *sizes)
226{
227 struct cirrus_fbdev *gfbdev = (struct cirrus_fbdev *)helper;
228 int new_fb = 0;
229 int ret;
230
231 if (!helper->fb) {
232 ret = cirrusfb_create(gfbdev, sizes);
233 if (ret)
234 return ret;
235 new_fb = 1;
236 }
237 return new_fb;
238}
239
240static int cirrus_fbdev_destroy(struct drm_device *dev,
241 struct cirrus_fbdev *gfbdev)
242{
243 struct fb_info *info;
244 struct cirrus_framebuffer *gfb = &gfbdev->gfb;
245
246 if (gfbdev->helper.fbdev) {
247 info = gfbdev->helper.fbdev;
248
249 unregister_framebuffer(info);
250 if (info->cmap.len)
251 fb_dealloc_cmap(&info->cmap);
252 framebuffer_release(info);
253 }
254
255 if (gfb->obj) {
256 drm_gem_object_unreference_unlocked(gfb->obj);
257 gfb->obj = NULL;
258 }
259
260 vfree(gfbdev->sysram);
261 drm_fb_helper_fini(&gfbdev->helper);
262 drm_framebuffer_cleanup(&gfb->base);
263
264 return 0;
265}
266
267static struct drm_fb_helper_funcs cirrus_fb_helper_funcs = {
268 .gamma_set = cirrus_crtc_fb_gamma_set,
269 .gamma_get = cirrus_crtc_fb_gamma_get,
270 .fb_probe = cirrus_fb_find_or_create_single,
271};
272
273int cirrus_fbdev_init(struct cirrus_device *cdev)
274{
275 struct cirrus_fbdev *gfbdev;
276 int ret;
277 int bpp_sel = 24;
278
279 /*bpp_sel = 8;*/
280 gfbdev = kzalloc(sizeof(struct cirrus_fbdev), GFP_KERNEL);
281 if (!gfbdev)
282 return -ENOMEM;
283
284 cdev->mode_info.gfbdev = gfbdev;
285 gfbdev->helper.funcs = &cirrus_fb_helper_funcs;
286
287 ret = drm_fb_helper_init(cdev->dev, &gfbdev->helper,
288 cdev->num_crtc, CIRRUSFB_CONN_LIMIT);
289 if (ret) {
290 kfree(gfbdev);
291 return ret;
292 }
293 drm_fb_helper_single_add_all_connectors(&gfbdev->helper);
294 drm_fb_helper_initial_config(&gfbdev->helper, bpp_sel);
295
296 return 0;
297}
298
299void cirrus_fbdev_fini(struct cirrus_device *cdev)
300{
301 if (!cdev->mode_info.gfbdev)
302 return;
303
304 cirrus_fbdev_destroy(cdev->dev, cdev->mode_info.gfbdev);
305 kfree(cdev->mode_info.gfbdev);
306 cdev->mode_info.gfbdev = NULL;
307}
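
cirrus_dirty_update() above copies one scanline at a time, computing the same byte offset into both the vmalloc shadow and the kmapped VRAM: line * pitch + x * bytes_per_pixel. A standalone arithmetic check of that formula; the numbers are made up for illustration.

	#include <stdio.h>

	int main(void)
	{
		int bits_per_pixel = 24;
		int bpp = (bits_per_pixel + 7) / 8;	/* 3 bytes per pixel */
		int pitch = 1024 * bpp;			/* 1024-wide fb, no padding */
		int x = 10, y = 3;

		/* first dirty byte of the first dirty scanline */
		printf("offset = %d\n", y * pitch + x * bpp);
		return 0;
	}
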
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
new file mode 100644
index 000000000000..e3c122578417
--- /dev/null
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -0,0 +1,335 @@
1/*
2 * Copyright 2012 Red Hat
3 *
4 * This file is subject to the terms and conditions of the GNU General
5 * Public License version 2. See the file COPYING in the main
6 * directory of this archive for more details.
7 *
8 * Authors: Matthew Garrett
9 * Dave Airlie
10 */
11#include "drmP.h"
12#include "drm.h"
13#include "drm_crtc_helper.h"
14
15#include "cirrus_drv.h"
16
17
18static void cirrus_user_framebuffer_destroy(struct drm_framebuffer *fb)
19{
20 struct cirrus_framebuffer *cirrus_fb = to_cirrus_framebuffer(fb);
21 if (cirrus_fb->obj)
22 drm_gem_object_unreference_unlocked(cirrus_fb->obj);
23 drm_framebuffer_cleanup(fb);
24 kfree(fb);
25}
26
27static int cirrus_user_framebuffer_create_handle(struct drm_framebuffer *fb,
28 struct drm_file *file_priv,
29 unsigned int *handle)
30{
31 return 0;
32}
33
34static const struct drm_framebuffer_funcs cirrus_fb_funcs = {
35 .destroy = cirrus_user_framebuffer_destroy,
36 .create_handle = cirrus_user_framebuffer_create_handle,
37};
38
39int cirrus_framebuffer_init(struct drm_device *dev,
40 struct cirrus_framebuffer *gfb,
41 struct drm_mode_fb_cmd2 *mode_cmd,
42 struct drm_gem_object *obj)
43{
44 int ret;
45
46 ret = drm_framebuffer_init(dev, &gfb->base, &cirrus_fb_funcs);
47 if (ret) {
48 DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
49 return ret;
50 }
51 drm_helper_mode_fill_fb_struct(&gfb->base, mode_cmd);
52 gfb->obj = obj;
53 return 0;
54}
55
56static struct drm_framebuffer *
57cirrus_user_framebuffer_create(struct drm_device *dev,
58 struct drm_file *filp,
59 struct drm_mode_fb_cmd2 *mode_cmd)
60{
61 struct drm_gem_object *obj;
62 struct cirrus_framebuffer *cirrus_fb;
63 int ret;
64 u32 bpp, depth;
65
66 drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
67 /* cirrus can't handle > 24bpp framebuffers at all */
68 if (bpp > 24)
69 return ERR_PTR(-EINVAL);
70
71 obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
72 if (obj == NULL)
73 return ERR_PTR(-ENOENT);
74
75 cirrus_fb = kzalloc(sizeof(*cirrus_fb), GFP_KERNEL);
76 if (!cirrus_fb) {
77 drm_gem_object_unreference_unlocked(obj);
78 return ERR_PTR(-ENOMEM);
79 }
80
81 ret = cirrus_framebuffer_init(dev, cirrus_fb, mode_cmd, obj);
82 if (ret) {
83 drm_gem_object_unreference_unlocked(obj);
84 kfree(cirrus_fb);
85 return ERR_PTR(ret);
86 }
87 return &cirrus_fb->base;
88}
89
90static const struct drm_mode_config_funcs cirrus_mode_funcs = {
91 .fb_create = cirrus_user_framebuffer_create,
92};
93
94/* Unmap the register MMIO and release the VRAM reservation */
95static void cirrus_vram_fini(struct cirrus_device *cdev)
96{
97 iounmap(cdev->rmmio);
98 cdev->rmmio = NULL;
99 if (cdev->mc.vram_base)
100 release_mem_region(cdev->mc.vram_base, cdev->mc.vram_size);
101}
102
103/* Map the framebuffer from the card and configure the core */
104static int cirrus_vram_init(struct cirrus_device *cdev)
105{
106 /* BAR 0 is VRAM */
107 cdev->mc.vram_base = pci_resource_start(cdev->dev->pdev, 0);
108 /* We have 4MB of VRAM */
109 cdev->mc.vram_size = 4 * 1024 * 1024;
110
111 if (!request_mem_region(cdev->mc.vram_base, cdev->mc.vram_size,
112 "cirrusdrmfb_vram")) {
113 DRM_ERROR("can't reserve VRAM\n");
114 return -ENXIO;
115 }
116
117 return 0;
118}
119
120/*
121 * Our emulated hardware has two sets of memory. One is video RAM and can
122 * simply be used as a linear framebuffer - the other provides mmio access
123 * to the display registers. The latter can also be accessed via IO port
124 * access, but we map the range and use mmio to program them instead
125 */
126
127int cirrus_device_init(struct cirrus_device *cdev,
128 struct drm_device *ddev,
129 struct pci_dev *pdev, uint32_t flags)
130{
131 int ret;
132
133 cdev->dev = ddev;
134 cdev->flags = flags;
135
136 /* Hardcode the number of CRTCs to 1 */
137 cdev->num_crtc = 1;
138
139 /* BAR 0 is the framebuffer, BAR 1 contains registers */
140 cdev->rmmio_base = pci_resource_start(cdev->dev->pdev, 1);
141 cdev->rmmio_size = pci_resource_len(cdev->dev->pdev, 1);
142
143 if (!request_mem_region(cdev->rmmio_base, cdev->rmmio_size,
144 "cirrusdrmfb_mmio")) {
145 DRM_ERROR("can't reserve mmio registers\n");
146 return -ENOMEM;
147 }
148
149 cdev->rmmio = ioremap(cdev->rmmio_base, cdev->rmmio_size);
150
151 if (cdev->rmmio == NULL)
152 return -ENOMEM;
153
154 ret = cirrus_vram_init(cdev);
155 if (ret) {
156 release_mem_region(cdev->rmmio_base, cdev->rmmio_size);
157 return ret;
158 }
159
160 return 0;
161}
162
163void cirrus_device_fini(struct cirrus_device *cdev)
164{
165 release_mem_region(cdev->rmmio_base, cdev->rmmio_size);
166 cirrus_vram_fini(cdev);
167}
168
169/*
170 * Functions here will be called by the core once it's bound the driver to
171 * a PCI device
172 */
173
174int cirrus_driver_load(struct drm_device *dev, unsigned long flags)
175{
176 struct cirrus_device *cdev;
177 int r;
178
179 cdev = kzalloc(sizeof(struct cirrus_device), GFP_KERNEL);
180 if (cdev == NULL)
181 return -ENOMEM;
182 dev->dev_private = (void *)cdev;
183
184 r = cirrus_device_init(cdev, dev, dev->pdev, flags);
185 if (r) {
186 dev_err(&dev->pdev->dev, "Fatal error during GPU init: %d\n", r);
187 goto out;
188 }
189
190	r = cirrus_mm_init(cdev);
191	if (r) {
192		dev_err(&dev->pdev->dev, "Fatal error during mm init: %d\n", r);
193		goto out;
194	}
195	r = cirrus_modeset_init(cdev);
196	if (r)
197		dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r);
198 dev->mode_config.funcs = (void *)&cirrus_mode_funcs;
199out:
200 if (r)
201 cirrus_driver_unload(dev);
202 return r;
203}
204
205int cirrus_driver_unload(struct drm_device *dev)
206{
207 struct cirrus_device *cdev = dev->dev_private;
208
209 if (cdev == NULL)
210 return 0;
211 cirrus_modeset_fini(cdev);
212 cirrus_mm_fini(cdev);
213 cirrus_device_fini(cdev);
214 kfree(cdev);
215 dev->dev_private = NULL;
216 return 0;
217}
218
219int cirrus_gem_create(struct drm_device *dev,
220 u32 size, bool iskernel,
221 struct drm_gem_object **obj)
222{
223 struct cirrus_bo *cirrusbo;
224 int ret;
225
226 *obj = NULL;
227
228 size = roundup(size, PAGE_SIZE);
229 if (size == 0)
230 return -EINVAL;
231
232 ret = cirrus_bo_create(dev, size, 0, 0, &cirrusbo);
233 if (ret) {
234 if (ret != -ERESTARTSYS)
235 DRM_ERROR("failed to allocate GEM object\n");
236 return ret;
237 }
238 *obj = &cirrusbo->gem;
239 return 0;
240}
241
242int cirrus_dumb_create(struct drm_file *file,
243 struct drm_device *dev,
244 struct drm_mode_create_dumb *args)
245{
246 int ret;
247 struct drm_gem_object *gobj;
248 u32 handle;
249
250 args->pitch = args->width * ((args->bpp + 7) / 8);
251 args->size = args->pitch * args->height;
252
253 ret = cirrus_gem_create(dev, args->size, false,
254 &gobj);
255 if (ret)
256 return ret;
257
258 ret = drm_gem_handle_create(file, gobj, &handle);
259 drm_gem_object_unreference_unlocked(gobj);
260 if (ret)
261 return ret;
262
263 args->handle = handle;
264 return 0;
265}
266
267int cirrus_dumb_destroy(struct drm_file *file,
268 struct drm_device *dev,
269 uint32_t handle)
270{
271 return drm_gem_handle_delete(file, handle);
272}
273
274int cirrus_gem_init_object(struct drm_gem_object *obj)
275{
276 BUG();
277 return 0;
278}
279
280void cirrus_bo_unref(struct cirrus_bo **bo)
281{
282 struct ttm_buffer_object *tbo;
283
284 if ((*bo) == NULL)
285 return;
286
287 tbo = &((*bo)->bo);
288 ttm_bo_unref(&tbo);
289 if (tbo == NULL)
290 *bo = NULL;
291
292}
293
294void cirrus_gem_free_object(struct drm_gem_object *obj)
295{
296 struct cirrus_bo *cirrus_bo = gem_to_cirrus_bo(obj);
297
298 if (!cirrus_bo)
299 return;
300 cirrus_bo_unref(&cirrus_bo);
301}
302
303
304static inline u64 cirrus_bo_mmap_offset(struct cirrus_bo *bo)
305{
306 return bo->bo.addr_space_offset;
307}
308
309int
310cirrus_dumb_mmap_offset(struct drm_file *file,
311 struct drm_device *dev,
312 uint32_t handle,
313 uint64_t *offset)
314{
315 struct drm_gem_object *obj;
316 int ret;
317 struct cirrus_bo *bo;
318
319 mutex_lock(&dev->struct_mutex);
320 obj = drm_gem_object_lookup(dev, file, handle);
321 if (obj == NULL) {
322 ret = -ENOENT;
323 goto out_unlock;
324 }
325
326 bo = gem_to_cirrus_bo(obj);
327 *offset = cirrus_bo_mmap_offset(bo);
328
329 drm_gem_object_unreference(obj);
330 ret = 0;
331out_unlock:
332 mutex_unlock(&dev->struct_mutex);
333 return ret;
334
335}
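
cirrus_dumb_create() above derives pitch and size from the width and bpp that userspace supplies; userspace reaches it through the generic CREATE_DUMB ioctl. Below is a sketch of such a caller, assuming libdrm and an already-open DRM file descriptor; the function name is illustrative.

	#include <string.h>
	#include <xf86drm.h>

	int create_dumb_sketch(int drm_fd)
	{
		struct drm_mode_create_dumb creq;

		memset(&creq, 0, sizeof(creq));
		creq.width = 1024;
		creq.height = 768;
		creq.bpp = 24;	/* cirrus rejects framebuffers above 24 bpp */

		if (drmIoctl(drm_fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq) < 0)
			return -1;

		/* the driver filled in creq.pitch = width * ((bpp + 7) / 8),
		 * creq.size = pitch * height, and a GEM handle in creq.handle */
		return (int)creq.handle;
	}
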
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
new file mode 100644
index 000000000000..100f6308c509
--- /dev/null
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -0,0 +1,629 @@
1
2/*
3 * Copyright 2012 Red Hat
4 *
5 * This file is subject to the terms and conditions of the GNU General
6 * Public License version 2. See the file COPYING in the main
7 * directory of this archive for more details.
8 *
9 * Authors: Matthew Garrett
10 * Dave Airlie
11 *
12 * Portions of this code derived from cirrusfb.c:
13 * drivers/video/cirrusfb.c - driver for Cirrus Logic chipsets
14 *
15 * Copyright 1999-2001 Jeff Garzik <jgarzik@pobox.com>
16 */
17#include "drmP.h"
18#include "drm.h"
19#include "drm_crtc_helper.h"
20
21#include <video/cirrus.h>
22
23#include "cirrus_drv.h"
24
25#define CIRRUS_LUT_SIZE 256
26
27#define PALETTE_INDEX 0x8
28#define PALETTE_DATA 0x9
29
30/*
31 * This file contains setup code for the CRTC.
32 */
33
34static void cirrus_crtc_load_lut(struct drm_crtc *crtc)
35{
36 struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
37 struct drm_device *dev = crtc->dev;
38 struct cirrus_device *cdev = dev->dev_private;
39 int i;
40
41 if (!crtc->enabled)
42 return;
43
44 for (i = 0; i < CIRRUS_LUT_SIZE; i++) {
45 /* VGA registers */
46 WREG8(PALETTE_INDEX, i);
47 WREG8(PALETTE_DATA, cirrus_crtc->lut_r[i]);
48 WREG8(PALETTE_DATA, cirrus_crtc->lut_g[i]);
49 WREG8(PALETTE_DATA, cirrus_crtc->lut_b[i]);
50 }
51}
52
53/*
54 * The DRM core requires DPMS functions, but they make little sense in our
55 * case and so are just stubs
56 */
57
58static void cirrus_crtc_dpms(struct drm_crtc *crtc, int mode)
59{
60 struct drm_device *dev = crtc->dev;
61 struct cirrus_device *cdev = dev->dev_private;
62 u8 sr01, gr0e;
63
64 switch (mode) {
65 case DRM_MODE_DPMS_ON:
66 sr01 = 0x00;
67 gr0e = 0x00;
68 break;
69 case DRM_MODE_DPMS_STANDBY:
70 sr01 = 0x20;
71 gr0e = 0x02;
72 break;
73 case DRM_MODE_DPMS_SUSPEND:
74 sr01 = 0x20;
75 gr0e = 0x04;
76 break;
77 case DRM_MODE_DPMS_OFF:
78 sr01 = 0x20;
79 gr0e = 0x06;
80 break;
81 default:
82 return;
83 }
84
85 WREG8(SEQ_INDEX, 0x1);
86 sr01 |= RREG8(SEQ_DATA) & ~0x20;
87 WREG_SEQ(0x1, sr01);
88
89 WREG8(GFX_INDEX, 0xe);
90 gr0e |= RREG8(GFX_DATA) & ~0x06;
91 WREG_GFX(0xe, gr0e);
92}
93
94/*
95 * The core passes the desired mode to the CRTC code to see whether any
96 * CRTC-specific modifications need to be made to it. We're in a position
97 * to just pass that straight through, so this does nothing
98 */
99static bool cirrus_crtc_mode_fixup(struct drm_crtc *crtc,
100 struct drm_display_mode *mode,
101 struct drm_display_mode *adjusted_mode)
102{
103 return true;
104}
105
106void cirrus_set_start_address(struct drm_crtc *crtc, unsigned offset)
107{
108 struct cirrus_device *cdev = crtc->dev->dev_private;
109 u32 addr;
110 u8 tmp;
111
112 addr = offset >> 2;
113 WREG_CRT(0x0c, (u8)((addr >> 8) & 0xff));
114 WREG_CRT(0x0d, (u8)(addr & 0xff));
115
116 WREG8(CRT_INDEX, 0x1b);
117 tmp = RREG8(CRT_DATA);
118 tmp &= 0xf2;
119 tmp |= (addr >> 16) & 0x01;
120 tmp |= (addr >> 15) & 0x0c;
121 WREG_CRT(0x1b, tmp);
122 WREG8(CRT_INDEX, 0x1d);
123 tmp = RREG8(CRT_DATA);
124 tmp &= 0x7f;
125 tmp |= (addr >> 12) & 0x80;
126 WREG_CRT(0x1d, tmp);
127}
128
129/* cirrus is different - we will force move buffers out of VRAM */
130static int cirrus_crtc_do_set_base(struct drm_crtc *crtc,
131 struct drm_framebuffer *fb,
132 int x, int y, int atomic)
133{
134 struct cirrus_device *cdev = crtc->dev->dev_private;
135 struct drm_gem_object *obj;
136 struct cirrus_framebuffer *cirrus_fb;
137 struct cirrus_bo *bo;
138 int ret;
139 u64 gpu_addr;
140
141 /* push the previous fb to system ram */
142 if (!atomic && fb) {
143 cirrus_fb = to_cirrus_framebuffer(fb);
144 obj = cirrus_fb->obj;
145 bo = gem_to_cirrus_bo(obj);
146 ret = cirrus_bo_reserve(bo, false);
147 if (ret)
148 return ret;
149 cirrus_bo_push_sysram(bo);
150 cirrus_bo_unreserve(bo);
151 }
152
153 cirrus_fb = to_cirrus_framebuffer(crtc->fb);
154 obj = cirrus_fb->obj;
155 bo = gem_to_cirrus_bo(obj);
156
157 ret = cirrus_bo_reserve(bo, false);
158 if (ret)
159 return ret;
160
161 ret = cirrus_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
162 if (ret) {
163 cirrus_bo_unreserve(bo);
164 return ret;
165 }
166
167 if (&cdev->mode_info.gfbdev->gfb == cirrus_fb) {
168		/* if the console fb is being pushed in, kmap it */
169 ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
170 if (ret)
171 DRM_ERROR("failed to kmap fbcon\n");
172 }
173 cirrus_bo_unreserve(bo);
174
175 cirrus_set_start_address(crtc, (u32)gpu_addr);
176 return 0;
177}
178
179static int cirrus_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
180 struct drm_framebuffer *old_fb)
181{
182 return cirrus_crtc_do_set_base(crtc, old_fb, x, y, 0);
183}
184
185/*
186 * The meat of this driver. The core passes us a mode and we have to program
187 * it. The modesetting here is the bare minimum required to satisfy the qemu
188 * emulation of this hardware, and running this against a real device is
189 * likely to result in an inadequately programmed mode. We've already had
190 * the opportunity to modify the mode, so whatever we receive here should
191 * be something that can be correctly programmed and displayed
192 */
193static int cirrus_crtc_mode_set(struct drm_crtc *crtc,
194 struct drm_display_mode *mode,
195 struct drm_display_mode *adjusted_mode,
196 int x, int y, struct drm_framebuffer *old_fb)
197{
198 struct drm_device *dev = crtc->dev;
199 struct cirrus_device *cdev = dev->dev_private;
200 int hsyncstart, hsyncend, htotal, hdispend;
201 int vtotal, vdispend;
202 int tmp;
203 int sr07 = 0, hdr = 0;
204
205 htotal = mode->htotal / 8;
206 hsyncend = mode->hsync_end / 8;
207 hsyncstart = mode->hsync_start / 8;
208 hdispend = mode->hdisplay / 8;
209
210 vtotal = mode->vtotal;
211 vdispend = mode->vdisplay;
212
213 vdispend -= 1;
214 vtotal -= 2;
215
216 htotal -= 5;
217 hdispend -= 1;
218 hsyncstart += 1;
219 hsyncend += 1;
220
221 WREG_CRT(VGA_CRTC_V_SYNC_END, 0x20);
222 WREG_CRT(VGA_CRTC_H_TOTAL, htotal);
223 WREG_CRT(VGA_CRTC_H_DISP, hdispend);
224 WREG_CRT(VGA_CRTC_H_SYNC_START, hsyncstart);
225 WREG_CRT(VGA_CRTC_H_SYNC_END, hsyncend);
226 WREG_CRT(VGA_CRTC_V_TOTAL, vtotal & 0xff);
227 WREG_CRT(VGA_CRTC_V_DISP_END, vdispend & 0xff);
228
229 tmp = 0x40;
230 if ((vdispend + 1) & 512)
231 tmp |= 0x20;
232 WREG_CRT(VGA_CRTC_MAX_SCAN, tmp);
233
234 /*
235 * Overflow bits for values that don't fit in the standard registers
236 */
237 tmp = 16;
238 if (vtotal & 256)
239 tmp |= 1;
240 if (vdispend & 256)
241 tmp |= 2;
242 if ((vdispend + 1) & 256)
243 tmp |= 8;
244 if (vtotal & 512)
245 tmp |= 32;
246 if (vdispend & 512)
247 tmp |= 64;
248 WREG_CRT(VGA_CRTC_OVERFLOW, tmp);
249
250 tmp = 0;
251
252 /* More overflow bits */
253
254 if ((htotal + 5) & 64)
255 tmp |= 16;
256 if ((htotal + 5) & 128)
257 tmp |= 32;
258 if (vtotal & 256)
259 tmp |= 64;
260 if (vtotal & 512)
261 tmp |= 128;
262
263 WREG_CRT(CL_CRT1A, tmp);
264
265 /* Disable Hercules/CGA compatibility */
266 WREG_CRT(VGA_CRTC_MODE, 0x03);
267
268 WREG8(SEQ_INDEX, 0x7);
269 sr07 = RREG8(SEQ_DATA);
270 sr07 &= 0xe0;
271 hdr = 0;
272 switch (crtc->fb->bits_per_pixel) {
273 case 8:
274 sr07 |= 0x11;
275 break;
276 case 16:
277 sr07 |= 0xc1;
278 hdr = 0xc0;
279 break;
280 case 24:
281 sr07 |= 0x15;
282 hdr = 0xc5;
283 break;
284 case 32:
285 sr07 |= 0x19;
286 hdr = 0xc5;
287 break;
288 default:
289		return -EINVAL;
290 }
291
292 WREG_SEQ(0x7, sr07);
293
294 /* Program the pitch */
295 tmp = crtc->fb->pitches[0] / 8;
296 WREG_CRT(VGA_CRTC_OFFSET, tmp);
297
298 /* Enable extended blanking and pitch bits, and enable full memory */
299 tmp = 0x22;
300 tmp |= (crtc->fb->pitches[0] >> 7) & 0x10;
301 tmp |= (crtc->fb->pitches[0] >> 6) & 0x40;
302 WREG_CRT(0x1b, tmp);
303
304 /* Enable high-colour modes */
305 WREG_GFX(VGA_GFX_MODE, 0x40);
306
307 /* And set graphics mode */
308 WREG_GFX(VGA_GFX_MISC, 0x01);
309
310 WREG_HDR(hdr);
311 cirrus_crtc_do_set_base(crtc, old_fb, x, y, 0);
312 return 0;
313}
314
315/*
316 * This is called before a mode is programmed. A typical use might be to
317 * enable DPMS during the programming to avoid seeing intermediate stages,
318 * but that's not relevant to us
319 */
320static void cirrus_crtc_prepare(struct drm_crtc *crtc)
321{
322}
323
324/*
325 * This is called after a mode is programmed. It should reverse anything done
326 * by the prepare function
327 */
328static void cirrus_crtc_commit(struct drm_crtc *crtc)
329{
330}
331
332/*
333 * The core can pass us a set of gamma values to program. We actually only
334 * use this for 8-bit mode so can't perform smooth fades on deeper modes,
335 * but it's a requirement that we provide the function
336 */
337static void cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
338 u16 *blue, uint32_t start, uint32_t size)
339{
340 struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
341 int i;
342
343 if (size != CIRRUS_LUT_SIZE)
344 return;
345
346 for (i = 0; i < CIRRUS_LUT_SIZE; i++) {
347 cirrus_crtc->lut_r[i] = red[i];
348 cirrus_crtc->lut_g[i] = green[i];
349 cirrus_crtc->lut_b[i] = blue[i];
350 }
351 cirrus_crtc_load_lut(crtc);
352}
353
354/* Simple cleanup function */
355static void cirrus_crtc_destroy(struct drm_crtc *crtc)
356{
357 struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
358
359 drm_crtc_cleanup(crtc);
360 kfree(cirrus_crtc);
361}
362
363/* These provide the minimum set of functions required to handle a CRTC */
364static const struct drm_crtc_funcs cirrus_crtc_funcs = {
365 .gamma_set = cirrus_crtc_gamma_set,
366 .set_config = drm_crtc_helper_set_config,
367 .destroy = cirrus_crtc_destroy,
368};
369
370static const struct drm_crtc_helper_funcs cirrus_helper_funcs = {
371 .dpms = cirrus_crtc_dpms,
372 .mode_fixup = cirrus_crtc_mode_fixup,
373 .mode_set = cirrus_crtc_mode_set,
374 .mode_set_base = cirrus_crtc_mode_set_base,
375 .prepare = cirrus_crtc_prepare,
376 .commit = cirrus_crtc_commit,
377 .load_lut = cirrus_crtc_load_lut,
378};
379
380/* CRTC setup */
381static void cirrus_crtc_init(struct drm_device *dev)
382{
383 struct cirrus_device *cdev = dev->dev_private;
384 struct cirrus_crtc *cirrus_crtc;
385 int i;
386
387 cirrus_crtc = kzalloc(sizeof(struct cirrus_crtc) +
388 (CIRRUSFB_CONN_LIMIT * sizeof(struct drm_connector *)),
389 GFP_KERNEL);
390
391 if (cirrus_crtc == NULL)
392 return;
393
394 drm_crtc_init(dev, &cirrus_crtc->base, &cirrus_crtc_funcs);
395
396 drm_mode_crtc_set_gamma_size(&cirrus_crtc->base, CIRRUS_LUT_SIZE);
397 cdev->mode_info.crtc = cirrus_crtc;
398
399 for (i = 0; i < CIRRUS_LUT_SIZE; i++) {
400 cirrus_crtc->lut_r[i] = i;
401 cirrus_crtc->lut_g[i] = i;
402 cirrus_crtc->lut_b[i] = i;
403 }
404
405 drm_crtc_helper_add(&cirrus_crtc->base, &cirrus_helper_funcs);
406}
407
408/** Sets the color ramps on behalf of fbcon */
409void cirrus_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
410 u16 blue, int regno)
411{
412 struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
413
414 cirrus_crtc->lut_r[regno] = red;
415 cirrus_crtc->lut_g[regno] = green;
416 cirrus_crtc->lut_b[regno] = blue;
417}
418
419/** Gets the color ramps on behalf of fbcon */
420void cirrus_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
421 u16 *blue, int regno)
422{
423 struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
424
425 *red = cirrus_crtc->lut_r[regno];
426 *green = cirrus_crtc->lut_g[regno];
427 *blue = cirrus_crtc->lut_b[regno];
428}
429
430
431static bool cirrus_encoder_mode_fixup(struct drm_encoder *encoder,
432 struct drm_display_mode *mode,
433 struct drm_display_mode *adjusted_mode)
434{
435 return true;
436}
437
438static void cirrus_encoder_mode_set(struct drm_encoder *encoder,
439 struct drm_display_mode *mode,
440 struct drm_display_mode *adjusted_mode)
441{
442}
443
444static void cirrus_encoder_dpms(struct drm_encoder *encoder, int state)
445{
446 return;
447}
448
449static void cirrus_encoder_prepare(struct drm_encoder *encoder)
450{
451}
452
453static void cirrus_encoder_commit(struct drm_encoder *encoder)
454{
455}
456
457void cirrus_encoder_destroy(struct drm_encoder *encoder)
458{
459 struct cirrus_encoder *cirrus_encoder = to_cirrus_encoder(encoder);
460 drm_encoder_cleanup(encoder);
461 kfree(cirrus_encoder);
462}
463
464static const struct drm_encoder_helper_funcs cirrus_encoder_helper_funcs = {
465 .dpms = cirrus_encoder_dpms,
466 .mode_fixup = cirrus_encoder_mode_fixup,
467 .mode_set = cirrus_encoder_mode_set,
468 .prepare = cirrus_encoder_prepare,
469 .commit = cirrus_encoder_commit,
470};
471
472static const struct drm_encoder_funcs cirrus_encoder_encoder_funcs = {
473 .destroy = cirrus_encoder_destroy,
474};
475
476static struct drm_encoder *cirrus_encoder_init(struct drm_device *dev)
477{
478 struct drm_encoder *encoder;
479 struct cirrus_encoder *cirrus_encoder;
480
481 cirrus_encoder = kzalloc(sizeof(struct cirrus_encoder), GFP_KERNEL);
482 if (!cirrus_encoder)
483 return NULL;
484
485 encoder = &cirrus_encoder->base;
486 encoder->possible_crtcs = 0x1;
487
488 drm_encoder_init(dev, encoder, &cirrus_encoder_encoder_funcs,
489 DRM_MODE_ENCODER_DAC);
490 drm_encoder_helper_add(encoder, &cirrus_encoder_helper_funcs);
491
492 return encoder;
493}
494
495
496int cirrus_vga_get_modes(struct drm_connector *connector)
497{
498 /* Just add a static list of modes */
499 drm_add_modes_noedid(connector, 640, 480);
500 drm_add_modes_noedid(connector, 800, 600);
501 drm_add_modes_noedid(connector, 1024, 768);
502 drm_add_modes_noedid(connector, 1280, 1024);
503
504 return 4;
505}
506
507static int cirrus_vga_mode_valid(struct drm_connector *connector,
508 struct drm_display_mode *mode)
509{
510 /* Any mode we've added is valid */
511 return MODE_OK;
512}
513
514struct drm_encoder *cirrus_connector_best_encoder(struct drm_connector
515 *connector)
516{
517 int enc_id = connector->encoder_ids[0];
518 struct drm_mode_object *obj;
519 struct drm_encoder *encoder;
520
521 /* pick the encoder ids */
522 if (enc_id) {
523 obj =
524 drm_mode_object_find(connector->dev, enc_id,
525 DRM_MODE_OBJECT_ENCODER);
526 if (!obj)
527 return NULL;
528 encoder = obj_to_encoder(obj);
529 return encoder;
530 }
531 return NULL;
532}
533
534static enum drm_connector_status cirrus_vga_detect(struct drm_connector
535 *connector, bool force)
536{
537 return connector_status_connected;
538}
539
540static void cirrus_connector_destroy(struct drm_connector *connector)
541{
542 drm_connector_cleanup(connector);
543 kfree(connector);
544}
545
546struct drm_connector_helper_funcs cirrus_vga_connector_helper_funcs = {
547 .get_modes = cirrus_vga_get_modes,
548 .mode_valid = cirrus_vga_mode_valid,
549 .best_encoder = cirrus_connector_best_encoder,
550};
551
552struct drm_connector_funcs cirrus_vga_connector_funcs = {
553 .dpms = drm_helper_connector_dpms,
554 .detect = cirrus_vga_detect,
555 .fill_modes = drm_helper_probe_single_connector_modes,
556 .destroy = cirrus_connector_destroy,
557};
558
559static struct drm_connector *cirrus_vga_init(struct drm_device *dev)
560{
561 struct drm_connector *connector;
562 struct cirrus_connector *cirrus_connector;
563
564 cirrus_connector = kzalloc(sizeof(struct cirrus_connector), GFP_KERNEL);
565 if (!cirrus_connector)
566 return NULL;
567
568 connector = &cirrus_connector->base;
569
570 drm_connector_init(dev, connector,
571 &cirrus_vga_connector_funcs, DRM_MODE_CONNECTOR_VGA);
572
573 drm_connector_helper_add(connector, &cirrus_vga_connector_helper_funcs);
574
575 return connector;
576}
577
578
579int cirrus_modeset_init(struct cirrus_device *cdev)
580{
581 struct drm_encoder *encoder;
582 struct drm_connector *connector;
583 int ret;
584
585 drm_mode_config_init(cdev->dev);
586 cdev->mode_info.mode_config_initialized = true;
587
588 cdev->dev->mode_config.max_width = CIRRUS_MAX_FB_WIDTH;
589 cdev->dev->mode_config.max_height = CIRRUS_MAX_FB_HEIGHT;
590
591 cdev->dev->mode_config.fb_base = cdev->mc.vram_base;
592 cdev->dev->mode_config.preferred_depth = 24;
593 /* don't prefer a shadow on virt GPU */
594 cdev->dev->mode_config.prefer_shadow = 0;
595
596 cirrus_crtc_init(cdev->dev);
597
598 encoder = cirrus_encoder_init(cdev->dev);
599 if (!encoder) {
600 DRM_ERROR("cirrus_encoder_init failed\n");
601		return -ENOMEM;
602 }
603
604 connector = cirrus_vga_init(cdev->dev);
605 if (!connector) {
606 DRM_ERROR("cirrus_vga_init failed\n");
607		return -ENOMEM;
608 }
609
610 drm_mode_connector_attach_encoder(connector, encoder);
611
612 ret = cirrus_fbdev_init(cdev);
613 if (ret) {
614 DRM_ERROR("cirrus_fbdev_init failed\n");
615 return ret;
616 }
617
618 return 0;
619}
620
621void cirrus_modeset_fini(struct cirrus_device *cdev)
622{
623 cirrus_fbdev_fini(cdev);
624
625 if (cdev->mode_info.mode_config_initialized) {
626 drm_mode_config_cleanup(cdev->dev);
627 cdev->mode_info.mode_config_initialized = false;
628 }
629}
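
cirrus_set_start_address() above scatters the dword scanout address (byte offset >> 2) across four CRT registers: the low 16 bits into CR0D/CR0C, bit 16 and bits 18:17 into CR1B, and bit 19 into CR1D. A standalone check of that bit packing; the offset value is arbitrary.

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t offset = 0x003f0000;	/* arbitrary byte offset */
		uint32_t addr = offset >> 2;	/* dword address, as in the driver */

		printf("CR0D = 0x%02x\n", addr & 0xff);		/* bits 7:0  */
		printf("CR0C = 0x%02x\n", (addr >> 8) & 0xff);	/* bits 15:8 */
		printf("CR1B |= 0x%02x\n",
		       ((addr >> 16) & 0x01) | ((addr >> 15) & 0x0c)); /* 16, 18:17 */
		printf("CR1D |= 0x%02x\n", (addr >> 12) & 0x80);	/* bit 19 */
		return 0;
	}
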
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
new file mode 100644
index 000000000000..2ebcd11a5023
--- /dev/null
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -0,0 +1,453 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the
6 * "Software"), to deal in the Software without restriction, including
7 * without limitation the rights to use, copy, modify, merge, publish,
8 * distribute, sub license, and/or sell copies of the Software, and to
9 * permit persons to whom the Software is furnished to do so, subject to
10 * the following conditions:
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
13 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
15 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
16 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
17 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
18 * USE OR OTHER DEALINGS IN THE SOFTWARE.
19 *
20 * The above copyright notice and this permission notice (including the
21 * next paragraph) shall be included in all copies or substantial portions
22 * of the Software.
23 *
24 */
25/*
26 * Authors: Dave Airlie <airlied@redhat.com>
27 */
28#include "drmP.h"
29#include "cirrus_drv.h"
30#include <ttm/ttm_page_alloc.h>
31
32static inline struct cirrus_device *
33cirrus_bdev(struct ttm_bo_device *bd)
34{
35 return container_of(bd, struct cirrus_device, ttm.bdev);
36}
37
38static int
39cirrus_ttm_mem_global_init(struct drm_global_reference *ref)
40{
41 return ttm_mem_global_init(ref->object);
42}
43
44static void
45cirrus_ttm_mem_global_release(struct drm_global_reference *ref)
46{
47 ttm_mem_global_release(ref->object);
48}
49
50static int cirrus_ttm_global_init(struct cirrus_device *cirrus)
51{
52 struct drm_global_reference *global_ref;
53 int r;
54
55 global_ref = &cirrus->ttm.mem_global_ref;
56 global_ref->global_type = DRM_GLOBAL_TTM_MEM;
57 global_ref->size = sizeof(struct ttm_mem_global);
58 global_ref->init = &cirrus_ttm_mem_global_init;
59 global_ref->release = &cirrus_ttm_mem_global_release;
60 r = drm_global_item_ref(global_ref);
61 if (r != 0) {
62 DRM_ERROR("Failed setting up TTM memory accounting "
63 "subsystem.\n");
64 return r;
65 }
66
67 cirrus->ttm.bo_global_ref.mem_glob =
68 cirrus->ttm.mem_global_ref.object;
69 global_ref = &cirrus->ttm.bo_global_ref.ref;
70 global_ref->global_type = DRM_GLOBAL_TTM_BO;
71 global_ref->size = sizeof(struct ttm_bo_global);
72 global_ref->init = &ttm_bo_global_init;
73 global_ref->release = &ttm_bo_global_release;
74 r = drm_global_item_ref(global_ref);
75 if (r != 0) {
76 DRM_ERROR("Failed setting up TTM BO subsystem.\n");
77 drm_global_item_unref(&cirrus->ttm.mem_global_ref);
78 return r;
79 }
80 return 0;
81}
82
83void
84cirrus_ttm_global_release(struct cirrus_device *cirrus)
85{
86 if (cirrus->ttm.mem_global_ref.release == NULL)
87 return;
88
89 drm_global_item_unref(&cirrus->ttm.bo_global_ref.ref);
90 drm_global_item_unref(&cirrus->ttm.mem_global_ref);
91 cirrus->ttm.mem_global_ref.release = NULL;
92}


static void cirrus_bo_ttm_destroy(struct ttm_buffer_object *tbo)
{
	struct cirrus_bo *bo;

	bo = container_of(tbo, struct cirrus_bo, bo);

	drm_gem_object_release(&bo->gem);
	kfree(bo);
}

bool cirrus_ttm_bo_is_cirrus_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &cirrus_bo_ttm_destroy)
		return true;
	return false;
}

static int
cirrus_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		man->func = &ttm_bo_manager_func;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

static void
cirrus_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct cirrus_bo *cirrusbo = cirrus_bo(bo);

	if (!cirrus_ttm_bo_is_cirrus_bo(bo))
		return;

	cirrus_ttm_placement(cirrusbo, TTM_PL_FLAG_SYSTEM);
	*pl = cirrusbo->placement;
}

static int cirrus_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}

static int cirrus_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
				     struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct cirrus_device *cirrus = cirrus_bdev(bdev);

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = pci_resource_start(cirrus->dev->pdev, 0);
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void cirrus_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

static int cirrus_bo_move(struct ttm_buffer_object *bo,
			  bool evict, bool interruptible,
			  bool no_wait_reserve, bool no_wait_gpu,
			  struct ttm_mem_reg *new_mem)
{
	int r;
	r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
	return r;
}


static void cirrus_ttm_backend_destroy(struct ttm_tt *tt)
{
	ttm_tt_fini(tt);
	kfree(tt);
}

static struct ttm_backend_func cirrus_tt_backend_func = {
	.destroy = &cirrus_ttm_backend_destroy,
};


struct ttm_tt *cirrus_ttm_tt_create(struct ttm_bo_device *bdev,
				    unsigned long size, uint32_t page_flags,
				    struct page *dummy_read_page)
{
	struct ttm_tt *tt;

	tt = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL);
	if (tt == NULL)
		return NULL;
	tt->func = &cirrus_tt_backend_func;
	if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
		kfree(tt);
		return NULL;
	}
	return tt;
}

static int cirrus_ttm_tt_populate(struct ttm_tt *ttm)
{
	return ttm_pool_populate(ttm);
}

static void cirrus_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	ttm_pool_unpopulate(ttm);
}

struct ttm_bo_driver cirrus_bo_driver = {
	.ttm_tt_create = cirrus_ttm_tt_create,
	.ttm_tt_populate = cirrus_ttm_tt_populate,
	.ttm_tt_unpopulate = cirrus_ttm_tt_unpopulate,
	.init_mem_type = cirrus_bo_init_mem_type,
	.evict_flags = cirrus_bo_evict_flags,
	.move = cirrus_bo_move,
	.verify_access = cirrus_bo_verify_access,
	.io_mem_reserve = &cirrus_ttm_io_mem_reserve,
	.io_mem_free = &cirrus_ttm_io_mem_free,
};

int cirrus_mm_init(struct cirrus_device *cirrus)
{
	int ret;
	struct drm_device *dev = cirrus->dev;
	struct ttm_bo_device *bdev = &cirrus->ttm.bdev;

	ret = cirrus_ttm_global_init(cirrus);
	if (ret)
		return ret;

	ret = ttm_bo_device_init(&cirrus->ttm.bdev,
				 cirrus->ttm.bo_global_ref.ref.object,
				 &cirrus_bo_driver, DRM_FILE_PAGE_OFFSET,
				 true);
	if (ret) {
		DRM_ERROR("Error initialising bo driver; %d\n", ret);
		return ret;
	}

	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
			     cirrus->mc.vram_size >> PAGE_SHIFT);
	if (ret) {
		DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
		return ret;
	}

	cirrus->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0),
				       pci_resource_len(dev->pdev, 0),
				       DRM_MTRR_WC);

	return 0;
}

void cirrus_mm_fini(struct cirrus_device *cirrus)
{
	struct drm_device *dev = cirrus->dev;
	ttm_bo_device_release(&cirrus->ttm.bdev);

	cirrus_ttm_global_release(cirrus);

	if (cirrus->fb_mtrr >= 0) {
		drm_mtrr_del(cirrus->fb_mtrr,
			     pci_resource_start(dev->pdev, 0),
			     pci_resource_len(dev->pdev, 0), DRM_MTRR_WC);
		cirrus->fb_mtrr = -1;
	}
}

void cirrus_ttm_placement(struct cirrus_bo *bo, int domain)
{
	u32 c = 0;
	bo->placement.fpfn = 0;
	bo->placement.lpfn = 0;
	bo->placement.placement = bo->placements;
	bo->placement.busy_placement = bo->placements;
	if (domain & TTM_PL_FLAG_VRAM)
		bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
	if (domain & TTM_PL_FLAG_SYSTEM)
		bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	if (!c)
		bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	bo->placement.num_placement = c;
	bo->placement.num_busy_placement = c;
}

int cirrus_bo_reserve(struct cirrus_bo *bo, bool no_wait)
{
	int ret;

	ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
	if (ret) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("reserve failed %p\n", bo);
		return ret;
	}
	return 0;
}

void cirrus_bo_unreserve(struct cirrus_bo *bo)
{
	ttm_bo_unreserve(&bo->bo);
}

int cirrus_bo_create(struct drm_device *dev, int size, int align,
		     uint32_t flags, struct cirrus_bo **pcirrusbo)
{
	struct cirrus_device *cirrus = dev->dev_private;
	struct cirrus_bo *cirrusbo;
	size_t acc_size;
	int ret;

	cirrusbo = kzalloc(sizeof(struct cirrus_bo), GFP_KERNEL);
	if (!cirrusbo)
		return -ENOMEM;

	ret = drm_gem_object_init(dev, &cirrusbo->gem, size);
	if (ret) {
		kfree(cirrusbo);
		return ret;
	}

	cirrusbo->gem.driver_private = NULL;
	cirrusbo->bo.bdev = &cirrus->ttm.bdev;

	cirrus_ttm_placement(cirrusbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);

	acc_size = ttm_bo_dma_acc_size(&cirrus->ttm.bdev, size,
				       sizeof(struct cirrus_bo));

	ret = ttm_bo_init(&cirrus->ttm.bdev, &cirrusbo->bo, size,
			  ttm_bo_type_device, &cirrusbo->placement,
			  align >> PAGE_SHIFT, 0, false, NULL, acc_size,
			  NULL, cirrus_bo_ttm_destroy);
	if (ret)
		return ret;

	*pcirrusbo = cirrusbo;
	return 0;
}

static inline u64 cirrus_bo_gpu_offset(struct cirrus_bo *bo)
{
	return bo->bo.offset;
}

int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr)
{
	int i, ret;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = cirrus_bo_gpu_offset(bo);
		return 0;	/* already pinned; don't revalidate and reset the count */
	}

	cirrus_ttm_placement(bo, pl_flag);
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
	if (ret)
		return ret;

	bo->pin_count = 1;
	if (gpu_addr)
		*gpu_addr = cirrus_bo_gpu_offset(bo);
	return 0;
}

int cirrus_bo_unpin(struct cirrus_bo *bo)
{
	int i, ret;
	if (!bo->pin_count) {
		DRM_ERROR("unpin bad %p\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;

	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
	if (ret)
		return ret;

	return 0;
}

int cirrus_bo_push_sysram(struct cirrus_bo *bo)
{
	int i, ret;
	if (!bo->pin_count) {
		DRM_ERROR("push_sysram on unpinned bo %p\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;

	if (bo->kmap.virtual)
		ttm_bo_kunmap(&bo->kmap);

	cirrus_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;

	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
	if (ret) {
		DRM_ERROR("pushing to system memory failed\n");
		return ret;
	}
	return 0;
}

int cirrus_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct cirrus_device *cirrus;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
		return drm_mmap(filp, vma);

	file_priv = filp->private_data;
	cirrus = file_priv->minor->dev->dev_private;
	return ttm_bo_mmap(filp, vma, &cirrus->ttm.bdev);
}
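
The helpers above mirror the ast and mgag200 drivers: create a GEM-backed TTM buffer, reserve it, pin it into VRAM so the scanout engine sees a fixed offset, then unreserve. A minimal sketch of the intended calling sequence (hypothetical caller with simplified cleanup; not part of this file):

static int cirrus_pin_example(struct drm_device *dev, u64 *gpu_addr)
{
	struct cirrus_bo *bo;
	int ret;

	/* 64 KiB page-aligned buffer, created in VRAM-or-system placement */
	ret = cirrus_bo_create(dev, 64 * 1024, PAGE_SIZE, 0, &bo);
	if (ret)
		return ret;

	ret = cirrus_bo_reserve(bo, false);
	if (ret)
		return ret;

	/* pin into VRAM; *gpu_addr receives the stable scanout offset */
	ret = cirrus_bo_pin(bo, TTM_PL_FLAG_VRAM, gpu_addr);
	cirrus_bo_unreserve(bo);
	return ret;
}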
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 4b8653b932f9..08758e061478 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -98,3 +98,26 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages)
 #endif
 }
 EXPORT_SYMBOL(drm_clflush_pages);
+
+void
+drm_clflush_virt_range(char *addr, unsigned long length)
+{
+#if defined(CONFIG_X86)
+	if (cpu_has_clflush) {
+		char *end = addr + length;
+		mb();
+		for (; addr < end; addr += boot_cpu_data.x86_clflush_size)
+			clflush(addr);
+		clflush(end - 1);
+		mb();
+		return;
+	}
+
+	if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
+		printk(KERN_ERR "Timed out waiting for cache flush.\n");
+#else
+	printk(KERN_ERR "Architecture has no drm_cache.c support\n");
+	WARN_ON_ONCE(1);
+#endif
+}
+EXPORT_SYMBOL(drm_clflush_virt_range);
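
drm_clflush_virt_range() walks the range in cacheline strides starting from a possibly unaligned address, so the strided loop can step past the end of the range while the cacheline holding the final byte is still unflushed; the trailing clflush(end - 1) closes that hole, and the mb() pair orders the flushes against surrounding memory traffic. A standalone illustration of the corner case (userspace demo, not kernel code; assumes a 64-byte line):

#include <stdio.h>

int main(void)
{
	unsigned long addr = 0x1030, len = 0x20, stride = 0x40;
	unsigned long end = addr + len, p;

	/* loop flushes only line 0x1000; end - 1 = 0x104f lives in 0x1040 */
	for (p = addr; p < end; p += stride)
		printf("loop flushes line 0x%lx\n", p & ~(stride - 1));
	printf("tail flush covers line 0x%lx\n", (end - 1) & ~(stride - 1));
	return 0;
}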
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index 325365f6d355..affa629589ac 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -85,11 +85,12 @@ again:
 	mutex_lock(&dev->struct_mutex);
 	ret = idr_get_new_above(&dev->ctx_idr, NULL,
 				DRM_RESERVED_CONTEXTS, &new_id);
-	if (ret == -EAGAIN) {
-		mutex_unlock(&dev->struct_mutex);
-		goto again;
-	}
 	mutex_unlock(&dev->struct_mutex);
+	if (ret == -EAGAIN)
+		goto again;
+	else if (ret)
+		return ret;
+
 	return new_id;
 }
 
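
Both this change and the drm_mode_object_get() fix in drm_crtc.c below use the pre-rework idr idiom: idr_pre_get() preallocates memory outside the lock, idr_get_new_above() can still fail with -EAGAIN if another caller consumed the preallocation, and the loop retries after dropping the lock; any other error is now propagated instead of being swallowed. A condensed sketch of the idiom (hypothetical helper, same calls as the hunk above):

static int alloc_ctx_id_sketch(struct drm_device *dev, int *new_id)
{
	int ret;
again:
	if (idr_pre_get(&dev->ctx_idr, GFP_KERNEL) == 0)
		return -ENOMEM;

	mutex_lock(&dev->struct_mutex);
	ret = idr_get_new_above(&dev->ctx_idr, NULL,
				DRM_RESERVED_CONTEXTS, new_id);
	mutex_unlock(&dev->struct_mutex);	/* drop the lock before retrying */
	if (ret == -EAGAIN)
		goto again;	/* another thread raced away the preallocation */
	return ret;
}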
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index c79870a75c2f..92cea9d77ec9 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -227,7 +227,7 @@ static int drm_mode_object_get(struct drm_device *dev,
 again:
 	if (idr_pre_get(&dev->mode_config.crtc_idr, GFP_KERNEL) == 0) {
 		DRM_ERROR("Ran out memory getting a mode number\n");
-		return -EINVAL;
+		return -ENOMEM;
 	}
 
 	mutex_lock(&dev->mode_config.idr_mutex);
@@ -235,6 +235,8 @@ again:
 	mutex_unlock(&dev->mode_config.idr_mutex);
 	if (ret == -EAGAIN)
 		goto again;
+	else if (ret)
+		return ret;
 
 	obj->id = new_id;
 	obj->type = obj_type;
@@ -361,7 +363,7 @@ EXPORT_SYMBOL(drm_framebuffer_cleanup);
  * @funcs: callbacks for the new CRTC
  *
  * LOCKING:
- * Caller must hold mode config lock.
+ * Takes mode_config lock.
  *
  * Inits a new object created as base part of an driver crtc object.
  *
@@ -382,6 +384,8 @@ int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
 	if (ret)
 		goto out;
 
+	crtc->base.properties = &crtc->properties;
+
 	list_add_tail(&crtc->head, &dev->mode_config.crtc_list);
 	dev->mode_config.num_crtc++;
 
@@ -481,6 +485,7 @@ int drm_connector_init(struct drm_device *dev,
 	if (ret)
 		goto out;
 
+	connector->base.properties = &connector->properties;
 	connector->dev = dev;
 	connector->funcs = funcs;
 	connector->connector_type = connector_type;
@@ -603,6 +608,7 @@ int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
 	if (ret)
 		goto out;
 
+	plane->base.properties = &plane->properties;
 	plane->dev = dev;
 	plane->funcs = funcs;
 	plane->format_types = kmalloc(sizeof(uint32_t) * format_count,
@@ -1422,11 +1428,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
 	}
 	connector = obj_to_connector(obj);
 
-	for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
-		if (connector->property_ids[i] != 0) {
-			props_count++;
-		}
-	}
+	props_count = connector->properties.count;
 
 	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
 		if (connector->encoder_ids[i] != 0) {
@@ -1479,21 +1481,19 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
 		copied = 0;
 		prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
 		prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
-		for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
-			if (connector->property_ids[i] != 0) {
-				if (put_user(connector->property_ids[i],
-					     prop_ptr + copied)) {
-					ret = -EFAULT;
-					goto out;
-				}
+		for (i = 0; i < connector->properties.count; i++) {
+			if (put_user(connector->properties.ids[i],
+				     prop_ptr + copied)) {
+				ret = -EFAULT;
+				goto out;
+			}
 
-				if (put_user(connector->property_values[i],
-					     prop_values + copied)) {
-					ret = -EFAULT;
-					goto out;
-				}
-				copied++;
-			}
+			if (put_user(connector->properties.values[i],
+				     prop_values + copied)) {
+				ret = -EFAULT;
+				goto out;
+			}
+			copied++;
 		}
 	}
 	out_resp->count_props = props_count;
@@ -1830,7 +1830,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
 	struct drm_display_mode *mode = NULL;
 	struct drm_mode_set set;
 	uint32_t __user *set_connectors_ptr;
-	int ret = 0;
+	int ret;
 	int i;
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
@@ -2102,7 +2102,7 @@ int drm_mode_addfb(struct drm_device *dev,
 
 	fb = dev->mode_config.funcs->fb_create(dev, file_priv, &r);
 	if (IS_ERR(fb)) {
-		DRM_ERROR("could not create framebuffer\n");
+		DRM_DEBUG_KMS("could not create framebuffer\n");
 		ret = PTR_ERR(fb);
 		goto out;
 	}
@@ -2185,6 +2185,47 @@ static int format_check(struct drm_mode_fb_cmd2 *r)
 	}
 }
 
+static int framebuffer_check(struct drm_mode_fb_cmd2 *r)
+{
+	int ret, hsub, vsub, num_planes, i;
+
+	ret = format_check(r);
+	if (ret) {
+		DRM_DEBUG_KMS("bad framebuffer format 0x%08x\n", r->pixel_format);
+		return ret;
+	}
+
+	hsub = drm_format_horz_chroma_subsampling(r->pixel_format);
+	vsub = drm_format_vert_chroma_subsampling(r->pixel_format);
+	num_planes = drm_format_num_planes(r->pixel_format);
+
+	if (r->width == 0 || r->width % hsub) {
+		DRM_DEBUG_KMS("bad framebuffer width %u\n", r->width);
+		return -EINVAL;
+	}
+
+	if (r->height == 0 || r->height % vsub) {
+		DRM_DEBUG_KMS("bad framebuffer height %u\n", r->height);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < num_planes; i++) {
+		unsigned int width = r->width / (i != 0 ? hsub : 1);
+
+		if (!r->handles[i]) {
+			DRM_DEBUG_KMS("no buffer object handle for plane %d\n", i);
+			return -EINVAL;
+		}
+
+		if (r->pitches[i] < drm_format_plane_cpp(r->pixel_format, i) * width) {
+			DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
 /**
  * drm_mode_addfb2 - add an FB to the graphics configuration
  * @inode: inode from the ioctl
@@ -2208,33 +2249,31 @@ int drm_mode_addfb2(struct drm_device *dev,
 	struct drm_mode_fb_cmd2 *r = data;
 	struct drm_mode_config *config = &dev->mode_config;
 	struct drm_framebuffer *fb;
-	int ret = 0;
+	int ret;
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		return -EINVAL;
 
 	if ((config->min_width > r->width) || (r->width > config->max_width)) {
-		DRM_ERROR("bad framebuffer width %d, should be >= %d && <= %d\n",
-			  r->width, config->min_width, config->max_width);
+		DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n",
+			      r->width, config->min_width, config->max_width);
 		return -EINVAL;
 	}
 	if ((config->min_height > r->height) || (r->height > config->max_height)) {
-		DRM_ERROR("bad framebuffer height %d, should be >= %d && <= %d\n",
-			  r->height, config->min_height, config->max_height);
+		DRM_DEBUG_KMS("bad framebuffer height %d, should be >= %d && <= %d\n",
+			      r->height, config->min_height, config->max_height);
 		return -EINVAL;
 	}
 
-	ret = format_check(r);
-	if (ret) {
-		DRM_ERROR("bad framebuffer format 0x%08x\n", r->pixel_format);
+	ret = framebuffer_check(r);
+	if (ret)
 		return ret;
-	}
 
 	mutex_lock(&dev->mode_config.mutex);
 
 	fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
 	if (IS_ERR(fb)) {
-		DRM_ERROR("could not create framebuffer\n");
+		DRM_DEBUG_KMS("could not create framebuffer\n");
 		ret = PTR_ERR(fb);
 		goto out;
 	}
@@ -2365,7 +2404,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
 	struct drm_framebuffer *fb;
 	unsigned flags;
 	int num_clips;
-	int ret = 0;
+	int ret;
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		return -EINVAL;
@@ -2564,7 +2603,7 @@ int drm_mode_attachmode_ioctl(struct drm_device *dev,
 	struct drm_display_mode *mode;
 	struct drm_mode_object *obj;
 	struct drm_mode_modeinfo *umode = &mode_cmd->mode;
-	int ret = 0;
+	int ret;
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		return -EINVAL;
@@ -2618,7 +2657,7 @@ int drm_mode_detachmode_ioctl(struct drm_device *dev,
 	struct drm_connector *connector;
 	struct drm_display_mode mode;
 	struct drm_mode_modeinfo *umode = &mode_cmd->mode;
-	int ret = 0;
+	int ret;
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		return -EINVAL;
@@ -2710,6 +2749,34 @@ struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
 }
 EXPORT_SYMBOL(drm_property_create_enum);
 
+struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
+					 int flags, const char *name,
+					 const struct drm_prop_enum_list *props,
+					 int num_values)
+{
+	struct drm_property *property;
+	int i, ret;
+
+	flags |= DRM_MODE_PROP_BITMASK;
+
+	property = drm_property_create(dev, flags, name, num_values);
+	if (!property)
+		return NULL;
+
+	for (i = 0; i < num_values; i++) {
+		ret = drm_property_add_enum(property, i,
+					    props[i].type,
+					    props[i].name);
+		if (ret) {
+			drm_property_destroy(dev, property);
+			return NULL;
+		}
+	}
+
+	return property;
+}
+EXPORT_SYMBOL(drm_property_create_bitmask);
+
 struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
 					 const char *name,
 					 uint64_t min, uint64_t max)
@@ -2734,7 +2801,14 @@ int drm_property_add_enum(struct drm_property *property, int index,
 {
 	struct drm_property_enum *prop_enum;
 
-	if (!(property->flags & DRM_MODE_PROP_ENUM))
+	if (!(property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)))
+		return -EINVAL;
+
+	/*
+	 * Bitmask enum properties have the additional constraint of values
+	 * from 0 to 63
+	 */
+	if ((property->flags & DRM_MODE_PROP_BITMASK) && (value > 63))
 		return -EINVAL;
 
 	if (!list_empty(&property->enum_blob_list)) {
@@ -2778,60 +2852,78 @@ void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
 }
 EXPORT_SYMBOL(drm_property_destroy);
 
-int drm_connector_attach_property(struct drm_connector *connector,
+void drm_connector_attach_property(struct drm_connector *connector,
 				  struct drm_property *property, uint64_t init_val)
 {
-	int i;
-
-	for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
-		if (connector->property_ids[i] == 0) {
-			connector->property_ids[i] = property->base.id;
-			connector->property_values[i] = init_val;
-			break;
-		}
-	}
-
-	if (i == DRM_CONNECTOR_MAX_PROPERTY)
-		return -EINVAL;
-	return 0;
+	drm_object_attach_property(&connector->base, property, init_val);
 }
 EXPORT_SYMBOL(drm_connector_attach_property);
 
 int drm_connector_property_set_value(struct drm_connector *connector,
 				     struct drm_property *property, uint64_t value)
 {
+	return drm_object_property_set_value(&connector->base, property, value);
+}
+EXPORT_SYMBOL(drm_connector_property_set_value);
+
+int drm_connector_property_get_value(struct drm_connector *connector,
+				     struct drm_property *property, uint64_t *val)
+{
+	return drm_object_property_get_value(&connector->base, property, val);
+}
+EXPORT_SYMBOL(drm_connector_property_get_value);
+
+void drm_object_attach_property(struct drm_mode_object *obj,
+				struct drm_property *property,
+				uint64_t init_val)
+{
+	int count = obj->properties->count;
+
+	if (count == DRM_OBJECT_MAX_PROPERTY) {
+		WARN(1, "Failed to attach object property (type: 0x%x). Please "
+			"increase DRM_OBJECT_MAX_PROPERTY by 1 for each time "
+			"you see this message on the same object type.\n",
+			obj->type);
+		return;
+	}
+
+	obj->properties->ids[count] = property->base.id;
+	obj->properties->values[count] = init_val;
+	obj->properties->count++;
+}
+EXPORT_SYMBOL(drm_object_attach_property);
+
+int drm_object_property_set_value(struct drm_mode_object *obj,
+				  struct drm_property *property, uint64_t val)
+{
 	int i;
 
-	for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
-		if (connector->property_ids[i] == property->base.id) {
-			connector->property_values[i] = value;
-			break;
+	for (i = 0; i < obj->properties->count; i++) {
+		if (obj->properties->ids[i] == property->base.id) {
+			obj->properties->values[i] = val;
+			return 0;
 		}
 	}
 
-	if (i == DRM_CONNECTOR_MAX_PROPERTY)
-		return -EINVAL;
-	return 0;
+	return -EINVAL;
 }
-EXPORT_SYMBOL(drm_connector_property_set_value);
+EXPORT_SYMBOL(drm_object_property_set_value);
 
-int drm_connector_property_get_value(struct drm_connector *connector,
+int drm_object_property_get_value(struct drm_mode_object *obj,
 				     struct drm_property *property, uint64_t *val)
 {
 	int i;
 
-	for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
-		if (connector->property_ids[i] == property->base.id) {
-			*val = connector->property_values[i];
-			break;
+	for (i = 0; i < obj->properties->count; i++) {
+		if (obj->properties->ids[i] == property->base.id) {
+			*val = obj->properties->values[i];
+			return 0;
 		}
 	}
 
-	if (i == DRM_CONNECTOR_MAX_PROPERTY)
-		return -EINVAL;
-	return 0;
+	return -EINVAL;
 }
-EXPORT_SYMBOL(drm_connector_property_get_value);
+EXPORT_SYMBOL(drm_object_property_get_value);
 
 int drm_mode_getproperty_ioctl(struct drm_device *dev,
 			       void *data, struct drm_file *file_priv)
@@ -2862,7 +2954,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
 	}
 	property = obj_to_property(obj);
 
-	if (property->flags & DRM_MODE_PROP_ENUM) {
+	if (property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)) {
 		list_for_each_entry(prop_enum, &property->enum_blob_list, head)
 			enum_count++;
 	} else if (property->flags & DRM_MODE_PROP_BLOB) {
@@ -2887,7 +2979,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
 	}
 	out_resp->count_values = value_count;
 
-	if (property->flags & DRM_MODE_PROP_ENUM) {
+	if (property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)) {
 		if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
 			copied = 0;
 			enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
@@ -3009,7 +3101,7 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
 					    struct edid *edid)
 {
 	struct drm_device *dev = connector->dev;
-	int ret = 0, size;
+	int ret, size;
 
 	if (connector->edid_blob_ptr)
 		drm_property_destroy_blob(dev, connector->edid_blob_ptr);
@@ -3033,75 +3125,202 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
 }
 EXPORT_SYMBOL(drm_mode_connector_update_edid_property);
 
+static bool drm_property_change_is_valid(struct drm_property *property,
+					 __u64 value)
+{
+	if (property->flags & DRM_MODE_PROP_IMMUTABLE)
+		return false;
+	if (property->flags & DRM_MODE_PROP_RANGE) {
+		if (value < property->values[0] || value > property->values[1])
+			return false;
+		return true;
+	} else if (property->flags & DRM_MODE_PROP_BITMASK) {
+		int i;
+		__u64 valid_mask = 0;
+		for (i = 0; i < property->num_values; i++)
+			valid_mask |= (1ULL << property->values[i]);
+		return !(value & ~valid_mask);
+	} else {
+		int i;
+		for (i = 0; i < property->num_values; i++)
+			if (property->values[i] == value)
+				return true;
+		return false;
+	}
+}
+
 int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
 					  void *data, struct drm_file *file_priv)
 {
-	struct drm_mode_connector_set_property *out_resp = data;
-	struct drm_mode_object *obj;
-	struct drm_property *property;
-	struct drm_connector *connector;
+	struct drm_mode_connector_set_property *conn_set_prop = data;
+	struct drm_mode_obj_set_property obj_set_prop = {
+		.value = conn_set_prop->value,
+		.prop_id = conn_set_prop->prop_id,
+		.obj_id = conn_set_prop->connector_id,
+		.obj_type = DRM_MODE_OBJECT_CONNECTOR
+	};
+
+	/* It does all the locking and checking we need */
+	return drm_mode_obj_set_property_ioctl(dev, &obj_set_prop, file_priv);
+}
+
+static int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
+					   struct drm_property *property,
+					   uint64_t value)
+{
 	int ret = -EINVAL;
+	struct drm_connector *connector = obj_to_connector(obj);
+
+	/* Do DPMS ourselves */
+	if (property == connector->dev->mode_config.dpms_property) {
+		if (connector->funcs->dpms)
+			(*connector->funcs->dpms)(connector, (int)value);
+		ret = 0;
+	} else if (connector->funcs->set_property)
+		ret = connector->funcs->set_property(connector, property, value);
+
+	/* store the property value if successful */
+	if (!ret)
+		drm_connector_property_set_value(connector, property, value);
+	return ret;
+}
+
+static int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj,
+				      struct drm_property *property,
+				      uint64_t value)
+{
+	int ret = -EINVAL;
+	struct drm_crtc *crtc = obj_to_crtc(obj);
+
+	if (crtc->funcs->set_property)
+		ret = crtc->funcs->set_property(crtc, property, value);
+	if (!ret)
+		drm_object_property_set_value(obj, property, value);
+
+	return ret;
+}
+
+static int drm_mode_plane_set_obj_prop(struct drm_mode_object *obj,
+				       struct drm_property *property,
+				       uint64_t value)
+{
+	int ret = -EINVAL;
+	struct drm_plane *plane = obj_to_plane(obj);
+
+	if (plane->funcs->set_property)
+		ret = plane->funcs->set_property(plane, property, value);
+	if (!ret)
+		drm_object_property_set_value(obj, property, value);
+
+	return ret;
+}
+
+int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
+				      struct drm_file *file_priv)
+{
+	struct drm_mode_obj_get_properties *arg = data;
+	struct drm_mode_object *obj;
+	int ret = 0;
 	int i;
+	int copied = 0;
+	int props_count = 0;
+	uint32_t __user *props_ptr;
+	uint64_t __user *prop_values_ptr;
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		return -EINVAL;
 
 	mutex_lock(&dev->mode_config.mutex);
 
-	obj = drm_mode_object_find(dev, out_resp->connector_id, DRM_MODE_OBJECT_CONNECTOR);
+	obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
 	if (!obj) {
+		ret = -EINVAL;
+		goto out;
+	}
+	if (!obj->properties) {
+		ret = -EINVAL;
 		goto out;
 	}
-	connector = obj_to_connector(obj);
 
-	for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
-		if (connector->property_ids[i] == out_resp->prop_id)
-			break;
+	props_count = obj->properties->count;
+
+	/* This ioctl is called twice, once to determine how much space is
+	 * needed, and the 2nd time to fill it. */
+	if ((arg->count_props >= props_count) && props_count) {
+		copied = 0;
+		props_ptr = (uint32_t __user *)(unsigned long)(arg->props_ptr);
+		prop_values_ptr = (uint64_t __user *)(unsigned long)
+				  (arg->prop_values_ptr);
+		for (i = 0; i < props_count; i++) {
+			if (put_user(obj->properties->ids[i],
+				     props_ptr + copied)) {
+				ret = -EFAULT;
+				goto out;
+			}
+			if (put_user(obj->properties->values[i],
+				     prop_values_ptr + copied)) {
+				ret = -EFAULT;
+				goto out;
+			}
+			copied++;
+		}
 	}
+	arg->count_props = props_count;
+out:
+	mutex_unlock(&dev->mode_config.mutex);
+	return ret;
+}
+
+int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
+				    struct drm_file *file_priv)
+{
+	struct drm_mode_obj_set_property *arg = data;
+	struct drm_mode_object *arg_obj;
+	struct drm_mode_object *prop_obj;
+	struct drm_property *property;
+	int ret = -EINVAL;
+	int i;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	mutex_lock(&dev->mode_config.mutex);
 
-	if (i == DRM_CONNECTOR_MAX_PROPERTY) {
+	arg_obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
+	if (!arg_obj)
+		goto out;
+	if (!arg_obj->properties)
 		goto out;
-	}
 
-	obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
-	if (!obj) {
+	for (i = 0; i < arg_obj->properties->count; i++)
+		if (arg_obj->properties->ids[i] == arg->prop_id)
+			break;
+
+	if (i == arg_obj->properties->count)
 		goto out;
-	}
-	property = obj_to_property(obj);
 
-	if (property->flags & DRM_MODE_PROP_IMMUTABLE)
+	prop_obj = drm_mode_object_find(dev, arg->prop_id,
+					DRM_MODE_OBJECT_PROPERTY);
+	if (!prop_obj)
 		goto out;
+	property = obj_to_property(prop_obj);
 
-	if (property->flags & DRM_MODE_PROP_RANGE) {
-		if (out_resp->value < property->values[0])
-			goto out;
-
-		if (out_resp->value > property->values[1])
-			goto out;
-	} else {
-		int found = 0;
-		for (i = 0; i < property->num_values; i++) {
-			if (property->values[i] == out_resp->value) {
-				found = 1;
-				break;
-			}
-		}
-		if (!found) {
-			goto out;
-		}
-	}
+	if (!drm_property_change_is_valid(property, arg->value))
+		goto out;
 
-	/* Do DPMS ourselves */
-	if (property == connector->dev->mode_config.dpms_property) {
-		if (connector->funcs->dpms)
-			(*connector->funcs->dpms)(connector, (int) out_resp->value);
-		ret = 0;
-	} else if (connector->funcs->set_property)
-		ret = connector->funcs->set_property(connector, property, out_resp->value);
-
-	/* store the property value if successful */
-	if (!ret)
-		drm_connector_property_set_value(connector, property, out_resp->value);
+	switch (arg_obj->type) {
+	case DRM_MODE_OBJECT_CONNECTOR:
+		ret = drm_mode_connector_set_obj_prop(arg_obj, property,
+						      arg->value);
+		break;
+	case DRM_MODE_OBJECT_CRTC:
+		ret = drm_mode_crtc_set_obj_prop(arg_obj, property, arg->value);
+		break;
+	case DRM_MODE_OBJECT_PLANE:
+		ret = drm_mode_plane_set_obj_prop(arg_obj, property, arg->value);
+		break;
+	}
+
 out:
 	mutex_unlock(&dev->mode_config.mutex);
 	return ret;
@@ -3173,6 +3392,11 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
 	}
 	crtc = obj_to_crtc(obj);
 
+	if (crtc->funcs->gamma_set == NULL) {
+		ret = -ENOSYS;
+		goto out;
+	}
+
 	/* memcpy into gamma store */
 	if (crtc_lut->gamma_size != crtc->gamma_size) {
 		ret = -EINVAL;
@@ -3468,3 +3692,140 @@ void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
 	}
 }
 EXPORT_SYMBOL(drm_fb_get_bpp_depth);
+
+/**
+ * drm_format_num_planes - get the number of planes for format
+ * @format: pixel format (DRM_FORMAT_*)
+ *
+ * RETURNS:
+ * The number of planes used by the specified pixel format.
+ */
+int drm_format_num_planes(uint32_t format)
+{
+	switch (format) {
+	case DRM_FORMAT_YUV410:
+	case DRM_FORMAT_YVU410:
+	case DRM_FORMAT_YUV411:
+	case DRM_FORMAT_YVU411:
+	case DRM_FORMAT_YUV420:
+	case DRM_FORMAT_YVU420:
+	case DRM_FORMAT_YUV422:
+	case DRM_FORMAT_YVU422:
+	case DRM_FORMAT_YUV444:
+	case DRM_FORMAT_YVU444:
+		return 3;
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV21:
+	case DRM_FORMAT_NV16:
+	case DRM_FORMAT_NV61:
+		return 2;
+	default:
+		return 1;
+	}
+}
+EXPORT_SYMBOL(drm_format_num_planes);
+
+/**
+ * drm_format_plane_cpp - determine the bytes per pixel value
+ * @format: pixel format (DRM_FORMAT_*)
+ * @plane: plane index
+ *
+ * RETURNS:
+ * The bytes per pixel value for the specified plane.
+ */
+int drm_format_plane_cpp(uint32_t format, int plane)
+{
+	unsigned int depth;
+	int bpp;
+
+	if (plane >= drm_format_num_planes(format))
+		return 0;
+
+	switch (format) {
+	case DRM_FORMAT_YUYV:
+	case DRM_FORMAT_YVYU:
+	case DRM_FORMAT_UYVY:
+	case DRM_FORMAT_VYUY:
+		return 2;
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV21:
+	case DRM_FORMAT_NV16:
+	case DRM_FORMAT_NV61:
+		return plane ? 2 : 1;
+	case DRM_FORMAT_YUV410:
+	case DRM_FORMAT_YVU410:
+	case DRM_FORMAT_YUV411:
+	case DRM_FORMAT_YVU411:
+	case DRM_FORMAT_YUV420:
+	case DRM_FORMAT_YVU420:
+	case DRM_FORMAT_YUV422:
+	case DRM_FORMAT_YVU422:
+	case DRM_FORMAT_YUV444:
+	case DRM_FORMAT_YVU444:
+		return 1;
+	default:
+		drm_fb_get_bpp_depth(format, &depth, &bpp);
+		return bpp >> 3;
+	}
+}
+EXPORT_SYMBOL(drm_format_plane_cpp);
+
+/**
+ * drm_format_horz_chroma_subsampling - get the horizontal chroma subsampling factor
+ * @format: pixel format (DRM_FORMAT_*)
+ *
+ * RETURNS:
+ * The horizontal chroma subsampling factor for the
+ * specified pixel format.
+ */
+int drm_format_horz_chroma_subsampling(uint32_t format)
+{
+	switch (format) {
+	case DRM_FORMAT_YUV411:
+	case DRM_FORMAT_YVU411:
+	case DRM_FORMAT_YUV410:
+	case DRM_FORMAT_YVU410:
+		return 4;
+	case DRM_FORMAT_YUYV:
+	case DRM_FORMAT_YVYU:
+	case DRM_FORMAT_UYVY:
+	case DRM_FORMAT_VYUY:
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV21:
+	case DRM_FORMAT_NV16:
+	case DRM_FORMAT_NV61:
+	case DRM_FORMAT_YUV422:
+	case DRM_FORMAT_YVU422:
+	case DRM_FORMAT_YUV420:
+	case DRM_FORMAT_YVU420:
+		return 2;
+	default:
+		return 1;
+	}
+}
+EXPORT_SYMBOL(drm_format_horz_chroma_subsampling);
+
+/**
+ * drm_format_vert_chroma_subsampling - get the vertical chroma subsampling factor
+ * @format: pixel format (DRM_FORMAT_*)
+ *
+ * RETURNS:
+ * The vertical chroma subsampling factor for the
+ * specified pixel format.
+ */
+int drm_format_vert_chroma_subsampling(uint32_t format)
+{
+	switch (format) {
+	case DRM_FORMAT_YUV410:
+	case DRM_FORMAT_YVU410:
+		return 4;
+	case DRM_FORMAT_YUV420:
+	case DRM_FORMAT_YVU420:
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV21:
+		return 2;
+	default:
+		return 1;
+	}
+}
+EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);
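
Taken together, the new helpers give framebuffer_check() everything it needs: each plane i must satisfy pitches[i] >= drm_format_plane_cpp(format, i) * (width / (i ? hsub : 1)). For a hypothetical 1920x1080 NV12 buffer (hsub = vsub = 2) that means at least 1920 bytes of pitch for the Y plane and 2 * 960 = 1920 bytes for the interleaved CbCr plane. The same arithmetic as a standalone demo (illustrative only, not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned int width = 1920, hsub = 2;
	unsigned int cpp[2] = { 1, 2 };	/* NV12: Y plane; interleaved CbCr */
	int i;

	for (i = 0; i < 2; i++) {
		unsigned int w = width / (i ? hsub : 1);
		printf("plane %d: min pitch = %u bytes\n", i, cpp[i] * w);
	}
	return 0;
}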
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 81118893264c..3252e7067d8b 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -518,7 +518,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 	int count = 0, ro, fail = 0;
 	struct drm_crtc_helper_funcs *crtc_funcs;
 	struct drm_mode_set save_set;
-	int ret = 0;
+	int ret;
 	int i;
 
 	DRM_DEBUG_KMS("\n");
@@ -1023,36 +1023,3 @@ void drm_helper_hpd_irq_event(struct drm_device *dev)
 	queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0);
 }
 EXPORT_SYMBOL(drm_helper_hpd_irq_event);
-
-
-/**
- * drm_format_num_planes - get the number of planes for format
- * @format: pixel format (DRM_FORMAT_*)
- *
- * RETURNS:
- * The number of planes used by the specified pixel format.
- */
-int drm_format_num_planes(uint32_t format)
-{
-	switch (format) {
-	case DRM_FORMAT_YUV410:
-	case DRM_FORMAT_YVU410:
-	case DRM_FORMAT_YUV411:
-	case DRM_FORMAT_YVU411:
-	case DRM_FORMAT_YUV420:
-	case DRM_FORMAT_YVU420:
-	case DRM_FORMAT_YUV422:
-	case DRM_FORMAT_YVU422:
-	case DRM_FORMAT_YUV444:
-	case DRM_FORMAT_YVU444:
-		return 3;
-	case DRM_FORMAT_NV12:
-	case DRM_FORMAT_NV21:
-	case DRM_FORMAT_NV16:
-	case DRM_FORMAT_NV61:
-		return 2;
-	default:
-		return 1;
-	}
-}
-EXPORT_SYMBOL(drm_format_num_planes);
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 6116e3b75393..8a9d0792e4ec 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -163,7 +163,9 @@ static struct drm_ioctl_desc drm_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED)
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 };
 
 #define DRM_CORE_IOCTL_COUNT	ARRAY_SIZE( drm_ioctls )
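
These two table entries wire the new object-property ioctls from drm_crtc.c into the core dispatcher. From userspace they are normally reached through libdrm rather than raw ioctl(); a hedged sketch, assuming a libdrm recent enough to carry the drmModeObjectSetProperty() wrapper:

#include <stdint.h>
#include <xf86drmMode.h>

/* Set a property on a plane; fd, plane_id, prop_id and value are
 * caller-supplied.  Sketch only, not part of this commit. */
static int set_plane_prop(int fd, uint32_t plane_id,
			  uint32_t prop_id, uint64_t value)
{
	return drmModeObjectSetProperty(fd, plane_id,
					DRM_MODE_OBJECT_PLANE,
					prop_id, value);
}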
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 5a18b0df8285..608bddfc7e35 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -81,7 +81,7 @@ struct detailed_mode_closure {
 #define LEVEL_CVT	3
 
 static struct edid_quirk {
-	char *vendor;
+	char vendor[4];
 	int product_id;
 	u32 quirks;
 } edid_quirk_list[] = {
@@ -149,13 +149,13 @@ EXPORT_SYMBOL(drm_edid_header_is_valid);
  * Sanity check the EDID block (base or extension).  Return 0 if the block
  * doesn't check out, or 1 if it's valid.
  */
-bool drm_edid_block_valid(u8 *raw_edid)
+bool drm_edid_block_valid(u8 *raw_edid, int block)
 {
 	int i;
 	u8 csum = 0;
 	struct edid *edid = (struct edid *)raw_edid;
 
-	if (raw_edid[0] == 0x00) {
+	if (block == 0) {
 		int score = drm_edid_header_is_valid(raw_edid);
 		if (score == 8) ;
 		else if (score >= 6) {
@@ -219,7 +219,7 @@ bool drm_edid_is_valid(struct edid *edid)
 		return false;
 
 	for (i = 0; i <= edid->extensions; i++)
-		if (!drm_edid_block_valid(raw + i * EDID_LENGTH))
+		if (!drm_edid_block_valid(raw + i * EDID_LENGTH, i))
 			return false;
 
 	return true;
@@ -299,7 +299,7 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
 	for (i = 0; i < 4; i++) {
 		if (drm_do_probe_ddc_edid(adapter, block, 0, EDID_LENGTH))
 			goto out;
-		if (drm_edid_block_valid(block))
+		if (drm_edid_block_valid(block, 0))
 			break;
 		if (i == 0 && drm_edid_is_zero(block, EDID_LENGTH)) {
 			connector->null_edid_counter++;
@@ -324,7 +324,7 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
 						  block + (valid_extensions + 1) * EDID_LENGTH,
 						  j, EDID_LENGTH))
 				goto out;
-			if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH)) {
+			if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH, j)) {
 				valid_extensions++;
 				break;
 			}
@@ -486,23 +486,47 @@ static void edid_fixup_preferred(struct drm_connector *connector,
 		preferred_mode->type |= DRM_MODE_TYPE_PREFERRED;
 }
 
+static bool
+mode_is_rb(const struct drm_display_mode *mode)
+{
+	return (mode->htotal - mode->hdisplay == 160) &&
+	       (mode->hsync_end - mode->hdisplay == 80) &&
+	       (mode->hsync_end - mode->hsync_start == 32) &&
+	       (mode->vsync_start - mode->vdisplay == 3);
+}
+
+/*
+ * drm_mode_find_dmt - Create a copy of a mode if present in DMT
+ * @dev: Device to duplicate against
+ * @hsize: Mode width
+ * @vsize: Mode height
+ * @fresh: Mode refresh rate
+ * @rb: Mode reduced-blanking-ness
+ *
+ * Walk the DMT mode list looking for a match for the given parameters.
+ * Return a newly allocated copy of the mode, or NULL if not found.
+ */
 struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
-					   int hsize, int vsize, int fresh)
+					   int hsize, int vsize, int fresh,
+					   bool rb)
 {
-	struct drm_display_mode *mode = NULL;
 	int i;
 
 	for (i = 0; i < drm_num_dmt_modes; i++) {
 		const struct drm_display_mode *ptr = &drm_dmt_modes[i];
-		if (hsize == ptr->hdisplay &&
-		    vsize == ptr->vdisplay &&
-		    fresh == drm_mode_vrefresh(ptr)) {
-			/* get the expected default mode */
-			mode = drm_mode_duplicate(dev, ptr);
-			break;
-		}
+		if (hsize != ptr->hdisplay)
+			continue;
+		if (vsize != ptr->vdisplay)
+			continue;
+		if (fresh != drm_mode_vrefresh(ptr))
+			continue;
+		if (rb != mode_is_rb(ptr))
+			continue;
+
+		return drm_mode_duplicate(dev, ptr);
 	}
-	return mode;
+
+	return NULL;
 }
 EXPORT_SYMBOL(drm_mode_find_dmt);
 
@@ -731,10 +755,17 @@ drm_mode_std(struct drm_connector *connector, struct edid *edid,
 	}
 
 	/* check whether it can be found in default mode table */
-	mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate);
+	if (drm_monitor_supports_rb(edid)) {
+		mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate,
+					 true);
+		if (mode)
+			return mode;
+	}
+	mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate, false);
 	if (mode)
 		return mode;
 
+	/* okay, generate it */
 	switch (timing_level) {
 	case LEVEL_DMT:
 		break;
@@ -748,6 +779,8 @@ drm_mode_std(struct drm_connector *connector, struct edid *edid,
 	 * secondary GTF curve.  Please don't do that.
 	 */
 	mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
+	if (!mode)
+		return NULL;
 	if (drm_mode_hsync(mode) > drm_gtf2_hbreak(edid)) {
 		drm_mode_destroy(dev, mode);
 		mode = drm_gtf_mode_complex(dev, hsize, vsize,
@@ -909,15 +942,6 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
 }
 
 static bool
-mode_is_rb(const struct drm_display_mode *mode)
-{
-	return (mode->htotal - mode->hdisplay == 160) &&
-	       (mode->hsync_end - mode->hdisplay == 80) &&
-	       (mode->hsync_end - mode->hsync_start == 32) &&
-	       (mode->vsync_start - mode->vdisplay == 3);
-}
-
-static bool
 mode_in_hsync_range(const struct drm_display_mode *mode,
 		    struct edid *edid, u8 *t)
 {
@@ -994,12 +1018,8 @@ mode_in_range(const struct drm_display_mode *mode, struct edid *edid,
 	return true;
 }
 
-/*
- * XXX If drm_dmt_modes ever regrows the CVT-R modes (and it will) this will
- * need to account for them.
- */
 static int
-drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
+drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid,
 			struct detailed_timing *timing)
 {
 	int i, modes = 0;
@@ -1019,17 +1039,110 @@ drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
 	return modes;
 }
 
+/* fix up 1366x768 mode from 1368x768;
+ * GTF/CVT can't express 1366 width which isn't divisible by 8
+ */
+static void fixup_mode_1366x768(struct drm_display_mode *mode)
+{
+	if (mode->hdisplay == 1368 && mode->vdisplay == 768) {
+		mode->hdisplay = 1366;
+		mode->hsync_start--;
+		mode->hsync_end--;
+		drm_mode_set_name(mode);
+	}
+}
+
+static int
+drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
+			struct detailed_timing *timing)
+{
+	int i, modes = 0;
+	struct drm_display_mode *newmode;
+	struct drm_device *dev = connector->dev;
+
+	for (i = 0; i < num_extra_modes; i++) {
+		const struct minimode *m = &extra_modes[i];
+		newmode = drm_gtf_mode(dev, m->w, m->h, m->r, 0, 0);
+		if (!newmode)
+			return modes;
+
+		fixup_mode_1366x768(newmode);
+		if (!mode_in_range(newmode, edid, timing)) {
+			drm_mode_destroy(dev, newmode);
+			continue;
+		}
+
+		drm_mode_probed_add(connector, newmode);
+		modes++;
+	}
+
+	return modes;
+}
+
+static int
+drm_cvt_modes_for_range(struct drm_connector *connector, struct edid *edid,
+			struct detailed_timing *timing)
+{
+	int i, modes = 0;
+	struct drm_display_mode *newmode;
+	struct drm_device *dev = connector->dev;
+	bool rb = drm_monitor_supports_rb(edid);
+
+	for (i = 0; i < num_extra_modes; i++) {
+		const struct minimode *m = &extra_modes[i];
+		newmode = drm_cvt_mode(dev, m->w, m->h, m->r, rb, 0, 0);
+		if (!newmode)
+			return modes;
+
+		fixup_mode_1366x768(newmode);
+		if (!mode_in_range(newmode, edid, timing)) {
+			drm_mode_destroy(dev, newmode);
+			continue;
+		}
+
+		drm_mode_probed_add(connector, newmode);
+		modes++;
+	}
+
+	return modes;
+}
+
 static void
 do_inferred_modes(struct detailed_timing *timing, void *c)
 {
 	struct detailed_mode_closure *closure = c;
 	struct detailed_non_pixel *data = &timing->data.other_data;
-	int gtf = (closure->edid->features & DRM_EDID_FEATURE_DEFAULT_GTF);
+	struct detailed_data_monitor_range *range = &data->data.range;
+
+	if (data->type != EDID_DETAIL_MONITOR_RANGE)
+		return;
 
-	if (gtf && data->type == EDID_DETAIL_MONITOR_RANGE)
+	closure->modes += drm_dmt_modes_for_range(closure->connector,
+						  closure->edid,
+						  timing);
+
+	if (!version_greater(closure->edid, 1, 1))
+		return; /* GTF not defined yet */
+
+	switch (range->flags) {
+	case 0x02: /* secondary gtf, XXX could do more */
+	case 0x00: /* default gtf */
 		closure->modes += drm_gtf_modes_for_range(closure->connector,
 							  closure->edid,
 							  timing);
+		break;
+	case 0x04: /* cvt, only in 1.4+ */
+		if (!version_greater(closure->edid, 1, 3))
+			break;
+
+		closure->modes += drm_cvt_modes_for_range(closure->connector,
+							  closure->edid,
+							  timing);
+		break;
+	case 0x01: /* just the ranges, no formula */
+	default:
+		break;
+	}
 }
 
 static int
@@ -1062,8 +1175,8 @@ drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing)
 		mode = drm_mode_find_dmt(connector->dev,
 					 est3_modes[m].w,
 					 est3_modes[m].h,
-					 est3_modes[m].r
-					 /*, est3_modes[m].rb */);
+					 est3_modes[m].r,
+					 est3_modes[m].rb);
 		if (mode) {
 			drm_mode_probed_add(connector, mode);
 			modes++;
@@ -1312,6 +1425,8 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
 #define VENDOR_BLOCK    0x03
 #define SPEAKER_BLOCK	0x04
 #define EDID_BASIC_AUDIO	(1 << 6)
+#define EDID_CEA_YCRCB444	(1 << 5)
+#define EDID_CEA_YCRCB422	(1 << 4)
 
 /**
  * Search EDID for CEA extension block.
@@ -1666,13 +1781,29 @@ static void drm_add_display_info(struct edid *edid,
 	info->bpc = 0;
 	info->color_formats = 0;
 
-	/* Only defined for 1.4 with digital displays */
-	if (edid->revision < 4)
+	if (edid->revision < 3)
 		return;
 
 	if (!(edid->input & DRM_EDID_INPUT_DIGITAL))
 		return;
 
+	/* Get data from CEA blocks if present */
+	edid_ext = drm_find_cea_extension(edid);
+	if (edid_ext) {
+		info->cea_rev = edid_ext[1];
+
+		/* The existence of a CEA block should imply RGB support */
+		info->color_formats = DRM_COLOR_FORMAT_RGB444;
+		if (edid_ext[3] & EDID_CEA_YCRCB444)
+			info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
+		if (edid_ext[3] & EDID_CEA_YCRCB422)
+			info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
+	}
+
+	/* Only defined for 1.4 with digital displays */
+	if (edid->revision < 4)
+		return;
+
 	switch (edid->input & DRM_EDID_DIGITAL_DEPTH_MASK) {
 	case DRM_EDID_DIGITAL_DEPTH_6:
 		info->bpc = 6;
@@ -1698,18 +1829,11 @@ static void drm_add_display_info(struct edid *edid,
 		break;
 	}
 
-	info->color_formats = DRM_COLOR_FORMAT_RGB444;
-	if (info->color_formats & DRM_EDID_FEATURE_RGB_YCRCB444)
-		info->color_formats = DRM_COLOR_FORMAT_YCRCB444;
-	if (info->color_formats & DRM_EDID_FEATURE_RGB_YCRCB422)
-		info->color_formats = DRM_COLOR_FORMAT_YCRCB422;
-
-	/* Get data from CEA blocks if present */
-	edid_ext = drm_find_cea_extension(edid);
-	if (!edid_ext)
-		return;
-
-	info->cea_rev = edid_ext[1];
+	info->color_formats |= DRM_COLOR_FORMAT_RGB444;
+	if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB444)
+		info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
+	if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422)
+		info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
 }
 
 /**
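For reference, the CEA parsing added above reads two header bytes of the extension block: byte 1 carries the CEA-861 revision (saved as info->cea_rev) and byte 3 is a capability bitfield whose bits 4 and 5 advertise YCbCr 4:2:2 and 4:4:4 sampling, matching the EDID_CEA_YCRCB* masks defined earlier. An illustrative (not the kernel's) view of that header:

/* First bytes of a CEA-861 EDID extension block. */
struct cea_ext_header_example {
	unsigned char tag;		/* 0x02 identifies a CEA extension */
	unsigned char revision;		/* CEA-861 revision -> info->cea_rev */
	unsigned char dtd_offset;	/* where detailed timings begin */
	unsigned char flags;		/* bit 6 basic audio, bit 5 YCbCr 4:4:4,
					 * bit 4 YCbCr 4:2:2 */
};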
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index da9acba2dd6c..66d4a28ad5a2 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -173,7 +173,7 @@ static int edid_load(struct drm_connector *connector, char *name,
 	}
 	memcpy(edid, fwdata, fwsize);
 
-	if (!drm_edid_block_valid(edid)) {
+	if (!drm_edid_block_valid(edid, 0)) {
 		DRM_ERROR("Base block of EDID firmware \"%s\" is invalid ",
 			  name);
 		kfree(edid);
@@ -185,7 +185,7 @@ static int edid_load(struct drm_connector *connector, char *name,
 		if (i != valid_extensions + 1)
 			memcpy(edid + (valid_extensions + 1) * EDID_LENGTH,
 			       edid + i * EDID_LENGTH, EDID_LENGTH);
-		if (drm_edid_block_valid(edid + i * EDID_LENGTH))
+		if (drm_edid_block_valid(edid + i * EDID_LENGTH, i))
 			valid_extensions++;
 	}
 
@@ -220,18 +220,18 @@ int drm_load_edid_firmware(struct drm_connector *connector)
 {
 	char *connector_name = drm_get_connector_name(connector);
 	char *edidname = edid_firmware, *last, *colon;
-	int ret = 0;
+	int ret;
 
 	if (*edidname == '\0')
-		return ret;
+		return 0;
 
 	colon = strchr(edidname, ':');
 	if (colon != NULL) {
 		if (strncmp(connector_name, edidname, colon - edidname))
-			return ret;
+			return 0;
 		edidname = colon + 1;
 		if (*edidname == '\0')
-			return ret;
+			return 0;
 	}
 
 	last = edidname + strlen(edidname) - 1;
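drm_edid_block_valid() now takes the block number as a second argument, presumably so validation can treat the base block (which must carry the fixed 8-byte EDID header) differently from extension blocks (which mainly need a valid checksum). A caller walking a multi-block EDID blob would then look roughly like this sketch:

/* Sketch: count the valid blocks of a raw EDID blob,
 * assuming drm_edid_block_valid(block, blocknum) as used above. */
static int count_valid_blocks(u8 *edid)
{
	int i, valid = 0;
	int blocks = edid[0x7e] + 1;	/* byte 126 = extension count */

	for (i = 0; i < blocks; i++)
		if (drm_edid_block_valid(edid + i * EDID_LENGTH, i))
			valid++;
	return valid;
}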
diff --git a/drivers/gpu/drm/drm_edid_modes.h b/drivers/gpu/drm/drm_edid_modes.h
index a91ffb117220..ff98a7eb38dd 100644
--- a/drivers/gpu/drm/drm_edid_modes.h
+++ b/drivers/gpu/drm/drm_edid_modes.h
@@ -30,7 +30,6 @@
 /*
  * Autogenerated from the DMT spec.
  * This table is copied from xfree86/modes/xf86EdidModes.c.
- * But the mode with Reduced blank feature is deleted.
  */
 static const struct drm_display_mode drm_dmt_modes[] = {
 	/* 640x350@85Hz */
@@ -81,6 +80,10 @@ static const struct drm_display_mode drm_dmt_modes[] = {
81 { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 56250, 800, 832, 80 { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 56250, 800, 832,
82 896, 1048, 0, 600, 601, 604, 631, 0, 81 896, 1048, 0, 600, 601, 604, 631, 0,
83 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, 82 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
83 /* 800x600@120Hz RB */
84 { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 73250, 800, 848,
85 880, 960, 0, 600, 603, 607, 636, 0,
86 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
84 /* 848x480@60Hz */ 87 /* 848x480@60Hz */
85 { DRM_MODE("848x480", DRM_MODE_TYPE_DRIVER, 33750, 848, 864, 88 { DRM_MODE("848x480", DRM_MODE_TYPE_DRIVER, 33750, 848, 864,
86 976, 1088, 0, 480, 486, 494, 517, 0, 89 976, 1088, 0, 480, 486, 494, 517, 0,
@@ -106,10 +109,18 @@ static const struct drm_display_mode drm_dmt_modes[] = {
106 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072, 109 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072,
107 1168, 1376, 0, 768, 769, 772, 808, 0, 110 1168, 1376, 0, 768, 769, 772, 808, 0,
108 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, 111 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
112 /* 1024x768@120Hz RB */
113 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 115500, 1024, 1072,
114 1104, 1184, 0, 768, 771, 775, 813, 0,
115 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
109 /* 1152x864@75Hz */ 116 /* 1152x864@75Hz */
110 { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216, 117 { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
111 1344, 1600, 0, 864, 865, 868, 900, 0, 118 1344, 1600, 0, 864, 865, 868, 900, 0,
112 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, 119 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
120 /* 1280x768@60Hz RB */
121 { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 68250, 1280, 1328,
122 1360, 1440, 0, 768, 771, 778, 790, 0,
123 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
113 /* 1280x768@60Hz */ 124 /* 1280x768@60Hz */
114 { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344, 125 { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
115 1472, 1664, 0, 768, 771, 778, 798, 0, 126 1472, 1664, 0, 768, 771, 778, 798, 0,
@@ -122,6 +133,14 @@ static const struct drm_display_mode drm_dmt_modes[] = {
122 { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 117500, 1280, 1360, 133 { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 117500, 1280, 1360,
123 1496, 1712, 0, 768, 771, 778, 809, 0, 134 1496, 1712, 0, 768, 771, 778, 809, 0,
124 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, 135 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
136 /* 1280x768@120Hz RB */
137 { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 140250, 1280, 1328,
138 1360, 1440, 0, 768, 771, 778, 813, 0,
139 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
140 /* 1280x800@60Hz RB */
141 { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 71000, 1280, 1328,
142 1360, 1440, 0, 800, 803, 809, 823, 0,
143 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
125 /* 1280x800@60Hz */ 144 /* 1280x800@60Hz */
126 { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352, 145 { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
127 1480, 1680, 0, 800, 803, 809, 831, 0, 146 1480, 1680, 0, 800, 803, 809, 831, 0,
@@ -134,6 +153,10 @@ static const struct drm_display_mode drm_dmt_modes[] = {
134 { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 122500, 1280, 1360, 153 { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 122500, 1280, 1360,
135 1496, 1712, 0, 800, 803, 809, 843, 0, 154 1496, 1712, 0, 800, 803, 809, 843, 0,
136 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, 155 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
156 /* 1280x800@120Hz RB */
157 { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 146250, 1280, 1328,
158 1360, 1440, 0, 800, 803, 809, 847, 0,
159 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
137 /* 1280x960@60Hz */ 160 /* 1280x960@60Hz */
138 { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376, 161 { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
139 1488, 1800, 0, 960, 961, 964, 1000, 0, 162 1488, 1800, 0, 960, 961, 964, 1000, 0,
@@ -142,6 +165,10 @@ static const struct drm_display_mode drm_dmt_modes[] = {
142 { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1344, 165 { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1344,
143 1504, 1728, 0, 960, 961, 964, 1011, 0, 166 1504, 1728, 0, 960, 961, 964, 1011, 0,
144 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, 167 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
168 /* 1280x960@120Hz RB */
169 { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 175500, 1280, 1328,
170 1360, 1440, 0, 960, 963, 967, 1017, 0,
171 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
145 /* 1280x1024@60Hz */ 172 /* 1280x1024@60Hz */
146 { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328, 173 { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
147 1440, 1688, 0, 1024, 1025, 1028, 1066, 0, 174 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
@@ -154,22 +181,42 @@ static const struct drm_display_mode drm_dmt_modes[] = {
154 { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 157500, 1280, 1344, 181 { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 157500, 1280, 1344,
155 1504, 1728, 0, 1024, 1025, 1028, 1072, 0, 182 1504, 1728, 0, 1024, 1025, 1028, 1072, 0,
156 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, 183 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
184 /* 1280x1024@120Hz RB */
185 { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 187250, 1280, 1328,
186 1360, 1440, 0, 1024, 1027, 1034, 1084, 0,
187 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
157 /* 1360x768@60Hz */ 188 /* 1360x768@60Hz */
158 { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424, 189 { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
159 1536, 1792, 0, 768, 771, 777, 795, 0, 190 1536, 1792, 0, 768, 771, 777, 795, 0,
160 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, 191 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
161 /* 1440x1050@60Hz */ 192 /* 1360x768@120Hz RB */
193 { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 148250, 1360, 1408,
194 1440, 1520, 0, 768, 771, 776, 813, 0,
195 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
196 /* 1400x1050@60Hz RB */
197 { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 101000, 1400, 1448,
198 1480, 1560, 0, 1050, 1053, 1057, 1080, 0,
199 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
200 /* 1400x1050@60Hz */
162 { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488, 201 { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
163 1632, 1864, 0, 1050, 1053, 1057, 1089, 0, 202 1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
164 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, 203 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
165 /* 1440x1050@75Hz */ 204 /* 1400x1050@75Hz */
166 { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 156000, 1400, 1504, 205 { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 156000, 1400, 1504,
167 1648, 1896, 0, 1050, 1053, 1057, 1099, 0, 206 1648, 1896, 0, 1050, 1053, 1057, 1099, 0,
168 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, 207 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
169 /* 1440x1050@85Hz */ 208 /* 1400x1050@85Hz */
170 { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 179500, 1400, 1504, 209 { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 179500, 1400, 1504,
171 1656, 1912, 0, 1050, 1053, 1057, 1105, 0, 210 1656, 1912, 0, 1050, 1053, 1057, 1105, 0,
172 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, 211 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
212 /* 1400x1050@120Hz RB */
213 { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 208000, 1400, 1448,
214 1480, 1560, 0, 1050, 1053, 1057, 1112, 0,
215 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
216 /* 1440x900@60Hz RB */
217 { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 88750, 1440, 1488,
218 1520, 1600, 0, 900, 903, 909, 926, 0,
219 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
173 /* 1440x900@60Hz */ 220 /* 1440x900@60Hz */
174 { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520, 221 { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
175 1672, 1904, 0, 900, 903, 909, 934, 0, 222 1672, 1904, 0, 900, 903, 909, 934, 0,
@@ -182,6 +229,10 @@ static const struct drm_display_mode drm_dmt_modes[] = {
182 { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 157000, 1440, 1544, 229 { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 157000, 1440, 1544,
183 1696, 1952, 0, 900, 903, 909, 948, 0, 230 1696, 1952, 0, 900, 903, 909, 948, 0,
184 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, 231 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
232 /* 1440x900@120Hz RB */
233 { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 182750, 1440, 1488,
234 1520, 1600, 0, 900, 903, 909, 953, 0,
235 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
185 /* 1600x1200@60Hz */ 236 /* 1600x1200@60Hz */
186 { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664, 237 { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
187 1856, 2160, 0, 1200, 1201, 1204, 1250, 0, 238 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
@@ -202,6 +253,14 @@ static const struct drm_display_mode drm_dmt_modes[] = {
202 { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 229500, 1600, 1664, 253 { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 229500, 1600, 1664,
203 1856, 2160, 0, 1200, 1201, 1204, 1250, 0, 254 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
204 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, 255 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
256 /* 1600x1200@120Hz RB */
257 { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 268250, 1600, 1648,
258 1680, 1760, 0, 1200, 1203, 1207, 1271, 0,
259 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
260 /* 1680x1050@60Hz RB */
261 { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 119000, 1680, 1728,
262 1760, 1840, 0, 1050, 1053, 1059, 1080, 0,
263 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
205 /* 1680x1050@60Hz */ 264 /* 1680x1050@60Hz */
206 { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784, 265 { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
207 1960, 2240, 0, 1050, 1053, 1059, 1089, 0, 266 1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
@@ -214,15 +273,23 @@ static const struct drm_display_mode drm_dmt_modes[] = {
214 { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 214750, 1680, 1808, 273 { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 214750, 1680, 1808,
215 1984, 2288, 0, 1050, 1053, 1059, 1105, 0, 274 1984, 2288, 0, 1050, 1053, 1059, 1105, 0,
216 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, 275 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
276 /* 1680x1050@120Hz RB */
277 { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 245500, 1680, 1728,
278 1760, 1840, 0, 1050, 1053, 1059, 1112, 0,
279 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
217 /* 1792x1344@60Hz */ 280 /* 1792x1344@60Hz */
218 { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920, 281 { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
219 2120, 2448, 0, 1344, 1345, 1348, 1394, 0, 282 2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
220 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, 283 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
221 /* 1729x1344@75Hz */ 284 /* 1792x1344@75Hz */
222 { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 261000, 1792, 1888, 285 { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 261000, 1792, 1888,
223 2104, 2456, 0, 1344, 1345, 1348, 1417, 0, 286 2104, 2456, 0, 1344, 1345, 1348, 1417, 0,
224 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, 287 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
225 /* 1853x1392@60Hz */ 288 /* 1792x1344@120Hz RB */
289 { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 333250, 1792, 1840,
290 1872, 1952, 0, 1344, 1347, 1351, 1423, 0,
291 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
292 /* 1856x1392@60Hz */
226 { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952, 293 { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
227 2176, 2528, 0, 1392, 1393, 1396, 1439, 0, 294 2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
228 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, 295 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
@@ -230,6 +297,14 @@ static const struct drm_display_mode drm_dmt_modes[] = {
230 { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 288000, 1856, 1984, 297 { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 288000, 1856, 1984,
231 2208, 2560, 0, 1392, 1395, 1399, 1500, 0, 298 2208, 2560, 0, 1392, 1395, 1399, 1500, 0,
232 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, 299 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
300 /* 1856x1392@120Hz RB */
301 { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 356500, 1856, 1904,
302 1936, 2016, 0, 1392, 1395, 1399, 1474, 0,
303 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
304 /* 1920x1200@60Hz RB */
305 { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 154000, 1920, 1968,
306 2000, 2080, 0, 1200, 1203, 1209, 1235, 0,
307 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
233 /* 1920x1200@60Hz */ 308 /* 1920x1200@60Hz */
234 { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056, 309 { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
235 2256, 2592, 0, 1200, 1203, 1209, 1245, 0, 310 2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
@@ -242,6 +317,10 @@ static const struct drm_display_mode drm_dmt_modes[] = {
242 { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 281250, 1920, 2064, 317 { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 281250, 1920, 2064,
243 2272, 2624, 0, 1200, 1203, 1209, 1262, 0, 318 2272, 2624, 0, 1200, 1203, 1209, 1262, 0,
244 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, 319 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
320 /* 1920x1200@120Hz RB */
321 { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 317000, 1920, 1968,
322 2000, 2080, 0, 1200, 1203, 1209, 1271, 0,
323 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
245 /* 1920x1440@60Hz */ 324 /* 1920x1440@60Hz */
246 { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048, 325 { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
247 2256, 2600, 0, 1440, 1441, 1444, 1500, 0, 326 2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
@@ -250,6 +329,14 @@ static const struct drm_display_mode drm_dmt_modes[] = {
250 { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2064, 329 { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2064,
251 2288, 2640, 0, 1440, 1441, 1444, 1500, 0, 330 2288, 2640, 0, 1440, 1441, 1444, 1500, 0,
252 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, 331 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
332 /* 1920x1440@120Hz RB */
333 { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 380500, 1920, 1968,
334 2000, 2080, 0, 1440, 1443, 1447, 1525, 0,
335 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
336 /* 2560x1600@60Hz RB */
337 { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 268500, 2560, 2608,
338 2640, 2720, 0, 1600, 1603, 1609, 1646, 0,
339 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
253 /* 2560x1600@60Hz */ 340 /* 2560x1600@60Hz */
254 { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752, 341 { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
255 3032, 3504, 0, 1600, 1603, 1609, 1658, 0, 342 3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
@@ -262,6 +349,11 @@ static const struct drm_display_mode drm_dmt_modes[] = {
262 { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 505250, 2560, 2768, 349 { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 505250, 2560, 2768,
263 3048, 3536, 0, 1600, 1603, 1609, 1682, 0, 350 3048, 3536, 0, 1600, 1603, 1609, 1682, 0,
264 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, 351 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
352 /* 2560x1600@120Hz RB */
353 { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 552750, 2560, 2608,
354 2640, 2720, 0, 1600, 1603, 1609, 1694, 0,
355 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
356
265}; 357};
266static const int drm_num_dmt_modes = 358static const int drm_num_dmt_modes =
267 sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode); 359 sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
@@ -320,12 +412,14 @@ static const struct drm_display_mode edid_est_modes[] = {
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */
 };
 
-static const struct {
+struct minimode {
 	short w;
 	short h;
 	short r;
 	short rb;
-} est3_modes[] = {
+};
+
+static const struct minimode est3_modes[] = {
 	/* byte 6 */
 	{ 640, 350, 85, 0 },
 	{ 640, 400, 85, 0 },
@@ -377,288 +471,304 @@ static const struct {
 	{ 1920, 1440, 60, 0 },
 	{ 1920, 1440, 75, 0 },
 };
-static const int num_est3_modes = sizeof(est3_modes) / sizeof(est3_modes[0]);
+static const int num_est3_modes = ARRAY_SIZE(est3_modes);
+
+static const struct minimode extra_modes[] = {
+	{ 1024, 576, 60, 0 },
+	{ 1366, 768, 60, 0 },
+	{ 1600, 900, 60, 0 },
+	{ 1680, 945, 60, 0 },
+	{ 1920, 1080, 60, 0 },
+	{ 2048, 1152, 60, 0 },
+	{ 2048, 1536, 60, 0 },
+};
+static const int num_extra_modes = ARRAY_SIZE(extra_modes);
 
 /*
  * Probably taken from CEA-861 spec.
  * This table is converted from xorg's hw/xfree86/modes/xf86EdidModes.c.
  */
 static const struct drm_display_mode edid_cea_modes[] = {
-	/* 640x480@60Hz */
+	/* 1 - 640x480@60Hz */
 	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
 		   752, 800, 0, 480, 490, 492, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
-	/* 720x480@60Hz */
+	/* 2 - 720x480@60Hz */
 	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
 		   798, 858, 0, 480, 489, 495, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
-	/* 720x480@60Hz */
+	/* 3 - 720x480@60Hz */
 	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
 		   798, 858, 0, 480, 489, 495, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
-	/* 1280x720@60Hz */
+	/* 4 - 1280x720@60Hz */
 	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
 		   1430, 1650, 0, 720, 725, 730, 750, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
-	/* 1920x1080i@60Hz */
+	/* 5 - 1920x1080i@60Hz */
 	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
 		   2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
 		   DRM_MODE_FLAG_INTERLACE) },
-	/* 1440x480i@60Hz */
+	/* 6 - 1440x480i@60Hz */
 	{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
 		   1602, 1716, 0, 480, 488, 494, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-		   DRM_MODE_FLAG_INTERLACE) },
-	/* 1440x480i@60Hz */
+		   DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+	/* 7 - 1440x480i@60Hz */
 	{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
 		   1602, 1716, 0, 480, 488, 494, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-		   DRM_MODE_FLAG_INTERLACE) },
-	/* 1440x240@60Hz */
+		   DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+	/* 8 - 1440x240@60Hz */
 	{ DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
 		   1602, 1716, 0, 240, 244, 247, 262, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
-	/* 1440x240@60Hz */
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+		   DRM_MODE_FLAG_DBLCLK) },
+	/* 9 - 1440x240@60Hz */
 	{ DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
 		   1602, 1716, 0, 240, 244, 247, 262, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
-	/* 2880x480i@60Hz */
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+		   DRM_MODE_FLAG_DBLCLK) },
+	/* 10 - 2880x480i@60Hz */
 	{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
 		   3204, 3432, 0, 480, 488, 494, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 		   DRM_MODE_FLAG_INTERLACE) },
-	/* 2880x480i@60Hz */
+	/* 11 - 2880x480i@60Hz */
 	{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
 		   3204, 3432, 0, 480, 488, 494, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 		   DRM_MODE_FLAG_INTERLACE) },
-	/* 2880x240@60Hz */
+	/* 12 - 2880x240@60Hz */
 	{ DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
 		   3204, 3432, 0, 240, 244, 247, 262, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
-	/* 2880x240@60Hz */
+	/* 13 - 2880x240@60Hz */
 	{ DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
 		   3204, 3432, 0, 240, 244, 247, 262, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
-	/* 1440x480@60Hz */
+	/* 14 - 1440x480@60Hz */
 	{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
 		   1596, 1716, 0, 480, 489, 495, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
-	/* 1440x480@60Hz */
+	/* 15 - 1440x480@60Hz */
 	{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
 		   1596, 1716, 0, 480, 489, 495, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
-	/* 1920x1080@60Hz */
+	/* 16 - 1920x1080@60Hz */
 	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
 		   2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
-	/* 720x576@50Hz */
+	/* 17 - 720x576@50Hz */
 	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
 		   796, 864, 0, 576, 581, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
-	/* 720x576@50Hz */
+	/* 18 - 720x576@50Hz */
 	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
 		   796, 864, 0, 576, 581, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
-	/* 1280x720@50Hz */
+	/* 19 - 1280x720@50Hz */
 	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
 		   1760, 1980, 0, 720, 725, 730, 750, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
-	/* 1920x1080i@50Hz */
+	/* 20 - 1920x1080i@50Hz */
 	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
 		   2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
 		   DRM_MODE_FLAG_INTERLACE) },
-	/* 1440x576i@50Hz */
+	/* 21 - 1440x576i@50Hz */
 	{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
 		   1590, 1728, 0, 576, 580, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-		   DRM_MODE_FLAG_INTERLACE) },
-	/* 1440x576i@50Hz */
+		   DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+	/* 22 - 1440x576i@50Hz */
 	{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
 		   1590, 1728, 0, 576, 580, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-		   DRM_MODE_FLAG_INTERLACE) },
-	/* 1440x288@50Hz */
+		   DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+	/* 23 - 1440x288@50Hz */
 	{ DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
 		   1590, 1728, 0, 288, 290, 293, 312, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
-	/* 1440x288@50Hz */
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+		   DRM_MODE_FLAG_DBLCLK) },
+	/* 24 - 1440x288@50Hz */
 	{ DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
 		   1590, 1728, 0, 288, 290, 293, 312, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
-	/* 2880x576i@50Hz */
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+		   DRM_MODE_FLAG_DBLCLK) },
+	/* 25 - 2880x576i@50Hz */
 	{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
 		   3180, 3456, 0, 576, 580, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 		   DRM_MODE_FLAG_INTERLACE) },
-	/* 2880x576i@50Hz */
+	/* 26 - 2880x576i@50Hz */
 	{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
 		   3180, 3456, 0, 576, 580, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 		   DRM_MODE_FLAG_INTERLACE) },
-	/* 2880x288@50Hz */
+	/* 27 - 2880x288@50Hz */
 	{ DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
 		   3180, 3456, 0, 288, 290, 293, 312, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
-	/* 2880x288@50Hz */
+	/* 28 - 2880x288@50Hz */
 	{ DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
 		   3180, 3456, 0, 288, 290, 293, 312, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
-	/* 1440x576@50Hz */
+	/* 29 - 1440x576@50Hz */
 	{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
 		   1592, 1728, 0, 576, 581, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
-	/* 1440x576@50Hz */
+	/* 30 - 1440x576@50Hz */
 	{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
 		   1592, 1728, 0, 576, 581, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
-	/* 1920x1080@50Hz */
+	/* 31 - 1920x1080@50Hz */
 	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
 		   2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
-	/* 1920x1080@24Hz */
+	/* 32 - 1920x1080@24Hz */
 	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
 		   2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
-	/* 1920x1080@25Hz */
+	/* 33 - 1920x1080@25Hz */
 	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
 		   2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
-	/* 1920x1080@30Hz */
+	/* 34 - 1920x1080@30Hz */
 	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
 		   2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
-	/* 2880x480@60Hz */
+	/* 35 - 2880x480@60Hz */
 	{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
 		   3192, 3432, 0, 480, 489, 495, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
-	/* 2880x480@60Hz */
+	/* 36 - 2880x480@60Hz */
 	{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
 		   3192, 3432, 0, 480, 489, 495, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
-	/* 2880x576@50Hz */
+	/* 37 - 2880x576@50Hz */
 	{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
 		   3184, 3456, 0, 576, 581, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
-	/* 2880x576@50Hz */
+	/* 38 - 2880x576@50Hz */
 	{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
 		   3184, 3456, 0, 576, 581, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
-	/* 1920x1080i@50Hz */
+	/* 39 - 1920x1080i@50Hz */
 	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
 		   2120, 2304, 0, 1080, 1126, 1136, 1250, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC |
 		   DRM_MODE_FLAG_INTERLACE) },
-	/* 1920x1080i@100Hz */
+	/* 40 - 1920x1080i@100Hz */
 	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
 		   2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
 		   DRM_MODE_FLAG_INTERLACE) },
-	/* 1280x720@100Hz */
+	/* 41 - 1280x720@100Hz */
 	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
 		   1760, 1980, 0, 720, 725, 730, 750, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
-	/* 720x576@100Hz */
+	/* 42 - 720x576@100Hz */
 	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
 		   796, 864, 0, 576, 581, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
-	/* 720x576@100Hz */
+	/* 43 - 720x576@100Hz */
 	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
 		   796, 864, 0, 576, 581, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
-	/* 1440x576i@100Hz */
+	/* 44 - 1440x576i@100Hz */
 	{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
 		   1590, 1728, 0, 576, 580, 586, 625, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
-	/* 1440x576i@100Hz */
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+		   DRM_MODE_FLAG_DBLCLK) },
+	/* 45 - 1440x576i@100Hz */
 	{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
 		   1590, 1728, 0, 576, 580, 586, 625, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
-	/* 1920x1080i@120Hz */
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+		   DRM_MODE_FLAG_DBLCLK) },
+	/* 46 - 1920x1080i@120Hz */
 	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
 		   2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
 		   DRM_MODE_FLAG_INTERLACE) },
-	/* 1280x720@120Hz */
+	/* 47 - 1280x720@120Hz */
 	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
 		   1430, 1650, 0, 720, 725, 730, 750, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
-	/* 720x480@120Hz */
+	/* 48 - 720x480@120Hz */
 	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
 		   798, 858, 0, 480, 489, 495, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
-	/* 720x480@120Hz */
+	/* 49 - 720x480@120Hz */
 	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
 		   798, 858, 0, 480, 489, 495, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
-	/* 1440x480i@120Hz */
+	/* 50 - 1440x480i@120Hz */
 	{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
 		   1602, 1716, 0, 480, 488, 494, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-		   DRM_MODE_FLAG_INTERLACE) },
-	/* 1440x480i@120Hz */
+		   DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+	/* 51 - 1440x480i@120Hz */
 	{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
 		   1602, 1716, 0, 480, 488, 494, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-		   DRM_MODE_FLAG_INTERLACE) },
-	/* 720x576@200Hz */
+		   DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+	/* 52 - 720x576@200Hz */
 	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
 		   796, 864, 0, 576, 581, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
-	/* 720x576@200Hz */
+	/* 53 - 720x576@200Hz */
 	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
 		   796, 864, 0, 576, 581, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
-	/* 1440x576i@200Hz */
+	/* 54 - 1440x576i@200Hz */
 	{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
 		   1590, 1728, 0, 576, 580, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-		   DRM_MODE_FLAG_INTERLACE) },
-	/* 1440x576i@200Hz */
+		   DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+	/* 55 - 1440x576i@200Hz */
 	{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
 		   1590, 1728, 0, 576, 580, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-		   DRM_MODE_FLAG_INTERLACE) },
-	/* 720x480@240Hz */
+		   DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+	/* 56 - 720x480@240Hz */
 	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
 		   798, 858, 0, 480, 489, 495, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
-	/* 720x480@240Hz */
+	/* 57 - 720x480@240Hz */
 	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
 		   798, 858, 0, 480, 489, 495, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
-	/* 1440x480i@240 */
+	/* 58 - 1440x480i@240 */
 	{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
 		   1602, 1716, 0, 480, 488, 494, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-		   DRM_MODE_FLAG_INTERLACE) },
-	/* 1440x480i@240 */
+		   DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+	/* 59 - 1440x480i@240 */
 	{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
 		   1602, 1716, 0, 480, 488, 494, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-		   DRM_MODE_FLAG_INTERLACE) },
-	/* 1280x720@24Hz */
+		   DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+	/* 60 - 1280x720@24Hz */
 	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
 		   3080, 3300, 0, 720, 725, 730, 750, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
-	/* 1280x720@25Hz */
+	/* 61 - 1280x720@25Hz */
 	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
 		   3740, 3960, 0, 720, 725, 730, 750, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
-	/* 1280x720@30Hz */
+	/* 62 - 1280x720@30Hz */
 	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
 		   3080, 3300, 0, 720, 725, 730, 750, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
-	/* 1920x1080@120Hz */
+	/* 63 - 1920x1080@120Hz */
 	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
 		   2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
-	/* 1920x1080@100Hz */
+	/* 64 - 1920x1080@100Hz */
 	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
 		   2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
 };
-static const int drm_num_cea_modes =
-	sizeof (edid_cea_modes) / sizeof (edid_cea_modes[0]);
+static const int drm_num_cea_modes = ARRAY_SIZE(edid_cea_modes);
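The mode-count constants above now use ARRAY_SIZE() instead of open-coded sizeof division. Beyond brevity, the kernel macro is safer: its definition includes a compile-time check that the argument really is an array, so accidentally passing a pointer breaks the build instead of silently computing sizeof(ptr)/sizeof(elem). Stripped of that type check, it reduces to:

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))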
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index a0d6e894d97c..5683b7fdd746 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -136,6 +136,9 @@ static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc)
 {
 	uint16_t *r_base, *g_base, *b_base;
 
+	if (crtc->funcs->gamma_set == NULL)
+		return;
+
 	r_base = crtc->gamma_store;
 	g_base = r_base + crtc->gamma_size;
 	b_base = g_base + crtc->gamma_size;
@@ -383,7 +386,6 @@ int drm_fb_helper_init(struct drm_device *dev,
 		       int crtc_count, int max_conn_count)
 {
 	struct drm_crtc *crtc;
-	int ret = 0;
 	int i;
 
 	fb_helper->dev = dev;
@@ -408,10 +410,8 @@ int drm_fb_helper_init(struct drm_device *dev,
 					  sizeof(struct drm_connector *),
 					  GFP_KERNEL);
 
-		if (!fb_helper->crtc_info[i].mode_set.connectors) {
-			ret = -ENOMEM;
+		if (!fb_helper->crtc_info[i].mode_set.connectors)
 			goto out_free;
-		}
 		fb_helper->crtc_info[i].mode_set.num_connectors = 0;
 	}
 
@@ -1083,7 +1083,7 @@ static bool drm_target_cloned(struct drm_fb_helper *fb_helper,
 
 	/* try and find a 1024x768 mode on each connector */
 	can_clone = true;
-	dmt_mode = drm_mode_find_dmt(fb_helper->dev, 1024, 768, 60);
+	dmt_mode = drm_mode_find_dmt(fb_helper->dev, 1024, 768, 60, false);
 
 	for (i = 0; i < fb_helper->connector_count; i++) {
 
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 83114b5e3cee..d58e69da1fb5 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -201,6 +201,19 @@ free:
 }
 EXPORT_SYMBOL(drm_gem_object_alloc);
 
+static void
+drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
+{
+	if (obj->import_attach) {
+		drm_prime_remove_imported_buf_handle(&filp->prime,
+				obj->import_attach->dmabuf);
+	}
+	if (obj->export_dma_buf) {
+		drm_prime_remove_imported_buf_handle(&filp->prime,
+				obj->export_dma_buf);
+	}
+}
+
 /**
  * Removes the mapping from handle to filp for this object.
  */
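The helper above centralizes PRIME bookkeeping when a handle is closed: an object can sit in the per-file import list either because it was imported (obj->import_attach) or because its own exported dma-buf was registered there (obj->export_dma_buf, see the drm_prime.c hunk below), and both entries must be scrubbed. The import side this pairs with might look roughly like the following sketch (drm_prime_lookup_imported_buf_handle() is assumed from this API family, not quoted from the patch):

/* Sketch: on fd-to-handle, reuse an existing handle when this
 * drm_file already knows the dma-buf. */
static int example_fd_to_handle(struct drm_file *filp, int fd, u32 *handle)
{
	struct dma_buf *buf = dma_buf_get(fd);
	int ret;

	if (IS_ERR(buf))
		return PTR_ERR(buf);

	ret = drm_prime_lookup_imported_buf_handle(&filp->prime, buf, handle);
	if (!ret) {
		dma_buf_put(buf);	/* already imported: same handle back */
		return 0;
	}
	/* ...otherwise import the sg table and register the new handle... */
	return -ENOSYS;			/* sketch only */
}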
@@ -233,9 +246,7 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
 	idr_remove(&filp->object_idr, handle);
 	spin_unlock(&filp->table_lock);
 
-	if (obj->import_attach)
-		drm_prime_remove_imported_buf_handle(&filp->prime,
-				obj->import_attach->dmabuf);
+	drm_gem_remove_prime_handles(obj, filp);
 
 	if (dev->driver->gem_close_object)
 		dev->driver->gem_close_object(obj, filp);
@@ -272,8 +283,7 @@ again:
 	spin_unlock(&file_priv->table_lock);
 	if (ret == -EAGAIN)
 		goto again;
-
-	if (ret != 0)
+	else if (ret)
 		return ret;
 
 	drm_gem_object_handle_reference(obj);
@@ -329,7 +339,7 @@ drm_gem_create_mmap_offset(struct drm_gem_object *obj)
 	struct drm_gem_mm *mm = dev->mm_private;
 	struct drm_map_list *list;
 	struct drm_local_map *map;
-	int ret = 0;
+	int ret;
 
 	/* Set the object up for mmap'ing */
 	list = &obj->map_list;
@@ -456,8 +466,7 @@ again:
 
 	if (ret == -EAGAIN)
 		goto again;
-
-	if (ret != 0)
+	else if (ret)
 		goto err;
 
 	/* Allocate a reference for the name table. */
@@ -532,9 +541,7 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
 	struct drm_gem_object *obj = ptr;
 	struct drm_device *dev = obj->dev;
 
-	if (obj->import_attach)
-		drm_prime_remove_imported_buf_handle(&file_priv->prime,
-				obj->import_attach->dmabuf);
+	drm_gem_remove_prime_handles(obj, file_priv);
 
 	if (dev->driver->gem_close_object)
 		dev->driver->gem_close_object(obj, file_priv);
@@ -628,7 +635,7 @@ void drm_gem_vm_open(struct vm_area_struct *vma)
 	drm_gem_object_reference(obj);
 
 	mutex_lock(&obj->dev->struct_mutex);
-	drm_vm_open_locked(vma);
+	drm_vm_open_locked(obj->dev, vma);
 	mutex_unlock(&obj->dev->struct_mutex);
 }
 EXPORT_SYMBOL(drm_gem_vm_open);
@@ -639,7 +646,7 @@ void drm_gem_vm_close(struct vm_area_struct *vma)
 	struct drm_device *dev = obj->dev;
 
 	mutex_lock(&dev->struct_mutex);
-	drm_vm_close_locked(vma);
+	drm_vm_close_locked(obj->dev, vma);
 	drm_gem_object_unreference(obj);
 	mutex_unlock(&dev->struct_mutex);
 }
@@ -712,7 +719,7 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 	 */
 	drm_gem_object_reference(obj);
 
-	drm_vm_open_locked(vma);
+	drm_vm_open_locked(dev, vma);
 
 out_unlock:
 	mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index cf85155da2a0..64a62c697313 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -283,6 +283,10 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
 	case DRM_CAP_DUMB_PREFER_SHADOW:
 		req->value = dev->mode_config.prefer_shadow;
 		break;
+	case DRM_CAP_PRIME:
+		req->value |= dev->driver->prime_fd_to_handle ? DRM_PRIME_CAP_IMPORT : 0;
+		req->value |= dev->driver->prime_handle_to_fd ? DRM_PRIME_CAP_EXPORT : 0;
+		break;
 	default:
 		return -EINVAL;
 	}
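DRM_CAP_PRIME lets userspace ask whether a driver can import and/or export PRIME buffers before attempting the ioctls. From userspace this is typically probed through libdrm's drmGetCap(); a minimal sketch (assuming the DRM_CAP_PRIME / DRM_PRIME_CAP_* values exported by the kernel headers in this series):

#include <stdint.h>
#include <stdio.h>
#include <xf86drm.h>

static void probe_prime_caps(int fd)
{
	uint64_t value = 0;

	if (drmGetCap(fd, DRM_CAP_PRIME, &value) == 0) {
		printf("PRIME import: %s\n",
		       (value & DRM_PRIME_CAP_IMPORT) ? "yes" : "no");
		printf("PRIME export: %s\n",
		       (value & DRM_PRIME_CAP_EXPORT) ? "yes" : "no");
	}
}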
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index c869436e238a..c798eeae0a03 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -189,7 +189,7 @@ void drm_vblank_cleanup(struct drm_device *dev)
 	if (dev->num_crtcs == 0)
 		return;
 
-	del_timer(&dev->vblank_disable_timer);
+	del_timer_sync(&dev->vblank_disable_timer);
 
 	vblank_disable_fn((unsigned long)dev);
 
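Switching drm_vblank_cleanup() to del_timer_sync() closes a teardown race: plain del_timer() only deactivates a pending timer and can return while the callback is still executing on another CPU, letting vblank_disable_fn() run concurrently with the cleanup that follows. The sync variant waits for any in-flight callback to finish. As a generic sketch of the pattern (not this file's code):

/* Safe teardown of a timer whose handler touches 'data'. */
static void example_teardown(struct timer_list *timer, void *data)
{
	del_timer_sync(timer);	/* also waits for a running handler */
	kfree(data);		/* now nothing can dereference it */
}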
@@ -310,7 +310,7 @@ static void drm_irq_vgaarb_nokms(void *cookie, bool state)
  */
 int drm_irq_install(struct drm_device *dev)
 {
-	int ret = 0;
+	int ret;
 	unsigned long sh_flags = 0;
 	char *irqname;
 
@@ -731,7 +731,7 @@ EXPORT_SYMBOL(drm_calc_vbltimestamp_from_scanoutpos);
 u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
 			      struct timeval *tvblank, unsigned flags)
 {
-	int ret = 0;
+	int ret;
 
 	/* Define requested maximum error on timestamps (nanoseconds). */
 	int max_error = (int) drm_timestamp_precision * 1000;
@@ -1031,18 +1031,15 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
 		     struct drm_file *file_priv)
 {
 	struct drm_modeset_ctl *modeset = data;
-	int ret = 0;
 	unsigned int crtc;
 
 	/* If drm_vblank_init() hasn't been called yet, just no-op */
 	if (!dev->num_crtcs)
-		goto out;
+		return 0;
 
 	crtc = modeset->crtc;
-	if (crtc >= dev->num_crtcs) {
-		ret = -EINVAL;
-		goto out;
-	}
+	if (crtc >= dev->num_crtcs)
+		return -EINVAL;
 
 	switch (modeset->cmd) {
 	case _DRM_PRE_MODESET:
@@ -1052,12 +1049,10 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
1052 drm_vblank_post_modeset(dev, crtc); 1049 drm_vblank_post_modeset(dev, crtc);
1053 break; 1050 break;
1054 default: 1051 default:
1055 ret = -EINVAL; 1052 return -EINVAL;
1056 break;
1057 } 1053 }
1058 1054
1059out: 1055 return 0;
1060 return ret;
1061} 1056}
1062 1057
1063static int drm_queue_vblank_event(struct drm_device *dev, int pipe, 1058static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
@@ -1154,7 +1149,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
1154 struct drm_file *file_priv) 1149 struct drm_file *file_priv)
1155{ 1150{
1156 union drm_wait_vblank *vblwait = data; 1151 union drm_wait_vblank *vblwait = data;
1157 int ret = 0; 1152 int ret;
1158 unsigned int flags, seq, crtc, high_crtc; 1153 unsigned int flags, seq, crtc, high_crtc;
1159 1154
1160 if ((!drm_dev_to_irq(dev)) || (!dev->irq_enabled)) 1155 if ((!drm_dev_to_irq(dev)) || (!dev->irq_enabled))
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index c79c713eeba0..521152041691 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -331,7 +331,7 @@ static int drm_notifier(void *priv)
331 331
332void drm_idlelock_take(struct drm_lock_data *lock_data) 332void drm_idlelock_take(struct drm_lock_data *lock_data)
333{ 333{
334 int ret = 0; 334 int ret;
335 335
336 spin_lock_bh(&lock_data->spinlock); 336 spin_lock_bh(&lock_data->spinlock);
337 lock_data->kernel_waiters++; 337 lock_data->kernel_waiters++;
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 1bdf2b54eaf6..f546ff98a114 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -68,6 +68,7 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
68{ 68{
69 struct drm_gem_object *obj; 69 struct drm_gem_object *obj;
70 void *buf; 70 void *buf;
71 int ret;
71 72
72 obj = drm_gem_object_lookup(dev, file_priv, handle); 73 obj = drm_gem_object_lookup(dev, file_priv, handle);
73 if (!obj) 74 if (!obj)
@@ -100,6 +101,17 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
100 obj->export_dma_buf = buf; 101 obj->export_dma_buf = buf;
101 *prime_fd = dma_buf_fd(buf, flags); 102 *prime_fd = dma_buf_fd(buf, flags);
102 } 103 }
 104 /* if we've exported this buffer then cheat and add it to the import list
105 * so we get the correct handle back
106 */
107 ret = drm_prime_add_imported_buf_handle(&file_priv->prime,
108 obj->export_dma_buf, handle);
109 if (ret) {
110 drm_gem_object_unreference_unlocked(obj);
111 mutex_unlock(&file_priv->prime.lock);
112 return ret;
113 }
114
103 mutex_unlock(&file_priv->prime.lock); 115 mutex_unlock(&file_priv->prime.lock);
104 return 0; 116 return 0;
105} 117}
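
With the export path in place, userspace converts a GEM handle into a dma-buf fd via DRM_IOCTL_PRIME_HANDLE_TO_FD. A minimal sketch, assuming a valid GEM handle on an open DRM fd (struct drm_prime_handle comes from the drm uapi):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

/* Export a GEM handle as a dma-buf fd that another driver can import. */
static int gem_handle_to_prime_fd(int drm_fd, unsigned int handle)
{
	struct drm_prime_handle args = {
		.handle = handle,
		.flags = DRM_CLOEXEC,	/* don't leak the fd across exec */
	};

	if (ioctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args))
		return -1;
	return args.fd;
}
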
@@ -227,6 +239,42 @@ out:
227} 239}
228EXPORT_SYMBOL(drm_prime_pages_to_sg); 240EXPORT_SYMBOL(drm_prime_pages_to_sg);
229 241
 242/* export an sg table into an array of pages and addresses;
 243 * this is currently required by the TTM driver in order to do
 244 * correct fault handling */
245int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
246 dma_addr_t *addrs, int max_pages)
247{
248 unsigned count;
249 struct scatterlist *sg;
250 struct page *page;
251 u32 len, offset;
252 int pg_index;
253 dma_addr_t addr;
254
255 pg_index = 0;
256 for_each_sg(sgt->sgl, sg, sgt->nents, count) {
257 len = sg->length;
258 offset = sg->offset;
259 page = sg_page(sg);
260 addr = sg_dma_address(sg);
261
262 while (len > 0) {
263 if (WARN_ON(pg_index >= max_pages))
264 return -1;
265 pages[pg_index] = page;
266 if (addrs)
267 addrs[pg_index] = addr;
268
269 page++;
270 addr += PAGE_SIZE;
271 len -= PAGE_SIZE;
272 pg_index++;
273 }
274 }
275 return 0;
276}
277EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
230/* helper function to cleanup a GEM/prime object */ 278/* helper function to cleanup a GEM/prime object */
231void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg) 279void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
232{ 280{
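
drm_prime_sg_to_page_addr_arrays() flattens a scatterlist whose entries may span several pages into the flat page/dma-address arrays a TTM backend keeps around for fault handling. A hedged caller sketch (the wrapper name is illustrative, not from this patch):

/* Illustrative caller: fill per-page arrays for an imported buffer. */
static int import_fill_page_arrays(struct sg_table *sgt, struct page **pages,
				   dma_addr_t *addrs, int npages)
{
	/* WARNs and returns -1 if sgt covers more than npages pages. */
	if (drm_prime_sg_to_page_addr_arrays(sgt, pages, addrs, npages))
		return -EINVAL;
	return 0;
}
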
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index aa454f80e109..21bcd4a555d8 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -122,11 +122,10 @@ again:
122 ret = idr_get_new_above(&drm_minors_idr, NULL, 122 ret = idr_get_new_above(&drm_minors_idr, NULL,
123 base, &new_id); 123 base, &new_id);
124 mutex_unlock(&dev->struct_mutex); 124 mutex_unlock(&dev->struct_mutex);
125 if (ret == -EAGAIN) { 125 if (ret == -EAGAIN)
126 goto again; 126 goto again;
127 } else if (ret) { 127 else if (ret)
128 return ret; 128 return ret;
129 }
130 129
131 if (new_id >= limit) { 130 if (new_id >= limit) {
132 idr_remove(&drm_minors_idr, new_id); 131 idr_remove(&drm_minors_idr, new_id);
@@ -211,7 +210,7 @@ EXPORT_SYMBOL(drm_master_put);
211int drm_setmaster_ioctl(struct drm_device *dev, void *data, 210int drm_setmaster_ioctl(struct drm_device *dev, void *data,
212 struct drm_file *file_priv) 211 struct drm_file *file_priv)
213{ 212{
214 int ret = 0; 213 int ret;
215 214
216 if (file_priv->is_master) 215 if (file_priv->is_master)
217 return 0; 216 return 0;
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 5a7bd51fc3d8..45cf1dd3eb9c 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -347,17 +347,17 @@ static struct bin_attribute edid_attr = {
347}; 347};
348 348
349/** 349/**
350 * drm_sysfs_connector_add - add an connector to sysfs 350 * drm_sysfs_connector_add - add a connector to sysfs
351 * @connector: connector to add 351 * @connector: connector to add
352 * 352 *
353 * Create an connector device in sysfs, along with its associated connector 353 * Create a connector device in sysfs, along with its associated connector
354 * properties (so far, connection status, dpms, mode list & edid) and 354 * properties (so far, connection status, dpms, mode list & edid) and
355 * generate a hotplug event so userspace knows there's a new connector 355 * generate a hotplug event so userspace knows there's a new connector
356 * available. 356 * available.
357 * 357 *
358 * Note: 358 * Note:
359 * This routine should only be called *once* for each DRM minor registered. 359 * This routine should only be called *once* for each registered connector.
360 * A second call for an already registered device will trigger the BUG_ON 360 * A second call for an already registered connector will trigger the BUG_ON
361 * below. 361 * below.
362 */ 362 */
363int drm_sysfs_connector_add(struct drm_connector *connector) 363int drm_sysfs_connector_add(struct drm_connector *connector)
@@ -366,7 +366,7 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
366 int attr_cnt = 0; 366 int attr_cnt = 0;
367 int opt_cnt = 0; 367 int opt_cnt = 0;
368 int i; 368 int i;
369 int ret = 0; 369 int ret;
370 370
371 /* We shouldn't get called more than once for the same connector */ 371 /* We shouldn't get called more than once for the same connector */
372 BUG_ON(device_is_registered(&connector->kdev)); 372 BUG_ON(device_is_registered(&connector->kdev));
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 149561818349..961ee08927fe 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -406,10 +406,9 @@ static const struct vm_operations_struct drm_vm_sg_ops = {
406 * Create a new drm_vma_entry structure as the \p vma private data entry and 406 * Create a new drm_vma_entry structure as the \p vma private data entry and
407 * add it to drm_device::vmalist. 407 * add it to drm_device::vmalist.
408 */ 408 */
409void drm_vm_open_locked(struct vm_area_struct *vma) 409void drm_vm_open_locked(struct drm_device *dev,
410 struct vm_area_struct *vma)
410{ 411{
411 struct drm_file *priv = vma->vm_file->private_data;
412 struct drm_device *dev = priv->minor->dev;
413 struct drm_vma_entry *vma_entry; 412 struct drm_vma_entry *vma_entry;
414 413
415 DRM_DEBUG("0x%08lx,0x%08lx\n", 414 DRM_DEBUG("0x%08lx,0x%08lx\n",
@@ -430,14 +429,13 @@ static void drm_vm_open(struct vm_area_struct *vma)
430 struct drm_device *dev = priv->minor->dev; 429 struct drm_device *dev = priv->minor->dev;
431 430
432 mutex_lock(&dev->struct_mutex); 431 mutex_lock(&dev->struct_mutex);
433 drm_vm_open_locked(vma); 432 drm_vm_open_locked(dev, vma);
434 mutex_unlock(&dev->struct_mutex); 433 mutex_unlock(&dev->struct_mutex);
435} 434}
436 435
437void drm_vm_close_locked(struct vm_area_struct *vma) 436void drm_vm_close_locked(struct drm_device *dev,
437 struct vm_area_struct *vma)
438{ 438{
439 struct drm_file *priv = vma->vm_file->private_data;
440 struct drm_device *dev = priv->minor->dev;
441 struct drm_vma_entry *pt, *temp; 439 struct drm_vma_entry *pt, *temp;
442 440
443 DRM_DEBUG("0x%08lx,0x%08lx\n", 441 DRM_DEBUG("0x%08lx,0x%08lx\n",
@@ -467,7 +465,7 @@ static void drm_vm_close(struct vm_area_struct *vma)
467 struct drm_device *dev = priv->minor->dev; 465 struct drm_device *dev = priv->minor->dev;
468 466
469 mutex_lock(&dev->struct_mutex); 467 mutex_lock(&dev->struct_mutex);
470 drm_vm_close_locked(vma); 468 drm_vm_close_locked(dev, vma);
471 mutex_unlock(&dev->struct_mutex); 469 mutex_unlock(&dev->struct_mutex);
472} 470}
473 471
@@ -519,7 +517,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
519 vma->vm_flags |= VM_RESERVED; /* Don't swap */ 517 vma->vm_flags |= VM_RESERVED; /* Don't swap */
520 vma->vm_flags |= VM_DONTEXPAND; 518 vma->vm_flags |= VM_DONTEXPAND;
521 519
522 drm_vm_open_locked(vma); 520 drm_vm_open_locked(dev, vma);
523 return 0; 521 return 0;
524} 522}
525 523
@@ -670,7 +668,7 @@ int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
670 vma->vm_flags |= VM_RESERVED; /* Don't swap */ 668 vma->vm_flags |= VM_RESERVED; /* Don't swap */
671 vma->vm_flags |= VM_DONTEXPAND; 669 vma->vm_flags |= VM_DONTEXPAND;
672 670
673 drm_vm_open_locked(vma); 671 drm_vm_open_locked(dev, vma);
674 return 0; 672 return 0;
675} 673}
676 674
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 3343ac437fe5..7f5096763b7d 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -10,6 +10,12 @@ config DRM_EXYNOS
10 Choose this option if you have a Samsung SoC EXYNOS chipset. 10 Choose this option if you have a Samsung SoC EXYNOS chipset.
11 If M is selected the module will be called exynosdrm. 11 If M is selected the module will be called exynosdrm.
12 12
13config DRM_EXYNOS_DMABUF
14 bool "EXYNOS DRM DMABUF"
15 depends on DRM_EXYNOS
16 help
 17	  Choose this option if you want to use the DMABUF feature with DRM.
18
13config DRM_EXYNOS_FIMD 19config DRM_EXYNOS_FIMD
14 bool "Exynos DRM FIMD" 20 bool "Exynos DRM FIMD"
15 depends on DRM_EXYNOS && !FB_S3C 21 depends on DRM_EXYNOS && !FB_S3C
@@ -27,3 +33,9 @@ config DRM_EXYNOS_VIDI
27 depends on DRM_EXYNOS 33 depends on DRM_EXYNOS
28 help 34 help
29 Choose this option if you want to use Exynos VIDI for DRM. 35 Choose this option if you want to use Exynos VIDI for DRM.
36
37config DRM_EXYNOS_G2D
38 bool "Exynos DRM G2D"
39 depends on DRM_EXYNOS
40 help
41 Choose this option if you want to use Exynos G2D for DRM.
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index 9e0bff8badf9..eb651ca8e2a8 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -8,10 +8,12 @@ exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o exynos_drm_connector.o \
8 exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \ 8 exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \
9 exynos_drm_plane.o 9 exynos_drm_plane.o
10 10
11exynosdrm-$(CONFIG_DRM_EXYNOS_DMABUF) += exynos_drm_dmabuf.o
11exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o 12exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o
12exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o exynos_mixer.o \ 13exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o exynos_mixer.o \
13 exynos_ddc.o exynos_hdmiphy.o \ 14 exynos_ddc.o exynos_hdmiphy.o \
14 exynos_drm_hdmi.o 15 exynos_drm_hdmi.o
15exynosdrm-$(CONFIG_DRM_EXYNOS_VIDI) += exynos_drm_vidi.o 16exynosdrm-$(CONFIG_DRM_EXYNOS_VIDI) += exynos_drm_vidi.o
17exynosdrm-$(CONFIG_DRM_EXYNOS_G2D) += exynos_drm_g2d.o
16 18
17obj-$(CONFIG_DRM_EXYNOS) += exynosdrm.o 19obj-$(CONFIG_DRM_EXYNOS) += exynosdrm.o
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
index de8d2090bce3..b3cb0a69fbf2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -35,7 +35,7 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
35 unsigned int flags, struct exynos_drm_gem_buf *buf) 35 unsigned int flags, struct exynos_drm_gem_buf *buf)
36{ 36{
37 dma_addr_t start_addr; 37 dma_addr_t start_addr;
38 unsigned int npages, page_size, i = 0; 38 unsigned int npages, i = 0;
39 struct scatterlist *sgl; 39 struct scatterlist *sgl;
40 int ret = 0; 40 int ret = 0;
41 41
@@ -53,13 +53,13 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
53 53
54 if (buf->size >= SZ_1M) { 54 if (buf->size >= SZ_1M) {
55 npages = buf->size >> SECTION_SHIFT; 55 npages = buf->size >> SECTION_SHIFT;
56 page_size = SECTION_SIZE; 56 buf->page_size = SECTION_SIZE;
57 } else if (buf->size >= SZ_64K) { 57 } else if (buf->size >= SZ_64K) {
58 npages = buf->size >> 16; 58 npages = buf->size >> 16;
59 page_size = SZ_64K; 59 buf->page_size = SZ_64K;
60 } else { 60 } else {
61 npages = buf->size >> PAGE_SHIFT; 61 npages = buf->size >> PAGE_SHIFT;
62 page_size = PAGE_SIZE; 62 buf->page_size = PAGE_SIZE;
63 } 63 }
64 64
65 buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL); 65 buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
@@ -96,9 +96,9 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
96 96
97 while (i < npages) { 97 while (i < npages) {
98 buf->pages[i] = phys_to_page(start_addr); 98 buf->pages[i] = phys_to_page(start_addr);
99 sg_set_page(sgl, buf->pages[i], page_size, 0); 99 sg_set_page(sgl, buf->pages[i], buf->page_size, 0);
100 sg_dma_address(sgl) = start_addr; 100 sg_dma_address(sgl) = start_addr;
101 start_addr += page_size; 101 start_addr += buf->page_size;
102 sgl = sg_next(sgl); 102 sgl = sg_next(sgl);
103 i++; 103 i++;
104 } 104 }
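
Promoting page_size from a local to a buffer field lets the new dma-buf code recompute the scatterlist geometry later as npages = buf->size / buf->page_size. Worked examples, assuming ARM's SECTION_SHIFT of 20 and 4 KiB pages:

/* buf->size = 3 MiB   -> buf->page_size = SECTION_SIZE (1 MiB), npages = 3 */
/* buf->size = 128 KiB -> buf->page_size = SZ_64K,               npages = 2 */
/* buf->size = 8 KiB   -> buf->page_size = PAGE_SIZE (4 KiB),    npages = 2 */
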
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 3486ffed0bf0..4afb625128d7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -105,6 +105,8 @@ int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
105 overlay->fb_y = pos->fb_y; 105 overlay->fb_y = pos->fb_y;
106 overlay->fb_width = fb->width; 106 overlay->fb_width = fb->width;
107 overlay->fb_height = fb->height; 107 overlay->fb_height = fb->height;
108 overlay->src_width = pos->src_w;
109 overlay->src_height = pos->src_h;
108 overlay->bpp = fb->bits_per_pixel; 110 overlay->bpp = fb->bits_per_pixel;
109 overlay->pitch = fb->pitches[0]; 111 overlay->pitch = fb->pitches[0];
110 overlay->pixel_format = fb->pixel_format; 112 overlay->pixel_format = fb->pixel_format;
@@ -153,6 +155,8 @@ static int exynos_drm_crtc_update(struct drm_crtc *crtc)
153 pos.crtc_y = 0; 155 pos.crtc_y = 0;
154 pos.crtc_w = fb->width - crtc->x; 156 pos.crtc_w = fb->width - crtc->x;
155 pos.crtc_h = fb->height - crtc->y; 157 pos.crtc_h = fb->height - crtc->y;
158 pos.src_w = pos.crtc_w;
159 pos.src_h = pos.crtc_h;
156 160
157 return exynos_drm_overlay_update(overlay, crtc->fb, mode, &pos); 161 return exynos_drm_overlay_update(overlay, crtc->fb, mode, &pos);
158} 162}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index 25f72a62cb88..16b8e2195a0d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -42,6 +42,8 @@ void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc);
42 * - the unit is screen coordinates. 42 * - the unit is screen coordinates.
43 * @fb_y: offset y on a framebuffer to be displayed 43 * @fb_y: offset y on a framebuffer to be displayed
44 * - the unit is screen coordinates. 44 * - the unit is screen coordinates.
45 * @src_w: width of source area to be displayed from a framebuffer.
46 * @src_h: height of source area to be displayed from a framebuffer.
45 * @crtc_x: offset x on hardware screen. 47 * @crtc_x: offset x on hardware screen.
46 * @crtc_y: offset y on hardware screen. 48 * @crtc_y: offset y on hardware screen.
47 * @crtc_w: width of hardware screen. 49 * @crtc_w: width of hardware screen.
@@ -50,6 +52,8 @@ void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc);
50struct exynos_drm_crtc_pos { 52struct exynos_drm_crtc_pos {
51 unsigned int fb_x; 53 unsigned int fb_x;
52 unsigned int fb_y; 54 unsigned int fb_y;
55 unsigned int src_w;
56 unsigned int src_h;
53 unsigned int crtc_x; 57 unsigned int crtc_x;
54 unsigned int crtc_y; 58 unsigned int crtc_y;
55 unsigned int crtc_w; 59 unsigned int crtc_w;
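
Keeping the source rectangle separate from the CRTC rectangle is what allows an overlay to scale: the hardware samples src_w x src_h pixels from the framebuffer and displays them across crtc_w x crtc_h. The crtc update path above simply sets them equal, i.e. no scaling. A hypothetical 2x upscale, for illustration only:

pos.src_w = 640;   pos.src_h = 360;	/* read a 640x360 region ... */
pos.crtc_w = 1280; pos.crtc_h = 720;	/* ... shown on a 1280x720 plane */
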
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
new file mode 100644
index 000000000000..274909271c36
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -0,0 +1,272 @@
1/* exynos_drm_dmabuf.c
2 *
3 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
4 * Author: Inki Dae <inki.dae@samsung.com>
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23 * OTHER DEALINGS IN THE SOFTWARE.
24 */
25
26#include "drmP.h"
27#include "drm.h"
28#include "exynos_drm_drv.h"
29#include "exynos_drm_gem.h"
30
31#include <linux/dma-buf.h>
32
33static struct sg_table *exynos_pages_to_sg(struct page **pages, int nr_pages,
34 unsigned int page_size)
35{
36 struct sg_table *sgt = NULL;
37 struct scatterlist *sgl;
38 int i, ret;
39
40 sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
41 if (!sgt)
42 goto out;
43
44 ret = sg_alloc_table(sgt, nr_pages, GFP_KERNEL);
45 if (ret)
46 goto err_free_sgt;
47
48 if (page_size < PAGE_SIZE)
49 page_size = PAGE_SIZE;
50
51 for_each_sg(sgt->sgl, sgl, nr_pages, i)
52 sg_set_page(sgl, pages[i], page_size, 0);
53
54 return sgt;
55
56err_free_sgt:
57 kfree(sgt);
58 sgt = NULL;
59out:
60 return NULL;
61}
62
63static struct sg_table *
64 exynos_gem_map_dma_buf(struct dma_buf_attachment *attach,
65 enum dma_data_direction dir)
66{
67 struct exynos_drm_gem_obj *gem_obj = attach->dmabuf->priv;
68 struct drm_device *dev = gem_obj->base.dev;
69 struct exynos_drm_gem_buf *buf;
70 struct sg_table *sgt = NULL;
71 unsigned int npages;
72 int nents;
73
74 DRM_DEBUG_PRIME("%s\n", __FILE__);
75
76 mutex_lock(&dev->struct_mutex);
77
78 buf = gem_obj->buffer;
79
80 /* there should always be pages allocated. */
81 if (!buf->pages) {
82 DRM_ERROR("pages is null.\n");
83 goto err_unlock;
84 }
85
86 npages = buf->size / buf->page_size;
87
88 sgt = exynos_pages_to_sg(buf->pages, npages, buf->page_size);
 89	if (sgt) nents = dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir);
90
91 DRM_DEBUG_PRIME("npages = %d buffer size = 0x%lx page_size = 0x%lx\n",
92 npages, buf->size, buf->page_size);
93
94err_unlock:
95 mutex_unlock(&dev->struct_mutex);
96 return sgt;
97}
98
99static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
100 struct sg_table *sgt,
101 enum dma_data_direction dir)
102{
103 dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
104 sg_free_table(sgt);
105 kfree(sgt);
106 sgt = NULL;
107}
108
109static void exynos_dmabuf_release(struct dma_buf *dmabuf)
110{
111 struct exynos_drm_gem_obj *exynos_gem_obj = dmabuf->priv;
112
113 DRM_DEBUG_PRIME("%s\n", __FILE__);
114
115 /*
 116	 * exynos_dmabuf_release() being called means that the file object's
 117	 * f_count has dropped to 0, so call drm_gem_object_handle_unreference()
 118	 * to drop the references that were taken when this buffer was
 119	 * exported in drm_prime_handle_to_fd().
120 */
121 if (exynos_gem_obj->base.export_dma_buf == dmabuf) {
122 exynos_gem_obj->base.export_dma_buf = NULL;
123
124 /*
125 * drop this gem object refcount to release allocated buffer
126 * and resources.
127 */
128 drm_gem_object_unreference_unlocked(&exynos_gem_obj->base);
129 }
130}
131
132static void *exynos_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
133 unsigned long page_num)
134{
135 /* TODO */
136
137 return NULL;
138}
139
140static void exynos_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
141 unsigned long page_num,
142 void *addr)
143{
144 /* TODO */
145}
146
147static void *exynos_gem_dmabuf_kmap(struct dma_buf *dma_buf,
148 unsigned long page_num)
149{
150 /* TODO */
151
152 return NULL;
153}
154
155static void exynos_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
156 unsigned long page_num, void *addr)
157{
158 /* TODO */
159}
160
161static struct dma_buf_ops exynos_dmabuf_ops = {
162 .map_dma_buf = exynos_gem_map_dma_buf,
163 .unmap_dma_buf = exynos_gem_unmap_dma_buf,
164 .kmap = exynos_gem_dmabuf_kmap,
165 .kmap_atomic = exynos_gem_dmabuf_kmap_atomic,
166 .kunmap = exynos_gem_dmabuf_kunmap,
167 .kunmap_atomic = exynos_gem_dmabuf_kunmap_atomic,
168 .release = exynos_dmabuf_release,
169};
170
171struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
172 struct drm_gem_object *obj, int flags)
173{
174 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
175
176 return dma_buf_export(exynos_gem_obj, &exynos_dmabuf_ops,
177 exynos_gem_obj->base.size, 0600);
178}
179
180struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
181 struct dma_buf *dma_buf)
182{
183 struct dma_buf_attachment *attach;
184 struct sg_table *sgt;
185 struct scatterlist *sgl;
186 struct exynos_drm_gem_obj *exynos_gem_obj;
187 struct exynos_drm_gem_buf *buffer;
188 struct page *page;
189 int ret, i = 0;
190
191 DRM_DEBUG_PRIME("%s\n", __FILE__);
192
 193	/* is this one of our own objects? */
194 if (dma_buf->ops == &exynos_dmabuf_ops) {
195 struct drm_gem_object *obj;
196
197 exynos_gem_obj = dma_buf->priv;
198 obj = &exynos_gem_obj->base;
199
200 /* is it from our device? */
201 if (obj->dev == drm_dev) {
202 drm_gem_object_reference(obj);
203 return obj;
204 }
205 }
206
207 attach = dma_buf_attach(dma_buf, drm_dev->dev);
208 if (IS_ERR(attach))
209 return ERR_PTR(-EINVAL);
210
211
212 sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
213 if (IS_ERR(sgt)) {
214 ret = PTR_ERR(sgt);
215 goto err_buf_detach;
216 }
217
218 buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
219 if (!buffer) {
220 DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
221 ret = -ENOMEM;
222 goto err_unmap_attach;
223 }
224
225 buffer->pages = kzalloc(sizeof(*page) * sgt->nents, GFP_KERNEL);
226 if (!buffer->pages) {
227 DRM_ERROR("failed to allocate pages.\n");
228 ret = -ENOMEM;
229 goto err_free_buffer;
230 }
231
232 exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
233 if (!exynos_gem_obj) {
234 ret = -ENOMEM;
235 goto err_free_pages;
236 }
237
238 sgl = sgt->sgl;
239 buffer->dma_addr = sg_dma_address(sgl);
240
241 while (i < sgt->nents) {
242 buffer->pages[i] = sg_page(sgl);
243 buffer->size += sg_dma_len(sgl);
244 sgl = sg_next(sgl);
245 i++;
246 }
247
248 exynos_gem_obj->buffer = buffer;
249 buffer->sgt = sgt;
250 exynos_gem_obj->base.import_attach = attach;
251
252 DRM_DEBUG_PRIME("dma_addr = 0x%x, size = 0x%lx\n", buffer->dma_addr,
253 buffer->size);
254
255 return &exynos_gem_obj->base;
256
257err_free_pages:
258 kfree(buffer->pages);
259 buffer->pages = NULL;
260err_free_buffer:
261 kfree(buffer);
262 buffer = NULL;
263err_unmap_attach:
264 dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
265err_buf_detach:
266 dma_buf_detach(dma_buf, attach);
267 return ERR_PTR(ret);
268}
269
270MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
271MODULE_DESCRIPTION("Samsung SoC DRM DMABUF Module");
272MODULE_LICENSE("GPL");
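
On the consuming side, a dma-buf fd round-trips back into a GEM handle through DRM_IOCTL_PRIME_FD_TO_HANDLE, which lands in exynos_dmabuf_prime_import() above. A minimal userspace sketch (uapi types only, error handling trimmed):

#include <sys/ioctl.h>
#include <drm/drm.h>

/* Import a dma-buf fd (possibly exported by another device) as a handle. */
static int prime_fd_to_gem_handle(int drm_fd, int dmabuf_fd,
				  unsigned int *handle)
{
	struct drm_prime_handle args = { .fd = dmabuf_fd };

	if (ioctl(drm_fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args))
		return -1;
	*handle = args.handle;
	return 0;
}
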
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h
new file mode 100644
index 000000000000..662a8f98ccdb
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h
@@ -0,0 +1,39 @@
1/* exynos_drm_dmabuf.h
2 *
3 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
4 * Author: Inki Dae <inki.dae@samsung.com>
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23 * OTHER DEALINGS IN THE SOFTWARE.
24 */
25
26#ifndef _EXYNOS_DRM_DMABUF_H_
27#define _EXYNOS_DRM_DMABUF_H_
28
29#ifdef CONFIG_DRM_EXYNOS_DMABUF
30struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
31 struct drm_gem_object *obj, int flags);
32
33struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
34 struct dma_buf *dma_buf);
35#else
36#define exynos_dmabuf_prime_export NULL
37#define exynos_dmabuf_prime_import NULL
38#endif
39#endif
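
The NULL stubs keep the drm_driver initializer compiling whether or not CONFIG_DRM_EXYNOS_DMABUF is set; DRM core treats NULL prime hooks as unsupported, which is exactly what DRM_CAP_PRIME then reports. The pattern in isolation (hypothetical names):

#ifdef CONFIG_MY_FEATURE
int my_feature_hook(struct my_dev *dev);
#else
#define my_feature_hook NULL	/* a designated initializer stays valid */
#endif
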
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index a6819b5f8428..420953197d0a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -39,6 +39,8 @@
39#include "exynos_drm_gem.h" 39#include "exynos_drm_gem.h"
40#include "exynos_drm_plane.h" 40#include "exynos_drm_plane.h"
41#include "exynos_drm_vidi.h" 41#include "exynos_drm_vidi.h"
42#include "exynos_drm_dmabuf.h"
43#include "exynos_drm_g2d.h"
42 44
43#define DRIVER_NAME "exynos" 45#define DRIVER_NAME "exynos"
44#define DRIVER_DESC "Samsung SoC DRM" 46#define DRIVER_DESC "Samsung SoC DRM"
@@ -147,8 +149,17 @@ static int exynos_drm_unload(struct drm_device *dev)
147 149
148static int exynos_drm_open(struct drm_device *dev, struct drm_file *file) 150static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
149{ 151{
152 struct drm_exynos_file_private *file_priv;
153
150 DRM_DEBUG_DRIVER("%s\n", __FILE__); 154 DRM_DEBUG_DRIVER("%s\n", __FILE__);
151 155
156 file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
157 if (!file_priv)
158 return -ENOMEM;
159
160 drm_prime_init_file_private(&file->prime);
161 file->driver_priv = file_priv;
162
152 return exynos_drm_subdrv_open(dev, file); 163 return exynos_drm_subdrv_open(dev, file);
153} 164}
154 165
@@ -170,6 +181,7 @@ static void exynos_drm_preclose(struct drm_device *dev,
170 e->base.destroy(&e->base); 181 e->base.destroy(&e->base);
171 } 182 }
172 } 183 }
184 drm_prime_destroy_file_private(&file->prime);
173 spin_unlock_irqrestore(&dev->event_lock, flags); 185 spin_unlock_irqrestore(&dev->event_lock, flags);
174 186
175 exynos_drm_subdrv_close(dev, file); 187 exynos_drm_subdrv_close(dev, file);
@@ -193,7 +205,7 @@ static void exynos_drm_lastclose(struct drm_device *dev)
193 exynos_drm_fbdev_restore_mode(dev); 205 exynos_drm_fbdev_restore_mode(dev);
194} 206}
195 207
196static struct vm_operations_struct exynos_drm_gem_vm_ops = { 208static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
197 .fault = exynos_drm_gem_fault, 209 .fault = exynos_drm_gem_fault,
198 .open = drm_gem_vm_open, 210 .open = drm_gem_vm_open,
199 .close = drm_gem_vm_close, 211 .close = drm_gem_vm_close,
@@ -207,10 +219,18 @@ static struct drm_ioctl_desc exynos_ioctls[] = {
207 DRM_AUTH), 219 DRM_AUTH),
208 DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MMAP, 220 DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MMAP,
209 exynos_drm_gem_mmap_ioctl, DRM_UNLOCKED | DRM_AUTH), 221 exynos_drm_gem_mmap_ioctl, DRM_UNLOCKED | DRM_AUTH),
222 DRM_IOCTL_DEF_DRV(EXYNOS_GEM_GET,
223 exynos_drm_gem_get_ioctl, DRM_UNLOCKED),
210 DRM_IOCTL_DEF_DRV(EXYNOS_PLANE_SET_ZPOS, exynos_plane_set_zpos_ioctl, 224 DRM_IOCTL_DEF_DRV(EXYNOS_PLANE_SET_ZPOS, exynos_plane_set_zpos_ioctl,
211 DRM_UNLOCKED | DRM_AUTH), 225 DRM_UNLOCKED | DRM_AUTH),
212 DRM_IOCTL_DEF_DRV(EXYNOS_VIDI_CONNECTION, 226 DRM_IOCTL_DEF_DRV(EXYNOS_VIDI_CONNECTION,
213 vidi_connection_ioctl, DRM_UNLOCKED | DRM_AUTH), 227 vidi_connection_ioctl, DRM_UNLOCKED | DRM_AUTH),
228 DRM_IOCTL_DEF_DRV(EXYNOS_G2D_GET_VER,
229 exynos_g2d_get_ver_ioctl, DRM_UNLOCKED | DRM_AUTH),
230 DRM_IOCTL_DEF_DRV(EXYNOS_G2D_SET_CMDLIST,
231 exynos_g2d_set_cmdlist_ioctl, DRM_UNLOCKED | DRM_AUTH),
232 DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC,
233 exynos_g2d_exec_ioctl, DRM_UNLOCKED | DRM_AUTH),
214}; 234};
215 235
216static const struct file_operations exynos_drm_driver_fops = { 236static const struct file_operations exynos_drm_driver_fops = {
@@ -225,7 +245,7 @@ static const struct file_operations exynos_drm_driver_fops = {
225 245
226static struct drm_driver exynos_drm_driver = { 246static struct drm_driver exynos_drm_driver = {
227 .driver_features = DRIVER_HAVE_IRQ | DRIVER_BUS_PLATFORM | 247 .driver_features = DRIVER_HAVE_IRQ | DRIVER_BUS_PLATFORM |
228 DRIVER_MODESET | DRIVER_GEM, 248 DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
229 .load = exynos_drm_load, 249 .load = exynos_drm_load,
230 .unload = exynos_drm_unload, 250 .unload = exynos_drm_unload,
231 .open = exynos_drm_open, 251 .open = exynos_drm_open,
@@ -241,6 +261,10 @@ static struct drm_driver exynos_drm_driver = {
241 .dumb_create = exynos_drm_gem_dumb_create, 261 .dumb_create = exynos_drm_gem_dumb_create,
242 .dumb_map_offset = exynos_drm_gem_dumb_map_offset, 262 .dumb_map_offset = exynos_drm_gem_dumb_map_offset,
243 .dumb_destroy = exynos_drm_gem_dumb_destroy, 263 .dumb_destroy = exynos_drm_gem_dumb_destroy,
264 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
265 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
266 .gem_prime_export = exynos_dmabuf_prime_export,
267 .gem_prime_import = exynos_dmabuf_prime_import,
244 .ioctls = exynos_ioctls, 268 .ioctls = exynos_ioctls,
245 .fops = &exynos_drm_driver_fops, 269 .fops = &exynos_drm_driver_fops,
246 .name = DRIVER_NAME, 270 .name = DRIVER_NAME,
@@ -307,6 +331,12 @@ static int __init exynos_drm_init(void)
307 goto out_vidi; 331 goto out_vidi;
308#endif 332#endif
309 333
334#ifdef CONFIG_DRM_EXYNOS_G2D
335 ret = platform_driver_register(&g2d_driver);
336 if (ret < 0)
337 goto out_g2d;
338#endif
339
310 ret = platform_driver_register(&exynos_drm_platform_driver); 340 ret = platform_driver_register(&exynos_drm_platform_driver);
311 if (ret < 0) 341 if (ret < 0)
312 goto out; 342 goto out;
@@ -314,6 +344,11 @@ static int __init exynos_drm_init(void)
314 return 0; 344 return 0;
315 345
316out: 346out:
347#ifdef CONFIG_DRM_EXYNOS_G2D
348 platform_driver_unregister(&g2d_driver);
349out_g2d:
350#endif
351
317#ifdef CONFIG_DRM_EXYNOS_VIDI 352#ifdef CONFIG_DRM_EXYNOS_VIDI
318out_vidi: 353out_vidi:
319 platform_driver_unregister(&vidi_driver); 354 platform_driver_unregister(&vidi_driver);
@@ -341,6 +376,10 @@ static void __exit exynos_drm_exit(void)
341 376
342 platform_driver_unregister(&exynos_drm_platform_driver); 377 platform_driver_unregister(&exynos_drm_platform_driver);
343 378
379#ifdef CONFIG_DRM_EXYNOS_G2D
380 platform_driver_unregister(&g2d_driver);
381#endif
382
344#ifdef CONFIG_DRM_EXYNOS_HDMI 383#ifdef CONFIG_DRM_EXYNOS_HDMI
345 platform_driver_unregister(&exynos_drm_common_hdmi_driver); 384 platform_driver_unregister(&exynos_drm_common_hdmi_driver);
346 platform_driver_unregister(&mixer_driver); 385 platform_driver_unregister(&mixer_driver);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 1d814175cd49..c82c90c443e7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -77,6 +77,8 @@ struct exynos_drm_overlay_ops {
77 * - the unit is screen coordinates. 77 * - the unit is screen coordinates.
78 * @fb_width: width of a framebuffer. 78 * @fb_width: width of a framebuffer.
79 * @fb_height: height of a framebuffer. 79 * @fb_height: height of a framebuffer.
80 * @src_width: width of a partial image to be displayed from framebuffer.
81 * @src_height: height of a partial image to be displayed from framebuffer.
80 * @crtc_x: offset x on hardware screen. 82 * @crtc_x: offset x on hardware screen.
81 * @crtc_y: offset y on hardware screen. 83 * @crtc_y: offset y on hardware screen.
82 * @crtc_width: window width to be displayed (hardware screen). 84 * @crtc_width: window width to be displayed (hardware screen).
@@ -108,6 +110,8 @@ struct exynos_drm_overlay {
108 unsigned int fb_y; 110 unsigned int fb_y;
109 unsigned int fb_width; 111 unsigned int fb_width;
110 unsigned int fb_height; 112 unsigned int fb_height;
113 unsigned int src_width;
114 unsigned int src_height;
111 unsigned int crtc_x; 115 unsigned int crtc_x;
112 unsigned int crtc_y; 116 unsigned int crtc_y;
113 unsigned int crtc_width; 117 unsigned int crtc_width;
@@ -205,6 +209,18 @@ struct exynos_drm_manager {
205 struct exynos_drm_display_ops *display_ops; 209 struct exynos_drm_display_ops *display_ops;
206}; 210};
207 211
212struct exynos_drm_g2d_private {
213 struct device *dev;
214 struct list_head inuse_cmdlist;
215 struct list_head event_list;
216 struct list_head gem_list;
217 unsigned int gem_nr;
218};
219
220struct drm_exynos_file_private {
221 struct exynos_drm_g2d_private *g2d_priv;
222};
223
208/* 224/*
209 * Exynos drm private structure. 225 * Exynos drm private structure.
210 */ 226 */
@@ -287,4 +303,5 @@ extern struct platform_driver hdmi_driver;
287extern struct platform_driver mixer_driver; 303extern struct platform_driver mixer_driver;
288extern struct platform_driver exynos_drm_common_hdmi_driver; 304extern struct platform_driver exynos_drm_common_hdmi_driver;
289extern struct platform_driver vidi_driver; 305extern struct platform_driver vidi_driver;
306extern struct platform_driver g2d_driver;
290#endif 307#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index c38c8f468fa3..f82a299553fb 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -191,7 +191,7 @@ static void exynos_drm_output_poll_changed(struct drm_device *dev)
191 drm_fb_helper_hotplug_event(fb_helper); 191 drm_fb_helper_hotplug_event(fb_helper);
192} 192}
193 193
194static struct drm_mode_config_funcs exynos_drm_mode_config_funcs = { 194static const struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
195 .fb_create = exynos_user_fb_create, 195 .fb_create = exynos_user_fb_create,
196 .output_poll_changed = exynos_drm_output_poll_changed, 196 .output_poll_changed = exynos_drm_output_poll_changed,
197}; 197};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
new file mode 100644
index 000000000000..d2d88f22a037
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -0,0 +1,937 @@
1/*
2 * Copyright (C) 2012 Samsung Electronics Co.Ltd
3 * Authors: Joonyoung Shim <jy0922.shim@samsung.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
 7 * published by the Free Software Foundation.
8 */
9
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/clk.h>
13#include <linux/err.h>
14#include <linux/interrupt.h>
15#include <linux/io.h>
16#include <linux/platform_device.h>
17#include <linux/pm_runtime.h>
18#include <linux/slab.h>
19#include <linux/workqueue.h>
20
21#include "drmP.h"
22#include "exynos_drm.h"
23#include "exynos_drm_drv.h"
24#include "exynos_drm_gem.h"
25
26#define G2D_HW_MAJOR_VER 4
27#define G2D_HW_MINOR_VER 1
28
 29/* valid register range set from user: 0x0104 ~ 0x0880 */
30#define G2D_VALID_START 0x0104
31#define G2D_VALID_END 0x0880
32
33/* general registers */
34#define G2D_SOFT_RESET 0x0000
35#define G2D_INTEN 0x0004
36#define G2D_INTC_PEND 0x000C
37#define G2D_DMA_SFR_BASE_ADDR 0x0080
38#define G2D_DMA_COMMAND 0x0084
39#define G2D_DMA_STATUS 0x008C
40#define G2D_DMA_HOLD_CMD 0x0090
41
42/* command registers */
43#define G2D_BITBLT_START 0x0100
44
45/* registers for base address */
46#define G2D_SRC_BASE_ADDR 0x0304
47#define G2D_SRC_PLANE2_BASE_ADDR 0x0318
48#define G2D_DST_BASE_ADDR 0x0404
49#define G2D_DST_PLANE2_BASE_ADDR 0x0418
50#define G2D_PAT_BASE_ADDR 0x0500
51#define G2D_MSK_BASE_ADDR 0x0520
52
53/* G2D_SOFT_RESET */
54#define G2D_SFRCLEAR (1 << 1)
55#define G2D_R (1 << 0)
56
57/* G2D_INTEN */
58#define G2D_INTEN_ACF (1 << 3)
59#define G2D_INTEN_UCF (1 << 2)
60#define G2D_INTEN_GCF (1 << 1)
61#define G2D_INTEN_SCF (1 << 0)
62
63/* G2D_INTC_PEND */
64#define G2D_INTP_ACMD_FIN (1 << 3)
65#define G2D_INTP_UCMD_FIN (1 << 2)
66#define G2D_INTP_GCMD_FIN (1 << 1)
67#define G2D_INTP_SCMD_FIN (1 << 0)
68
69/* G2D_DMA_COMMAND */
70#define G2D_DMA_HALT (1 << 2)
71#define G2D_DMA_CONTINUE (1 << 1)
72#define G2D_DMA_START (1 << 0)
73
74/* G2D_DMA_STATUS */
75#define G2D_DMA_LIST_DONE_COUNT (0xFF << 17)
76#define G2D_DMA_BITBLT_DONE_COUNT (0xFFFF << 1)
77#define G2D_DMA_DONE (1 << 0)
78#define G2D_DMA_LIST_DONE_COUNT_OFFSET 17
79
80/* G2D_DMA_HOLD_CMD */
81#define G2D_USET_HOLD (1 << 2)
82#define G2D_LIST_HOLD (1 << 1)
83#define G2D_BITBLT_HOLD (1 << 0)
84
85/* G2D_BITBLT_START */
86#define G2D_START_CASESEL (1 << 2)
87#define G2D_START_NHOLT (1 << 1)
88#define G2D_START_BITBLT (1 << 0)
89
90#define G2D_CMDLIST_SIZE (PAGE_SIZE / 4)
91#define G2D_CMDLIST_NUM 64
92#define G2D_CMDLIST_POOL_SIZE (G2D_CMDLIST_SIZE * G2D_CMDLIST_NUM)
93#define G2D_CMDLIST_DATA_NUM (G2D_CMDLIST_SIZE / sizeof(u32) - 2)
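/*
 * Editor's note, not in the original: worked sizes, assuming 4 KiB pages.
 *   G2D_CMDLIST_SIZE      = 4096 / 4      = 1024 bytes per cmdlist
 *   G2D_CMDLIST_POOL_SIZE = 1024 * 64     = 64 KiB of DMA-coherent pool
 *   G2D_CMDLIST_DATA_NUM  = 1024 / 4 - 2  = 254 u32 slots
 * The two reserved u32 slots are the head and last fields of struct
 * g2d_cmdlist below, so one cmdlist exactly fills 1024 bytes.
 */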
94
95/* cmdlist data structure */
96struct g2d_cmdlist {
97 u32 head;
98 u32 data[G2D_CMDLIST_DATA_NUM];
99 u32 last; /* last data offset */
100};
101
102struct drm_exynos_pending_g2d_event {
103 struct drm_pending_event base;
104 struct drm_exynos_g2d_event event;
105};
106
107struct g2d_gem_node {
108 struct list_head list;
109 unsigned int handle;
110};
111
112struct g2d_cmdlist_node {
113 struct list_head list;
114 struct g2d_cmdlist *cmdlist;
115 unsigned int gem_nr;
116 dma_addr_t dma_addr;
117
118 struct drm_exynos_pending_g2d_event *event;
119};
120
121struct g2d_runqueue_node {
122 struct list_head list;
123 struct list_head run_cmdlist;
124 struct list_head event_list;
125 struct completion complete;
126 int async;
127};
128
129struct g2d_data {
130 struct device *dev;
131 struct clk *gate_clk;
132 struct resource *regs_res;
133 void __iomem *regs;
134 int irq;
135 struct workqueue_struct *g2d_workq;
136 struct work_struct runqueue_work;
137 struct exynos_drm_subdrv subdrv;
138 bool suspended;
139
140 /* cmdlist */
141 struct g2d_cmdlist_node *cmdlist_node;
142 struct list_head free_cmdlist;
143 struct mutex cmdlist_mutex;
144 dma_addr_t cmdlist_pool;
145 void *cmdlist_pool_virt;
146
 147	/* runqueue */
148 struct g2d_runqueue_node *runqueue_node;
149 struct list_head runqueue;
150 struct mutex runqueue_mutex;
151 struct kmem_cache *runqueue_slab;
152};
153
154static int g2d_init_cmdlist(struct g2d_data *g2d)
155{
156 struct device *dev = g2d->dev;
157 struct g2d_cmdlist_node *node = g2d->cmdlist_node;
158 int nr;
159 int ret;
160
161 g2d->cmdlist_pool_virt = dma_alloc_coherent(dev, G2D_CMDLIST_POOL_SIZE,
162 &g2d->cmdlist_pool, GFP_KERNEL);
163 if (!g2d->cmdlist_pool_virt) {
164 dev_err(dev, "failed to allocate dma memory\n");
165 return -ENOMEM;
166 }
167
 168	node = g2d->cmdlist_node = kcalloc(G2D_CMDLIST_NUM, sizeof(*node),
 169			GFP_KERNEL);
170 if (!node) {
171 dev_err(dev, "failed to allocate memory\n");
172 ret = -ENOMEM;
173 goto err;
174 }
175
176 for (nr = 0; nr < G2D_CMDLIST_NUM; nr++) {
177 node[nr].cmdlist =
178 g2d->cmdlist_pool_virt + nr * G2D_CMDLIST_SIZE;
179 node[nr].dma_addr =
180 g2d->cmdlist_pool + nr * G2D_CMDLIST_SIZE;
181
182 list_add_tail(&node[nr].list, &g2d->free_cmdlist);
183 }
184
185 return 0;
186
187err:
188 dma_free_coherent(dev, G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt,
189 g2d->cmdlist_pool);
190 return ret;
191}
192
193static void g2d_fini_cmdlist(struct g2d_data *g2d)
194{
195 struct device *dev = g2d->dev;
196
197 kfree(g2d->cmdlist_node);
198 dma_free_coherent(dev, G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt,
199 g2d->cmdlist_pool);
200}
201
202static struct g2d_cmdlist_node *g2d_get_cmdlist(struct g2d_data *g2d)
203{
204 struct device *dev = g2d->dev;
205 struct g2d_cmdlist_node *node;
206
207 mutex_lock(&g2d->cmdlist_mutex);
208 if (list_empty(&g2d->free_cmdlist)) {
209 dev_err(dev, "there is no free cmdlist\n");
210 mutex_unlock(&g2d->cmdlist_mutex);
211 return NULL;
212 }
213
214 node = list_first_entry(&g2d->free_cmdlist, struct g2d_cmdlist_node,
215 list);
216 list_del_init(&node->list);
217 mutex_unlock(&g2d->cmdlist_mutex);
218
219 return node;
220}
221
222static void g2d_put_cmdlist(struct g2d_data *g2d, struct g2d_cmdlist_node *node)
223{
224 mutex_lock(&g2d->cmdlist_mutex);
225 list_move_tail(&node->list, &g2d->free_cmdlist);
226 mutex_unlock(&g2d->cmdlist_mutex);
227}
228
229static void g2d_add_cmdlist_to_inuse(struct exynos_drm_g2d_private *g2d_priv,
230 struct g2d_cmdlist_node *node)
231{
232 struct g2d_cmdlist_node *lnode;
233
234 if (list_empty(&g2d_priv->inuse_cmdlist))
235 goto add_to_list;
236
 237	/* point the tail of the previous cmdlist at the new cmdlist's base address */
238 lnode = list_entry(g2d_priv->inuse_cmdlist.prev,
239 struct g2d_cmdlist_node, list);
240 lnode->cmdlist->data[lnode->cmdlist->last] = node->dma_addr;
241
242add_to_list:
243 list_add_tail(&node->list, &g2d_priv->inuse_cmdlist);
244
245 if (node->event)
246 list_add_tail(&node->event->base.link, &g2d_priv->event_list);
247}
248
249static int g2d_get_cmdlist_gem(struct drm_device *drm_dev,
250 struct drm_file *file,
251 struct g2d_cmdlist_node *node)
252{
253 struct drm_exynos_file_private *file_priv = file->driver_priv;
254 struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
255 struct g2d_cmdlist *cmdlist = node->cmdlist;
256 dma_addr_t *addr;
257 int offset;
258 int i;
259
260 for (i = 0; i < node->gem_nr; i++) {
261 struct g2d_gem_node *gem_node;
262
263 gem_node = kzalloc(sizeof(*gem_node), GFP_KERNEL);
264 if (!gem_node) {
265 dev_err(g2d_priv->dev, "failed to allocate gem node\n");
266 return -ENOMEM;
267 }
268
269 offset = cmdlist->last - (i * 2 + 1);
270 gem_node->handle = cmdlist->data[offset];
271
272 addr = exynos_drm_gem_get_dma_addr(drm_dev, gem_node->handle,
273 file);
274 if (IS_ERR(addr)) {
275 node->gem_nr = i;
276 kfree(gem_node);
277 return PTR_ERR(addr);
278 }
279
280 cmdlist->data[offset] = *addr;
281 list_add_tail(&gem_node->list, &g2d_priv->gem_list);
282 g2d_priv->gem_nr++;
283 }
284
285 return 0;
286}
287
288static void g2d_put_cmdlist_gem(struct drm_device *drm_dev,
289 struct drm_file *file,
290 unsigned int nr)
291{
292 struct drm_exynos_file_private *file_priv = file->driver_priv;
293 struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
294 struct g2d_gem_node *node, *n;
295
296 list_for_each_entry_safe_reverse(node, n, &g2d_priv->gem_list, list) {
297 if (!nr)
298 break;
299
300 exynos_drm_gem_put_dma_addr(drm_dev, node->handle, file);
301 list_del_init(&node->list);
302 kfree(node);
303 nr--;
304 }
305}
306
307static void g2d_dma_start(struct g2d_data *g2d,
308 struct g2d_runqueue_node *runqueue_node)
309{
310 struct g2d_cmdlist_node *node =
311 list_first_entry(&runqueue_node->run_cmdlist,
312 struct g2d_cmdlist_node, list);
313
314 pm_runtime_get_sync(g2d->dev);
315 clk_enable(g2d->gate_clk);
316
317 /* interrupt enable */
318 writel_relaxed(G2D_INTEN_ACF | G2D_INTEN_UCF | G2D_INTEN_GCF,
319 g2d->regs + G2D_INTEN);
320
321 writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR);
322 writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND);
323}
324
325static struct g2d_runqueue_node *g2d_get_runqueue_node(struct g2d_data *g2d)
326{
327 struct g2d_runqueue_node *runqueue_node;
328
329 if (list_empty(&g2d->runqueue))
330 return NULL;
331
332 runqueue_node = list_first_entry(&g2d->runqueue,
333 struct g2d_runqueue_node, list);
334 list_del_init(&runqueue_node->list);
335 return runqueue_node;
336}
337
338static void g2d_free_runqueue_node(struct g2d_data *g2d,
339 struct g2d_runqueue_node *runqueue_node)
340{
341 if (!runqueue_node)
342 return;
343
344 mutex_lock(&g2d->cmdlist_mutex);
345 list_splice_tail_init(&runqueue_node->run_cmdlist, &g2d->free_cmdlist);
346 mutex_unlock(&g2d->cmdlist_mutex);
347
348 kmem_cache_free(g2d->runqueue_slab, runqueue_node);
349}
350
351static void g2d_exec_runqueue(struct g2d_data *g2d)
352{
353 g2d->runqueue_node = g2d_get_runqueue_node(g2d);
354 if (g2d->runqueue_node)
355 g2d_dma_start(g2d, g2d->runqueue_node);
356}
357
358static void g2d_runqueue_worker(struct work_struct *work)
359{
360 struct g2d_data *g2d = container_of(work, struct g2d_data,
361 runqueue_work);
362
363
364 mutex_lock(&g2d->runqueue_mutex);
365 clk_disable(g2d->gate_clk);
366 pm_runtime_put_sync(g2d->dev);
367
368 complete(&g2d->runqueue_node->complete);
369 if (g2d->runqueue_node->async)
370 g2d_free_runqueue_node(g2d, g2d->runqueue_node);
371
372 if (g2d->suspended)
373 g2d->runqueue_node = NULL;
374 else
375 g2d_exec_runqueue(g2d);
376 mutex_unlock(&g2d->runqueue_mutex);
377}
378
379static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no)
380{
381 struct drm_device *drm_dev = g2d->subdrv.drm_dev;
382 struct g2d_runqueue_node *runqueue_node = g2d->runqueue_node;
383 struct drm_exynos_pending_g2d_event *e;
384 struct timeval now;
385 unsigned long flags;
386
387 if (list_empty(&runqueue_node->event_list))
388 return;
389
390 e = list_first_entry(&runqueue_node->event_list,
391 struct drm_exynos_pending_g2d_event, base.link);
392
393 do_gettimeofday(&now);
394 e->event.tv_sec = now.tv_sec;
395 e->event.tv_usec = now.tv_usec;
396 e->event.cmdlist_no = cmdlist_no;
397
398 spin_lock_irqsave(&drm_dev->event_lock, flags);
399 list_move_tail(&e->base.link, &e->base.file_priv->event_list);
400 wake_up_interruptible(&e->base.file_priv->event_wait);
401 spin_unlock_irqrestore(&drm_dev->event_lock, flags);
402}
403
404static irqreturn_t g2d_irq_handler(int irq, void *dev_id)
405{
406 struct g2d_data *g2d = dev_id;
407 u32 pending;
408
409 pending = readl_relaxed(g2d->regs + G2D_INTC_PEND);
410 if (pending)
411 writel_relaxed(pending, g2d->regs + G2D_INTC_PEND);
412
413 if (pending & G2D_INTP_GCMD_FIN) {
414 u32 cmdlist_no = readl_relaxed(g2d->regs + G2D_DMA_STATUS);
415
416 cmdlist_no = (cmdlist_no & G2D_DMA_LIST_DONE_COUNT) >>
417 G2D_DMA_LIST_DONE_COUNT_OFFSET;
418
419 g2d_finish_event(g2d, cmdlist_no);
420
421 writel_relaxed(0, g2d->regs + G2D_DMA_HOLD_CMD);
422 if (!(pending & G2D_INTP_ACMD_FIN)) {
423 writel_relaxed(G2D_DMA_CONTINUE,
424 g2d->regs + G2D_DMA_COMMAND);
425 }
426 }
427
428 if (pending & G2D_INTP_ACMD_FIN)
429 queue_work(g2d->g2d_workq, &g2d->runqueue_work);
430
431 return IRQ_HANDLED;
432}
433
434static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist,
435 int nr, bool for_addr)
436{
437 int reg_offset;
438 int index;
439 int i;
440
441 for (i = 0; i < nr; i++) {
442 index = cmdlist->last - 2 * (i + 1);
443 reg_offset = cmdlist->data[index] & ~0xfffff000;
444
445 if (reg_offset < G2D_VALID_START || reg_offset > G2D_VALID_END)
446 goto err;
447 if (reg_offset % 4)
448 goto err;
449
450 switch (reg_offset) {
451 case G2D_SRC_BASE_ADDR:
452 case G2D_SRC_PLANE2_BASE_ADDR:
453 case G2D_DST_BASE_ADDR:
454 case G2D_DST_PLANE2_BASE_ADDR:
455 case G2D_PAT_BASE_ADDR:
456 case G2D_MSK_BASE_ADDR:
457 if (!for_addr)
458 goto err;
459 break;
460 default:
461 if (for_addr)
462 goto err;
463 break;
464 }
465 }
466
467 return 0;
468
469err:
470 dev_err(dev, "Bad register offset: 0x%x\n", cmdlist->data[index]);
471 return -EINVAL;
472}
473
474/* ioctl functions */
475int exynos_g2d_get_ver_ioctl(struct drm_device *drm_dev, void *data,
476 struct drm_file *file)
477{
478 struct drm_exynos_g2d_get_ver *ver = data;
479
480 ver->major = G2D_HW_MAJOR_VER;
481 ver->minor = G2D_HW_MINOR_VER;
482
483 return 0;
484}
485EXPORT_SYMBOL_GPL(exynos_g2d_get_ver_ioctl);
486
487int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
488 struct drm_file *file)
489{
490 struct drm_exynos_file_private *file_priv = file->driver_priv;
491 struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
492 struct device *dev = g2d_priv->dev;
493 struct g2d_data *g2d;
494 struct drm_exynos_g2d_set_cmdlist *req = data;
495 struct drm_exynos_g2d_cmd *cmd;
496 struct drm_exynos_pending_g2d_event *e;
497 struct g2d_cmdlist_node *node;
498 struct g2d_cmdlist *cmdlist;
499 unsigned long flags;
500 int size;
501 int ret;
502
503 if (!dev)
504 return -ENODEV;
505
506 g2d = dev_get_drvdata(dev);
507 if (!g2d)
508 return -EFAULT;
509
510 node = g2d_get_cmdlist(g2d);
511 if (!node)
512 return -ENOMEM;
513
514 node->event = NULL;
515
516 if (req->event_type != G2D_EVENT_NOT) {
517 spin_lock_irqsave(&drm_dev->event_lock, flags);
518 if (file->event_space < sizeof(e->event)) {
519 spin_unlock_irqrestore(&drm_dev->event_lock, flags);
520 ret = -ENOMEM;
521 goto err;
522 }
523 file->event_space -= sizeof(e->event);
524 spin_unlock_irqrestore(&drm_dev->event_lock, flags);
525
526 e = kzalloc(sizeof(*node->event), GFP_KERNEL);
527 if (!e) {
528 dev_err(dev, "failed to allocate event\n");
529
530 spin_lock_irqsave(&drm_dev->event_lock, flags);
531 file->event_space += sizeof(e->event);
532 spin_unlock_irqrestore(&drm_dev->event_lock, flags);
533
534 ret = -ENOMEM;
535 goto err;
536 }
537
538 e->event.base.type = DRM_EXYNOS_G2D_EVENT;
539 e->event.base.length = sizeof(e->event);
540 e->event.user_data = req->user_data;
541 e->base.event = &e->event.base;
542 e->base.file_priv = file;
543 e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
544
545 node->event = e;
546 }
547
548 cmdlist = node->cmdlist;
549
550 cmdlist->last = 0;
551
552 /*
 553	 * If the SFR registers are not cleared, this cmdlist inherits register
 554	 * values from the previous one. The G2D hardware executes the SFR
 555	 * clear command and the command that follows it at the same time, so
 556	 * the command right after the clear is ignored; insert a dummy
 557	 * command there so that the real commands execute correctly.
558 */
559 cmdlist->data[cmdlist->last++] = G2D_SOFT_RESET;
560 cmdlist->data[cmdlist->last++] = G2D_SFRCLEAR;
561 cmdlist->data[cmdlist->last++] = G2D_SRC_BASE_ADDR;
562 cmdlist->data[cmdlist->last++] = 0;
563
564 if (node->event) {
565 cmdlist->data[cmdlist->last++] = G2D_DMA_HOLD_CMD;
566 cmdlist->data[cmdlist->last++] = G2D_LIST_HOLD;
567 }
568
 569	/* Check the cmdlist size: the last two entries are reserved for G2D_BITBLT_START */
570 size = cmdlist->last + req->cmd_nr * 2 + req->cmd_gem_nr * 2 + 2;
571 if (size > G2D_CMDLIST_DATA_NUM) {
572 dev_err(dev, "cmdlist size is too big\n");
573 ret = -EINVAL;
574 goto err_free_event;
575 }
576
 577	cmd = (struct drm_exynos_g2d_cmd *)(unsigned long)req->cmd;
578
579 if (copy_from_user(cmdlist->data + cmdlist->last,
580 (void __user *)cmd,
581 sizeof(*cmd) * req->cmd_nr)) {
582 ret = -EFAULT;
583 goto err_free_event;
584 }
585 cmdlist->last += req->cmd_nr * 2;
586
587 ret = g2d_check_reg_offset(dev, cmdlist, req->cmd_nr, false);
588 if (ret < 0)
589 goto err_free_event;
590
591 node->gem_nr = req->cmd_gem_nr;
592 if (req->cmd_gem_nr) {
593 struct drm_exynos_g2d_cmd *cmd_gem;
594
 595		cmd_gem = (struct drm_exynos_g2d_cmd *)(unsigned long)req->cmd_gem;
596
597 if (copy_from_user(cmdlist->data + cmdlist->last,
598 (void __user *)cmd_gem,
599 sizeof(*cmd_gem) * req->cmd_gem_nr)) {
600 ret = -EFAULT;
601 goto err_free_event;
602 }
603 cmdlist->last += req->cmd_gem_nr * 2;
604
605 ret = g2d_check_reg_offset(dev, cmdlist, req->cmd_gem_nr, true);
606 if (ret < 0)
607 goto err_free_event;
608
609 ret = g2d_get_cmdlist_gem(drm_dev, file, node);
610 if (ret < 0)
611 goto err_unmap;
612 }
613
614 cmdlist->data[cmdlist->last++] = G2D_BITBLT_START;
615 cmdlist->data[cmdlist->last++] = G2D_START_BITBLT;
616
617 /* head */
618 cmdlist->head = cmdlist->last / 2;
619
620 /* tail */
621 cmdlist->data[cmdlist->last] = 0;
622
623 g2d_add_cmdlist_to_inuse(g2d_priv, node);
624
625 return 0;
626
627err_unmap:
628 g2d_put_cmdlist_gem(drm_dev, file, node->gem_nr);
629err_free_event:
630 if (node->event) {
631 spin_lock_irqsave(&drm_dev->event_lock, flags);
632 file->event_space += sizeof(e->event);
633 spin_unlock_irqrestore(&drm_dev->event_lock, flags);
634 kfree(node->event);
635 }
636err:
637 g2d_put_cmdlist(g2d, node);
638 return ret;
639}
640EXPORT_SYMBOL_GPL(exynos_g2d_set_cmdlist_ioctl);
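/*
 * Editor's sketch, not part of this patch: a minimal userspace submission
 * against this ioctl. Treat the exact uapi macro and field spellings as
 * assumptions; register writes travel as (offset, value) pairs, and
 * base-address registers must go through cmd_gem so the GEM-handle
 * patching above can replace handles with dma addresses.
 *
 *	struct drm_exynos_g2d_cmd cmds[] = {
 *		{ .offset = 0x0108, .data = 0 },	// any non-address SFR
 *	};
 *	struct drm_exynos_g2d_set_cmdlist req = {
 *		.cmd        = (__u64)(unsigned long)cmds,
 *		.cmd_nr     = 1,
 *		.event_type = G2D_EVENT_NOT,		// no completion event
 *	};
 *	ioctl(fd, DRM_IOCTL_EXYNOS_G2D_SET_CMDLIST, &req);
 */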
641
642int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
643 struct drm_file *file)
644{
645 struct drm_exynos_file_private *file_priv = file->driver_priv;
646 struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
647 struct device *dev = g2d_priv->dev;
648 struct g2d_data *g2d;
649 struct drm_exynos_g2d_exec *req = data;
650 struct g2d_runqueue_node *runqueue_node;
651 struct list_head *run_cmdlist;
652 struct list_head *event_list;
653
654 if (!dev)
655 return -ENODEV;
656
657 g2d = dev_get_drvdata(dev);
658 if (!g2d)
659 return -EFAULT;
660
661 runqueue_node = kmem_cache_alloc(g2d->runqueue_slab, GFP_KERNEL);
662 if (!runqueue_node) {
663 dev_err(dev, "failed to allocate memory\n");
664 return -ENOMEM;
665 }
666 run_cmdlist = &runqueue_node->run_cmdlist;
667 event_list = &runqueue_node->event_list;
668 INIT_LIST_HEAD(run_cmdlist);
669 INIT_LIST_HEAD(event_list);
670 init_completion(&runqueue_node->complete);
671 runqueue_node->async = req->async;
672
673 list_splice_init(&g2d_priv->inuse_cmdlist, run_cmdlist);
674 list_splice_init(&g2d_priv->event_list, event_list);
675
676 if (list_empty(run_cmdlist)) {
677 dev_err(dev, "there is no inuse cmdlist\n");
678 kmem_cache_free(g2d->runqueue_slab, runqueue_node);
679 return -EPERM;
680 }
681
682 mutex_lock(&g2d->runqueue_mutex);
683 list_add_tail(&runqueue_node->list, &g2d->runqueue);
684 if (!g2d->runqueue_node)
685 g2d_exec_runqueue(g2d);
686 mutex_unlock(&g2d->runqueue_mutex);
687
688 if (runqueue_node->async)
689 goto out;
690
691 wait_for_completion(&runqueue_node->complete);
692 g2d_free_runqueue_node(g2d, runqueue_node);
693
694out:
695 return 0;
696}
697EXPORT_SYMBOL_GPL(exynos_g2d_exec_ioctl);
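/*
 * Editor's sketch, not part of this patch: kicking the queued cmdlists.
 * With async == 0 the ioctl blocks on the completion that
 * g2d_runqueue_worker() signals once the hardware interrupt has fired.
 *
 *	struct drm_exynos_g2d_exec exec = { .async = 0 };
 *	ioctl(fd, DRM_IOCTL_EXYNOS_G2D_EXEC, &exec);
 */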
698
699static int g2d_open(struct drm_device *drm_dev, struct device *dev,
700 struct drm_file *file)
701{
702 struct drm_exynos_file_private *file_priv = file->driver_priv;
703 struct exynos_drm_g2d_private *g2d_priv;
704
705 g2d_priv = kzalloc(sizeof(*g2d_priv), GFP_KERNEL);
706 if (!g2d_priv) {
707 dev_err(dev, "failed to allocate g2d private data\n");
708 return -ENOMEM;
709 }
710
711 g2d_priv->dev = dev;
712 file_priv->g2d_priv = g2d_priv;
713
714 INIT_LIST_HEAD(&g2d_priv->inuse_cmdlist);
715 INIT_LIST_HEAD(&g2d_priv->event_list);
716 INIT_LIST_HEAD(&g2d_priv->gem_list);
717
718 return 0;
719}
720
721static void g2d_close(struct drm_device *drm_dev, struct device *dev,
722 struct drm_file *file)
723{
724 struct drm_exynos_file_private *file_priv = file->driver_priv;
725 struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
726 struct g2d_data *g2d;
727 struct g2d_cmdlist_node *node, *n;
728
729 if (!dev)
730 return;
731
732 g2d = dev_get_drvdata(dev);
733 if (!g2d)
734 return;
735
736 mutex_lock(&g2d->cmdlist_mutex);
737 list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list)
738 list_move_tail(&node->list, &g2d->free_cmdlist);
739 mutex_unlock(&g2d->cmdlist_mutex);
740
741 g2d_put_cmdlist_gem(drm_dev, file, g2d_priv->gem_nr);
742
743 kfree(file_priv->g2d_priv);
744}
745
746static int __devinit g2d_probe(struct platform_device *pdev)
747{
748 struct device *dev = &pdev->dev;
749 struct resource *res;
750 struct g2d_data *g2d;
751 struct exynos_drm_subdrv *subdrv;
752 int ret;
753
754 g2d = kzalloc(sizeof(*g2d), GFP_KERNEL);
755 if (!g2d) {
756 dev_err(dev, "failed to allocate driver data\n");
757 return -ENOMEM;
758 }
759
760 g2d->runqueue_slab = kmem_cache_create("g2d_runqueue_slab",
761 sizeof(struct g2d_runqueue_node), 0, 0, NULL);
762 if (!g2d->runqueue_slab) {
763 ret = -ENOMEM;
764 goto err_free_mem;
765 }
766
767 g2d->dev = dev;
768
769 g2d->g2d_workq = create_singlethread_workqueue("g2d");
770 if (!g2d->g2d_workq) {
771 dev_err(dev, "failed to create workqueue\n");
772 ret = -EINVAL;
773 goto err_destroy_slab;
774 }
775
776 INIT_WORK(&g2d->runqueue_work, g2d_runqueue_worker);
777 INIT_LIST_HEAD(&g2d->free_cmdlist);
778 INIT_LIST_HEAD(&g2d->runqueue);
779
780 mutex_init(&g2d->cmdlist_mutex);
781 mutex_init(&g2d->runqueue_mutex);
782
783 ret = g2d_init_cmdlist(g2d);
784 if (ret < 0)
785 goto err_destroy_workqueue;
786
787 g2d->gate_clk = clk_get(dev, "fimg2d");
788 if (IS_ERR(g2d->gate_clk)) {
789 dev_err(dev, "failed to get gate clock\n");
790 ret = PTR_ERR(g2d->gate_clk);
791 goto err_fini_cmdlist;
792 }
793
794 pm_runtime_enable(dev);
795
796 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
797 if (!res) {
798 dev_err(dev, "failed to get I/O memory\n");
799 ret = -ENOENT;
800 goto err_put_clk;
801 }
802
803 g2d->regs_res = request_mem_region(res->start, resource_size(res),
804 dev_name(dev));
805 if (!g2d->regs_res) {
806 dev_err(dev, "failed to request I/O memory\n");
807 ret = -ENOENT;
808 goto err_put_clk;
809 }
810
811 g2d->regs = ioremap(res->start, resource_size(res));
812 if (!g2d->regs) {
813 dev_err(dev, "failed to remap I/O memory\n");
814 ret = -ENXIO;
815 goto err_release_res;
816 }
817
818 g2d->irq = platform_get_irq(pdev, 0);
819 if (g2d->irq < 0) {
820 dev_err(dev, "failed to get irq\n");
821 ret = g2d->irq;
822 goto err_unmap_base;
823 }
824
825 ret = request_irq(g2d->irq, g2d_irq_handler, 0, "drm_g2d", g2d);
826 if (ret < 0) {
827 dev_err(dev, "irq request failed\n");
828 goto err_unmap_base;
829 }
830
831 platform_set_drvdata(pdev, g2d);
832
833 subdrv = &g2d->subdrv;
834 subdrv->dev = dev;
835 subdrv->open = g2d_open;
836 subdrv->close = g2d_close;
837
838 ret = exynos_drm_subdrv_register(subdrv);
839 if (ret < 0) {
840 dev_err(dev, "failed to register drm g2d device\n");
841 goto err_free_irq;
842 }
843
844 dev_info(dev, "The exynos g2d(ver %d.%d) successfully probed\n",
845 G2D_HW_MAJOR_VER, G2D_HW_MINOR_VER);
846
847 return 0;
848
849err_free_irq:
850 free_irq(g2d->irq, g2d);
851err_unmap_base:
852 iounmap(g2d->regs);
853err_release_res:
854 release_resource(g2d->regs_res);
855 kfree(g2d->regs_res);
856err_put_clk:
857 pm_runtime_disable(dev);
858 clk_put(g2d->gate_clk);
859err_fini_cmdlist:
860 g2d_fini_cmdlist(g2d);
861err_destroy_workqueue:
862 destroy_workqueue(g2d->g2d_workq);
863err_destroy_slab:
864 kmem_cache_destroy(g2d->runqueue_slab);
865err_free_mem:
866 kfree(g2d);
867 return ret;
868}
869
870static int __devexit g2d_remove(struct platform_device *pdev)
871{
872 struct g2d_data *g2d = platform_get_drvdata(pdev);
873
874 cancel_work_sync(&g2d->runqueue_work);
875 exynos_drm_subdrv_unregister(&g2d->subdrv);
876 free_irq(g2d->irq, g2d);
877
878 while (g2d->runqueue_node) {
879 g2d_free_runqueue_node(g2d, g2d->runqueue_node);
880 g2d->runqueue_node = g2d_get_runqueue_node(g2d);
881 }
882
883 iounmap(g2d->regs);
884 release_resource(g2d->regs_res);
885 kfree(g2d->regs_res);
886
887 pm_runtime_disable(&pdev->dev);
888 clk_put(g2d->gate_clk);
889
890 g2d_fini_cmdlist(g2d);
891 destroy_workqueue(g2d->g2d_workq);
892 kmem_cache_destroy(g2d->runqueue_slab);
893 kfree(g2d);
894
895 return 0;
896}
897
898#ifdef CONFIG_PM_SLEEP
899static int g2d_suspend(struct device *dev)
900{
901 struct g2d_data *g2d = dev_get_drvdata(dev);
902
903 mutex_lock(&g2d->runqueue_mutex);
904 g2d->suspended = true;
905 mutex_unlock(&g2d->runqueue_mutex);
906
907 while (g2d->runqueue_node)
908 /* FIXME: good range? */
909 usleep_range(500, 1000);
910
911 flush_work_sync(&g2d->runqueue_work);
912
913 return 0;
914}
915
916static int g2d_resume(struct device *dev)
917{
918 struct g2d_data *g2d = dev_get_drvdata(dev);
919
920 g2d->suspended = false;
921 g2d_exec_runqueue(g2d);
922
923 return 0;
924}
925#endif
926
927SIMPLE_DEV_PM_OPS(g2d_pm_ops, g2d_suspend, g2d_resume);
928
929struct platform_driver g2d_driver = {
930 .probe = g2d_probe,
931 .remove = __devexit_p(g2d_remove),
932 .driver = {
933 .name = "s5p-g2d",
934 .owner = THIS_MODULE,
935 .pm = &g2d_pm_ops,
936 },
937};
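Worth calling out from exynos_g2d_set_cmdlist_ioctl() above: each userspace command is a pair of 32-bit words (register offset, value), which is why copy_from_user() is followed by cmdlist->last advancing by cmd_gem_nr * 2 rather than by cmd_gem_nr. A standalone sketch of that buffer layout; g2d_cmd here stands in for the two-word UAPI command and is not the exact kernel definition:

	#include <stdint.h>
	#include <string.h>

	struct g2d_cmd {
		uint32_t offset;	/* G2D register offset */
		uint32_t data;		/* value written to that register */
	};

	static void append_cmds(uint32_t *buf, unsigned int *last,
				const struct g2d_cmd *cmds, unsigned int nr)
	{
		/* one command fills two 32-bit slots, hence nr * 2 */
		memcpy(&buf[*last], cmds, sizeof(*cmds) * nr);
		*last += nr * 2;
	}

The tail handling follows the same layout: G2D_BITBLT_START and its value G2D_START_BITBLT are appended as one final pair before the list is handed to the runqueue.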
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.h b/drivers/gpu/drm/exynos/exynos_drm_g2d.h
new file mode 100644
index 000000000000..1a9c7ca8c15b
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.h
@@ -0,0 +1,36 @@
1/*
2 * Copyright (C) 2012 Samsung Electronics Co.Ltd
3 * Authors: Joonyoung Shim <jy0922.shim@samsung.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
 7 * published by the Free Software Foundation.
8 */
9
10#ifdef CONFIG_DRM_EXYNOS_G2D
11extern int exynos_g2d_get_ver_ioctl(struct drm_device *dev, void *data,
12 struct drm_file *file_priv);
13extern int exynos_g2d_set_cmdlist_ioctl(struct drm_device *dev, void *data,
14 struct drm_file *file_priv);
15extern int exynos_g2d_exec_ioctl(struct drm_device *dev, void *data,
16 struct drm_file *file_priv);
17#else
18static inline int exynos_g2d_get_ver_ioctl(struct drm_device *dev, void *data,
19 struct drm_file *file_priv)
20{
21 return -ENODEV;
22}
23
24static inline int exynos_g2d_set_cmdlist_ioctl(struct drm_device *dev,
25 void *data,
26 struct drm_file *file_priv)
27{
28 return -ENODEV;
29}
30
31static inline int exynos_g2d_exec_ioctl(struct drm_device *dev, void *data,
32 struct drm_file *file_priv)
33{
34 return -ENODEV;
35}
36#endif
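The new header uses the standard Kconfig stub idiom: real declarations when CONFIG_DRM_EXYNOS_G2D is enabled, static inline stubs returning -ENODEV otherwise, so callers can reference the ioctls without sprinkling #ifdefs of their own. The same shape for a hypothetical CONFIG_FOO feature, with foo_ioctl purely illustrative:

	#ifdef CONFIG_FOO
	extern int foo_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
	#else
	static inline int foo_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv)
	{
		return -ENODEV;	/* feature compiled out */
	}
	#endif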
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 1dffa8359f88..fc91293c4560 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -66,6 +66,22 @@ static int check_gem_flags(unsigned int flags)
66 return 0; 66 return 0;
67} 67}
68 68
69static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
70 struct vm_area_struct *vma)
71{
72 DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);
73
74	/* non-cacheable by default. */
75 if (obj->flags & EXYNOS_BO_CACHABLE)
76 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
77 else if (obj->flags & EXYNOS_BO_WC)
78 vma->vm_page_prot =
79 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
80 else
81 vma->vm_page_prot =
82 pgprot_noncached(vm_get_page_prot(vma->vm_flags));
83}
84
69static unsigned long roundup_gem_size(unsigned long size, unsigned int flags) 85static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
70{ 86{
71 if (!IS_NONCONTIG_BUFFER(flags)) { 87 if (!IS_NONCONTIG_BUFFER(flags)) {
@@ -80,7 +96,7 @@ out:
80 return roundup(size, PAGE_SIZE); 96 return roundup(size, PAGE_SIZE);
81} 97}
82 98
83static struct page **exynos_gem_get_pages(struct drm_gem_object *obj, 99struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
84 gfp_t gfpmask) 100 gfp_t gfpmask)
85{ 101{
86 struct inode *inode; 102 struct inode *inode;
@@ -180,6 +196,7 @@ static int exynos_drm_gem_get_pages(struct drm_gem_object *obj)
180 } 196 }
181 197
182 npages = obj->size >> PAGE_SHIFT; 198 npages = obj->size >> PAGE_SHIFT;
199 buf->page_size = PAGE_SIZE;
183 200
184 buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL); 201 buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
185 if (!buf->sgt) { 202 if (!buf->sgt) {
@@ -262,24 +279,24 @@ static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
262void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj) 279void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
263{ 280{
264 struct drm_gem_object *obj; 281 struct drm_gem_object *obj;
282 struct exynos_drm_gem_buf *buf;
265 283
266 DRM_DEBUG_KMS("%s\n", __FILE__); 284 DRM_DEBUG_KMS("%s\n", __FILE__);
267 285
268 if (!exynos_gem_obj)
269 return;
270
271 obj = &exynos_gem_obj->base; 286 obj = &exynos_gem_obj->base;
287 buf = exynos_gem_obj->buffer;
272 288
273 DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count)); 289 DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));
274 290
275 if ((exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) && 291 if (!buf->pages)
276 exynos_gem_obj->buffer->pages) 292 return;
293
294 if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG)
277 exynos_drm_gem_put_pages(obj); 295 exynos_drm_gem_put_pages(obj);
278 else 296 else
279 exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, 297 exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
280 exynos_gem_obj->buffer);
281 298
282 exynos_drm_fini_buf(obj->dev, exynos_gem_obj->buffer); 299 exynos_drm_fini_buf(obj->dev, buf);
283 exynos_gem_obj->buffer = NULL; 300 exynos_gem_obj->buffer = NULL;
284 301
285 if (obj->map_list.map) 302 if (obj->map_list.map)
@@ -292,7 +309,7 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
292 exynos_gem_obj = NULL; 309 exynos_gem_obj = NULL;
293} 310}
294 311
295static struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev, 312struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
296 unsigned long size) 313 unsigned long size)
297{ 314{
298 struct exynos_drm_gem_obj *exynos_gem_obj; 315 struct exynos_drm_gem_obj *exynos_gem_obj;
@@ -493,8 +510,7 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
493 510
494 vma->vm_flags |= (VM_IO | VM_RESERVED); 511 vma->vm_flags |= (VM_IO | VM_RESERVED);
495 512
496 /* in case of direct mapping, always having non-cachable attribute */ 513 update_vm_cache_attr(exynos_gem_obj, vma);
497 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
498 514
499 vm_size = usize = vma->vm_end - vma->vm_start; 515 vm_size = usize = vma->vm_end - vma->vm_start;
500 516
@@ -588,6 +604,32 @@ int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
588 return 0; 604 return 0;
589} 605}
590 606
607int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
608 struct drm_file *file_priv)
609{
	struct exynos_drm_gem_obj *exynos_gem_obj;
610 struct drm_exynos_gem_info *args = data;
611 struct drm_gem_object *obj;
612
613 mutex_lock(&dev->struct_mutex);
614
615 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
616 if (!obj) {
617 DRM_ERROR("failed to lookup gem object.\n");
618 mutex_unlock(&dev->struct_mutex);
619 return -EINVAL;
620 }
621
622 exynos_gem_obj = to_exynos_gem_obj(obj);
623
624 args->flags = exynos_gem_obj->flags;
625 args->size = exynos_gem_obj->size;
626
627 drm_gem_object_unreference(obj);
628 mutex_unlock(&dev->struct_mutex);
629
630 return 0;
631}
632
591int exynos_drm_gem_init_object(struct drm_gem_object *obj) 633int exynos_drm_gem_init_object(struct drm_gem_object *obj)
592{ 634{
593 DRM_DEBUG_KMS("%s\n", __FILE__); 635 DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -597,8 +639,17 @@ int exynos_drm_gem_init_object(struct drm_gem_object *obj)
597 639
598void exynos_drm_gem_free_object(struct drm_gem_object *obj) 640void exynos_drm_gem_free_object(struct drm_gem_object *obj)
599{ 641{
642 struct exynos_drm_gem_obj *exynos_gem_obj;
643 struct exynos_drm_gem_buf *buf;
644
600 DRM_DEBUG_KMS("%s\n", __FILE__); 645 DRM_DEBUG_KMS("%s\n", __FILE__);
601 646
647 exynos_gem_obj = to_exynos_gem_obj(obj);
648 buf = exynos_gem_obj->buffer;
649
650 if (obj->import_attach)
651 drm_prime_gem_destroy(obj, buf->sgt);
652
602 exynos_drm_gem_destroy(to_exynos_gem_obj(obj)); 653 exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
603} 654}
604 655
@@ -724,6 +775,8 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
724 775
725int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) 776int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
726{ 777{
778 struct exynos_drm_gem_obj *exynos_gem_obj;
779 struct drm_gem_object *obj;
727 int ret; 780 int ret;
728 781
729 DRM_DEBUG_KMS("%s\n", __FILE__); 782 DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -735,8 +788,20 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
735 return ret; 788 return ret;
736 } 789 }
737 790
791 obj = vma->vm_private_data;
792 exynos_gem_obj = to_exynos_gem_obj(obj);
793
794 ret = check_gem_flags(exynos_gem_obj->flags);
795 if (ret) {
796 drm_gem_vm_close(vma);
797 drm_gem_free_mmap_offset(obj);
798 return ret;
799 }
800
738 vma->vm_flags &= ~VM_PFNMAP; 801 vma->vm_flags &= ~VM_PFNMAP;
739 vma->vm_flags |= VM_MIXEDMAP; 802 vma->vm_flags |= VM_MIXEDMAP;
740 803
804 update_vm_cache_attr(exynos_gem_obj, vma);
805
741 return ret; 806 return ret;
742} 807}
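The mmap paths above stop hard-coding pgprot_noncached() and instead derive the mapping attributes from the buffer's creation flags via update_vm_cache_attr(). Condensed to a single decision, assuming the EXYNOS_BO_* flags from the exynos UAPI (the helper name is illustrative):

	static pgprot_t bo_pgprot(struct vm_area_struct *vma, unsigned int flags)
	{
		pgprot_t prot = vm_get_page_prot(vma->vm_flags);

		if (flags & EXYNOS_BO_CACHABLE)
			return prot;				/* cached */
		if (flags & EXYNOS_BO_WC)
			return pgprot_writecombine(prot);	/* write-combined */
		return pgprot_noncached(prot);			/* device-like */
	}

Write-combining is the usual middle ground for scanout-style buffers: CPU writes are buffered and coalesced, while reads stay uncached.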
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 4ed842039505..14d038b6cb02 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -40,6 +40,7 @@
40 * device address with IOMMU. 40 * device address with IOMMU.
41 * @sgt: sg table to transfer page data. 41 * @sgt: sg table to transfer page data.
42 * @pages: contain all pages to allocated memory region. 42 * @pages: contain all pages to allocated memory region.
43 * @page_size: could be 4K, 64K or 1MB.
43 * @size: size of allocated memory region. 44 * @size: size of allocated memory region.
44 */ 45 */
45struct exynos_drm_gem_buf { 46struct exynos_drm_gem_buf {
@@ -47,6 +48,7 @@ struct exynos_drm_gem_buf {
47 dma_addr_t dma_addr; 48 dma_addr_t dma_addr;
48 struct sg_table *sgt; 49 struct sg_table *sgt;
49 struct page **pages; 50 struct page **pages;
51 unsigned long page_size;
50 unsigned long size; 52 unsigned long size;
51}; 53};
52 54
@@ -74,9 +76,15 @@ struct exynos_drm_gem_obj {
74 unsigned int flags; 76 unsigned int flags;
75}; 77};
76 78
79struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
80
77/* destroy a buffer with gem object */ 81/* destroy a buffer with gem object */
78void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj); 82void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj);
79 83
84/* create a private gem object and initialize it. */
85struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
86 unsigned long size);
87
80/* create a new buffer with gem object */ 88/* create a new buffer with gem object */
81struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev, 89struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
82 unsigned int flags, 90 unsigned int flags,
@@ -119,6 +127,10 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
119int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data, 127int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
120 struct drm_file *file_priv); 128 struct drm_file *file_priv);
121 129
130/* get buffer information to memory region allocated by gem. */
131int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
132 struct drm_file *file_priv);
133
122/* initialize gem object. */ 134/* initialize gem object. */
123int exynos_drm_gem_init_object(struct drm_gem_object *obj); 135int exynos_drm_gem_init_object(struct drm_gem_object *obj);
124 136
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
index 3424463676e0..5d9d2c2f8f3f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
@@ -37,6 +37,8 @@ struct drm_hdmi_context {
37 struct exynos_drm_subdrv subdrv; 37 struct exynos_drm_subdrv subdrv;
38 struct exynos_drm_hdmi_context *hdmi_ctx; 38 struct exynos_drm_hdmi_context *hdmi_ctx;
39 struct exynos_drm_hdmi_context *mixer_ctx; 39 struct exynos_drm_hdmi_context *mixer_ctx;
40
41 bool enabled[MIXER_WIN_NR];
40}; 42};
41 43
42void exynos_hdmi_ops_register(struct exynos_hdmi_ops *ops) 44void exynos_hdmi_ops_register(struct exynos_hdmi_ops *ops)
@@ -189,23 +191,34 @@ static void drm_hdmi_dpms(struct device *subdrv_dev, int mode)
189 191
190 DRM_DEBUG_KMS("%s\n", __FILE__); 192 DRM_DEBUG_KMS("%s\n", __FILE__);
191 193
192 switch (mode) { 194 if (mixer_ops && mixer_ops->dpms)
193 case DRM_MODE_DPMS_ON: 195 mixer_ops->dpms(ctx->mixer_ctx->ctx, mode);
194 break; 196
195 case DRM_MODE_DPMS_STANDBY: 197 if (hdmi_ops && hdmi_ops->dpms)
196 case DRM_MODE_DPMS_SUSPEND: 198 hdmi_ops->dpms(ctx->hdmi_ctx->ctx, mode);
197 case DRM_MODE_DPMS_OFF: 199}
198 if (hdmi_ops && hdmi_ops->disable) 200
199 hdmi_ops->disable(ctx->hdmi_ctx->ctx); 201static void drm_hdmi_apply(struct device *subdrv_dev)
200 break; 202{
201 default: 203 struct drm_hdmi_context *ctx = to_context(subdrv_dev);
202 DRM_DEBUG_KMS("unkown dps mode: %d\n", mode); 204 int i;
203 break; 205
206 DRM_DEBUG_KMS("%s\n", __FILE__);
207
208 for (i = 0; i < MIXER_WIN_NR; i++) {
209 if (!ctx->enabled[i])
210 continue;
211 if (mixer_ops && mixer_ops->win_commit)
212 mixer_ops->win_commit(ctx->mixer_ctx->ctx, i);
204 } 213 }
214
215 if (hdmi_ops && hdmi_ops->commit)
216 hdmi_ops->commit(ctx->hdmi_ctx->ctx);
205} 217}
206 218
207static struct exynos_drm_manager_ops drm_hdmi_manager_ops = { 219static struct exynos_drm_manager_ops drm_hdmi_manager_ops = {
208 .dpms = drm_hdmi_dpms, 220 .dpms = drm_hdmi_dpms,
221 .apply = drm_hdmi_apply,
209 .enable_vblank = drm_hdmi_enable_vblank, 222 .enable_vblank = drm_hdmi_enable_vblank,
210 .disable_vblank = drm_hdmi_disable_vblank, 223 .disable_vblank = drm_hdmi_disable_vblank,
211 .mode_fixup = drm_hdmi_mode_fixup, 224 .mode_fixup = drm_hdmi_mode_fixup,
@@ -228,21 +241,37 @@ static void drm_mixer_mode_set(struct device *subdrv_dev,
228static void drm_mixer_commit(struct device *subdrv_dev, int zpos) 241static void drm_mixer_commit(struct device *subdrv_dev, int zpos)
229{ 242{
230 struct drm_hdmi_context *ctx = to_context(subdrv_dev); 243 struct drm_hdmi_context *ctx = to_context(subdrv_dev);
244 int win = (zpos == DEFAULT_ZPOS) ? MIXER_DEFAULT_WIN : zpos;
231 245
232 DRM_DEBUG_KMS("%s\n", __FILE__); 246 DRM_DEBUG_KMS("%s\n", __FILE__);
233 247
248 if (win < 0 || win > MIXER_WIN_NR) {
249 DRM_ERROR("mixer window[%d] is wrong\n", win);
250 return;
251 }
252
234 if (mixer_ops && mixer_ops->win_commit) 253 if (mixer_ops && mixer_ops->win_commit)
235 mixer_ops->win_commit(ctx->mixer_ctx->ctx, zpos); 254 mixer_ops->win_commit(ctx->mixer_ctx->ctx, win);
255
256 ctx->enabled[win] = true;
236} 257}
237 258
238static void drm_mixer_disable(struct device *subdrv_dev, int zpos) 259static void drm_mixer_disable(struct device *subdrv_dev, int zpos)
239{ 260{
240 struct drm_hdmi_context *ctx = to_context(subdrv_dev); 261 struct drm_hdmi_context *ctx = to_context(subdrv_dev);
262 int win = (zpos == DEFAULT_ZPOS) ? MIXER_DEFAULT_WIN : zpos;
241 263
242 DRM_DEBUG_KMS("%s\n", __FILE__); 264 DRM_DEBUG_KMS("%s\n", __FILE__);
243 265
266 if (win < 0 || win > MIXER_WIN_NR) {
267 DRM_ERROR("mixer window[%d] is wrong\n", win);
268 return;
269 }
270
244 if (mixer_ops && mixer_ops->win_disable) 271 if (mixer_ops && mixer_ops->win_disable)
245 mixer_ops->win_disable(ctx->mixer_ctx->ctx, zpos); 272 mixer_ops->win_disable(ctx->mixer_ctx->ctx, win);
273
274 ctx->enabled[win] = false;
246} 275}
247 276
248static struct exynos_drm_overlay_ops drm_hdmi_overlay_ops = { 277static struct exynos_drm_overlay_ops drm_hdmi_overlay_ops = {
@@ -335,25 +364,6 @@ static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
335 return 0; 364 return 0;
336} 365}
337 366
338static int hdmi_runtime_suspend(struct device *dev)
339{
340 DRM_DEBUG_KMS("%s\n", __FILE__);
341
342 return 0;
343}
344
345static int hdmi_runtime_resume(struct device *dev)
346{
347 DRM_DEBUG_KMS("%s\n", __FILE__);
348
349 return 0;
350}
351
352static const struct dev_pm_ops hdmi_pm_ops = {
353 .runtime_suspend = hdmi_runtime_suspend,
354 .runtime_resume = hdmi_runtime_resume,
355};
356
357static int __devexit exynos_drm_hdmi_remove(struct platform_device *pdev) 367static int __devexit exynos_drm_hdmi_remove(struct platform_device *pdev)
358{ 368{
359 struct drm_hdmi_context *ctx = platform_get_drvdata(pdev); 369 struct drm_hdmi_context *ctx = platform_get_drvdata(pdev);
@@ -372,6 +382,5 @@ struct platform_driver exynos_drm_common_hdmi_driver = {
372 .driver = { 382 .driver = {
373 .name = "exynos-drm-hdmi", 383 .name = "exynos-drm-hdmi",
374 .owner = THIS_MODULE, 384 .owner = THIS_MODULE,
375 .pm = &hdmi_pm_ops,
376 }, 385 },
377}; 386};
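The enabled[] array added above is what makes drm_hdmi_apply() possible: win_commit and win_disable record which mixer windows are live, and apply re-commits only those after a dpms transition. The replay loop in isolation; replay_windows and its callback are illustrative stand-ins for the ops dispatch above:

	static void replay_windows(bool *enabled, int nr,
				   void (*commit_win)(void *ctx, int win),
				   void *ctx)
	{
		int i;

		for (i = 0; i < nr; i++)
			if (enabled[i])	/* only re-commit live windows */
				commit_win(ctx, i);
	}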
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
index f3ae192c8dcf..bd8126996e52 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
@@ -26,6 +26,9 @@
26#ifndef _EXYNOS_DRM_HDMI_H_ 26#ifndef _EXYNOS_DRM_HDMI_H_
27#define _EXYNOS_DRM_HDMI_H_ 27#define _EXYNOS_DRM_HDMI_H_
28 28
29#define MIXER_WIN_NR 3
30#define MIXER_DEFAULT_WIN 0
31
29/* 32/*
30 * exynos hdmi common context structure. 33 * exynos hdmi common context structure.
31 * 34 *
@@ -54,13 +57,14 @@ struct exynos_hdmi_ops {
54 void (*get_max_resol)(void *ctx, unsigned int *width, 57 void (*get_max_resol)(void *ctx, unsigned int *width,
55 unsigned int *height); 58 unsigned int *height);
56 void (*commit)(void *ctx); 59 void (*commit)(void *ctx);
57 void (*disable)(void *ctx); 60 void (*dpms)(void *ctx, int mode);
58}; 61};
59 62
60struct exynos_mixer_ops { 63struct exynos_mixer_ops {
61 /* manager */ 64 /* manager */
62 int (*enable_vblank)(void *ctx, int pipe); 65 int (*enable_vblank)(void *ctx, int pipe);
63 void (*disable_vblank)(void *ctx); 66 void (*disable_vblank)(void *ctx);
67 void (*dpms)(void *ctx, int mode);
64 68
65 /* overlay */ 69 /* overlay */
66 void (*win_mode_set)(void *ctx, struct exynos_drm_overlay *overlay); 70 void (*win_mode_set)(void *ctx, struct exynos_drm_overlay *overlay);
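Both ops tables now expose dpms as an optional callback, so every call site checks the table pointer and the member before dispatching, as drm_hdmi_dpms() does above. The pattern in isolation; my_ops and do_thing are illustrative:

	struct my_ops {
		void (*do_thing)(void *ctx, int arg);	/* may be NULL */
	};

	static struct my_ops *registered_ops;	/* set at registration time */

	static void dispatch_thing(void *ctx, int arg)
	{
		if (registered_ops && registered_ops->do_thing)
			registered_ops->do_thing(ctx, arg);
	}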
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index f92fe4c6174a..c4c6525d4653 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -41,8 +41,6 @@ exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
41 container_of(plane, struct exynos_plane, base); 41 container_of(plane, struct exynos_plane, base);
42 struct exynos_drm_overlay *overlay = &exynos_plane->overlay; 42 struct exynos_drm_overlay *overlay = &exynos_plane->overlay;
43 struct exynos_drm_crtc_pos pos; 43 struct exynos_drm_crtc_pos pos;
44 unsigned int x = src_x >> 16;
45 unsigned int y = src_y >> 16;
46 int ret; 44 int ret;
47 45
48 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); 46 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
@@ -53,10 +51,12 @@ exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
53 pos.crtc_w = crtc_w; 51 pos.crtc_w = crtc_w;
54 pos.crtc_h = crtc_h; 52 pos.crtc_h = crtc_h;
55 53
56 pos.fb_x = x; 54 /* considering 16.16 fixed point of source values */
57 pos.fb_y = y; 55 pos.fb_x = src_x >> 16;
56 pos.fb_y = src_y >> 16;
57 pos.src_w = src_w >> 16;
58 pos.src_h = src_h >> 16;
58 59
59 /* TODO: scale feature */
60 ret = exynos_drm_overlay_update(overlay, fb, &crtc->mode, &pos); 60 ret = exynos_drm_overlay_update(overlay, fb, &crtc->mode, &pos);
61 if (ret < 0) 61 if (ret < 0)
62 return ret; 62 return ret;
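The plane update above converts the source rectangle from the DRM core's 16.16 fixed-point coordinates to whole pixels with a right shift, truncating any fractional part. A quick standalone check of that arithmetic:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t src_w = 1920u << 16;	/* 1920.0 in 16.16 */

		assert((src_w >> 16) == 1920);	/* integer part only */
		/* 1280.5 in 16.16 truncates to 1280 */
		assert((((1280u << 16) | 0x8000) >> 16) == 1280);
		return 0;
	}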
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index b00353876458..a137e9e39a33 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -57,18 +57,16 @@ struct hdmi_resources {
57struct hdmi_context { 57struct hdmi_context {
58 struct device *dev; 58 struct device *dev;
59 struct drm_device *drm_dev; 59 struct drm_device *drm_dev;
60 struct fb_videomode *default_timing; 60 bool hpd;
61 unsigned int is_v13:1; 61 bool powered;
62 unsigned int default_win; 62 bool is_v13;
63 unsigned int default_bpp; 63 bool dvi_mode;
64 bool hpd_handle; 64 struct mutex hdmi_mutex;
65 bool enabled;
66 65
67 struct resource *regs_res; 66 struct resource *regs_res;
68 void __iomem *regs; 67 void __iomem *regs;
69 unsigned int irq; 68 int external_irq;
70 struct workqueue_struct *wq; 69 int internal_irq;
71 struct work_struct hotplug_work;
72 70
73 struct i2c_client *ddc_port; 71 struct i2c_client *ddc_port;
74 struct i2c_client *hdmiphy_port; 72 struct i2c_client *hdmiphy_port;
@@ -78,6 +76,9 @@ struct hdmi_context {
78 76
79 struct hdmi_resources res; 77 struct hdmi_resources res;
80 void *parent_ctx; 78 void *parent_ctx;
79
80 void (*cfg_hpd)(bool external);
81 int (*get_hpd)(void);
81}; 82};
82 83
83/* HDMI Version 1.3 */ 84/* HDMI Version 1.3 */
@@ -361,6 +362,13 @@ static const u8 hdmiphy_conf27_027[32] = {
361 0x54, 0xe3, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00, 362 0x54, 0xe3, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00,
362}; 363};
363 364
365static const u8 hdmiphy_conf74_176[32] = {
366 0x01, 0xd1, 0x1f, 0x10, 0x40, 0x5b, 0xef, 0x08,
367 0x81, 0xa0, 0xb9, 0xd8, 0x45, 0xa0, 0xac, 0x80,
368 0x5a, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
369 0x54, 0xa6, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00,
370};
371
364static const u8 hdmiphy_conf74_25[32] = { 372static const u8 hdmiphy_conf74_25[32] = {
365 0x01, 0xd1, 0x1f, 0x10, 0x40, 0x40, 0xf8, 0x08, 373 0x01, 0xd1, 0x1f, 0x10, 0x40, 0x40, 0xf8, 0x08,
366 0x81, 0xa0, 0xba, 0xd8, 0x45, 0xa0, 0xac, 0x80, 374 0x81, 0xa0, 0xba, 0xd8, 0x45, 0xa0, 0xac, 0x80,
@@ -750,6 +758,63 @@ static const struct hdmi_preset_conf hdmi_conf_1080i60 = {
750 }, 758 },
751}; 759};
752 760
761static const struct hdmi_preset_conf hdmi_conf_1080p30 = {
762 .core = {
763 .h_blank = {0x18, 0x01},
764 .v2_blank = {0x65, 0x04},
765 .v1_blank = {0x2d, 0x00},
766 .v_line = {0x65, 0x04},
767 .h_line = {0x98, 0x08},
768 .hsync_pol = {0x00},
769 .vsync_pol = {0x00},
770 .int_pro_mode = {0x00},
771 .v_blank_f0 = {0xff, 0xff},
772 .v_blank_f1 = {0xff, 0xff},
773 .h_sync_start = {0x56, 0x00},
774 .h_sync_end = {0x82, 0x00},
775 .v_sync_line_bef_2 = {0x09, 0x00},
776 .v_sync_line_bef_1 = {0x04, 0x00},
777 .v_sync_line_aft_2 = {0xff, 0xff},
778 .v_sync_line_aft_1 = {0xff, 0xff},
779 .v_sync_line_aft_pxl_2 = {0xff, 0xff},
780 .v_sync_line_aft_pxl_1 = {0xff, 0xff},
781 .v_blank_f2 = {0xff, 0xff},
782 .v_blank_f3 = {0xff, 0xff},
783 .v_blank_f4 = {0xff, 0xff},
784 .v_blank_f5 = {0xff, 0xff},
785 .v_sync_line_aft_3 = {0xff, 0xff},
786 .v_sync_line_aft_4 = {0xff, 0xff},
787 .v_sync_line_aft_5 = {0xff, 0xff},
788 .v_sync_line_aft_6 = {0xff, 0xff},
789 .v_sync_line_aft_pxl_3 = {0xff, 0xff},
790 .v_sync_line_aft_pxl_4 = {0xff, 0xff},
791 .v_sync_line_aft_pxl_5 = {0xff, 0xff},
792 .v_sync_line_aft_pxl_6 = {0xff, 0xff},
793 .vact_space_1 = {0xff, 0xff},
794 .vact_space_2 = {0xff, 0xff},
795 .vact_space_3 = {0xff, 0xff},
796 .vact_space_4 = {0xff, 0xff},
797 .vact_space_5 = {0xff, 0xff},
798 .vact_space_6 = {0xff, 0xff},
799 /* remaining fields: don't care */
800 },
801 .tg = {
802 0x00, /* cmd */
803 0x98, 0x08, /* h_fsz */
804 0x18, 0x01, 0x80, 0x07, /* hact */
805 0x65, 0x04, /* v_fsz */
806 0x01, 0x00, 0x33, 0x02, /* vsync */
807 0x2d, 0x00, 0x38, 0x04, /* vact */
808 0x33, 0x02, /* field_chg */
809 0x48, 0x02, /* vact_st2 */
810 0x00, 0x00, /* vact_st3 */
811 0x00, 0x00, /* vact_st4 */
812 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
813 0x01, 0x00, 0x33, 0x02, /* field top/bot */
814 0x00, /* 3d FP */
815 },
816};
817
753static const struct hdmi_preset_conf hdmi_conf_1080p50 = { 818static const struct hdmi_preset_conf hdmi_conf_1080p50 = {
754 .core = { 819 .core = {
755 .h_blank = {0xd0, 0x02}, 820 .h_blank = {0xd0, 0x02},
@@ -864,6 +929,7 @@ static const struct hdmi_conf hdmi_confs[] = {
864 { 1280, 720, 60, false, hdmiphy_conf74_25, &hdmi_conf_720p60 }, 929 { 1280, 720, 60, false, hdmiphy_conf74_25, &hdmi_conf_720p60 },
865 { 1920, 1080, 50, true, hdmiphy_conf74_25, &hdmi_conf_1080i50 }, 930 { 1920, 1080, 50, true, hdmiphy_conf74_25, &hdmi_conf_1080i50 },
866 { 1920, 1080, 60, true, hdmiphy_conf74_25, &hdmi_conf_1080i60 }, 931 { 1920, 1080, 60, true, hdmiphy_conf74_25, &hdmi_conf_1080i60 },
932 { 1920, 1080, 30, false, hdmiphy_conf74_176, &hdmi_conf_1080p30 },
867 { 1920, 1080, 50, false, hdmiphy_conf148_5, &hdmi_conf_1080p50 }, 933 { 1920, 1080, 50, false, hdmiphy_conf148_5, &hdmi_conf_1080p50 },
868 { 1920, 1080, 60, false, hdmiphy_conf148_5, &hdmi_conf_1080p60 }, 934 { 1920, 1080, 60, false, hdmiphy_conf148_5, &hdmi_conf_1080p60 },
869}; 935};
@@ -1194,12 +1260,8 @@ static int hdmi_conf_index(struct hdmi_context *hdata,
1194static bool hdmi_is_connected(void *ctx) 1260static bool hdmi_is_connected(void *ctx)
1195{ 1261{
1196 struct hdmi_context *hdata = ctx; 1262 struct hdmi_context *hdata = ctx;
1197 u32 val = hdmi_reg_read(hdata, HDMI_HPD_STATUS);
1198
1199 if (val)
1200 return true;
1201 1263
1202 return false; 1264 return hdata->hpd;
1203} 1265}
1204 1266
1205static int hdmi_get_edid(void *ctx, struct drm_connector *connector, 1267static int hdmi_get_edid(void *ctx, struct drm_connector *connector,
@@ -1215,10 +1277,12 @@ static int hdmi_get_edid(void *ctx, struct drm_connector *connector,
1215 1277
1216 raw_edid = drm_get_edid(connector, hdata->ddc_port->adapter); 1278 raw_edid = drm_get_edid(connector, hdata->ddc_port->adapter);
1217 if (raw_edid) { 1279 if (raw_edid) {
1280 hdata->dvi_mode = !drm_detect_hdmi_monitor(raw_edid);
1218 memcpy(edid, raw_edid, min((1 + raw_edid->extensions) 1281 memcpy(edid, raw_edid, min((1 + raw_edid->extensions)
1219 * EDID_LENGTH, len)); 1282 * EDID_LENGTH, len));
1220 DRM_DEBUG_KMS("width[%d] x height[%d]\n", 1283 DRM_DEBUG_KMS("%s : width[%d] x height[%d]\n",
1221 raw_edid->width_cm, raw_edid->height_cm); 1284 (hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"),
1285 raw_edid->width_cm, raw_edid->height_cm);
1222 } else { 1286 } else {
1223 return -ENODEV; 1287 return -ENODEV;
1224 } 1288 }
@@ -1289,28 +1353,6 @@ static int hdmi_check_timing(void *ctx, void *timing)
1289 return hdmi_v14_check_timing(check_timing); 1353 return hdmi_v14_check_timing(check_timing);
1290} 1354}
1291 1355
1292static int hdmi_display_power_on(void *ctx, int mode)
1293{
1294 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
1295
1296 switch (mode) {
1297 case DRM_MODE_DPMS_ON:
1298 DRM_DEBUG_KMS("hdmi [on]\n");
1299 break;
1300 case DRM_MODE_DPMS_STANDBY:
1301 break;
1302 case DRM_MODE_DPMS_SUSPEND:
1303 break;
1304 case DRM_MODE_DPMS_OFF:
1305 DRM_DEBUG_KMS("hdmi [off]\n");
1306 break;
1307 default:
1308 break;
1309 }
1310
1311 return 0;
1312}
1313
1314static void hdmi_set_acr(u32 freq, u8 *acr) 1356static void hdmi_set_acr(u32 freq, u8 *acr)
1315{ 1357{
1316 u32 n, cts; 1358 u32 n, cts;
@@ -1463,10 +1505,7 @@ static void hdmi_audio_init(struct hdmi_context *hdata)
1463 1505
1464static void hdmi_audio_control(struct hdmi_context *hdata, bool onoff) 1506static void hdmi_audio_control(struct hdmi_context *hdata, bool onoff)
1465{ 1507{
1466 u32 mod; 1508 if (hdata->dvi_mode)
1467
1468 mod = hdmi_reg_read(hdata, HDMI_MODE_SEL);
1469 if (mod & HDMI_DVI_MODE_EN)
1470 return; 1509 return;
1471 1510
1472 hdmi_reg_writeb(hdata, HDMI_AUI_CON, onoff ? 2 : 0); 1511 hdmi_reg_writeb(hdata, HDMI_AUI_CON, onoff ? 2 : 0);
@@ -1478,9 +1517,6 @@ static void hdmi_conf_reset(struct hdmi_context *hdata)
1478{ 1517{
1479 u32 reg; 1518 u32 reg;
1480 1519
1481 /* disable hpd handle for drm */
1482 hdata->hpd_handle = false;
1483
1484 if (hdata->is_v13) 1520 if (hdata->is_v13)
1485 reg = HDMI_V13_CORE_RSTOUT; 1521 reg = HDMI_V13_CORE_RSTOUT;
1486 else 1522 else
@@ -1491,16 +1527,10 @@ static void hdmi_conf_reset(struct hdmi_context *hdata)
1491 mdelay(10); 1527 mdelay(10);
1492 hdmi_reg_writemask(hdata, reg, ~0, HDMI_CORE_SW_RSTOUT); 1528 hdmi_reg_writemask(hdata, reg, ~0, HDMI_CORE_SW_RSTOUT);
1493 mdelay(10); 1529 mdelay(10);
1494
1495 /* enable hpd handle for drm */
1496 hdata->hpd_handle = true;
1497} 1530}
1498 1531
1499static void hdmi_conf_init(struct hdmi_context *hdata) 1532static void hdmi_conf_init(struct hdmi_context *hdata)
1500{ 1533{
1501 /* disable hpd handle for drm */
1502 hdata->hpd_handle = false;
1503
1504 /* enable HPD interrupts */ 1534 /* enable HPD interrupts */
1505 hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL | 1535 hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL |
1506 HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG); 1536 HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG);
@@ -1514,6 +1544,14 @@ static void hdmi_conf_init(struct hdmi_context *hdata)
1514 /* disable bluescreen */ 1544 /* disable bluescreen */
1515 hdmi_reg_writemask(hdata, HDMI_CON_0, 0, HDMI_BLUE_SCR_EN); 1545 hdmi_reg_writemask(hdata, HDMI_CON_0, 0, HDMI_BLUE_SCR_EN);
1516 1546
1547 if (hdata->dvi_mode) {
1548 /* choose DVI mode */
1549 hdmi_reg_writemask(hdata, HDMI_MODE_SEL,
1550 HDMI_MODE_DVI_EN, HDMI_MODE_MASK);
1551 hdmi_reg_writeb(hdata, HDMI_CON_2,
1552 HDMI_VID_PREAMBLE_DIS | HDMI_GUARD_BAND_DIS);
1553 }
1554
1517 if (hdata->is_v13) { 1555 if (hdata->is_v13) {
1518 /* choose bluescreen color */ 1556 /* choose bluescreen color */
1519 hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_0, 0x12); 1557 hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_0, 0x12);
@@ -1535,9 +1573,6 @@ static void hdmi_conf_init(struct hdmi_context *hdata)
1535 hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 2 << 5); 1573 hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 2 << 5);
1536 hdmi_reg_writemask(hdata, HDMI_CON_1, 2, 3 << 5); 1574 hdmi_reg_writemask(hdata, HDMI_CON_1, 2, 3 << 5);
1537 } 1575 }
1538
1539 /* enable hpd handle for drm */
1540 hdata->hpd_handle = true;
1541} 1576}
1542 1577
1543static void hdmi_v13_timing_apply(struct hdmi_context *hdata) 1578static void hdmi_v13_timing_apply(struct hdmi_context *hdata)
@@ -1890,8 +1925,11 @@ static void hdmi_conf_apply(struct hdmi_context *hdata)
1890 hdmiphy_conf_reset(hdata); 1925 hdmiphy_conf_reset(hdata);
1891 hdmiphy_conf_apply(hdata); 1926 hdmiphy_conf_apply(hdata);
1892 1927
1928 mutex_lock(&hdata->hdmi_mutex);
1893 hdmi_conf_reset(hdata); 1929 hdmi_conf_reset(hdata);
1894 hdmi_conf_init(hdata); 1930 hdmi_conf_init(hdata);
1931 mutex_unlock(&hdata->hdmi_mutex);
1932
1895 hdmi_audio_init(hdata); 1933 hdmi_audio_init(hdata);
1896 1934
1897 /* setting core registers */ 1935 /* setting core registers */
@@ -1971,20 +2009,86 @@ static void hdmi_commit(void *ctx)
1971 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); 2009 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
1972 2010
1973 hdmi_conf_apply(hdata); 2011 hdmi_conf_apply(hdata);
2012}
2013
2014static void hdmi_poweron(struct hdmi_context *hdata)
2015{
2016 struct hdmi_resources *res = &hdata->res;
2017
2018 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
2019
2020 mutex_lock(&hdata->hdmi_mutex);
2021 if (hdata->powered) {
2022 mutex_unlock(&hdata->hdmi_mutex);
2023 return;
2024 }
2025
2026 hdata->powered = true;
2027
2028 if (hdata->cfg_hpd)
2029 hdata->cfg_hpd(true);
2030 mutex_unlock(&hdata->hdmi_mutex);
2031
2032 pm_runtime_get_sync(hdata->dev);
2033
2034 regulator_bulk_enable(res->regul_count, res->regul_bulk);
2035 clk_enable(res->hdmiphy);
2036 clk_enable(res->hdmi);
2037 clk_enable(res->sclk_hdmi);
2038}
2039
2040static void hdmi_poweroff(struct hdmi_context *hdata)
2041{
2042 struct hdmi_resources *res = &hdata->res;
2043
2044 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
2045
2046 mutex_lock(&hdata->hdmi_mutex);
2047 if (!hdata->powered)
2048 goto out;
2049 mutex_unlock(&hdata->hdmi_mutex);
2050
2051 /*
2052 * The TV power domain can only be turned off once the hdmiphy is in a
2053 * defined state, and the hdmiphy reset state appears to satisfy that.
2054 */
2055 hdmiphy_conf_reset(hdata);
2056
2057 clk_disable(res->sclk_hdmi);
2058 clk_disable(res->hdmi);
2059 clk_disable(res->hdmiphy);
2060 regulator_bulk_disable(res->regul_count, res->regul_bulk);
2061
2062 pm_runtime_put_sync(hdata->dev);
1974 2063
1975 hdata->enabled = true; 2064 mutex_lock(&hdata->hdmi_mutex);
2065 if (hdata->cfg_hpd)
2066 hdata->cfg_hpd(false);
2067
2068 hdata->powered = false;
2069
2070out:
2071 mutex_unlock(&hdata->hdmi_mutex);
1976} 2072}
1977 2073
1978static void hdmi_disable(void *ctx) 2074static void hdmi_dpms(void *ctx, int mode)
1979{ 2075{
1980 struct hdmi_context *hdata = ctx; 2076 struct hdmi_context *hdata = ctx;
1981 2077
1982 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); 2078 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
1983 2079
1984 if (hdata->enabled) { 2080 switch (mode) {
1985 hdmi_audio_control(hdata, false); 2081 case DRM_MODE_DPMS_ON:
1986 hdmiphy_conf_reset(hdata); 2082 hdmi_poweron(hdata);
1987 hdmi_conf_reset(hdata); 2083 break;
2084 case DRM_MODE_DPMS_STANDBY:
2085 case DRM_MODE_DPMS_SUSPEND:
2086 case DRM_MODE_DPMS_OFF:
2087 hdmi_poweroff(hdata);
2088 break;
2089 default:
2090 DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
2091 break;
1988 } 2092 }
1989} 2093}
1990 2094
@@ -1993,30 +2097,35 @@ static struct exynos_hdmi_ops hdmi_ops = {
1993 .is_connected = hdmi_is_connected, 2097 .is_connected = hdmi_is_connected,
1994 .get_edid = hdmi_get_edid, 2098 .get_edid = hdmi_get_edid,
1995 .check_timing = hdmi_check_timing, 2099 .check_timing = hdmi_check_timing,
1996 .power_on = hdmi_display_power_on,
1997 2100
1998 /* manager */ 2101 /* manager */
1999 .mode_fixup = hdmi_mode_fixup, 2102 .mode_fixup = hdmi_mode_fixup,
2000 .mode_set = hdmi_mode_set, 2103 .mode_set = hdmi_mode_set,
2001 .get_max_resol = hdmi_get_max_resol, 2104 .get_max_resol = hdmi_get_max_resol,
2002 .commit = hdmi_commit, 2105 .commit = hdmi_commit,
2003 .disable = hdmi_disable, 2106 .dpms = hdmi_dpms,
2004}; 2107};
2005 2108
2006/* 2109static irqreturn_t hdmi_external_irq_thread(int irq, void *arg)
2007 * Handle hotplug events outside the interrupt handler proper.
2008 */
2009static void hdmi_hotplug_func(struct work_struct *work)
2010{ 2110{
2011 struct hdmi_context *hdata = 2111 struct exynos_drm_hdmi_context *ctx = arg;
2012 container_of(work, struct hdmi_context, hotplug_work); 2112 struct hdmi_context *hdata = ctx->ctx;
2013 struct exynos_drm_hdmi_context *ctx = 2113
2014 (struct exynos_drm_hdmi_context *)hdata->parent_ctx; 2114 if (!hdata->get_hpd)
2115 goto out;
2116
2117 mutex_lock(&hdata->hdmi_mutex);
2118 hdata->hpd = hdata->get_hpd();
2119 mutex_unlock(&hdata->hdmi_mutex);
2015 2120
2016 drm_helper_hpd_irq_event(ctx->drm_dev); 2121 if (ctx->drm_dev)
2122 drm_helper_hpd_irq_event(ctx->drm_dev);
2123
2124out:
2125 return IRQ_HANDLED;
2017} 2126}
2018 2127
2019static irqreturn_t hdmi_irq_handler(int irq, void *arg) 2128static irqreturn_t hdmi_internal_irq_thread(int irq, void *arg)
2020{ 2129{
2021 struct exynos_drm_hdmi_context *ctx = arg; 2130 struct exynos_drm_hdmi_context *ctx = arg;
2022 struct hdmi_context *hdata = ctx->ctx; 2131 struct hdmi_context *hdata = ctx->ctx;
@@ -2025,19 +2134,28 @@ static irqreturn_t hdmi_irq_handler(int irq, void *arg)
2025 intc_flag = hdmi_reg_read(hdata, HDMI_INTC_FLAG); 2134 intc_flag = hdmi_reg_read(hdata, HDMI_INTC_FLAG);
2026 /* clearing flags for HPD plug/unplug */ 2135 /* clearing flags for HPD plug/unplug */
2027 if (intc_flag & HDMI_INTC_FLAG_HPD_UNPLUG) { 2136 if (intc_flag & HDMI_INTC_FLAG_HPD_UNPLUG) {
2028 DRM_DEBUG_KMS("unplugged, handling:%d\n", hdata->hpd_handle); 2137 DRM_DEBUG_KMS("unplugged\n");
2029 hdmi_reg_writemask(hdata, HDMI_INTC_FLAG, ~0, 2138 hdmi_reg_writemask(hdata, HDMI_INTC_FLAG, ~0,
2030 HDMI_INTC_FLAG_HPD_UNPLUG); 2139 HDMI_INTC_FLAG_HPD_UNPLUG);
2031 } 2140 }
2032 if (intc_flag & HDMI_INTC_FLAG_HPD_PLUG) { 2141 if (intc_flag & HDMI_INTC_FLAG_HPD_PLUG) {
2033 DRM_DEBUG_KMS("plugged, handling:%d\n", hdata->hpd_handle); 2142 DRM_DEBUG_KMS("plugged\n");
2034 hdmi_reg_writemask(hdata, HDMI_INTC_FLAG, ~0, 2143 hdmi_reg_writemask(hdata, HDMI_INTC_FLAG, ~0,
2035 HDMI_INTC_FLAG_HPD_PLUG); 2144 HDMI_INTC_FLAG_HPD_PLUG);
2036 } 2145 }
2037 2146
2038 if (ctx->drm_dev && hdata->hpd_handle) 2147 mutex_lock(&hdata->hdmi_mutex);
2039 queue_work(hdata->wq, &hdata->hotplug_work); 2148 hdata->hpd = hdmi_reg_read(hdata, HDMI_HPD_STATUS);
2149 if (hdata->powered && hdata->hpd) {
2150 mutex_unlock(&hdata->hdmi_mutex);
2151 goto out;
2152 }
2153 mutex_unlock(&hdata->hdmi_mutex);
2154
2155 if (ctx->drm_dev)
2156 drm_helper_hpd_irq_event(ctx->drm_dev);
2040 2157
2158out:
2041 return IRQ_HANDLED; 2159 return IRQ_HANDLED;
2042} 2160}
2043 2161
@@ -2131,68 +2249,6 @@ static int hdmi_resources_cleanup(struct hdmi_context *hdata)
2131 return 0; 2249 return 0;
2132} 2250}
2133 2251
2134static void hdmi_resource_poweron(struct hdmi_context *hdata)
2135{
2136 struct hdmi_resources *res = &hdata->res;
2137
2138 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
2139
2140 /* turn HDMI power on */
2141 regulator_bulk_enable(res->regul_count, res->regul_bulk);
2142 /* power-on hdmi physical interface */
2143 clk_enable(res->hdmiphy);
2144 /* turn clocks on */
2145 clk_enable(res->hdmi);
2146 clk_enable(res->sclk_hdmi);
2147
2148 hdmiphy_conf_reset(hdata);
2149 hdmi_conf_reset(hdata);
2150 hdmi_conf_init(hdata);
2151 hdmi_audio_init(hdata);
2152}
2153
2154static void hdmi_resource_poweroff(struct hdmi_context *hdata)
2155{
2156 struct hdmi_resources *res = &hdata->res;
2157
2158 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
2159
2160 /* turn clocks off */
2161 clk_disable(res->sclk_hdmi);
2162 clk_disable(res->hdmi);
2163 /* power-off hdmiphy */
2164 clk_disable(res->hdmiphy);
2165 /* turn HDMI power off */
2166 regulator_bulk_disable(res->regul_count, res->regul_bulk);
2167}
2168
2169static int hdmi_runtime_suspend(struct device *dev)
2170{
2171 struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
2172
2173 DRM_DEBUG_KMS("%s\n", __func__);
2174
2175 hdmi_resource_poweroff(ctx->ctx);
2176
2177 return 0;
2178}
2179
2180static int hdmi_runtime_resume(struct device *dev)
2181{
2182 struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
2183
2184 DRM_DEBUG_KMS("%s\n", __func__);
2185
2186 hdmi_resource_poweron(ctx->ctx);
2187
2188 return 0;
2189}
2190
2191static const struct dev_pm_ops hdmi_pm_ops = {
2192 .runtime_suspend = hdmi_runtime_suspend,
2193 .runtime_resume = hdmi_runtime_resume,
2194};
2195
2196static struct i2c_client *hdmi_ddc, *hdmi_hdmiphy; 2252static struct i2c_client *hdmi_ddc, *hdmi_hdmiphy;
2197 2253
2198void hdmi_attach_ddc_client(struct i2c_client *ddc) 2254void hdmi_attach_ddc_client(struct i2c_client *ddc)
@@ -2237,15 +2293,16 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
2237 return -ENOMEM; 2293 return -ENOMEM;
2238 } 2294 }
2239 2295
2296 mutex_init(&hdata->hdmi_mutex);
2297
2240 drm_hdmi_ctx->ctx = (void *)hdata; 2298 drm_hdmi_ctx->ctx = (void *)hdata;
2241 hdata->parent_ctx = (void *)drm_hdmi_ctx; 2299 hdata->parent_ctx = (void *)drm_hdmi_ctx;
2242 2300
2243 platform_set_drvdata(pdev, drm_hdmi_ctx); 2301 platform_set_drvdata(pdev, drm_hdmi_ctx);
2244 2302
2245 hdata->is_v13 = pdata->is_v13; 2303 hdata->is_v13 = pdata->is_v13;
2246 hdata->default_win = pdata->default_win; 2304 hdata->cfg_hpd = pdata->cfg_hpd;
2247 hdata->default_timing = &pdata->timing; 2305 hdata->get_hpd = pdata->get_hpd;
2248 hdata->default_bpp = pdata->bpp;
2249 hdata->dev = dev; 2306 hdata->dev = dev;
2250 2307
2251 ret = hdmi_resources_init(hdata); 2308 ret = hdmi_resources_init(hdata);
@@ -2294,41 +2351,49 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
2294 2351
2295 hdata->hdmiphy_port = hdmi_hdmiphy; 2352 hdata->hdmiphy_port = hdmi_hdmiphy;
2296 2353
2297 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 2354 hdata->external_irq = platform_get_irq_byname(pdev, "external_irq");
2298 if (res == NULL) { 2355 if (hdata->external_irq < 0) {
2299 DRM_ERROR("get interrupt resource failed.\n"); 2356 DRM_ERROR("failed to get platform irq\n");
2300 ret = -ENXIO; 2357 ret = hdata->external_irq;
2301 goto err_hdmiphy; 2358 goto err_hdmiphy;
2302 } 2359 }
2303 2360
2304 /* create workqueue and hotplug work */ 2361 hdata->internal_irq = platform_get_irq_byname(pdev, "internal_irq");
2305 hdata->wq = alloc_workqueue("exynos-drm-hdmi", 2362 if (hdata->internal_irq < 0) {
2306 WQ_UNBOUND | WQ_NON_REENTRANT, 1); 2363 DRM_ERROR("failed to get platform internal irq\n");
2307 if (hdata->wq == NULL) { 2364 ret = hdata->internal_irq;
2308 DRM_ERROR("Failed to create workqueue.\n");
2309 ret = -ENOMEM;
2310 goto err_hdmiphy; 2365 goto err_hdmiphy;
2311 } 2366 }
2312 INIT_WORK(&hdata->hotplug_work, hdmi_hotplug_func);
2313 2367
2314 /* register hpd interrupt */ 2368 ret = request_threaded_irq(hdata->external_irq, NULL,
2315 ret = request_irq(res->start, hdmi_irq_handler, 0, "drm_hdmi", 2369 hdmi_external_irq_thread, IRQF_TRIGGER_RISING |
2316 drm_hdmi_ctx); 2370 IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
2371 "hdmi_external", drm_hdmi_ctx);
2317 if (ret) { 2372 if (ret) {
2318 DRM_ERROR("request interrupt failed.\n"); 2373 DRM_ERROR("failed to register hdmi internal interrupt\n");
2319 goto err_workqueue; 2374 goto err_hdmiphy;
2375 }
2376
2377 if (hdata->cfg_hpd)
2378 hdata->cfg_hpd(false);
2379
2380 ret = request_threaded_irq(hdata->internal_irq, NULL,
2381 hdmi_internal_irq_thread, IRQF_ONESHOT,
2382 "hdmi_internal", drm_hdmi_ctx);
2383 if (ret) {
2384 DRM_ERROR("failed to register hdmi internal interrupt\n");
2385 goto err_free_irq;
2320 } 2386 }
2321 hdata->irq = res->start;
2322 2387
2323 /* register specific callbacks to common hdmi. */ 2388 /* register specific callbacks to common hdmi. */
2324 exynos_hdmi_ops_register(&hdmi_ops); 2389 exynos_hdmi_ops_register(&hdmi_ops);
2325 2390
2326 hdmi_resource_poweron(hdata); 2391 pm_runtime_enable(dev);
2327 2392
2328 return 0; 2393 return 0;
2329 2394
2330err_workqueue: 2395err_free_irq:
2331 destroy_workqueue(hdata->wq); 2396 free_irq(hdata->external_irq, drm_hdmi_ctx);
2332err_hdmiphy: 2397err_hdmiphy:
2333 i2c_del_driver(&hdmiphy_driver); 2398 i2c_del_driver(&hdmiphy_driver);
2334err_ddc: 2399err_ddc:
@@ -2348,18 +2413,15 @@ err_data:
2348 2413
2349static int __devexit hdmi_remove(struct platform_device *pdev) 2414static int __devexit hdmi_remove(struct platform_device *pdev)
2350{ 2415{
2416 struct device *dev = &pdev->dev;
2351 struct exynos_drm_hdmi_context *ctx = platform_get_drvdata(pdev); 2417 struct exynos_drm_hdmi_context *ctx = platform_get_drvdata(pdev);
2352 struct hdmi_context *hdata = ctx->ctx; 2418 struct hdmi_context *hdata = ctx->ctx;
2353 2419
2354 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); 2420 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
2355 2421
2356 hdmi_resource_poweroff(hdata); 2422 pm_runtime_disable(dev);
2357 2423
2358 disable_irq(hdata->irq); 2424 free_irq(hdata->internal_irq, hdata);
2359 free_irq(hdata->irq, hdata);
2360
2361 cancel_work_sync(&hdata->hotplug_work);
2362 destroy_workqueue(hdata->wq);
2363 2425
2364 hdmi_resources_cleanup(hdata); 2426 hdmi_resources_cleanup(hdata);
2365 2427
@@ -2378,12 +2440,43 @@ static int __devexit hdmi_remove(struct platform_device *pdev)
2378 return 0; 2440 return 0;
2379} 2441}
2380 2442
2443#ifdef CONFIG_PM_SLEEP
2444static int hdmi_suspend(struct device *dev)
2445{
2446 struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
2447 struct hdmi_context *hdata = ctx->ctx;
2448
2449 disable_irq(hdata->internal_irq);
2450 disable_irq(hdata->external_irq);
2451
2452 hdata->hpd = false;
2453 if (ctx->drm_dev)
2454 drm_helper_hpd_irq_event(ctx->drm_dev);
2455
2456 hdmi_poweroff(hdata);
2457
2458 return 0;
2459}
2460
2461static int hdmi_resume(struct device *dev)
2462{
2463 struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
2464 struct hdmi_context *hdata = ctx->ctx;
2465
2466 enable_irq(hdata->external_irq);
2467 enable_irq(hdata->internal_irq);
2468 return 0;
2469}
2470#endif
2471
2472static SIMPLE_DEV_PM_OPS(hdmi_pm_ops, hdmi_suspend, hdmi_resume);
2473
2381struct platform_driver hdmi_driver = { 2474struct platform_driver hdmi_driver = {
2382 .probe = hdmi_probe, 2475 .probe = hdmi_probe,
2383 .remove = __devexit_p(hdmi_remove), 2476 .remove = __devexit_p(hdmi_remove),
2384 .driver = { 2477 .driver = {
2385 .name = "exynos4-hdmi", 2478 .name = "exynos4-hdmi",
2386 .owner = THIS_MODULE, 2479 .owner = THIS_MODULE,
2387 .pm = &hdmi_pm_ops, 2480 .pm = &hdmi_pm_ops,
2388 }, 2481 },
2389}; 2482};
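The probe rework above drops the private hotplug workqueue in favour of request_threaded_irq(): with a NULL hard handler and IRQF_ONESHOT, all HPD handling runs in a sleepable IRQ thread, where taking hdmi_mutex and calling drm_helper_hpd_irq_event() is safe. A pared-down sketch of that registration; struct my_ctx and read_hpd() are illustrative:

	struct my_ctx {
		struct mutex lock;
		bool hpd;
	};

	static irqreturn_t hpd_irq_thread(int irq, void *arg)
	{
		struct my_ctx *ctx = arg;

		mutex_lock(&ctx->lock);		/* thread context: may sleep */
		ctx->hpd = read_hpd(ctx);	/* illustrative line/status read */
		mutex_unlock(&ctx->lock);
		return IRQ_HANDLED;
	}

	/* in probe: NULL hard handler defers everything to the thread */
	ret = request_threaded_irq(irq, NULL, hpd_irq_thread,
				   IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
				   IRQF_ONESHOT, "hpd", ctx);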
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index e15438c01129..68ef01028375 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -37,9 +37,6 @@
37#include "exynos_drm_drv.h" 37#include "exynos_drm_drv.h"
38#include "exynos_drm_hdmi.h" 38#include "exynos_drm_hdmi.h"
39 39
40#define MIXER_WIN_NR 3
41#define MIXER_DEFAULT_WIN 0
42
43#define get_mixer_context(dev) platform_get_drvdata(to_platform_device(dev)) 40#define get_mixer_context(dev) platform_get_drvdata(to_platform_device(dev))
44 41
45struct hdmi_win_data { 42struct hdmi_win_data {
@@ -57,13 +54,14 @@ struct hdmi_win_data {
57 unsigned int fb_y; 54 unsigned int fb_y;
58 unsigned int fb_width; 55 unsigned int fb_width;
59 unsigned int fb_height; 56 unsigned int fb_height;
57 unsigned int src_width;
58 unsigned int src_height;
60 unsigned int mode_width; 59 unsigned int mode_width;
61 unsigned int mode_height; 60 unsigned int mode_height;
62 unsigned int scan_flags; 61 unsigned int scan_flags;
63}; 62};
64 63
65struct mixer_resources { 64struct mixer_resources {
66 struct device *dev;
67 int irq; 65 int irq;
68 void __iomem *mixer_regs; 66 void __iomem *mixer_regs;
69 void __iomem *vp_regs; 67 void __iomem *vp_regs;
@@ -76,10 +74,13 @@ struct mixer_resources {
76}; 74};
77 75
78struct mixer_context { 76struct mixer_context {
79 unsigned int irq; 77 struct device *dev;
80 int pipe; 78 int pipe;
81 bool interlace; 79 bool interlace;
80 bool powered;
81 u32 int_en;
82 82
83 struct mutex mixer_mutex;
83 struct mixer_resources mixer_res; 84 struct mixer_resources mixer_res;
84 struct hdmi_win_data win_data[MIXER_WIN_NR]; 85 struct hdmi_win_data win_data[MIXER_WIN_NR];
85}; 86};
@@ -352,10 +353,7 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
352 struct mixer_resources *res = &ctx->mixer_res; 353 struct mixer_resources *res = &ctx->mixer_res;
353 unsigned long flags; 354 unsigned long flags;
354 struct hdmi_win_data *win_data; 355 struct hdmi_win_data *win_data;
355 unsigned int full_width, full_height, width, height;
356 unsigned int x_ratio, y_ratio; 356 unsigned int x_ratio, y_ratio;
357 unsigned int src_x_offset, src_y_offset, dst_x_offset, dst_y_offset;
358 unsigned int mode_width, mode_height;
359 unsigned int buf_num; 357 unsigned int buf_num;
360 dma_addr_t luma_addr[2], chroma_addr[2]; 358 dma_addr_t luma_addr[2], chroma_addr[2];
361 bool tiled_mode = false; 359 bool tiled_mode = false;
@@ -382,21 +380,9 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
382 return; 380 return;
383 } 381 }
384 382
385 full_width = win_data->fb_width;
386 full_height = win_data->fb_height;
387 width = win_data->crtc_width;
388 height = win_data->crtc_height;
389 mode_width = win_data->mode_width;
390 mode_height = win_data->mode_height;
391
392 /* scaling feature: (src << 16) / dst */ 383 /* scaling feature: (src << 16) / dst */
393 x_ratio = (width << 16) / width; 384 x_ratio = (win_data->src_width << 16) / win_data->crtc_width;
394 y_ratio = (height << 16) / height; 385 y_ratio = (win_data->src_height << 16) / win_data->crtc_height;
395
396 src_x_offset = win_data->fb_x;
397 src_y_offset = win_data->fb_y;
398 dst_x_offset = win_data->crtc_x;
399 dst_y_offset = win_data->crtc_y;
400 386
401 if (buf_num == 2) { 387 if (buf_num == 2) {
402 luma_addr[0] = win_data->dma_addr; 388 luma_addr[0] = win_data->dma_addr;
@@ -404,7 +390,7 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
404 } else { 390 } else {
405 luma_addr[0] = win_data->dma_addr; 391 luma_addr[0] = win_data->dma_addr;
406 chroma_addr[0] = win_data->dma_addr 392 chroma_addr[0] = win_data->dma_addr
407 + (full_width * full_height); 393 + (win_data->fb_width * win_data->fb_height);
408 } 394 }
409 395
410 if (win_data->scan_flags & DRM_MODE_FLAG_INTERLACE) { 396 if (win_data->scan_flags & DRM_MODE_FLAG_INTERLACE) {
@@ -413,8 +399,8 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
413 luma_addr[1] = luma_addr[0] + 0x40; 399 luma_addr[1] = luma_addr[0] + 0x40;
414 chroma_addr[1] = chroma_addr[0] + 0x40; 400 chroma_addr[1] = chroma_addr[0] + 0x40;
415 } else { 401 } else {
416 luma_addr[1] = luma_addr[0] + full_width; 402 luma_addr[1] = luma_addr[0] + win_data->fb_width;
417 chroma_addr[1] = chroma_addr[0] + full_width; 403 chroma_addr[1] = chroma_addr[0] + win_data->fb_width;
418 } 404 }
419 } else { 405 } else {
420 ctx->interlace = false; 406 ctx->interlace = false;
@@ -435,26 +421,26 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
435 vp_reg_writemask(res, VP_MODE, val, VP_MODE_FMT_MASK); 421 vp_reg_writemask(res, VP_MODE, val, VP_MODE_FMT_MASK);
436 422
437 /* setting size of input image */ 423 /* setting size of input image */
438 vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(full_width) | 424 vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(win_data->fb_width) |
439 VP_IMG_VSIZE(full_height)); 425 VP_IMG_VSIZE(win_data->fb_height));
440 /* chroma height has to be reduced by 2 to avoid chroma distortions */ 426 /* chroma height has to be reduced by 2 to avoid chroma distortions */
441 vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(full_width) | 427 vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(win_data->fb_width) |
442 VP_IMG_VSIZE(full_height / 2)); 428 VP_IMG_VSIZE(win_data->fb_height / 2));
443 429
444 vp_reg_write(res, VP_SRC_WIDTH, width); 430 vp_reg_write(res, VP_SRC_WIDTH, win_data->src_width);
445 vp_reg_write(res, VP_SRC_HEIGHT, height); 431 vp_reg_write(res, VP_SRC_HEIGHT, win_data->src_height);
446 vp_reg_write(res, VP_SRC_H_POSITION, 432 vp_reg_write(res, VP_SRC_H_POSITION,
447 VP_SRC_H_POSITION_VAL(src_x_offset)); 433 VP_SRC_H_POSITION_VAL(win_data->fb_x));
448 vp_reg_write(res, VP_SRC_V_POSITION, src_y_offset); 434 vp_reg_write(res, VP_SRC_V_POSITION, win_data->fb_y);
449 435
450 vp_reg_write(res, VP_DST_WIDTH, width); 436 vp_reg_write(res, VP_DST_WIDTH, win_data->crtc_width);
451 vp_reg_write(res, VP_DST_H_POSITION, dst_x_offset); 437 vp_reg_write(res, VP_DST_H_POSITION, win_data->crtc_x);
452 if (ctx->interlace) { 438 if (ctx->interlace) {
453 vp_reg_write(res, VP_DST_HEIGHT, height / 2); 439 vp_reg_write(res, VP_DST_HEIGHT, win_data->crtc_height / 2);
454 vp_reg_write(res, VP_DST_V_POSITION, dst_y_offset / 2); 440 vp_reg_write(res, VP_DST_V_POSITION, win_data->crtc_y / 2);
455 } else { 441 } else {
456 vp_reg_write(res, VP_DST_HEIGHT, height); 442 vp_reg_write(res, VP_DST_HEIGHT, win_data->crtc_height);
457 vp_reg_write(res, VP_DST_V_POSITION, dst_y_offset); 443 vp_reg_write(res, VP_DST_V_POSITION, win_data->crtc_y);
458 } 444 }
459 445
460 vp_reg_write(res, VP_H_RATIO, x_ratio); 446 vp_reg_write(res, VP_H_RATIO, x_ratio);
@@ -468,8 +454,8 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
468 vp_reg_write(res, VP_TOP_C_PTR, chroma_addr[0]); 454 vp_reg_write(res, VP_TOP_C_PTR, chroma_addr[0]);
469 vp_reg_write(res, VP_BOT_C_PTR, chroma_addr[1]); 455 vp_reg_write(res, VP_BOT_C_PTR, chroma_addr[1]);
470 456
471 mixer_cfg_scan(ctx, mode_height); 457 mixer_cfg_scan(ctx, win_data->mode_height);
472 mixer_cfg_rgb_fmt(ctx, mode_height); 458 mixer_cfg_rgb_fmt(ctx, win_data->mode_height);
473 mixer_cfg_layer(ctx, win, true); 459 mixer_cfg_layer(ctx, win, true);
474 mixer_run(ctx); 460 mixer_run(ctx);
475 461
@@ -484,10 +470,8 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win)
484 struct mixer_resources *res = &ctx->mixer_res; 470 struct mixer_resources *res = &ctx->mixer_res;
485 unsigned long flags; 471 unsigned long flags;
486 struct hdmi_win_data *win_data; 472 struct hdmi_win_data *win_data;
487 unsigned int full_width, width, height;
488 unsigned int x_ratio, y_ratio; 473 unsigned int x_ratio, y_ratio;
489 unsigned int src_x_offset, src_y_offset, dst_x_offset, dst_y_offset; 474 unsigned int src_x_offset, src_y_offset, dst_x_offset, dst_y_offset;
490 unsigned int mode_width, mode_height;
491 dma_addr_t dma_addr; 475 dma_addr_t dma_addr;
492 unsigned int fmt; 476 unsigned int fmt;
493 u32 val; 477 u32 val;
@@ -510,26 +494,17 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win)
510 fmt = ARGB8888; 494 fmt = ARGB8888;
511 } 495 }
512 496
513 dma_addr = win_data->dma_addr;
514 full_width = win_data->fb_width;
515 width = win_data->crtc_width;
516 height = win_data->crtc_height;
517 mode_width = win_data->mode_width;
518 mode_height = win_data->mode_height;
519
520 /* 2x scaling feature */ 497 /* 2x scaling feature */
521 x_ratio = 0; 498 x_ratio = 0;
522 y_ratio = 0; 499 y_ratio = 0;
523 500
524 src_x_offset = win_data->fb_x;
525 src_y_offset = win_data->fb_y;
526 dst_x_offset = win_data->crtc_x; 501 dst_x_offset = win_data->crtc_x;
527 dst_y_offset = win_data->crtc_y; 502 dst_y_offset = win_data->crtc_y;
528 503
529 /* converting dma address base and source offset */ 504 /* converting dma address base and source offset */
530 dma_addr = dma_addr 505 dma_addr = win_data->dma_addr
531 + (src_x_offset * win_data->bpp >> 3) 506 + (win_data->fb_x * win_data->bpp >> 3)
532 + (src_y_offset * full_width * win_data->bpp >> 3); 507 + (win_data->fb_y * win_data->fb_width * win_data->bpp >> 3);
533 src_x_offset = 0; 508 src_x_offset = 0;
534 src_y_offset = 0; 509 src_y_offset = 0;
535 510
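The hunk above folds the source offset into the DMA base address so the engine can scan out from (0,0): for a linear buffer the byte offset of pixel (fb_x, fb_y) is fb_y * fb_width * bpp/8 + fb_x * bpp/8, exactly the sum being added to win_data->dma_addr. The same arithmetic as a self-contained example with sample values:

#include <stdio.h>

int main(void)
{
	unsigned int fb_width = 1920;	/* buffer span in pixels */
	unsigned int bpp = 32;		/* e.g. ARGB8888 */
	unsigned int fb_x = 100, fb_y = 50;

	unsigned long offset = (unsigned long)fb_x * bpp / 8
			     + (unsigned long)fb_y * fb_width * bpp / 8;

	printf("offset = %lu bytes\n", offset);	/* 400 + 384000 = 384400 */
	return 0;
}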
@@ -546,10 +521,10 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win)
546 MXR_GRP_CFG_FORMAT_VAL(fmt), MXR_GRP_CFG_FORMAT_MASK); 521 MXR_GRP_CFG_FORMAT_VAL(fmt), MXR_GRP_CFG_FORMAT_MASK);
547 522
548 /* setup geometry */ 523 /* setup geometry */
549 mixer_reg_write(res, MXR_GRAPHIC_SPAN(win), full_width); 524 mixer_reg_write(res, MXR_GRAPHIC_SPAN(win), win_data->fb_width);
550 525
551 val = MXR_GRP_WH_WIDTH(width); 526 val = MXR_GRP_WH_WIDTH(win_data->crtc_width);
552 val |= MXR_GRP_WH_HEIGHT(height); 527 val |= MXR_GRP_WH_HEIGHT(win_data->crtc_height);
553 val |= MXR_GRP_WH_H_SCALE(x_ratio); 528 val |= MXR_GRP_WH_H_SCALE(x_ratio);
554 val |= MXR_GRP_WH_V_SCALE(y_ratio); 529 val |= MXR_GRP_WH_V_SCALE(y_ratio);
555 mixer_reg_write(res, MXR_GRAPHIC_WH(win), val); 530 mixer_reg_write(res, MXR_GRAPHIC_WH(win), val);
@@ -567,8 +542,8 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win)
567 /* set buffer address to mixer */ 542 /* set buffer address to mixer */
568 mixer_reg_write(res, MXR_GRAPHIC_BASE(win), dma_addr); 543 mixer_reg_write(res, MXR_GRAPHIC_BASE(win), dma_addr);
569 544
570 mixer_cfg_scan(ctx, mode_height); 545 mixer_cfg_scan(ctx, win_data->mode_height);
571 mixer_cfg_rgb_fmt(ctx, mode_height); 546 mixer_cfg_rgb_fmt(ctx, win_data->mode_height);
572 mixer_cfg_layer(ctx, win, true); 547 mixer_cfg_layer(ctx, win, true);
573 mixer_run(ctx); 548 mixer_run(ctx);
574 549
@@ -591,6 +566,116 @@ static void vp_win_reset(struct mixer_context *ctx)
591 WARN(tries == 0, "failed to reset Video Processor\n"); 566 WARN(tries == 0, "failed to reset Video Processor\n");
592} 567}
593 568
569static void mixer_win_reset(struct mixer_context *ctx)
570{
571 struct mixer_resources *res = &ctx->mixer_res;
572 unsigned long flags;
573 u32 val; /* value stored to register */
574
575 spin_lock_irqsave(&res->reg_slock, flags);
576 mixer_vsync_set_update(ctx, false);
577
578 mixer_reg_writemask(res, MXR_CFG, MXR_CFG_DST_HDMI, MXR_CFG_DST_MASK);
579
580 /* set output in RGB888 mode */
581 mixer_reg_writemask(res, MXR_CFG, MXR_CFG_OUT_RGB888, MXR_CFG_OUT_MASK);
582
583 /* 16 beat burst in DMA */
584 mixer_reg_writemask(res, MXR_STATUS, MXR_STATUS_16_BURST,
585 MXR_STATUS_BURST_MASK);
586
587 /* setting default layer priority: layer1 > layer0 > video
588 * because typical usage scenario would be
589 * layer1 - OSD
590 * layer0 - framebuffer
591 * video - video overlay
592 */
593 val = MXR_LAYER_CFG_GRP1_VAL(3);
594 val |= MXR_LAYER_CFG_GRP0_VAL(2);
595 val |= MXR_LAYER_CFG_VP_VAL(1);
596 mixer_reg_write(res, MXR_LAYER_CFG, val);
597
598 /* setting background color */
599 mixer_reg_write(res, MXR_BG_COLOR0, 0x008080);
600 mixer_reg_write(res, MXR_BG_COLOR1, 0x008080);
601 mixer_reg_write(res, MXR_BG_COLOR2, 0x008080);
602
603 /* setting graphical layers */
604
605 val = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */
606 val |= MXR_GRP_CFG_WIN_BLEND_EN;
607 val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */
608
609 /* the same configuration for both layers */
610 mixer_reg_write(res, MXR_GRAPHIC_CFG(0), val);
611
612 val |= MXR_GRP_CFG_BLEND_PRE_MUL;
613 val |= MXR_GRP_CFG_PIXEL_BLEND_EN;
614 mixer_reg_write(res, MXR_GRAPHIC_CFG(1), val);
615
616 /* configuration of Video Processor Registers */
617 vp_win_reset(ctx);
618 vp_default_filter(res);
619
620 /* disable all layers */
621 mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP0_ENABLE);
622 mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP1_ENABLE);
623 mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_VP_ENABLE);
624
625 mixer_vsync_set_update(ctx, true);
626 spin_unlock_irqrestore(&res->reg_slock, flags);
627}
628
629static void mixer_poweron(struct mixer_context *ctx)
630{
631 struct mixer_resources *res = &ctx->mixer_res;
632
633 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
634
635 mutex_lock(&ctx->mixer_mutex);
636 if (ctx->powered) {
637 mutex_unlock(&ctx->mixer_mutex);
638 return;
639 }
640 ctx->powered = true;
641 mutex_unlock(&ctx->mixer_mutex);
642
643 pm_runtime_get_sync(ctx->dev);
644
645 clk_enable(res->mixer);
646 clk_enable(res->vp);
647 clk_enable(res->sclk_mixer);
648
649 mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
650 mixer_win_reset(ctx);
651}
652
653static void mixer_poweroff(struct mixer_context *ctx)
654{
655 struct mixer_resources *res = &ctx->mixer_res;
656
657 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
658
659 mutex_lock(&ctx->mixer_mutex);
660 if (!ctx->powered)
661 goto out;
662 mutex_unlock(&ctx->mixer_mutex);
663
664 ctx->int_en = mixer_reg_read(res, MXR_INT_EN);
665
666 clk_disable(res->mixer);
667 clk_disable(res->vp);
668 clk_disable(res->sclk_mixer);
669
670 pm_runtime_put_sync(ctx->dev);
671
672 mutex_lock(&ctx->mixer_mutex);
673 ctx->powered = false;
674
675out:
676 mutex_unlock(&ctx->mixer_mutex);
677}
678
594static int mixer_enable_vblank(void *ctx, int pipe) 679static int mixer_enable_vblank(void *ctx, int pipe)
595{ 680{
596 struct mixer_context *mixer_ctx = ctx; 681 struct mixer_context *mixer_ctx = ctx;
@@ -618,6 +703,27 @@ static void mixer_disable_vblank(void *ctx)
618 mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC); 703 mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
619} 704}
620 705
706static void mixer_dpms(void *ctx, int mode)
707{
708 struct mixer_context *mixer_ctx = ctx;
709
710 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
711
712 switch (mode) {
713 case DRM_MODE_DPMS_ON:
714 mixer_poweron(mixer_ctx);
715 break;
716 case DRM_MODE_DPMS_STANDBY:
717 case DRM_MODE_DPMS_SUSPEND:
718 case DRM_MODE_DPMS_OFF:
719 mixer_poweroff(mixer_ctx);
720 break;
721 default:
722 DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
723 break;
724 }
725}
726
621static void mixer_win_mode_set(void *ctx, 727static void mixer_win_mode_set(void *ctx,
622 struct exynos_drm_overlay *overlay) 728 struct exynos_drm_overlay *overlay)
623{ 729{
@@ -643,7 +749,7 @@ static void mixer_win_mode_set(void *ctx,
643 win = MIXER_DEFAULT_WIN; 749 win = MIXER_DEFAULT_WIN;
644 750
645 if (win < 0 || win > MIXER_WIN_NR) { 751 if (win < 0 || win > MIXER_WIN_NR) {
646 DRM_ERROR("overlay plane[%d] is wrong\n", win); 752 DRM_ERROR("mixer window[%d] is wrong\n", win);
647 return; 753 return;
648 } 754 }
649 755
@@ -665,6 +771,8 @@ static void mixer_win_mode_set(void *ctx,
665 win_data->fb_y = overlay->fb_y; 771 win_data->fb_y = overlay->fb_y;
666 win_data->fb_width = overlay->fb_width; 772 win_data->fb_width = overlay->fb_width;
667 win_data->fb_height = overlay->fb_height; 773 win_data->fb_height = overlay->fb_height;
774 win_data->src_width = overlay->src_width;
775 win_data->src_height = overlay->src_height;
668 776
669 win_data->mode_width = overlay->mode_width; 777 win_data->mode_width = overlay->mode_width;
670 win_data->mode_height = overlay->mode_height; 778 win_data->mode_height = overlay->mode_height;
@@ -672,44 +780,26 @@ static void mixer_win_mode_set(void *ctx,
672 win_data->scan_flags = overlay->scan_flag; 780 win_data->scan_flags = overlay->scan_flag;
673} 781}
674 782
675static void mixer_win_commit(void *ctx, int zpos) 783static void mixer_win_commit(void *ctx, int win)
676{ 784{
677 struct mixer_context *mixer_ctx = ctx; 785 struct mixer_context *mixer_ctx = ctx;
678 int win = zpos;
679 786
680 DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win); 787 DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win);
681 788
682 if (win == DEFAULT_ZPOS)
683 win = MIXER_DEFAULT_WIN;
684
685 if (win < 0 || win > MIXER_WIN_NR) {
686 DRM_ERROR("overlay plane[%d] is wrong\n", win);
687 return;
688 }
689
690 if (win > 1) 789 if (win > 1)
691 vp_video_buffer(mixer_ctx, win); 790 vp_video_buffer(mixer_ctx, win);
692 else 791 else
693 mixer_graph_buffer(mixer_ctx, win); 792 mixer_graph_buffer(mixer_ctx, win);
694} 793}
695 794
696static void mixer_win_disable(void *ctx, int zpos) 795static void mixer_win_disable(void *ctx, int win)
697{ 796{
698 struct mixer_context *mixer_ctx = ctx; 797 struct mixer_context *mixer_ctx = ctx;
699 struct mixer_resources *res = &mixer_ctx->mixer_res; 798 struct mixer_resources *res = &mixer_ctx->mixer_res;
700 unsigned long flags; 799 unsigned long flags;
701 int win = zpos;
702 800
703 DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win); 801 DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win);
704 802
705 if (win == DEFAULT_ZPOS)
706 win = MIXER_DEFAULT_WIN;
707
708 if (win < 0 || win > MIXER_WIN_NR) {
709 DRM_ERROR("overlay plane[%d] is wrong\n", win);
710 return;
711 }
712
713 spin_lock_irqsave(&res->reg_slock, flags); 803 spin_lock_irqsave(&res->reg_slock, flags);
714 mixer_vsync_set_update(mixer_ctx, false); 804 mixer_vsync_set_update(mixer_ctx, false);
715 805
@@ -723,6 +813,7 @@ static struct exynos_mixer_ops mixer_ops = {
723 /* manager */ 813 /* manager */
724 .enable_vblank = mixer_enable_vblank, 814 .enable_vblank = mixer_enable_vblank,
725 .disable_vblank = mixer_disable_vblank, 815 .disable_vblank = mixer_disable_vblank,
816 .dpms = mixer_dpms,
726 817
727 /* overlay */ 818 /* overlay */
728 .win_mode_set = mixer_win_mode_set, 819 .win_mode_set = mixer_win_mode_set,
@@ -773,7 +864,7 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
773 struct exynos_drm_hdmi_context *drm_hdmi_ctx = arg; 864 struct exynos_drm_hdmi_context *drm_hdmi_ctx = arg;
774 struct mixer_context *ctx = drm_hdmi_ctx->ctx; 865 struct mixer_context *ctx = drm_hdmi_ctx->ctx;
775 struct mixer_resources *res = &ctx->mixer_res; 866 struct mixer_resources *res = &ctx->mixer_res;
776 u32 val, val_base; 867 u32 val, base, shadow;
777 868
778 spin_lock(&res->reg_slock); 869 spin_lock(&res->reg_slock);
779 870
@@ -784,12 +875,14 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
784 if (val & MXR_INT_STATUS_VSYNC) { 875 if (val & MXR_INT_STATUS_VSYNC) {
785 /* interlace scan need to check shadow register */ 876 /* interlace scan need to check shadow register */
786 if (ctx->interlace) { 877 if (ctx->interlace) {
787 val_base = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(0)); 878 base = mixer_reg_read(res, MXR_GRAPHIC_BASE(0));
788 if (ctx->win_data[0].dma_addr != val_base) 879 shadow = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(0));
880 if (base != shadow)
789 goto out; 881 goto out;
790 882
791 val_base = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(1)); 883 base = mixer_reg_read(res, MXR_GRAPHIC_BASE(1));
792 if (ctx->win_data[1].dma_addr != val_base) 884 shadow = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(1));
885 if (base != shadow)
793 goto out; 886 goto out;
794 } 887 }
795 888
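The interrupt-handler change above stops comparing the shadow register against a cached dma address and instead compares each window's live MXR_GRAPHIC_BASE register with its MXR_GRAPHIC_BASE_S shadow: in interlaced mode the flip has only completed once the hardware has latched the new address into the shadow copy for every window. A user-space mock of that check (two windows and plain arrays standing in for the register file; names are illustrative):

#include <stdbool.h>
#include <stdio.h>

static unsigned long base[2], shadow[2];	/* fake live and shadow registers */

static bool flip_finished(void)
{
	int win;

	for (win = 0; win < 2; win++)
		if (base[win] != shadow[win])
			return false;	/* hardware still scanning the old buffer */
	return true;
}

int main(void)
{
	base[0] = 0x1000; shadow[0] = 0x1000;
	base[1] = 0x2000; shadow[1] = 0x1000;	/* window 1 not latched yet */
	printf("finished: %d\n", flip_finished());

	shadow[1] = 0x2000;			/* vsync latches the new address */
	printf("finished: %d\n", flip_finished());
	return 0;
}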
@@ -811,117 +904,6 @@ out:
811 return IRQ_HANDLED; 904 return IRQ_HANDLED;
812} 905}
813 906
814static void mixer_win_reset(struct mixer_context *ctx)
815{
816 struct mixer_resources *res = &ctx->mixer_res;
817 unsigned long flags;
818 u32 val; /* value stored to register */
819
820 spin_lock_irqsave(&res->reg_slock, flags);
821 mixer_vsync_set_update(ctx, false);
822
823 mixer_reg_writemask(res, MXR_CFG, MXR_CFG_DST_HDMI, MXR_CFG_DST_MASK);
824
825 /* set output in RGB888 mode */
826 mixer_reg_writemask(res, MXR_CFG, MXR_CFG_OUT_RGB888, MXR_CFG_OUT_MASK);
827
828 /* 16 beat burst in DMA */
829 mixer_reg_writemask(res, MXR_STATUS, MXR_STATUS_16_BURST,
830 MXR_STATUS_BURST_MASK);
831
832 /* setting default layer priority: layer1 > layer0 > video
833 * because typical usage scenario would be
834 * layer1 - OSD
835 * layer0 - framebuffer
836 * video - video overlay
837 */
838 val = MXR_LAYER_CFG_GRP1_VAL(3);
839 val |= MXR_LAYER_CFG_GRP0_VAL(2);
840 val |= MXR_LAYER_CFG_VP_VAL(1);
841 mixer_reg_write(res, MXR_LAYER_CFG, val);
842
843 /* setting background color */
844 mixer_reg_write(res, MXR_BG_COLOR0, 0x008080);
845 mixer_reg_write(res, MXR_BG_COLOR1, 0x008080);
846 mixer_reg_write(res, MXR_BG_COLOR2, 0x008080);
847
848 /* setting graphical layers */
849
850 val = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */
851 val |= MXR_GRP_CFG_WIN_BLEND_EN;
852 val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */
853
854 /* the same configuration for both layers */
855 mixer_reg_write(res, MXR_GRAPHIC_CFG(0), val);
856
857 val |= MXR_GRP_CFG_BLEND_PRE_MUL;
858 val |= MXR_GRP_CFG_PIXEL_BLEND_EN;
859 mixer_reg_write(res, MXR_GRAPHIC_CFG(1), val);
860
861 /* configuration of Video Processor Registers */
862 vp_win_reset(ctx);
863 vp_default_filter(res);
864
865 /* disable all layers */
866 mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP0_ENABLE);
867 mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP1_ENABLE);
868 mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_VP_ENABLE);
869
870 mixer_vsync_set_update(ctx, true);
871 spin_unlock_irqrestore(&res->reg_slock, flags);
872}
873
874static void mixer_resource_poweron(struct mixer_context *ctx)
875{
876 struct mixer_resources *res = &ctx->mixer_res;
877
878 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
879
880 clk_enable(res->mixer);
881 clk_enable(res->vp);
882 clk_enable(res->sclk_mixer);
883
884 mixer_win_reset(ctx);
885}
886
887static void mixer_resource_poweroff(struct mixer_context *ctx)
888{
889 struct mixer_resources *res = &ctx->mixer_res;
890
891 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
892
893 clk_disable(res->mixer);
894 clk_disable(res->vp);
895 clk_disable(res->sclk_mixer);
896}
897
898static int mixer_runtime_resume(struct device *dev)
899{
900 struct exynos_drm_hdmi_context *ctx = get_mixer_context(dev);
901
902 DRM_DEBUG_KMS("resume - start\n");
903
904 mixer_resource_poweron(ctx->ctx);
905
906 return 0;
907}
908
909static int mixer_runtime_suspend(struct device *dev)
910{
911 struct exynos_drm_hdmi_context *ctx = get_mixer_context(dev);
912
913 DRM_DEBUG_KMS("suspend - start\n");
914
915 mixer_resource_poweroff(ctx->ctx);
916
917 return 0;
918}
919
920static const struct dev_pm_ops mixer_pm_ops = {
921 .runtime_suspend = mixer_runtime_suspend,
922 .runtime_resume = mixer_runtime_resume,
923};
924
925static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx, 907static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
926 struct platform_device *pdev) 908 struct platform_device *pdev)
927{ 909{
@@ -931,7 +913,6 @@ static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
931 struct resource *res; 913 struct resource *res;
932 int ret; 914 int ret;
933 915
934 mixer_res->dev = dev;
935 spin_lock_init(&mixer_res->reg_slock); 916 spin_lock_init(&mixer_res->reg_slock);
936 917
937 mixer_res->mixer = clk_get(dev, "mixer"); 918 mixer_res->mixer = clk_get(dev, "mixer");
@@ -1027,7 +1008,6 @@ fail:
1027 clk_put(mixer_res->vp); 1008 clk_put(mixer_res->vp);
1028 if (!IS_ERR_OR_NULL(mixer_res->mixer)) 1009 if (!IS_ERR_OR_NULL(mixer_res->mixer))
1029 clk_put(mixer_res->mixer); 1010 clk_put(mixer_res->mixer);
1030 mixer_res->dev = NULL;
1031 return ret; 1011 return ret;
1032} 1012}
1033 1013
@@ -1035,7 +1015,6 @@ static void mixer_resources_cleanup(struct mixer_context *ctx)
1035{ 1015{
1036 struct mixer_resources *res = &ctx->mixer_res; 1016 struct mixer_resources *res = &ctx->mixer_res;
1037 1017
1038 disable_irq(res->irq);
1039 free_irq(res->irq, ctx); 1018 free_irq(res->irq, ctx);
1040 1019
1041 iounmap(res->vp_regs); 1020 iounmap(res->vp_regs);
@@ -1064,6 +1043,9 @@ static int __devinit mixer_probe(struct platform_device *pdev)
1064 return -ENOMEM; 1043 return -ENOMEM;
1065 } 1044 }
1066 1045
1046 mutex_init(&ctx->mixer_mutex);
1047
1048 ctx->dev = &pdev->dev;
1067 drm_hdmi_ctx->ctx = (void *)ctx; 1049 drm_hdmi_ctx->ctx = (void *)ctx;
1068 1050
1069 platform_set_drvdata(pdev, drm_hdmi_ctx); 1051 platform_set_drvdata(pdev, drm_hdmi_ctx);
@@ -1076,7 +1058,7 @@ static int __devinit mixer_probe(struct platform_device *pdev)
1076 /* register specific callback point to common hdmi. */ 1058 /* register specific callback point to common hdmi. */
1077 exynos_mixer_ops_register(&mixer_ops); 1059 exynos_mixer_ops_register(&mixer_ops);
1078 1060
1079 mixer_resource_poweron(ctx); 1061 pm_runtime_enable(dev);
1080 1062
1081 return 0; 1063 return 0;
1082 1064
@@ -1095,12 +1077,27 @@ static int mixer_remove(struct platform_device *pdev)
1095 1077
1096 dev_info(dev, "remove successful\n"); 1078 dev_info(dev, "remove successful\n");
1097 1079
1098 mixer_resource_poweroff(ctx); 1080 pm_runtime_disable(&pdev->dev);
1081
1099 mixer_resources_cleanup(ctx); 1082 mixer_resources_cleanup(ctx);
1100 1083
1101 return 0; 1084 return 0;
1102} 1085}
1103 1086
1087#ifdef CONFIG_PM_SLEEP
1088static int mixer_suspend(struct device *dev)
1089{
1090 struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
1091 struct mixer_context *ctx = drm_hdmi_ctx->ctx;
1092
1093 mixer_poweroff(ctx);
1094
1095 return 0;
1096}
1097#endif
1098
1099static SIMPLE_DEV_PM_OPS(mixer_pm_ops, mixer_suspend, NULL);
1100
1104struct platform_driver mixer_driver = { 1101struct platform_driver mixer_driver = {
1105 .driver = { 1102 .driver = {
1106 .name = "s5p-mixer", 1103 .name = "s5p-mixer",
diff --git a/drivers/gpu/drm/exynos/regs-hdmi.h b/drivers/gpu/drm/exynos/regs-hdmi.h
index 3c04bea842ce..9cc7c5e9718c 100644
--- a/drivers/gpu/drm/exynos/regs-hdmi.h
+++ b/drivers/gpu/drm/exynos/regs-hdmi.h
@@ -138,14 +138,16 @@
138#define HDMI_ASP_MASK (1 << 2) 138#define HDMI_ASP_MASK (1 << 2)
139#define HDMI_EN (1 << 0) 139#define HDMI_EN (1 << 0)
140 140
141/* HDMI_CON_2 */
142#define HDMI_VID_PREAMBLE_DIS (1 << 5)
143#define HDMI_GUARD_BAND_DIS (1 << 1)
144
141/* HDMI_PHY_STATUS */ 145/* HDMI_PHY_STATUS */
142#define HDMI_PHY_STATUS_READY (1 << 0) 146#define HDMI_PHY_STATUS_READY (1 << 0)
143 147
144/* HDMI_MODE_SEL */ 148/* HDMI_MODE_SEL */
145#define HDMI_MODE_HDMI_EN (1 << 1) 149#define HDMI_MODE_HDMI_EN (1 << 1)
146#define HDMI_MODE_DVI_EN (1 << 0) 150#define HDMI_MODE_DVI_EN (1 << 0)
147#define HDMI_DVI_MODE_EN (1)
148#define HDMI_DVI_MODE_DIS (0)
149#define HDMI_MODE_MASK (3 << 0) 151#define HDMI_MODE_MASK (3 << 0)
150 152
151/* HDMI_TG_CMD */ 153/* HDMI_TG_CMD */
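The defines added and removed here are consumed through masked read-modify-write accessors elsewhere in the driver: the *_MASK value selects the field and the *_EN values are written into it. The pattern, reduced to a self-contained sketch that reuses the HDMI_MODE_SEL bits above (the field_write helper is illustrative, not the driver's accessor):

#include <stdio.h>

#define HDMI_MODE_HDMI_EN (1u << 1)
#define HDMI_MODE_DVI_EN  (1u << 0)
#define HDMI_MODE_MASK    (3u << 0)

/* clear the field selected by mask, then or-in the new value */
static unsigned int field_write(unsigned int reg, unsigned int val,
				unsigned int mask)
{
	return (reg & ~mask) | (val & mask);
}

int main(void)
{
	unsigned int reg = 0xffffffffu;	/* arbitrary starting register value */

	reg = field_write(reg, HDMI_MODE_DVI_EN, HDMI_MODE_MASK);
	printf("mode field = %#x (reg = %#x)\n", reg & HDMI_MODE_MASK, reg);
	return 0;
}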
diff --git a/drivers/gpu/drm/gma500/Makefile b/drivers/gpu/drm/gma500/Makefile
index 1583982917ce..abfa2a93f0d0 100644
--- a/drivers/gpu/drm/gma500/Makefile
+++ b/drivers/gpu/drm/gma500/Makefile
@@ -1,7 +1,7 @@
1# 1#
2# KMS driver for the GMA500 2# KMS driver for the GMA500
3# 3#
4ccflags-y += -Iinclude/drm 4ccflags-y += -I$(srctree)/include/drm
5 5
6gma500_gfx-y += gem_glue.o \ 6gma500_gfx-y += gem_glue.o \
7 accel_2d.o \ 7 accel_2d.o \
@@ -12,7 +12,6 @@ gma500_gfx-y += gem_glue.o \
12 intel_bios.o \ 12 intel_bios.o \
13 intel_i2c.o \ 13 intel_i2c.o \
14 intel_gmbus.o \ 14 intel_gmbus.o \
15 intel_opregion.o \
16 mmu.o \ 15 mmu.o \
17 power.o \ 16 power.o \
18 psb_drv.o \ 17 psb_drv.o \
@@ -25,6 +24,8 @@ gma500_gfx-y += gem_glue.o \
25 psb_device.o \ 24 psb_device.o \
26 mid_bios.o 25 mid_bios.o
27 26
27gma500_gfx-$(CONFIG_ACPI) += opregion.o
28
28gma500_gfx-$(CONFIG_DRM_GMA3600) += cdv_device.o \ 29gma500_gfx-$(CONFIG_DRM_GMA3600) += cdv_device.o \
29 cdv_intel_crt.o \ 30 cdv_intel_crt.o \
30 cdv_intel_display.o \ 31 cdv_intel_display.o \
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index a54cc738926a..9764045428ce 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -49,13 +49,15 @@ static void cdv_disable_vga(struct drm_device *dev)
49static int cdv_output_init(struct drm_device *dev) 49static int cdv_output_init(struct drm_device *dev)
50{ 50{
51 struct drm_psb_private *dev_priv = dev->dev_private; 51 struct drm_psb_private *dev_priv = dev->dev_private;
52
53 drm_mode_create_scaling_mode_property(dev);
54
52 cdv_disable_vga(dev); 55 cdv_disable_vga(dev);
53 56
54 cdv_intel_crt_init(dev, &dev_priv->mode_dev); 57 cdv_intel_crt_init(dev, &dev_priv->mode_dev);
55 cdv_intel_lvds_init(dev, &dev_priv->mode_dev); 58 cdv_intel_lvds_init(dev, &dev_priv->mode_dev);
56 59
57 /* These bits indicate HDMI not SDVO on CDV, but we don't yet support 60 /* These bits indicate HDMI not SDVO on CDV */
58 the HDMI interface */
59 if (REG_READ(SDVOB) & SDVO_DETECTED) 61 if (REG_READ(SDVOB) & SDVO_DETECTED)
60 cdv_hdmi_init(dev, &dev_priv->mode_dev, SDVOB); 62 cdv_hdmi_init(dev, &dev_priv->mode_dev, SDVOB);
61 if (REG_READ(SDVOC) & SDVO_DETECTED) 63 if (REG_READ(SDVOC) & SDVO_DETECTED)
@@ -66,76 +68,71 @@ static int cdv_output_init(struct drm_device *dev)
66#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE 68#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
67 69
68/* 70/*
69 * Poulsbo Backlight Interfaces	71 * Cedartrail Backlight Interfaces
70 */ 72 */
71 73
72#define BLC_PWM_PRECISION_FACTOR 100 /* 10000000 */
73#define BLC_PWM_FREQ_CALC_CONSTANT 32
74#define MHz 1000000
75
76#define PSB_BLC_PWM_PRECISION_FACTOR 10
77#define PSB_BLC_MAX_PWM_REG_FREQ 0xFFFE
78#define PSB_BLC_MIN_PWM_REG_FREQ 0x2
79
80#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
81#define PSB_BACKLIGHT_PWM_CTL_SHIFT (16)
82
83static int cdv_brightness;
84static struct backlight_device *cdv_backlight_device; 74static struct backlight_device *cdv_backlight_device;
85 75
86static int cdv_get_brightness(struct backlight_device *bd) 76static int cdv_backlight_combination_mode(struct drm_device *dev)
87{ 77{
88 /* return locally cached var instead of HW read (due to DPST etc.) */ 78 return REG_READ(BLC_PWM_CTL2) & PWM_LEGACY_MODE;
89 /* FIXME: ideally return actual value in case firmware fiddled with
90 it */
91 return cdv_brightness;
92} 79}
93 80
94 81static int cdv_get_brightness(struct backlight_device *bd)
95static int cdv_backlight_setup(struct drm_device *dev)
96{ 82{
97 struct drm_psb_private *dev_priv = dev->dev_private; 83 struct drm_device *dev = bl_get_data(bd);
98 unsigned long core_clock; 84 u32 val = REG_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
99 /* u32 bl_max_freq; */
100 /* unsigned long value; */
101 u16 bl_max_freq;
102 uint32_t value;
103 uint32_t blc_pwm_precision_factor;
104
105 /* get bl_max_freq and pol from dev_priv*/
106 if (!dev_priv->lvds_bl) {
107 dev_err(dev->dev, "Has no valid LVDS backlight info\n");
108 return -ENOENT;
109 }
110 bl_max_freq = dev_priv->lvds_bl->freq;
111 blc_pwm_precision_factor = PSB_BLC_PWM_PRECISION_FACTOR;
112 85
113 core_clock = dev_priv->core_freq; 86 if (cdv_backlight_combination_mode(dev)) {
87 u8 lbpc;
114 88
115 value = (core_clock * MHz) / BLC_PWM_FREQ_CALC_CONSTANT; 89 val &= ~1;
116 value *= blc_pwm_precision_factor; 90 pci_read_config_byte(dev->pdev, 0xF4, &lbpc);
117 value /= bl_max_freq; 91 val *= lbpc;
118 value /= blc_pwm_precision_factor; 92 }
93 return val;
94}
119 95
120 if (value > (unsigned long long)PSB_BLC_MAX_PWM_REG_FREQ || 96static u32 cdv_get_max_backlight(struct drm_device *dev)
121 value < (unsigned long long)PSB_BLC_MIN_PWM_REG_FREQ) 97{
122 return -ERANGE; 98 u32 max = REG_READ(BLC_PWM_CTL);
123 else { 99
124 /* FIXME */ 100 if (max == 0) {
101 DRM_DEBUG_KMS("LVDS Panel PWM value is 0!\n");
102		/* i915 does this, which I believe means that we should not
103 * smash PWM control as firmware will take control of it. */
104 return 1;
125 } 105 }
126 return 0; 106
107 max >>= 16;
108 if (cdv_backlight_combination_mode(dev))
109 max *= 0xff;
110 return max;
127} 111}
128 112
129static int cdv_set_brightness(struct backlight_device *bd) 113static int cdv_set_brightness(struct backlight_device *bd)
130{ 114{
115 struct drm_device *dev = bl_get_data(bd);
131 int level = bd->props.brightness; 116 int level = bd->props.brightness;
117 u32 blc_pwm_ctl;
132 118
133 /* Percentage 1-100% being valid */ 119 /* Percentage 1-100% being valid */
134 if (level < 1) 120 if (level < 1)
135 level = 1; 121 level = 1;
136 122
137 /*cdv_intel_lvds_set_brightness(dev, level); FIXME */ 123 if (cdv_backlight_combination_mode(dev)) {
138 cdv_brightness = level; 124 u32 max = cdv_get_max_backlight(dev);
125 u8 lbpc;
126
127 lbpc = level * 0xfe / max + 1;
128 level /= lbpc;
129
130 pci_write_config_byte(dev->pdev, 0xF4, lbpc);
131 }
132
133 blc_pwm_ctl = REG_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
134 REG_WRITE(BLC_PWM_CTL, (blc_pwm_ctl |
135 (level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
139 return 0; 136 return 0;
140} 137}
141 138
@@ -147,7 +144,6 @@ static const struct backlight_ops cdv_ops = {
147static int cdv_backlight_init(struct drm_device *dev) 144static int cdv_backlight_init(struct drm_device *dev)
148{ 145{
149 struct drm_psb_private *dev_priv = dev->dev_private; 146 struct drm_psb_private *dev_priv = dev->dev_private;
150 int ret;
151 struct backlight_properties props; 147 struct backlight_properties props;
152 148
153 memset(&props, 0, sizeof(struct backlight_properties)); 149 memset(&props, 0, sizeof(struct backlight_properties));
@@ -159,14 +155,9 @@ static int cdv_backlight_init(struct drm_device *dev)
159 if (IS_ERR(cdv_backlight_device)) 155 if (IS_ERR(cdv_backlight_device))
160 return PTR_ERR(cdv_backlight_device); 156 return PTR_ERR(cdv_backlight_device);
161 157
162 ret = cdv_backlight_setup(dev); 158 cdv_backlight_device->props.brightness =
163 if (ret < 0) { 159 cdv_get_brightness(cdv_backlight_device);
164 backlight_device_unregister(cdv_backlight_device); 160 cdv_backlight_device->props.max_brightness = cdv_get_max_backlight(dev);
165 cdv_backlight_device = NULL;
166 return ret;
167 }
168 cdv_backlight_device->props.brightness = 100;
169 cdv_backlight_device->props.max_brightness = 100;
170 backlight_update_status(cdv_backlight_device); 161 backlight_update_status(cdv_backlight_device);
171 dev_priv->backlight_device = cdv_backlight_device; 162 dev_priv->backlight_device = cdv_backlight_device;
172 return 0; 163 return 0;
@@ -238,6 +229,19 @@ static void cdv_init_pm(struct drm_device *dev)
238 dev_err(dev->dev, "GPU: power management timed out.\n"); 229 dev_err(dev->dev, "GPU: power management timed out.\n");
239} 230}
240 231
232static void cdv_errata(struct drm_device *dev)
233{
234 /* Disable bonus launch.
235	/* Disable bonus launch.
236	 * The CPU and GPU compete for memory, and the display misses updates
237 *
238 * Fixes were done to Win 7 gfx driver to disable a feature called
239 * Bonus Launch to work around the issue, by degrading
240 * performance.
241 */
242 CDV_MSG_WRITE32(3, 0x30, 0x08027108);
243}
244
241/** 245/**
242 * cdv_save_display_registers - save registers lost on suspend 246 * cdv_save_display_registers - save registers lost on suspend
243 * @dev: our DRM device 247 * @dev: our DRM device
@@ -251,7 +255,7 @@ static int cdv_save_display_registers(struct drm_device *dev)
251 struct psb_save_area *regs = &dev_priv->regs; 255 struct psb_save_area *regs = &dev_priv->regs;
252 struct drm_connector *connector; 256 struct drm_connector *connector;
253 257
254 dev_info(dev->dev, "Saving GPU registers.\n"); 258 dev_dbg(dev->dev, "Saving GPU registers.\n");
255 259
256 pci_read_config_byte(dev->pdev, 0xF4, &regs->cdv.saveLBB); 260 pci_read_config_byte(dev->pdev, 0xF4, &regs->cdv.saveLBB);
257 261
@@ -355,7 +359,7 @@ static int cdv_restore_display_registers(struct drm_device *dev)
355 REG_WRITE(PSB_INT_MASK_R, regs->cdv.saveIMR); 359 REG_WRITE(PSB_INT_MASK_R, regs->cdv.saveIMR);
356 360
357 /* Fix arbitration bug */ 361 /* Fix arbitration bug */
358 CDV_MSG_WRITE32(3, 0x30, 0x08027108); 362 cdv_errata(dev);
359 363
360 drm_mode_config_reset(dev); 364 drm_mode_config_reset(dev);
361 365
@@ -447,13 +451,106 @@ static void cdv_get_core_freq(struct drm_device *dev)
447 } 451 }
448} 452}
449 453
454static void cdv_hotplug_work_func(struct work_struct *work)
455{
456 struct drm_psb_private *dev_priv = container_of(work, struct drm_psb_private,
457 hotplug_work);
458 struct drm_device *dev = dev_priv->dev;
459
460 /* Just fire off a uevent and let userspace tell us what to do */
461 drm_helper_hpd_irq_event(dev);
462}
463
464/* The core driver has received a hotplug IRQ. We are in IRQ context,
465 so extract the needed information and kick off queued processing */
466
467static int cdv_hotplug_event(struct drm_device *dev)
468{
469 struct drm_psb_private *dev_priv = dev->dev_private;
470 schedule_work(&dev_priv->hotplug_work);
471 REG_WRITE(PORT_HOTPLUG_STAT, REG_READ(PORT_HOTPLUG_STAT));
472 return 1;
473}
474
475static void cdv_hotplug_enable(struct drm_device *dev, bool on)
476{
477 if (on) {
478 u32 hotplug = REG_READ(PORT_HOTPLUG_EN);
479 hotplug |= HDMIB_HOTPLUG_INT_EN | HDMIC_HOTPLUG_INT_EN |
480 HDMID_HOTPLUG_INT_EN | CRT_HOTPLUG_INT_EN;
481 REG_WRITE(PORT_HOTPLUG_EN, hotplug);
482 } else {
483 REG_WRITE(PORT_HOTPLUG_EN, 0);
484 REG_WRITE(PORT_HOTPLUG_STAT, REG_READ(PORT_HOTPLUG_STAT));
485 }
486}
487
488/* Cedarview */
489static const struct psb_offset cdv_regmap[2] = {
490 {
491 .fp0 = FPA0,
492 .fp1 = FPA1,
493 .cntr = DSPACNTR,
494 .conf = PIPEACONF,
495 .src = PIPEASRC,
496 .dpll = DPLL_A,
497 .dpll_md = DPLL_A_MD,
498 .htotal = HTOTAL_A,
499 .hblank = HBLANK_A,
500 .hsync = HSYNC_A,
501 .vtotal = VTOTAL_A,
502 .vblank = VBLANK_A,
503 .vsync = VSYNC_A,
504 .stride = DSPASTRIDE,
505 .size = DSPASIZE,
506 .pos = DSPAPOS,
507 .base = DSPABASE,
508 .surf = DSPASURF,
509 .addr = DSPABASE,
510 .status = PIPEASTAT,
511 .linoff = DSPALINOFF,
512 .tileoff = DSPATILEOFF,
513 .palette = PALETTE_A,
514 },
515 {
516 .fp0 = FPB0,
517 .fp1 = FPB1,
518 .cntr = DSPBCNTR,
519 .conf = PIPEBCONF,
520 .src = PIPEBSRC,
521 .dpll = DPLL_B,
522 .dpll_md = DPLL_B_MD,
523 .htotal = HTOTAL_B,
524 .hblank = HBLANK_B,
525 .hsync = HSYNC_B,
526 .vtotal = VTOTAL_B,
527 .vblank = VBLANK_B,
528 .vsync = VSYNC_B,
529 .stride = DSPBSTRIDE,
530 .size = DSPBSIZE,
531 .pos = DSPBPOS,
532 .base = DSPBBASE,
533 .surf = DSPBSURF,
534 .addr = DSPBBASE,
535 .status = PIPEBSTAT,
536 .linoff = DSPBLINOFF,
537 .tileoff = DSPBTILEOFF,
538 .palette = PALETTE_B,
539 }
540};
541
450static int cdv_chip_setup(struct drm_device *dev) 542static int cdv_chip_setup(struct drm_device *dev)
451{ 543{
544 struct drm_psb_private *dev_priv = dev->dev_private;
545 INIT_WORK(&dev_priv->hotplug_work, cdv_hotplug_work_func);
546
547 if (pci_enable_msi(dev->pdev))
548 dev_warn(dev->dev, "Enabling MSI failed!\n");
549 dev_priv->regmap = cdv_regmap;
452 cdv_get_core_freq(dev); 550 cdv_get_core_freq(dev);
453 gma_intel_opregion_init(dev); 551 psb_intel_opregion_init(dev);
454 psb_intel_init_bios(dev); 552 psb_intel_init_bios(dev);
455 REG_WRITE(PORT_HOTPLUG_EN, 0); 553 cdv_hotplug_enable(dev, false);
456 REG_WRITE(PORT_HOTPLUG_STAT, REG_READ(PORT_HOTPLUG_STAT));
457 return 0; 554 return 0;
458} 555}
459 556
@@ -464,13 +561,19 @@ const struct psb_ops cdv_chip_ops = {
464 .accel_2d = 0, 561 .accel_2d = 0,
465 .pipes = 2, 562 .pipes = 2,
466 .crtcs = 2, 563 .crtcs = 2,
564 .hdmi_mask = (1 << 0) | (1 << 1),
565 .lvds_mask = (1 << 1),
566 .cursor_needs_phys = 0,
467 .sgx_offset = MRST_SGX_OFFSET, 567 .sgx_offset = MRST_SGX_OFFSET,
468 .chip_setup = cdv_chip_setup, 568 .chip_setup = cdv_chip_setup,
569 .errata = cdv_errata,
469 570
470 .crtc_helper = &cdv_intel_helper_funcs, 571 .crtc_helper = &cdv_intel_helper_funcs,
471 .crtc_funcs = &cdv_intel_crtc_funcs, 572 .crtc_funcs = &cdv_intel_crtc_funcs,
472 573
473 .output_init = cdv_output_init, 574 .output_init = cdv_output_init,
575 .hotplug = cdv_hotplug_event,
576 .hotplug_enable = cdv_hotplug_enable,
474 577
475#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE 578#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
476 .backlight_init = cdv_backlight_init, 579 .backlight_init = cdv_backlight_init,
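In combination mode the new cdv_set_brightness() splits a requested level across two controls: a coarse "legacy backlight" byte written to PCI config offset 0xF4 (lbpc) and a fine PWM duty cycle, chosen so that lbpc * duty approximates the request. The arithmetic with sample numbers (max mirrors the driver's duty * 0xff scaling; the requested level is arbitrary):

#include <stdio.h>

int main(void)
{
	unsigned int max = 0x7e * 0xff;	/* 32130: combined full-scale value */
	unsigned int level = 20000;	/* requested brightness, 1..max */

	unsigned int lbpc = level * 0xfe / max + 1;	/* coarse factor, >= 1 */
	unsigned int duty = level / lbpc;		/* remaining PWM duty */

	printf("lbpc=%u duty=%u product=%u (requested %u)\n",
	       lbpc, duty, lbpc * duty, level);	/* 159 * 125 = 19875 */
	return 0;
}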
diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c
index a71a6cd95bdd..187422018601 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_crt.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c
@@ -67,8 +67,6 @@ static void cdv_intel_crt_dpms(struct drm_encoder *encoder, int mode)
67static int cdv_intel_crt_mode_valid(struct drm_connector *connector, 67static int cdv_intel_crt_mode_valid(struct drm_connector *connector,
68 struct drm_display_mode *mode) 68 struct drm_display_mode *mode)
69{ 69{
70 struct drm_psb_private *dev_priv = connector->dev->dev_private;
71 int max_clock = 0;
72 if (mode->flags & DRM_MODE_FLAG_DBLSCAN) 70 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
73 return MODE_NO_DBLESCAN; 71 return MODE_NO_DBLESCAN;
74 72
@@ -77,18 +75,9 @@ static int cdv_intel_crt_mode_valid(struct drm_connector *connector,
77 return MODE_CLOCK_LOW; 75 return MODE_CLOCK_LOW;
78 76
79 /* The max clock for CDV is 355 instead of 400 */ 77 /* The max clock for CDV is 355 instead of 400 */
80 max_clock = 355000; 78 if (mode->clock > 355000)
81 if (mode->clock > max_clock)
82 return MODE_CLOCK_HIGH; 79 return MODE_CLOCK_HIGH;
83 80
84 if (mode->hdisplay > 1680 || mode->vdisplay > 1050)
85 return MODE_PANEL;
86
87 /* We assume worst case scenario of 32 bpp here, since we don't know */
88 if ((ALIGN(mode->hdisplay * 4, 64) * mode->vdisplay) >
89 dev_priv->vram_stolen_size)
90 return MODE_MEM;
91
92 return MODE_OK; 81 return MODE_OK;
93} 82}
94 83
@@ -156,13 +145,7 @@ static bool cdv_intel_crt_detect_hotplug(struct drm_connector *connector,
156 struct drm_device *dev = connector->dev; 145 struct drm_device *dev = connector->dev;
157 u32 hotplug_en; 146 u32 hotplug_en;
158 int i, tries = 0, ret = false; 147 int i, tries = 0, ret = false;
159 u32 adpa_orig; 148 u32 orig;
160
161 /* disable the DAC when doing the hotplug detection */
162
163 adpa_orig = REG_READ(ADPA);
164
165 REG_WRITE(ADPA, adpa_orig & ~(ADPA_DAC_ENABLE));
166 149
167 /* 150 /*
168	 * On a CDV, the CRT detect sequence needs to be done twice	151
@@ -170,7 +153,7 @@ static bool cdv_intel_crt_detect_hotplug(struct drm_connector *connector,
170 */ 153 */
171 tries = 2; 154 tries = 2;
172 155
173 hotplug_en = REG_READ(PORT_HOTPLUG_EN); 156 orig = hotplug_en = REG_READ(PORT_HOTPLUG_EN);
174 hotplug_en &= ~(CRT_HOTPLUG_DETECT_MASK); 157 hotplug_en &= ~(CRT_HOTPLUG_DETECT_MASK);
175 hotplug_en |= CRT_HOTPLUG_FORCE_DETECT; 158 hotplug_en |= CRT_HOTPLUG_FORCE_DETECT;
176 159
@@ -195,8 +178,11 @@ static bool cdv_intel_crt_detect_hotplug(struct drm_connector *connector,
195 CRT_HOTPLUG_MONITOR_NONE) 178 CRT_HOTPLUG_MONITOR_NONE)
196 ret = true; 179 ret = true;
197 180
198 /* Restore the saved ADPA */ 181 /* clear the interrupt we just generated, if any */
199 REG_WRITE(ADPA, adpa_orig); 182 REG_WRITE(PORT_HOTPLUG_STAT, CRT_HOTPLUG_INT_STATUS);
183
184 /* and put the bits back */
185 REG_WRITE(PORT_HOTPLUG_EN, orig);
200 return ret; 186 return ret;
201} 187}
202 188
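The reworked hotplug detect above keeps the DAC alive and instead saves PORT_HOTPLUG_EN, forces a detect cycle (twice on CDV, per the comment), samples the monitor status, then acks the interrupt and restores the saved enable bits. The control flow as a runnable mock against a fake two-register file (register names and bit positions here are placeholders, not the real CDV layout):

#include <stdbool.h>
#include <stdio.h>

enum { HOTPLUG_EN, HOTPLUG_STAT, NREGS };
static unsigned int regs[NREGS];

#define FORCE_DETECT (1u << 3)
#define INT_STATUS   (1u << 11)
#define MONITOR_MASK (3u << 8)

static unsigned int reg_read(int r)          { return regs[r]; }
static void reg_write(int r, unsigned int v) { regs[r] = v; }

static bool crt_detect(void)
{
	unsigned int orig, hotplug_en;
	bool present;
	int tries;

	orig = hotplug_en = reg_read(HOTPLUG_EN);	/* save caller's bits */
	hotplug_en |= FORCE_DETECT;

	for (tries = 0; tries < 2; tries++) {		/* CDV needs two passes */
		reg_write(HOTPLUG_EN, hotplug_en);
		regs[HOTPLUG_STAT] |= MONITOR_MASK;	/* fake hardware response */
		regs[HOTPLUG_EN] &= ~FORCE_DETECT;	/* fake self-clearing bit */
	}

	present = (reg_read(HOTPLUG_STAT) & MONITOR_MASK) != 0;

	reg_write(HOTPLUG_STAT, INT_STATUS);		/* ack the interrupt */
	reg_write(HOTPLUG_EN, orig);			/* put the bits back */
	return present;
}

int main(void)
{
	printf("CRT %s\n", crt_detect() ? "detected" : "absent");
	return 0;
}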
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index be8455919b33..c3e9a0f701df 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -216,22 +216,22 @@ static void cdv_sb_reset(struct drm_device *dev)
216 */ 216 */
217static int 217static int
218cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc, 218cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
219 struct cdv_intel_clock_t *clock) 219 struct cdv_intel_clock_t *clock, bool is_lvds)
220{ 220{
221 struct psb_intel_crtc *psb_crtc = 221 struct psb_intel_crtc *psb_crtc = to_psb_intel_crtc(crtc);
222 to_psb_intel_crtc(crtc);
223 int pipe = psb_crtc->pipe; 222 int pipe = psb_crtc->pipe;
224 u32 m, n_vco, p; 223 u32 m, n_vco, p;
225 int ret = 0; 224 int ret = 0;
226 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; 225 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
226 int ref_sfr = (pipe == 0) ? SB_REF_DPLLA : SB_REF_DPLLB;
227 u32 ref_value; 227 u32 ref_value;
228 u32 lane_reg, lane_value;
228 229
229 cdv_sb_reset(dev); 230 cdv_sb_reset(dev);
230 231
231 if ((REG_READ(dpll_reg) & DPLL_SYNCLOCK_ENABLE) == 0) { 232 REG_WRITE(dpll_reg, DPLL_SYNCLOCK_ENABLE | DPLL_VGA_MODE_DIS);
232 DRM_ERROR("Attempting to set DPLL with refclk disabled\n"); 233
233 return -EBUSY; 234 udelay(100);
234 }
235 235
236 /* Follow the BIOS and write the REF/SFR Register. Hardcoded value */ 236 /* Follow the BIOS and write the REF/SFR Register. Hardcoded value */
237 ref_value = 0x68A701; 237 ref_value = 0x68A701;
@@ -241,6 +241,35 @@ cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
241 /* We don't know what the other fields of these regs are, so 241 /* We don't know what the other fields of these regs are, so
242 * leave them in place. 242 * leave them in place.
243 */ 243 */
244 /*
245	 * Bits 14:13 of 0x8010/0x8030 select the ref clk
246	 * for pipe A/B. Display spec 1.06 has the wrong definition;
247	 * the correct definition is as follows:
248	 *
249	 * refclka means use the clock from the same PLL
250	 *
251	 * if DPLLA is set to 01 and DPLLB to 01, each uses the clock from its own PLL
252	 *
253	 * if DPLLA is set to 01 and DPLLB to 02, both use the clock from DPLLA
254 *
255 */
256 ret = cdv_sb_read(dev, ref_sfr, &ref_value);
257 if (ret)
258 return ret;
259 ref_value &= ~(REF_CLK_MASK);
260
261 /* use DPLL_A for pipeB on CRT/HDMI */
262 if (pipe == 1 && !is_lvds) {
263 DRM_DEBUG_KMS("use DPLLA for pipe B\n");
264 ref_value |= REF_CLK_DPLLA;
265 } else {
266 DRM_DEBUG_KMS("use their DPLL for pipe A/B\n");
267 ref_value |= REF_CLK_DPLL;
268 }
269 ret = cdv_sb_write(dev, ref_sfr, ref_value);
270 if (ret)
271 return ret;
272
244 ret = cdv_sb_read(dev, SB_M(pipe), &m); 273 ret = cdv_sb_read(dev, SB_M(pipe), &m);
245 if (ret) 274 if (ret)
246 return ret; 275 return ret;
@@ -307,36 +336,29 @@ cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
307 if (ret) 336 if (ret)
308 return ret; 337 return ret;
309 338
310 /* always Program the Lane Register for the Pipe A*/ 339 lane_reg = PSB_LANE0;
311 if (pipe == 0) { 340 cdv_sb_read(dev, lane_reg, &lane_value);
312 /* Program the Lane0/1 for HDMI B */ 341 lane_value &= ~(LANE_PLL_MASK);
313 u32 lane_reg, lane_value; 342 lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
314 343 cdv_sb_write(dev, lane_reg, lane_value);
315 lane_reg = PSB_LANE0; 344
316 cdv_sb_read(dev, lane_reg, &lane_value); 345 lane_reg = PSB_LANE1;
317 lane_value &= ~(LANE_PLL_MASK); 346 cdv_sb_read(dev, lane_reg, &lane_value);
318 lane_value |= LANE_PLL_ENABLE; 347 lane_value &= ~(LANE_PLL_MASK);
319 cdv_sb_write(dev, lane_reg, lane_value); 348 lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
320 349 cdv_sb_write(dev, lane_reg, lane_value);
321 lane_reg = PSB_LANE1; 350
322 cdv_sb_read(dev, lane_reg, &lane_value); 351 lane_reg = PSB_LANE2;
323 lane_value &= ~(LANE_PLL_MASK); 352 cdv_sb_read(dev, lane_reg, &lane_value);
324 lane_value |= LANE_PLL_ENABLE; 353 lane_value &= ~(LANE_PLL_MASK);
325 cdv_sb_write(dev, lane_reg, lane_value); 354 lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
326 355 cdv_sb_write(dev, lane_reg, lane_value);
327 /* Program the Lane2/3 for HDMI C */ 356
328 lane_reg = PSB_LANE2; 357 lane_reg = PSB_LANE3;
329 cdv_sb_read(dev, lane_reg, &lane_value); 358 cdv_sb_read(dev, lane_reg, &lane_value);
330 lane_value &= ~(LANE_PLL_MASK); 359 lane_value &= ~(LANE_PLL_MASK);
331 lane_value |= LANE_PLL_ENABLE; 360 lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
332 cdv_sb_write(dev, lane_reg, lane_value); 361 cdv_sb_write(dev, lane_reg, lane_value);
333
334 lane_reg = PSB_LANE3;
335 cdv_sb_read(dev, lane_reg, &lane_value);
336 lane_value &= ~(LANE_PLL_MASK);
337 lane_value |= LANE_PLL_ENABLE;
338 cdv_sb_write(dev, lane_reg, lane_value);
339 }
340 362
341 return 0; 363 return 0;
342} 364}
@@ -480,14 +502,12 @@ static int cdv_intel_pipe_set_base(struct drm_crtc *crtc,
480 int x, int y, struct drm_framebuffer *old_fb) 502 int x, int y, struct drm_framebuffer *old_fb)
481{ 503{
482 struct drm_device *dev = crtc->dev; 504 struct drm_device *dev = crtc->dev;
505 struct drm_psb_private *dev_priv = dev->dev_private;
483 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 506 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
484 struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb); 507 struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
485 int pipe = psb_intel_crtc->pipe; 508 int pipe = psb_intel_crtc->pipe;
509 const struct psb_offset *map = &dev_priv->regmap[pipe];
486 unsigned long start, offset; 510 unsigned long start, offset;
487 int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE);
488 int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
489 int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
490 int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
491 u32 dspcntr; 511 u32 dspcntr;
492 int ret = 0; 512 int ret = 0;
493 513
@@ -509,9 +529,9 @@ static int cdv_intel_pipe_set_base(struct drm_crtc *crtc,
509 start = psbfb->gtt->offset; 529 start = psbfb->gtt->offset;
510 offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8); 530 offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
511 531
512 REG_WRITE(dspstride, crtc->fb->pitches[0]); 532 REG_WRITE(map->stride, crtc->fb->pitches[0]);
513 533
514 dspcntr = REG_READ(dspcntr_reg); 534 dspcntr = REG_READ(map->cntr);
515 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; 535 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
516 536
517 switch (crtc->fb->bits_per_pixel) { 537 switch (crtc->fb->bits_per_pixel) {
@@ -533,15 +553,15 @@ static int cdv_intel_pipe_set_base(struct drm_crtc *crtc,
533 ret = -EINVAL; 553 ret = -EINVAL;
534 goto psb_intel_pipe_set_base_exit; 554 goto psb_intel_pipe_set_base_exit;
535 } 555 }
536 REG_WRITE(dspcntr_reg, dspcntr); 556 REG_WRITE(map->cntr, dspcntr);
537 557
538 dev_dbg(dev->dev, 558 dev_dbg(dev->dev,
539 "Writing base %08lX %08lX %d %d\n", start, offset, x, y); 559 "Writing base %08lX %08lX %d %d\n", start, offset, x, y);
540 560
541 REG_WRITE(dspbase, offset); 561 REG_WRITE(map->base, offset);
542 REG_READ(dspbase); 562 REG_READ(map->base);
543 REG_WRITE(dspsurf, start); 563 REG_WRITE(map->surf, start);
544 REG_READ(dspsurf); 564 REG_READ(map->surf);
545 565
546psb_intel_pipe_cleaner: 566psb_intel_pipe_cleaner:
547 /* If there was a previous display we can now unpin it */ 567 /* If there was a previous display we can now unpin it */
@@ -553,6 +573,199 @@ psb_intel_pipe_set_base_exit:
553 return ret; 573 return ret;
554} 574}
555 575
576#define FIFO_PIPEA (1 << 0)
577#define FIFO_PIPEB (1 << 1)
578
579static bool cdv_intel_pipe_enabled(struct drm_device *dev, int pipe)
580{
581 struct drm_crtc *crtc;
582 struct drm_psb_private *dev_priv = dev->dev_private;
583 struct psb_intel_crtc *psb_intel_crtc = NULL;
584
585 crtc = dev_priv->pipe_to_crtc_mapping[pipe];
586 psb_intel_crtc = to_psb_intel_crtc(crtc);
587
588 if (crtc->fb == NULL || !psb_intel_crtc->active)
589 return false;
590 return true;
591}
592
593static bool cdv_intel_single_pipe_active(struct drm_device *dev)
594{
595 uint32_t pipe_enabled = 0;
596
597 if (cdv_intel_pipe_enabled(dev, 0))
598 pipe_enabled |= FIFO_PIPEA;
599
600 if (cdv_intel_pipe_enabled(dev, 1))
601 pipe_enabled |= FIFO_PIPEB;
602
603
604 DRM_DEBUG_KMS("pipe enabled %x\n", pipe_enabled);
605
606 if (pipe_enabled == FIFO_PIPEA || pipe_enabled == FIFO_PIPEB)
607 return true;
608 else
609 return false;
610}
611
612static bool is_pipeb_lvds(struct drm_device *dev, struct drm_crtc *crtc)
613{
614 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
615 struct drm_mode_config *mode_config = &dev->mode_config;
616 struct drm_connector *connector;
617
618 if (psb_intel_crtc->pipe != 1)
619 return false;
620
621 list_for_each_entry(connector, &mode_config->connector_list, head) {
622 struct psb_intel_encoder *psb_intel_encoder =
623 psb_intel_attached_encoder(connector);
624
625 if (!connector->encoder
626 || connector->encoder->crtc != crtc)
627 continue;
628
629 if (psb_intel_encoder->type == INTEL_OUTPUT_LVDS)
630 return true;
631 }
632
633 return false;
634}
635
636static void cdv_intel_disable_self_refresh(struct drm_device *dev)
637{
638 if (REG_READ(FW_BLC_SELF) & FW_BLC_SELF_EN) {
639
640 /* Disable self-refresh before adjust WM */
641 REG_WRITE(FW_BLC_SELF, (REG_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN));
642 REG_READ(FW_BLC_SELF);
643
644 cdv_intel_wait_for_vblank(dev);
645
646		/* Cedarview workaround: write the overlay plane, which forces it to leave
647 * MAX_FIFO state.
648 */
649 REG_WRITE(OV_OVADD, 0/*dev_priv->ovl_offset*/);
650 REG_READ(OV_OVADD);
651
652 cdv_intel_wait_for_vblank(dev);
653 }
654
655}
656
657static void cdv_intel_update_watermark(struct drm_device *dev, struct drm_crtc *crtc)
658{
659
660 if (cdv_intel_single_pipe_active(dev)) {
661 u32 fw;
662
663 fw = REG_READ(DSPFW1);
664 fw &= ~DSP_FIFO_SR_WM_MASK;
665 fw |= (0x7e << DSP_FIFO_SR_WM_SHIFT);
666 fw &= ~CURSOR_B_FIFO_WM_MASK;
667 fw |= (0x4 << CURSOR_B_FIFO_WM_SHIFT);
668 REG_WRITE(DSPFW1, fw);
669
670 fw = REG_READ(DSPFW2);
671 fw &= ~CURSOR_A_FIFO_WM_MASK;
672 fw |= (0x6 << CURSOR_A_FIFO_WM_SHIFT);
673 fw &= ~DSP_PLANE_C_FIFO_WM_MASK;
674 fw |= (0x8 << DSP_PLANE_C_FIFO_WM_SHIFT);
675 REG_WRITE(DSPFW2, fw);
676
677 REG_WRITE(DSPFW3, 0x36000000);
678
679 /* ignore FW4 */
680
681 if (is_pipeb_lvds(dev, crtc)) {
682 REG_WRITE(DSPFW5, 0x00040330);
683 } else {
684 fw = (3 << DSP_PLANE_B_FIFO_WM1_SHIFT) |
685 (4 << DSP_PLANE_A_FIFO_WM1_SHIFT) |
686 (3 << CURSOR_B_FIFO_WM1_SHIFT) |
687 (4 << CURSOR_FIFO_SR_WM1_SHIFT);
688 REG_WRITE(DSPFW5, fw);
689 }
690
691 REG_WRITE(DSPFW6, 0x10);
692
693 cdv_intel_wait_for_vblank(dev);
694
695 /* enable self-refresh for single pipe active */
696 REG_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
697 REG_READ(FW_BLC_SELF);
698 cdv_intel_wait_for_vblank(dev);
699
700 } else {
701
702 /* HW team suggested values... */
703 REG_WRITE(DSPFW1, 0x3f880808);
704 REG_WRITE(DSPFW2, 0x0b020202);
705 REG_WRITE(DSPFW3, 0x24000000);
706 REG_WRITE(DSPFW4, 0x08030202);
707 REG_WRITE(DSPFW5, 0x01010101);
708 REG_WRITE(DSPFW6, 0x1d0);
709
710 cdv_intel_wait_for_vblank(dev);
711
712 cdv_intel_disable_self_refresh(dev);
713
714 }
715}
716
717/** Loads the palette/gamma unit for the CRTC with the prepared values */
718static void cdv_intel_crtc_load_lut(struct drm_crtc *crtc)
719{
720 struct drm_device *dev = crtc->dev;
721 struct drm_psb_private *dev_priv = dev->dev_private;
722 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
723 int palreg = PALETTE_A;
724 int i;
725
726 /* The clocks have to be on to load the palette. */
727 if (!crtc->enabled)
728 return;
729
730 switch (psb_intel_crtc->pipe) {
731 case 0:
732 break;
733 case 1:
734 palreg = PALETTE_B;
735 break;
736 case 2:
737 palreg = PALETTE_C;
738 break;
739 default:
740 dev_err(dev->dev, "Illegal Pipe Number.\n");
741 return;
742 }
743
744 if (gma_power_begin(dev, false)) {
745 for (i = 0; i < 256; i++) {
746 REG_WRITE(palreg + 4 * i,
747 ((psb_intel_crtc->lut_r[i] +
748 psb_intel_crtc->lut_adj[i]) << 16) |
749 ((psb_intel_crtc->lut_g[i] +
750 psb_intel_crtc->lut_adj[i]) << 8) |
751 (psb_intel_crtc->lut_b[i] +
752 psb_intel_crtc->lut_adj[i]));
753 }
754 gma_power_end(dev);
755 } else {
756 for (i = 0; i < 256; i++) {
757 dev_priv->regs.pipe[0].palette[i] =
758 ((psb_intel_crtc->lut_r[i] +
759 psb_intel_crtc->lut_adj[i]) << 16) |
760 ((psb_intel_crtc->lut_g[i] +
761 psb_intel_crtc->lut_adj[i]) << 8) |
762 (psb_intel_crtc->lut_b[i] +
763 psb_intel_crtc->lut_adj[i]);
764 }
765
766 }
767}
768
556/** 769/**
557 * Sets the power management mode of the pipe and plane. 770 * Sets the power management mode of the pipe and plane.
558 * 771 *
@@ -562,62 +775,80 @@ psb_intel_pipe_set_base_exit:
562static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode) 775static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
563{ 776{
564 struct drm_device *dev = crtc->dev; 777 struct drm_device *dev = crtc->dev;
778 struct drm_psb_private *dev_priv = dev->dev_private;
565 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 779 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
566 int pipe = psb_intel_crtc->pipe; 780 int pipe = psb_intel_crtc->pipe;
567 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; 781 const struct psb_offset *map = &dev_priv->regmap[pipe];
568 int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
569 int dspbase_reg = (pipe == 0) ? DSPABASE : DSPBBASE;
570 int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
571 u32 temp; 782 u32 temp;
572 783
573 /* XXX: When our outputs are all unaware of DPMS modes other than off 784 /* XXX: When our outputs are all unaware of DPMS modes other than off
574 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. 785 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
575 */ 786 */
787 cdv_intel_disable_self_refresh(dev);
788
576 switch (mode) { 789 switch (mode) {
577 case DRM_MODE_DPMS_ON: 790 case DRM_MODE_DPMS_ON:
578 case DRM_MODE_DPMS_STANDBY: 791 case DRM_MODE_DPMS_STANDBY:
579 case DRM_MODE_DPMS_SUSPEND: 792 case DRM_MODE_DPMS_SUSPEND:
793 if (psb_intel_crtc->active)
794 return;
795
796 psb_intel_crtc->active = true;
797
580 /* Enable the DPLL */ 798 /* Enable the DPLL */
581 temp = REG_READ(dpll_reg); 799 temp = REG_READ(map->dpll);
582 if ((temp & DPLL_VCO_ENABLE) == 0) { 800 if ((temp & DPLL_VCO_ENABLE) == 0) {
583 REG_WRITE(dpll_reg, temp); 801 REG_WRITE(map->dpll, temp);
584 REG_READ(dpll_reg); 802 REG_READ(map->dpll);
585 /* Wait for the clocks to stabilize. */ 803 /* Wait for the clocks to stabilize. */
586 udelay(150); 804 udelay(150);
587 REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE); 805 REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
588 REG_READ(dpll_reg); 806 REG_READ(map->dpll);
589 /* Wait for the clocks to stabilize. */ 807 /* Wait for the clocks to stabilize. */
590 udelay(150); 808 udelay(150);
591 REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE); 809 REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
592 REG_READ(dpll_reg); 810 REG_READ(map->dpll);
593 /* Wait for the clocks to stabilize. */ 811 /* Wait for the clocks to stabilize. */
594 udelay(150); 812 udelay(150);
595 } 813 }
596 814
597 /* Jim Bish - switch plan and pipe per scott */ 815 /* Jim Bish - switch plan and pipe per scott */
598 /* Enable the plane */ 816 /* Enable the plane */
599 temp = REG_READ(dspcntr_reg); 817 temp = REG_READ(map->cntr);
600 if ((temp & DISPLAY_PLANE_ENABLE) == 0) { 818 if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
601 REG_WRITE(dspcntr_reg, 819 REG_WRITE(map->cntr,
602 temp | DISPLAY_PLANE_ENABLE); 820 temp | DISPLAY_PLANE_ENABLE);
603 /* Flush the plane changes */ 821 /* Flush the plane changes */
604 REG_WRITE(dspbase_reg, REG_READ(dspbase_reg)); 822 REG_WRITE(map->base, REG_READ(map->base));
605 } 823 }
606 824
607 udelay(150); 825 udelay(150);
608 826
609 /* Enable the pipe */ 827 /* Enable the pipe */
610 temp = REG_READ(pipeconf_reg); 828 temp = REG_READ(map->conf);
611 if ((temp & PIPEACONF_ENABLE) == 0) 829 if ((temp & PIPEACONF_ENABLE) == 0)
612 REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE); 830 REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);
831
832 temp = REG_READ(map->status);
833 temp &= ~(0xFFFF);
834 temp |= PIPE_FIFO_UNDERRUN;
835 REG_WRITE(map->status, temp);
836 REG_READ(map->status);
613 837
614 psb_intel_crtc_load_lut(crtc); 838 cdv_intel_update_watermark(dev, crtc);
839 cdv_intel_crtc_load_lut(crtc);
615 840
616 /* Give the overlay scaler a chance to enable 841 /* Give the overlay scaler a chance to enable
617 * if it's on this pipe */ 842 * if it's on this pipe */
618 /* psb_intel_crtc_dpms_video(crtc, true); TODO */ 843 /* psb_intel_crtc_dpms_video(crtc, true); TODO */
844 psb_intel_crtc->crtc_enable = true;
619 break; 845 break;
620 case DRM_MODE_DPMS_OFF: 846 case DRM_MODE_DPMS_OFF:
847 if (!psb_intel_crtc->active)
848 return;
849
850 psb_intel_crtc->active = false;
851
621 /* Give the overlay scaler a chance to disable 852 /* Give the overlay scaler a chance to disable
622 * if it's on this pipe */ 853 * if it's on this pipe */
623 /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */ 854 /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
@@ -627,14 +858,15 @@ static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
627 858
628 /* Jim Bish - changed pipe/plane here as well. */ 859 /* Jim Bish - changed pipe/plane here as well. */
629 860
861 drm_vblank_off(dev, pipe);
630 /* Wait for vblank for the disable to take effect */ 862 /* Wait for vblank for the disable to take effect */
631 cdv_intel_wait_for_vblank(dev); 863 cdv_intel_wait_for_vblank(dev);
632 864
633 /* Next, disable display pipes */ 865 /* Next, disable display pipes */
634 temp = REG_READ(pipeconf_reg); 866 temp = REG_READ(map->conf);
635 if ((temp & PIPEACONF_ENABLE) != 0) { 867 if ((temp & PIPEACONF_ENABLE) != 0) {
636 REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE); 868 REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
637 REG_READ(pipeconf_reg); 869 REG_READ(map->conf);
638 } 870 }
639 871
640 /* Wait for vblank for the disable to take effect. */ 872 /* Wait for vblank for the disable to take effect. */
@@ -643,23 +875,25 @@ static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
643 udelay(150); 875 udelay(150);
644 876
645 /* Disable display plane */ 877 /* Disable display plane */
646 temp = REG_READ(dspcntr_reg); 878 temp = REG_READ(map->cntr);
647 if ((temp & DISPLAY_PLANE_ENABLE) != 0) { 879 if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
648 REG_WRITE(dspcntr_reg, 880 REG_WRITE(map->cntr,
649 temp & ~DISPLAY_PLANE_ENABLE); 881 temp & ~DISPLAY_PLANE_ENABLE);
650 /* Flush the plane changes */ 882 /* Flush the plane changes */
651 REG_WRITE(dspbase_reg, REG_READ(dspbase_reg)); 883 REG_WRITE(map->base, REG_READ(map->base));
652 REG_READ(dspbase_reg); 884 REG_READ(map->base);
653 } 885 }
654 886
655 temp = REG_READ(dpll_reg); 887 temp = REG_READ(map->dpll);
656 if ((temp & DPLL_VCO_ENABLE) != 0) { 888 if ((temp & DPLL_VCO_ENABLE) != 0) {
657 REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE); 889 REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
658 REG_READ(dpll_reg); 890 REG_READ(map->dpll);
659 } 891 }
660 892
661 /* Wait for the clocks to turn off. */ 893 /* Wait for the clocks to turn off. */
662 udelay(150); 894 udelay(150);
895 cdv_intel_update_watermark(dev, crtc);
896 psb_intel_crtc->crtc_enable = false;
663 break; 897 break;
664 } 898 }
665 /*Set FIFO Watermarks*/ 899 /*Set FIFO Watermarks*/
@@ -709,21 +943,10 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
709 struct drm_framebuffer *old_fb) 943 struct drm_framebuffer *old_fb)
710{ 944{
711 struct drm_device *dev = crtc->dev; 945 struct drm_device *dev = crtc->dev;
946 struct drm_psb_private *dev_priv = dev->dev_private;
712 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 947 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
713 int pipe = psb_intel_crtc->pipe; 948 int pipe = psb_intel_crtc->pipe;
714 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; 949 const struct psb_offset *map = &dev_priv->regmap[pipe];
715 int dpll_md_reg = (psb_intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD;
716 int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
717 int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
718 int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
719 int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
720 int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
721 int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
722 int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
723 int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
724 int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
725 int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
726 int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
727 int refclk; 950 int refclk;
728 struct cdv_intel_clock_t clock; 951 struct cdv_intel_clock_t clock;
729 u32 dpll = 0, dspcntr, pipeconf; 952 u32 dpll = 0, dspcntr, pipeconf;
@@ -757,13 +980,18 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
757 } 980 }
758 } 981 }
759 982
760 refclk = 96000; 983 if (dev_priv->dplla_96mhz)
761 984 /* low-end sku, 96/100 mhz */
762 /* Hack selection about ref clk for CRT */ 985 refclk = 96000;
763 /* Select 27MHz as the reference clk for HDMI */ 986 else
764 if (is_crt || is_hdmi) 987 /* high-end SKU, 27/100 MHz */
765 refclk = 27000; 988 refclk = 27000;
766 989
990 if (is_lvds && dev_priv->lvds_use_ssc) {
991 refclk = dev_priv->lvds_ssc_freq * 1000;
992 DRM_DEBUG_KMS("Use SSC reference clock %d MHz\n", dev_priv->lvds_ssc_freq);
993 }
994
767 drm_mode_debug_printmodeline(adjusted_mode); 995 drm_mode_debug_printmodeline(adjusted_mode);
768 996
769 ok = cdv_intel_find_best_PLL(crtc, adjusted_mode->clock, refclk, 997 ok = cdv_intel_find_best_PLL(crtc, adjusted_mode->clock, refclk,
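
The reference clock choice above reduces to a small decision: board default by SKU, overridden for spread-spectrum LVDS panels. A standalone sketch for clarity, not code from the patch; the struct and function names here are stand-ins for the dplla_96mhz, lvds_use_ssc and lvds_ssc_freq fields the hunk reads.

	struct clk_cfg { int dplla_96mhz, lvds_use_ssc, lvds_ssc_freq; };

	/* Sketch: pick the PLL reference clock in kHz as the hunk above does */
	static int pick_refclk(const struct clk_cfg *cfg, int is_lvds)
	{
		/* low-end SKUs feed the DPLL from 96 MHz, high-end from 27 MHz */
		int refclk = cfg->dplla_96mhz ? 96000 : 27000;

		/* spread-spectrum LVDS panels supply their own reference */
		if (is_lvds && cfg->lvds_use_ssc)
			refclk = cfg->lvds_ssc_freq * 1000;	/* MHz -> kHz */
		return refclk;
	}
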
@@ -779,18 +1007,17 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
779/* dpll |= PLL_REF_INPUT_TVCLKINBC; */ 1007/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
780 dpll |= 3; 1008 dpll |= 3;
781 } 1009 }
782 dpll |= PLL_REF_INPUT_DREFCLK; 1010/* dpll |= PLL_REF_INPUT_DREFCLK; */
783 1011
784 dpll |= DPLL_SYNCLOCK_ENABLE; 1012 dpll |= DPLL_SYNCLOCK_ENABLE;
785 dpll |= DPLL_VGA_MODE_DIS; 1013/* if (is_lvds)
786 if (is_lvds)
787 dpll |= DPLLB_MODE_LVDS; 1014 dpll |= DPLLB_MODE_LVDS;
788 else 1015 else
789 dpll |= DPLLB_MODE_DAC_SERIAL; 1016 dpll |= DPLLB_MODE_DAC_SERIAL; */
790 /* dpll |= (2 << 11); */ 1017 /* dpll |= (2 << 11); */
791 1018
792 /* setup pipeconf */ 1019 /* setup pipeconf */
793 pipeconf = REG_READ(pipeconf_reg); 1020 pipeconf = REG_READ(map->conf);
794 1021
795 /* Set up the display plane register */ 1022 /* Set up the display plane register */
796 dspcntr = DISPPLANE_GAMMA_ENABLE; 1023 dspcntr = DISPPLANE_GAMMA_ENABLE;
@@ -803,10 +1030,10 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
803 dspcntr |= DISPLAY_PLANE_ENABLE; 1030 dspcntr |= DISPLAY_PLANE_ENABLE;
804 pipeconf |= PIPEACONF_ENABLE; 1031 pipeconf |= PIPEACONF_ENABLE;
805 1032
806 REG_WRITE(dpll_reg, dpll | DPLL_VGA_MODE_DIS | DPLL_SYNCLOCK_ENABLE); 1033 REG_WRITE(map->dpll, dpll | DPLL_VGA_MODE_DIS | DPLL_SYNCLOCK_ENABLE);
807 REG_READ(dpll_reg); 1034 REG_READ(map->dpll);
808 1035
809 cdv_dpll_set_clock_cdv(dev, crtc, &clock); 1036 cdv_dpll_set_clock_cdv(dev, crtc, &clock, is_lvds);
810 1037
811 udelay(150); 1038 udelay(150);
812 1039
@@ -848,48 +1075,48 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
848 DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); 1075 DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
849 drm_mode_debug_printmodeline(mode); 1076 drm_mode_debug_printmodeline(mode);
850 1077
851 REG_WRITE(dpll_reg, 1078 REG_WRITE(map->dpll,
852 (REG_READ(dpll_reg) & ~DPLL_LOCK) | DPLL_VCO_ENABLE); 1079 (REG_READ(map->dpll) & ~DPLL_LOCK) | DPLL_VCO_ENABLE);
853 REG_READ(dpll_reg); 1080 REG_READ(map->dpll);
854 /* Wait for the clocks to stabilize. */ 1081 /* Wait for the clocks to stabilize. */
855 udelay(150); /* 42 usec w/o calibration, 110 with. rounded up. */ 1082 udelay(150); /* 42 usec w/o calibration, 110 with. rounded up. */
856 1083
857 if (!(REG_READ(dpll_reg) & DPLL_LOCK)) { 1084 if (!(REG_READ(map->dpll) & DPLL_LOCK)) {
858 dev_err(dev->dev, "Failed to get DPLL lock\n"); 1085 dev_err(dev->dev, "Failed to get DPLL lock\n");
859 return -EBUSY; 1086 return -EBUSY;
860 } 1087 }
861 1088
862 { 1089 {
863 int sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; 1090 int sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
864 REG_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT)); 1091 REG_WRITE(map->dpll_md, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
865 } 1092 }
866 1093
867 REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) | 1094 REG_WRITE(map->htotal, (adjusted_mode->crtc_hdisplay - 1) |
868 ((adjusted_mode->crtc_htotal - 1) << 16)); 1095 ((adjusted_mode->crtc_htotal - 1) << 16));
869 REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) | 1096 REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start - 1) |
870 ((adjusted_mode->crtc_hblank_end - 1) << 16)); 1097 ((adjusted_mode->crtc_hblank_end - 1) << 16));
871 REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) | 1098 REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start - 1) |
872 ((adjusted_mode->crtc_hsync_end - 1) << 16)); 1099 ((adjusted_mode->crtc_hsync_end - 1) << 16));
873 REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) | 1100 REG_WRITE(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) |
874 ((adjusted_mode->crtc_vtotal - 1) << 16)); 1101 ((adjusted_mode->crtc_vtotal - 1) << 16));
875 REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) | 1102 REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start - 1) |
876 ((adjusted_mode->crtc_vblank_end - 1) << 16)); 1103 ((adjusted_mode->crtc_vblank_end - 1) << 16));
877 REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) | 1104 REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start - 1) |
878 ((adjusted_mode->crtc_vsync_end - 1) << 16)); 1105 ((adjusted_mode->crtc_vsync_end - 1) << 16));
879 /* pipesrc and dspsize control the size that is scaled from, 1106 /* pipesrc and dspsize control the size that is scaled from,
880 * which should always be the user's requested size. 1107 * which should always be the user's requested size.
881 */ 1108 */
882 REG_WRITE(dspsize_reg, 1109 REG_WRITE(map->size,
883 ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1)); 1110 ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
884 REG_WRITE(dsppos_reg, 0); 1111 REG_WRITE(map->pos, 0);
885 REG_WRITE(pipesrc_reg, 1112 REG_WRITE(map->src,
886 ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); 1113 ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
887 REG_WRITE(pipeconf_reg, pipeconf); 1114 REG_WRITE(map->conf, pipeconf);
888 REG_READ(pipeconf_reg); 1115 REG_READ(map->conf);
889 1116
890 cdv_intel_wait_for_vblank(dev); 1117 cdv_intel_wait_for_vblank(dev);
891 1118
892 REG_WRITE(dspcntr_reg, dspcntr); 1119 REG_WRITE(map->cntr, dspcntr);
893 1120
894 /* Flush the plane changes */ 1121 /* Flush the plane changes */
895 { 1122 {
@@ -903,58 +1130,6 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
903 return 0; 1130 return 0;
904} 1131}
905 1132
906/** Loads the palette/gamma unit for the CRTC with the prepared values */
907static void cdv_intel_crtc_load_lut(struct drm_crtc *crtc)
908{
909 struct drm_device *dev = crtc->dev;
910 struct drm_psb_private *dev_priv =
911 (struct drm_psb_private *)dev->dev_private;
912 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
913 int palreg = PALETTE_A;
914 int i;
915
916 /* The clocks have to be on to load the palette. */
917 if (!crtc->enabled)
918 return;
919
920 switch (psb_intel_crtc->pipe) {
921 case 0:
922 break;
923 case 1:
924 palreg = PALETTE_B;
925 break;
926 case 2:
927 palreg = PALETTE_C;
928 break;
929 default:
930 dev_err(dev->dev, "Illegal Pipe Number.\n");
931 return;
932 }
933
934 if (gma_power_begin(dev, false)) {
935 for (i = 0; i < 256; i++) {
936 REG_WRITE(palreg + 4 * i,
937 ((psb_intel_crtc->lut_r[i] +
938 psb_intel_crtc->lut_adj[i]) << 16) |
939 ((psb_intel_crtc->lut_g[i] +
940 psb_intel_crtc->lut_adj[i]) << 8) |
941 (psb_intel_crtc->lut_b[i] +
942 psb_intel_crtc->lut_adj[i]));
943 }
944 gma_power_end(dev);
945 } else {
946 for (i = 0; i < 256; i++) {
947 dev_priv->regs.psb.save_palette_a[i] =
948 ((psb_intel_crtc->lut_r[i] +
949 psb_intel_crtc->lut_adj[i]) << 16) |
950 ((psb_intel_crtc->lut_g[i] +
951 psb_intel_crtc->lut_adj[i]) << 8) |
952 (psb_intel_crtc->lut_b[i] +
953 psb_intel_crtc->lut_adj[i]);
954 }
955
956 }
957}
958 1133
959/** 1134/**
960 * Save HW states of the given crtc 1135 * Save HW states of the given crtc
@@ -962,11 +1137,10 @@ static void cdv_intel_crtc_load_lut(struct drm_crtc *crtc)
962static void cdv_intel_crtc_save(struct drm_crtc *crtc) 1137static void cdv_intel_crtc_save(struct drm_crtc *crtc)
963{ 1138{
964 struct drm_device *dev = crtc->dev; 1139 struct drm_device *dev = crtc->dev;
965 /* struct drm_psb_private *dev_priv = 1140 struct drm_psb_private *dev_priv = dev->dev_private;
966 (struct drm_psb_private *)dev->dev_private; */
967 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 1141 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
968 struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state; 1142 struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
969 int pipeA = (psb_intel_crtc->pipe == 0); 1143 const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
970 uint32_t paletteReg; 1144 uint32_t paletteReg;
971 int i; 1145 int i;
972 1146
@@ -975,25 +1149,25 @@ static void cdv_intel_crtc_save(struct drm_crtc *crtc)
975 return; 1149 return;
976 } 1150 }
977 1151
978 crtc_state->saveDSPCNTR = REG_READ(pipeA ? DSPACNTR : DSPBCNTR); 1152 crtc_state->saveDSPCNTR = REG_READ(map->cntr);
979 crtc_state->savePIPECONF = REG_READ(pipeA ? PIPEACONF : PIPEBCONF); 1153 crtc_state->savePIPECONF = REG_READ(map->conf);
980 crtc_state->savePIPESRC = REG_READ(pipeA ? PIPEASRC : PIPEBSRC); 1154 crtc_state->savePIPESRC = REG_READ(map->src);
981 crtc_state->saveFP0 = REG_READ(pipeA ? FPA0 : FPB0); 1155 crtc_state->saveFP0 = REG_READ(map->fp0);
982 crtc_state->saveFP1 = REG_READ(pipeA ? FPA1 : FPB1); 1156 crtc_state->saveFP1 = REG_READ(map->fp1);
983 crtc_state->saveDPLL = REG_READ(pipeA ? DPLL_A : DPLL_B); 1157 crtc_state->saveDPLL = REG_READ(map->dpll);
984 crtc_state->saveHTOTAL = REG_READ(pipeA ? HTOTAL_A : HTOTAL_B); 1158 crtc_state->saveHTOTAL = REG_READ(map->htotal);
985 crtc_state->saveHBLANK = REG_READ(pipeA ? HBLANK_A : HBLANK_B); 1159 crtc_state->saveHBLANK = REG_READ(map->hblank);
986 crtc_state->saveHSYNC = REG_READ(pipeA ? HSYNC_A : HSYNC_B); 1160 crtc_state->saveHSYNC = REG_READ(map->hsync);
987 crtc_state->saveVTOTAL = REG_READ(pipeA ? VTOTAL_A : VTOTAL_B); 1161 crtc_state->saveVTOTAL = REG_READ(map->vtotal);
988 crtc_state->saveVBLANK = REG_READ(pipeA ? VBLANK_A : VBLANK_B); 1162 crtc_state->saveVBLANK = REG_READ(map->vblank);
989 crtc_state->saveVSYNC = REG_READ(pipeA ? VSYNC_A : VSYNC_B); 1163 crtc_state->saveVSYNC = REG_READ(map->vsync);
990 crtc_state->saveDSPSTRIDE = REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE); 1164 crtc_state->saveDSPSTRIDE = REG_READ(map->stride);
991 1165
992 /*NOTE: DSPSIZE DSPPOS only for psb*/ 1166 /*NOTE: DSPSIZE DSPPOS only for psb*/
993 crtc_state->saveDSPSIZE = REG_READ(pipeA ? DSPASIZE : DSPBSIZE); 1167 crtc_state->saveDSPSIZE = REG_READ(map->size);
994 crtc_state->saveDSPPOS = REG_READ(pipeA ? DSPAPOS : DSPBPOS); 1168 crtc_state->saveDSPPOS = REG_READ(map->pos);
995 1169
996 crtc_state->saveDSPBASE = REG_READ(pipeA ? DSPABASE : DSPBBASE); 1170 crtc_state->saveDSPBASE = REG_READ(map->base);
997 1171
998 DRM_DEBUG("(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n", 1172 DRM_DEBUG("(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
999 crtc_state->saveDSPCNTR, 1173 crtc_state->saveDSPCNTR,
@@ -1014,7 +1188,7 @@ static void cdv_intel_crtc_save(struct drm_crtc *crtc)
1014 crtc_state->saveDSPBASE 1188 crtc_state->saveDSPBASE
1015 ); 1189 );
1016 1190
1017 paletteReg = pipeA ? PALETTE_A : PALETTE_B; 1191 paletteReg = map->palette;
1018 for (i = 0; i < 256; ++i) 1192 for (i = 0; i < 256; ++i)
1019 crtc_state->savePalette[i] = REG_READ(paletteReg + (i << 2)); 1193 crtc_state->savePalette[i] = REG_READ(paletteReg + (i << 2));
1020} 1194}
@@ -1025,12 +1199,10 @@ static void cdv_intel_crtc_save(struct drm_crtc *crtc)
1025static void cdv_intel_crtc_restore(struct drm_crtc *crtc) 1199static void cdv_intel_crtc_restore(struct drm_crtc *crtc)
1026{ 1200{
1027 struct drm_device *dev = crtc->dev; 1201 struct drm_device *dev = crtc->dev;
1028 /* struct drm_psb_private * dev_priv = 1202 struct drm_psb_private *dev_priv = dev->dev_private;
1029 (struct drm_psb_private *)dev->dev_private; */
1030 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 1203 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
1031 struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state; 1204 struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
1032 /* struct drm_crtc_helper_funcs * crtc_funcs = crtc->helper_private; */ 1205 const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
1033 int pipeA = (psb_intel_crtc->pipe == 0);
1034 uint32_t paletteReg; 1206 uint32_t paletteReg;
1035 int i; 1207 int i;
1036 1208
@@ -1041,23 +1213,23 @@ static void cdv_intel_crtc_restore(struct drm_crtc *crtc)
1041 1213
1042 DRM_DEBUG( 1214 DRM_DEBUG(
1043 "current:(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n", 1215 "current:(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
1044 REG_READ(pipeA ? DSPACNTR : DSPBCNTR), 1216 REG_READ(map->cntr),
1045 REG_READ(pipeA ? PIPEACONF : PIPEBCONF), 1217 REG_READ(map->conf),
1046 REG_READ(pipeA ? PIPEASRC : PIPEBSRC), 1218 REG_READ(map->src),
1047 REG_READ(pipeA ? FPA0 : FPB0), 1219 REG_READ(map->fp0),
1048 REG_READ(pipeA ? FPA1 : FPB1), 1220 REG_READ(map->fp1),
1049 REG_READ(pipeA ? DPLL_A : DPLL_B), 1221 REG_READ(map->dpll),
1050 REG_READ(pipeA ? HTOTAL_A : HTOTAL_B), 1222 REG_READ(map->htotal),
1051 REG_READ(pipeA ? HBLANK_A : HBLANK_B), 1223 REG_READ(map->hblank),
1052 REG_READ(pipeA ? HSYNC_A : HSYNC_B), 1224 REG_READ(map->hsync),
1053 REG_READ(pipeA ? VTOTAL_A : VTOTAL_B), 1225 REG_READ(map->vtotal),
1054 REG_READ(pipeA ? VBLANK_A : VBLANK_B), 1226 REG_READ(map->vblank),
1055 REG_READ(pipeA ? VSYNC_A : VSYNC_B), 1227 REG_READ(map->vsync),
1056 REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE), 1228 REG_READ(map->stride),
1057 REG_READ(pipeA ? DSPASIZE : DSPBSIZE), 1229 REG_READ(map->size),
1058 REG_READ(pipeA ? DSPAPOS : DSPBPOS), 1230 REG_READ(map->pos),
1059 REG_READ(pipeA ? DSPABASE : DSPBBASE) 1231 REG_READ(map->base)
1060 ); 1232 );
1061 1233
1062 DRM_DEBUG( 1234 DRM_DEBUG(
1063 "saved: (%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n", 1235 "saved: (%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
@@ -1077,51 +1249,51 @@ static void cdv_intel_crtc_restore(struct drm_crtc *crtc)
1077 crtc_state->saveDSPSIZE, 1249 crtc_state->saveDSPSIZE,
1078 crtc_state->saveDSPPOS, 1250 crtc_state->saveDSPPOS,
1079 crtc_state->saveDSPBASE 1251 crtc_state->saveDSPBASE
1080 ); 1252 );
1081 1253
1082 1254
1083 if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) { 1255 if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
1084 REG_WRITE(pipeA ? DPLL_A : DPLL_B, 1256 REG_WRITE(map->dpll,
1085 crtc_state->saveDPLL & ~DPLL_VCO_ENABLE); 1257 crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
1086 REG_READ(pipeA ? DPLL_A : DPLL_B); 1258 REG_READ(map->dpll);
1087 DRM_DEBUG("write dpll: %x\n", 1259 DRM_DEBUG("write dpll: %x\n",
1088 REG_READ(pipeA ? DPLL_A : DPLL_B)); 1260 REG_READ(map->dpll));
1089 udelay(150); 1261 udelay(150);
1090 } 1262 }
1091 1263
1092 REG_WRITE(pipeA ? FPA0 : FPB0, crtc_state->saveFP0); 1264 REG_WRITE(map->fp0, crtc_state->saveFP0);
1093 REG_READ(pipeA ? FPA0 : FPB0); 1265 REG_READ(map->fp0);
1094 1266
1095 REG_WRITE(pipeA ? FPA1 : FPB1, crtc_state->saveFP1); 1267 REG_WRITE(map->fp1, crtc_state->saveFP1);
1096 REG_READ(pipeA ? FPA1 : FPB1); 1268 REG_READ(map->fp1);
1097 1269
1098 REG_WRITE(pipeA ? DPLL_A : DPLL_B, crtc_state->saveDPLL); 1270 REG_WRITE(map->dpll, crtc_state->saveDPLL);
1099 REG_READ(pipeA ? DPLL_A : DPLL_B); 1271 REG_READ(map->dpll);
1100 udelay(150); 1272 udelay(150);
1101 1273
1102 REG_WRITE(pipeA ? HTOTAL_A : HTOTAL_B, crtc_state->saveHTOTAL); 1274 REG_WRITE(map->htotal, crtc_state->saveHTOTAL);
1103 REG_WRITE(pipeA ? HBLANK_A : HBLANK_B, crtc_state->saveHBLANK); 1275 REG_WRITE(map->hblank, crtc_state->saveHBLANK);
1104 REG_WRITE(pipeA ? HSYNC_A : HSYNC_B, crtc_state->saveHSYNC); 1276 REG_WRITE(map->hsync, crtc_state->saveHSYNC);
1105 REG_WRITE(pipeA ? VTOTAL_A : VTOTAL_B, crtc_state->saveVTOTAL); 1277 REG_WRITE(map->vtotal, crtc_state->saveVTOTAL);
1106 REG_WRITE(pipeA ? VBLANK_A : VBLANK_B, crtc_state->saveVBLANK); 1278 REG_WRITE(map->vblank, crtc_state->saveVBLANK);
1107 REG_WRITE(pipeA ? VSYNC_A : VSYNC_B, crtc_state->saveVSYNC); 1279 REG_WRITE(map->vsync, crtc_state->saveVSYNC);
1108 REG_WRITE(pipeA ? DSPASTRIDE : DSPBSTRIDE, crtc_state->saveDSPSTRIDE); 1280 REG_WRITE(map->stride, crtc_state->saveDSPSTRIDE);
1109 1281
1110 REG_WRITE(pipeA ? DSPASIZE : DSPBSIZE, crtc_state->saveDSPSIZE); 1282 REG_WRITE(map->size, crtc_state->saveDSPSIZE);
1111 REG_WRITE(pipeA ? DSPAPOS : DSPBPOS, crtc_state->saveDSPPOS); 1283 REG_WRITE(map->pos, crtc_state->saveDSPPOS);
1112 1284
1113 REG_WRITE(pipeA ? PIPEASRC : PIPEBSRC, crtc_state->savePIPESRC); 1285 REG_WRITE(map->src, crtc_state->savePIPESRC);
1114 REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE); 1286 REG_WRITE(map->base, crtc_state->saveDSPBASE);
1115 REG_WRITE(pipeA ? PIPEACONF : PIPEBCONF, crtc_state->savePIPECONF); 1287 REG_WRITE(map->conf, crtc_state->savePIPECONF);
1116 1288
1117 cdv_intel_wait_for_vblank(dev); 1289 cdv_intel_wait_for_vblank(dev);
1118 1290
1119 REG_WRITE(pipeA ? DSPACNTR : DSPBCNTR, crtc_state->saveDSPCNTR); 1291 REG_WRITE(map->cntr, crtc_state->saveDSPCNTR);
1120 REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE); 1292 REG_WRITE(map->base, crtc_state->saveDSPBASE);
1121 1293
1122 cdv_intel_wait_for_vblank(dev); 1294 cdv_intel_wait_for_vblank(dev);
1123 1295
1124 paletteReg = pipeA ? PALETTE_A : PALETTE_B; 1296 paletteReg = map->palette;
1125 for (i = 0; i < 256; ++i) 1297 for (i = 0; i < 256; ++i)
1126 REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]); 1298 REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]);
1127} 1299}
@@ -1296,35 +1468,30 @@ static void i8xx_clock(int refclk, struct cdv_intel_clock_t *clock)
1296static int cdv_intel_crtc_clock_get(struct drm_device *dev, 1468static int cdv_intel_crtc_clock_get(struct drm_device *dev,
1297 struct drm_crtc *crtc) 1469 struct drm_crtc *crtc)
1298{ 1470{
1471 struct drm_psb_private *dev_priv = dev->dev_private;
1299 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 1472 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
1300 int pipe = psb_intel_crtc->pipe; 1473 int pipe = psb_intel_crtc->pipe;
1474 const struct psb_offset *map = &dev_priv->regmap[pipe];
1301 u32 dpll; 1475 u32 dpll;
1302 u32 fp; 1476 u32 fp;
1303 struct cdv_intel_clock_t clock; 1477 struct cdv_intel_clock_t clock;
1304 bool is_lvds; 1478 bool is_lvds;
1305 struct drm_psb_private *dev_priv = dev->dev_private; 1479 struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
1306 1480
1307 if (gma_power_begin(dev, false)) { 1481 if (gma_power_begin(dev, false)) {
1308 dpll = REG_READ((pipe == 0) ? DPLL_A : DPLL_B); 1482 dpll = REG_READ(map->dpll);
1309 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) 1483 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
1310 fp = REG_READ((pipe == 0) ? FPA0 : FPB0); 1484 fp = REG_READ(map->fp0);
1311 else 1485 else
1312 fp = REG_READ((pipe == 0) ? FPA1 : FPB1); 1486 fp = REG_READ(map->fp1);
1313 is_lvds = (pipe == 1) && (REG_READ(LVDS) & LVDS_PORT_EN); 1487 is_lvds = (pipe == 1) && (REG_READ(LVDS) & LVDS_PORT_EN);
1314 gma_power_end(dev); 1488 gma_power_end(dev);
1315 } else { 1489 } else {
1316 dpll = (pipe == 0) ? 1490 dpll = p->dpll;
1317 dev_priv->regs.psb.saveDPLL_A :
1318 dev_priv->regs.psb.saveDPLL_B;
1319
1320 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) 1491 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
1321 fp = (pipe == 0) ? 1492 fp = p->fp0;
1322 dev_priv->regs.psb.saveFPA0 :
1323 dev_priv->regs.psb.saveFPB0;
1324 else 1493 else
1325 fp = (pipe == 0) ? 1494 fp = p->fp1;
1326 dev_priv->regs.psb.saveFPA1 :
1327 dev_priv->regs.psb.saveFPB1;
1328 1495
1329 is_lvds = (pipe == 1) && 1496 is_lvds = (pipe == 1) &&
1330 (dev_priv->regs.psb.saveLVDS & LVDS_PORT_EN); 1497 (dev_priv->regs.psb.saveLVDS & LVDS_PORT_EN);
@@ -1382,32 +1549,26 @@ struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev,
1382{ 1549{
1383 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 1550 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
1384 int pipe = psb_intel_crtc->pipe; 1551 int pipe = psb_intel_crtc->pipe;
1552 struct drm_psb_private *dev_priv = dev->dev_private;
1553 struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
1554 const struct psb_offset *map = &dev_priv->regmap[pipe];
1385 struct drm_display_mode *mode; 1555 struct drm_display_mode *mode;
1386 int htot; 1556 int htot;
1387 int hsync; 1557 int hsync;
1388 int vtot; 1558 int vtot;
1389 int vsync; 1559 int vsync;
1390 struct drm_psb_private *dev_priv = dev->dev_private;
1391 1560
1392 if (gma_power_begin(dev, false)) { 1561 if (gma_power_begin(dev, false)) {
1393 htot = REG_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B); 1562 htot = REG_READ(map->htotal);
1394 hsync = REG_READ((pipe == 0) ? HSYNC_A : HSYNC_B); 1563 hsync = REG_READ(map->hsync);
1395 vtot = REG_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B); 1564 vtot = REG_READ(map->vtotal);
1396 vsync = REG_READ((pipe == 0) ? VSYNC_A : VSYNC_B); 1565 vsync = REG_READ(map->vsync);
1397 gma_power_end(dev); 1566 gma_power_end(dev);
1398 } else { 1567 } else {
1399 htot = (pipe == 0) ? 1568 htot = p->htotal;
1400 dev_priv->regs.psb.saveHTOTAL_A : 1569 hsync = p->hsync;
1401 dev_priv->regs.psb.saveHTOTAL_B; 1570 vtot = p->vtotal;
1402 hsync = (pipe == 0) ? 1571 vsync = p->vsync;
1403 dev_priv->regs.psb.saveHSYNC_A :
1404 dev_priv->regs.psb.saveHSYNC_B;
1405 vtot = (pipe == 0) ?
1406 dev_priv->regs.psb.saveVTOTAL_A :
1407 dev_priv->regs.psb.saveVTOTAL_B;
1408 vsync = (pipe == 0) ?
1409 dev_priv->regs.psb.saveVSYNC_A :
1410 dev_priv->regs.psb.saveVSYNC_B;
1411 } 1572 }
1412 1573
1413 mode = kzalloc(sizeof(*mode), GFP_KERNEL); 1574 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
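
The dominant change in this file is mechanical: every per-pipe ternary of the form "(pipe == 0) ? REG_A : REG_B" becomes a lookup in dev_priv->regmap[pipe], a const struct psb_offset table filled in once at device setup. A minimal standalone sketch of the pattern follows; the field names mirror the ones used above (conf, cntr, dpll, ...), but the register offsets are illustrative values, not quoted from the driver headers.

	#include <stdint.h>

	/* One entry per pipe: each display register the code touches gets a
	 * slot, so callers index once instead of branching on every access. */
	struct pipe_regmap {
		uint32_t conf, cntr, dpll, htotal, vtotal, palette;
	};

	static const struct pipe_regmap regmap[2] = {
		[0] = { .conf = 0x70008, .cntr = 0x70180, .dpll = 0x6014,
			.htotal = 0x60000, .vtotal = 0x6000c, .palette = 0xa000 },
		[1] = { .conf = 0x71008, .cntr = 0x71180, .dpll = 0x6018,
			.htotal = 0x61000, .vtotal = 0x6100c, .palette = 0xa800 },
	};

	static uint32_t pipeconf_reg(int pipe)
	{
		const struct pipe_regmap *map = &regmap[pipe];
		return map->conf;  /* was: (pipe == 0) ? PIPEACONF : PIPEBCONF */
	}
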
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index 8d5269555005..88b59d4a7b7f 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -242,8 +242,6 @@ static int cdv_hdmi_get_modes(struct drm_connector *connector)
242static int cdv_hdmi_mode_valid(struct drm_connector *connector, 242static int cdv_hdmi_mode_valid(struct drm_connector *connector,
243 struct drm_display_mode *mode) 243 struct drm_display_mode *mode)
244{ 244{
245 struct drm_psb_private *dev_priv = connector->dev->dev_private;
246
247 if (mode->clock > 165000) 245 if (mode->clock > 165000)
248 return MODE_CLOCK_HIGH; 246 return MODE_CLOCK_HIGH;
249 if (mode->clock < 20000) 247 if (mode->clock < 20000)
@@ -257,11 +255,6 @@ static int cdv_hdmi_mode_valid(struct drm_connector *connector,
257 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 255 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
258 return MODE_NO_INTERLACE; 256 return MODE_NO_INTERLACE;
259 257
260 /* We assume worst case scenario of 32 bpp here, since we don't know */
261 if ((ALIGN(mode->hdisplay * 4, 64) * mode->vdisplay) >
262 dev_priv->vram_stolen_size)
263 return MODE_MEM;
264
265 return MODE_OK; 258 return MODE_OK;
266} 259}
267 260
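
The clock bounds kept above are the single-link TMDS limits (165000 kHz ceiling, with 20000 kHz as a practical floor). The stolen-memory test removed here is not lost: an equivalent check moves into psbfb_probe in framebuffer.c below, where the framebuffer is actually sized. A trivial standalone restatement of the surviving validation, with made-up names:

	/* Sketch: clock-range part of an HDMI mode_valid callback */
	static int hdmi_clock_ok(int clock_khz)
	{
		if (clock_khz > 165000)
			return 0;	/* MODE_CLOCK_HIGH: beyond single-link TMDS */
		if (clock_khz < 20000)
			return 0;	/* MODE_CLOCK_LOW */
		return 1;		/* MODE_OK */
	}
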
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index 8359c1a3f45f..ff5b58eb878c 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -356,6 +356,8 @@ static void cdv_intel_lvds_mode_set(struct drm_encoder *encoder,
356{ 356{
357 struct drm_device *dev = encoder->dev; 357 struct drm_device *dev = encoder->dev;
358 struct drm_psb_private *dev_priv = dev->dev_private; 358 struct drm_psb_private *dev_priv = dev->dev_private;
359 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(
360 encoder->crtc);
359 u32 pfit_control; 361 u32 pfit_control;
360 362
361 /* 363 /*
@@ -377,6 +379,8 @@ static void cdv_intel_lvds_mode_set(struct drm_encoder *encoder,
377 else 379 else
378 pfit_control = 0; 380 pfit_control = 0;
379 381
382 pfit_control |= psb_intel_crtc->pipe << PFIT_PIPE_SHIFT;
383
380 if (dev_priv->lvds_dither) 384 if (dev_priv->lvds_dither)
381 pfit_control |= PANEL_8TO6_DITHER_ENABLE; 385 pfit_control |= PANEL_8TO6_DITHER_ENABLE;
382 386
@@ -552,10 +556,60 @@ static void cdv_intel_lvds_enc_destroy(struct drm_encoder *encoder)
552 drm_encoder_cleanup(encoder); 556 drm_encoder_cleanup(encoder);
553} 557}
554 558
555const struct drm_encoder_funcs cdv_intel_lvds_enc_funcs = { 559static const struct drm_encoder_funcs cdv_intel_lvds_enc_funcs = {
556 .destroy = cdv_intel_lvds_enc_destroy, 560 .destroy = cdv_intel_lvds_enc_destroy,
557}; 561};
558 562
563/*
564 * Enumerate the child dev array parsed from VBT to check whether
565 * the LVDS is present.
 566 * If it is present, return true.
 567 * If it is not present, return false.
 568 * If no child dev is parsed from VBT, assume that the LVDS is present.
569 */
570static bool lvds_is_present_in_vbt(struct drm_device *dev,
571 u8 *i2c_pin)
572{
573 struct drm_psb_private *dev_priv = dev->dev_private;
574 int i;
575
576 if (!dev_priv->child_dev_num)
577 return true;
578
579 for (i = 0; i < dev_priv->child_dev_num; i++) {
580 struct child_device_config *child = dev_priv->child_dev + i;
581
582 /* If the device type is not LFP, continue.
 583 * We have to check both the new identifiers and the old
 584 * ones for compatibility with some BIOSes.
585 */
586 if (child->device_type != DEVICE_TYPE_INT_LFP &&
587 child->device_type != DEVICE_TYPE_LFP)
588 continue;
589
590 if (child->i2c_pin)
591 *i2c_pin = child->i2c_pin;
592
593 /* However, we cannot trust the BIOS writers to populate
594 * the VBT correctly. Since LVDS requires additional
595 * information from AIM blocks, a non-zero addin offset is
596 * a good indicator that the LVDS is actually present.
597 */
598 if (child->addin_offset)
599 return true;
600
601 /* But even then some BIOS writers perform some black magic
602 * and instantiate the device without reference to any
603 * additional data. Trust that if the VBT was written into
604 * the OpRegion then they have validated the LVDS's existence.
605 */
606 if (dev_priv->opregion.vbt)
607 return true;
608 }
609
610 return false;
611}
612
559/** 613/**
560 * cdv_intel_lvds_init - setup LVDS connectors on this device 614 * cdv_intel_lvds_init - setup LVDS connectors on this device
561 * @dev: drm device 615 * @dev: drm device
@@ -576,6 +630,13 @@ void cdv_intel_lvds_init(struct drm_device *dev,
576 struct drm_psb_private *dev_priv = dev->dev_private; 630 struct drm_psb_private *dev_priv = dev->dev_private;
577 u32 lvds; 631 u32 lvds;
578 int pipe; 632 int pipe;
633 u8 pin;
634
635 pin = GMBUS_PORT_PANEL;
636 if (!lvds_is_present_in_vbt(dev, &pin)) {
637 DRM_DEBUG_KMS("LVDS is not present in VBT\n");
638 return;
639 }
579 640
580 psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), 641 psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder),
581 GFP_KERNEL); 642 GFP_KERNEL);
@@ -710,6 +771,19 @@ void cdv_intel_lvds_init(struct drm_device *dev,
710 goto failed_find; 771 goto failed_find;
711 } 772 }
712 773
774 /* setup PWM */
775 {
776 u32 pwm;
777
778 pwm = REG_READ(BLC_PWM_CTL2);
779 if (pipe == 1)
780 pwm |= PWM_PIPE_B;
781 else
782 pwm &= ~PWM_PIPE_B;
783 pwm |= PWM_ENABLE;
784 REG_WRITE(BLC_PWM_CTL2, pwm);
785 }
786
713out: 787out:
714 drm_sysfs_connector_add(connector); 788 drm_sysfs_connector_add(connector);
715 return; 789 return;
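
The lvds_is_present_in_vbt() logic added above reduces to three outcomes: no child list means assume present; an LFP entry with AIM add-in data (or a VBT handed over via the OpRegion) means present; anything else means absent. A standalone sketch with simplified stand-in types; the real child_device_config and DEVICE_TYPE_* values come from intel_bios.h in this same series, and the i2c_pin bookkeeping is omitted.

	#include <stdbool.h>
	#include <stddef.h>

	struct child { int device_type; int addin_offset; };
	#define TYPE_INT_LFP 0x1022	/* 915+ identifier */
	#define TYPE_LFP     0x22	/* pre-915 identifier */

	static bool lvds_in_vbt(const struct child *devs, size_t n,
				bool opregion_vbt)
	{
		if (n == 0)
			return true;		/* no child list: assume present */
		for (size_t i = 0; i < n; i++) {
			if (devs[i].device_type != TYPE_INT_LFP &&
			    devs[i].device_type != TYPE_LFP)
				continue;
			/* AIM add-in data, or a VBT delivered through the
			 * OpRegion, is taken as proof the entry is real */
			if (devs[i].addin_offset || opregion_vbt)
				return true;
		}
		return false;		/* list exists but no believable LFP */
	}
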
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 8ea202f1ba50..5732b5702e1c 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -153,7 +153,7 @@ static void psbfb_vm_close(struct vm_area_struct *vma)
153{ 153{
154} 154}
155 155
156static struct vm_operations_struct psbfb_vm_ops = { 156static const struct vm_operations_struct psbfb_vm_ops = {
157 .fault = psbfb_vm_fault, 157 .fault = psbfb_vm_fault,
158 .open = psbfb_vm_open, 158 .open = psbfb_vm_open,
159 .close = psbfb_vm_close 159 .close = psbfb_vm_close
@@ -408,6 +408,8 @@ static int psbfb_create(struct psb_fbdev *fbdev,
408 return -ENOMEM; 408 return -ENOMEM;
409 } 409 }
410 410
411 memset(dev_priv->vram_addr + backing->offset, 0, size);
412
411 mutex_lock(&dev->struct_mutex); 413 mutex_lock(&dev->struct_mutex);
412 414
413 info = framebuffer_alloc(0, device); 415 info = framebuffer_alloc(0, device);
@@ -453,8 +455,7 @@ static int psbfb_create(struct psb_fbdev *fbdev,
453 info->fix.ypanstep = 0; 455 info->fix.ypanstep = 0;
454 456
455 /* Accessed stolen memory directly */ 457 /* Accessed stolen memory directly */
456 info->screen_base = (char *)dev_priv->vram_addr + 458 info->screen_base = dev_priv->vram_addr + backing->offset;
457 backing->offset;
458 info->screen_size = size; 459 info->screen_size = size;
459 460
460 if (dev_priv->gtt.stolen_size) { 461 if (dev_priv->gtt.stolen_size) {
@@ -475,7 +476,7 @@ static int psbfb_create(struct psb_fbdev *fbdev,
475 476
476 /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ 477 /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
477 478
478 dev_info(dev->dev, "allocated %dx%d fb\n", 479 dev_dbg(dev->dev, "allocated %dx%d fb\n",
479 psbfb->base.width, psbfb->base.height); 480 psbfb->base.width, psbfb->base.height);
480 481
481 mutex_unlock(&dev->struct_mutex); 482 mutex_unlock(&dev->struct_mutex);
@@ -543,9 +544,25 @@ static int psbfb_probe(struct drm_fb_helper *helper,
543 struct drm_fb_helper_surface_size *sizes) 544 struct drm_fb_helper_surface_size *sizes)
544{ 545{
545 struct psb_fbdev *psb_fbdev = (struct psb_fbdev *)helper; 546 struct psb_fbdev *psb_fbdev = (struct psb_fbdev *)helper;
547 struct drm_device *dev = psb_fbdev->psb_fb_helper.dev;
548 struct drm_psb_private *dev_priv = dev->dev_private;
546 int new_fb = 0; 549 int new_fb = 0;
550 int bytespp;
547 int ret; 551 int ret;
548 552
553 bytespp = sizes->surface_bpp / 8;
554 if (bytespp == 3) /* no 24bit packed */
555 bytespp = 4;
556
557 /* If the mode will not fit in 32bit then switch to 16bit to get
 558 a console at full resolution. The X mode-setting server will
559 allocate its own 32bit GEM framebuffer */
560 if (ALIGN(sizes->fb_width * bytespp, 64) * sizes->fb_height >
561 dev_priv->vram_stolen_size) {
562 sizes->surface_bpp = 16;
563 sizes->surface_depth = 16;
564 }
565
549 if (!helper->fb) { 566 if (!helper->fb) {
550 ret = psbfb_create(psb_fbdev, sizes); 567 ret = psbfb_create(psb_fbdev, sizes);
551 if (ret) 568 if (ret)
@@ -555,7 +572,7 @@ static int psbfb_probe(struct drm_fb_helper *helper,
555 return new_fb; 572 return new_fb;
556} 573}
557 574
558struct drm_fb_helper_funcs psb_fb_helper_funcs = { 575static struct drm_fb_helper_funcs psb_fb_helper_funcs = {
559 .gamma_set = psbfb_gamma_set, 576 .gamma_set = psbfb_gamma_set,
560 .gamma_get = psbfb_gamma_get, 577 .gamma_get = psbfb_gamma_get,
561 .fb_probe = psbfb_probe, 578 .fb_probe = psbfb_probe,
@@ -732,10 +749,7 @@ static void psb_setup_outputs(struct drm_device *dev)
732 clone_mask = (1 << INTEL_OUTPUT_SDVO); 749 clone_mask = (1 << INTEL_OUTPUT_SDVO);
733 break; 750 break;
734 case INTEL_OUTPUT_LVDS: 751 case INTEL_OUTPUT_LVDS:
735 if (IS_MRST(dev)) 752 crtc_mask = dev_priv->ops->lvds_mask;
736 crtc_mask = (1 << 0);
737 else
738 crtc_mask = (1 << 1);
739 clone_mask = (1 << INTEL_OUTPUT_LVDS); 753 clone_mask = (1 << INTEL_OUTPUT_LVDS);
740 break; 754 break;
741 case INTEL_OUTPUT_MIPI: 755 case INTEL_OUTPUT_MIPI:
@@ -747,10 +761,7 @@ static void psb_setup_outputs(struct drm_device *dev)
747 clone_mask = (1 << INTEL_OUTPUT_MIPI2); 761 clone_mask = (1 << INTEL_OUTPUT_MIPI2);
748 break; 762 break;
749 case INTEL_OUTPUT_HDMI: 763 case INTEL_OUTPUT_HDMI:
750 if (IS_MFLD(dev)) 764 crtc_mask = dev_priv->ops->hdmi_mask;
751 crtc_mask = (1 << 1);
752 else
753 crtc_mask = (1 << 0);
754 clone_mask = (1 << INTEL_OUTPUT_HDMI); 765 clone_mask = (1 << INTEL_OUTPUT_HDMI);
755 break; 766 break;
756 } 767 }
@@ -771,7 +782,7 @@ void psb_modeset_init(struct drm_device *dev)
771 dev->mode_config.min_width = 0; 782 dev->mode_config.min_width = 0;
772 dev->mode_config.min_height = 0; 783 dev->mode_config.min_height = 0;
773 784
774 dev->mode_config.funcs = (void *) &psb_mode_funcs; 785 dev->mode_config.funcs = &psb_mode_funcs;
775 786
776 /* set memory base */ 787 /* set memory base */
777 /* Oaktrail and Poulsbo should use BAR 2*/ 788 /* Oaktrail and Poulsbo should use BAR 2*/
@@ -786,15 +797,23 @@ void psb_modeset_init(struct drm_device *dev)
786 dev->mode_config.max_height = 2048; 797 dev->mode_config.max_height = 2048;
787 798
788 psb_setup_outputs(dev); 799 psb_setup_outputs(dev);
800
801 if (dev_priv->ops->errata)
802 dev_priv->ops->errata(dev);
803
804 dev_priv->modeset = true;
789} 805}
790 806
791void psb_modeset_cleanup(struct drm_device *dev) 807void psb_modeset_cleanup(struct drm_device *dev)
792{ 808{
793 mutex_lock(&dev->struct_mutex); 809 struct drm_psb_private *dev_priv = dev->dev_private;
810 if (dev_priv->modeset) {
811 mutex_lock(&dev->struct_mutex);
794 812
795 drm_kms_helper_poll_fini(dev); 813 drm_kms_helper_poll_fini(dev);
796 psb_fbdev_fini(dev); 814 psb_fbdev_fini(dev);
797 drm_mode_config_cleanup(dev); 815 drm_mode_config_cleanup(dev);
798 816
799 mutex_unlock(&dev->struct_mutex); 817 mutex_unlock(&dev->struct_mutex);
818 }
800} 819}
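
The new psbfb_probe check is plain arithmetic: the stride is the width in bytes rounded up to 64, and the 32bpp console must fit in stolen memory or the probe drops to 16bpp. A standalone worked example; the 8 MiB stolen size is illustrative, not from the patch.

	#include <stdio.h>

	#define ALIGN64(x) (((x) + 63UL) & ~63UL)

	int main(void)
	{
		unsigned long stolen = 8UL << 20;	/* example: 8 MiB stolen */
		unsigned long w = 1920, h = 1080;
		unsigned long bytespp = 4;	/* packed 24bpp is bumped to 4 bytes */
		unsigned long need = ALIGN64(w * bytespp) * h;

		/* 1920*4 = 7680 bytes/line, already 64-aligned; times 1080 is
		 * about 7.9 MiB, so 32bpp fits here. At 2048x2048 the same math
		 * needs 16 MiB and the probe would fall back to 16bpp. */
		printf("need %lu, stolen %lu -> %s\n", need, stolen,
		       need > stolen ? "16bpp fallback" : "32bpp ok");
		return 0;
	}
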
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index 9fbb86868e2e..fc7d144bc2d3 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -124,6 +124,8 @@ static int psb_gem_create(struct drm_file *file,
124 dev_err(dev->dev, "GEM init failed for %lld\n", size); 124 dev_err(dev->dev, "GEM init failed for %lld\n", size);
125 return -ENOMEM; 125 return -ENOMEM;
126 } 126 }
127 /* Limit the object to 32bit mappings */
128 mapping_set_gfp_mask(r->gem.filp->f_mapping, GFP_KERNEL | __GFP_DMA32);
127 /* Give the object a handle so we can carry it more easily */ 129 /* Give the object a handle so we can carry it more easily */
128 ret = drm_gem_handle_create(file, &r->gem, &handle); 130 ret = drm_gem_handle_create(file, &r->gem, &handle);
129 if (ret) { 131 if (ret) {
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index c6465b40090f..04a371aceb34 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -39,6 +39,10 @@ static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
39{ 39{
40 uint32_t mask = PSB_PTE_VALID; 40 uint32_t mask = PSB_PTE_VALID;
41 41
42 /* Ensure we explode rather than put an invalid low mapping of
43 a high mapping page into the gtt */
44 BUG_ON(pfn & ~(0xFFFFFFFF >> PAGE_SHIFT));
45
42 if (type & PSB_MMU_CACHED_MEMORY) 46 if (type & PSB_MMU_CACHED_MEMORY)
43 mask |= PSB_PTE_CACHED; 47 mask |= PSB_PTE_CACHED;
44 if (type & PSB_MMU_RO_MEMORY) 48 if (type & PSB_MMU_RO_MEMORY)
@@ -57,7 +61,7 @@ static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
57 * Given a gtt_range object return the GTT offset of the page table 61 * Given a gtt_range object return the GTT offset of the page table
58 * entries for this gtt_range 62 * entries for this gtt_range
59 */ 63 */
60static u32 *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r) 64static u32 __iomem *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
61{ 65{
62 struct drm_psb_private *dev_priv = dev->dev_private; 66 struct drm_psb_private *dev_priv = dev->dev_private;
63 unsigned long offset; 67 unsigned long offset;
@@ -78,7 +82,8 @@ static u32 *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
78 */ 82 */
79static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r) 83static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r)
80{ 84{
81 u32 *gtt_slot, pte; 85 u32 __iomem *gtt_slot;
86 u32 pte;
82 struct page **pages; 87 struct page **pages;
83 int i; 88 int i;
84 89
@@ -93,7 +98,7 @@ static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r)
93 pages = r->pages; 98 pages = r->pages;
94 99
95 /* Make sure changes are visible to the GPU */ 100 /* Make sure changes are visible to the GPU */
96 set_pages_array_uc(pages, r->npage); 101 set_pages_array_wc(pages, r->npage);
97 102
98 /* Write our page entries into the GTT itself */ 103 /* Write our page entries into the GTT itself */
99 for (i = r->roll; i < r->npage; i++) { 104 for (i = r->roll; i < r->npage; i++) {
@@ -122,7 +127,8 @@ static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r)
122static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r) 127static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
123{ 128{
124 struct drm_psb_private *dev_priv = dev->dev_private; 129 struct drm_psb_private *dev_priv = dev->dev_private;
125 u32 *gtt_slot, pte; 130 u32 __iomem *gtt_slot;
131 u32 pte;
126 int i; 132 int i;
127 133
128 WARN_ON(r->stolen); 134 WARN_ON(r->stolen);
@@ -148,7 +154,8 @@ static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
148 */ 154 */
149void psb_gtt_roll(struct drm_device *dev, struct gtt_range *r, int roll) 155void psb_gtt_roll(struct drm_device *dev, struct gtt_range *r, int roll)
150{ 156{
151 u32 *gtt_slot, pte; 157 u32 __iomem *gtt_slot;
158 u32 pte;
152 int i; 159 int i;
153 160
154 if (roll >= r->npage) { 161 if (roll >= r->npage) {
@@ -409,8 +416,6 @@ int psb_gtt_init(struct drm_device *dev, int resume)
409 unsigned long stolen_size, vram_stolen_size; 416 unsigned long stolen_size, vram_stolen_size;
410 unsigned i, num_pages; 417 unsigned i, num_pages;
411 unsigned pfn_base; 418 unsigned pfn_base;
412 uint32_t vram_pages;
413 uint32_t dvmt_mode = 0;
414 struct psb_gtt *pg; 419 struct psb_gtt *pg;
415 420
416 int ret = 0; 421 int ret = 0;
@@ -483,13 +488,8 @@ int psb_gtt_init(struct drm_device *dev, int resume)
483 488
484 stolen_size = vram_stolen_size; 489 stolen_size = vram_stolen_size;
485 490
486 printk(KERN_INFO "Stolen memory information\n"); 491 dev_dbg(dev->dev, "Stolen memory base 0x%x, size %luK\n",
487 printk(KERN_INFO " base in RAM: 0x%x\n", dev_priv->stolen_base); 492 dev_priv->stolen_base, vram_stolen_size / 1024);
488 printk(KERN_INFO " size: %luK, calculated by (GTT RAM base) - (Stolen base), seems wrong\n",
489 vram_stolen_size/1024);
490 dvmt_mode = (dev_priv->gmch_ctrl >> 4) & 0x7;
491 printk(KERN_INFO " the correct size should be: %dM(dvmt mode=%d)\n",
492 (dvmt_mode == 1) ? 1 : (2 << (dvmt_mode - 1)), dvmt_mode);
493 493
494 if (resume && (gtt_pages != pg->gtt_pages) && 494 if (resume && (gtt_pages != pg->gtt_pages) &&
495 (stolen_size != pg->stolen_size)) { 495 (stolen_size != pg->stolen_size)) {
@@ -525,8 +525,8 @@ int psb_gtt_init(struct drm_device *dev, int resume)
525 */ 525 */
526 526
527 pfn_base = dev_priv->stolen_base >> PAGE_SHIFT; 527 pfn_base = dev_priv->stolen_base >> PAGE_SHIFT;
528 vram_pages = num_pages = vram_stolen_size >> PAGE_SHIFT; 528 num_pages = vram_stolen_size >> PAGE_SHIFT;
529 printk(KERN_INFO"Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n", 529 dev_dbg(dev->dev, "Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
530 num_pages, pfn_base << PAGE_SHIFT, 0); 530 num_pages, pfn_base << PAGE_SHIFT, 0);
531 for (i = 0; i < num_pages; ++i) { 531 for (i = 0; i < num_pages; ++i) {
532 pte = psb_gtt_mask_pte(pfn_base + i, 0); 532 pte = psb_gtt_mask_pte(pfn_base + i, 0);
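
The BUG_ON added to psb_gtt_mask_pte guards a real truncation hazard: the GTT entry is 32 bits wide, so with 4 KiB pages any pfn at or above 1 << (32 - PAGE_SHIFT) would silently alias a low physical address once shifted in (the gem.c hunk above enforces the same constraint at allocation time with __GFP_DMA32). A standalone sketch of the mask arithmetic; the PSB_PTE_VALID value here is illustrative.

	#include <assert.h>
	#include <stdint.h>

	#define PAGE_SHIFT    12
	#define PSB_PTE_VALID 0x0001u	/* illustrative bit value */

	static uint32_t mask_pte(uint64_t pfn)
	{
		/* 0xFFFFFFFF >> 12 == 0xFFFFF: the largest pfn whose page
		 * address still fits in a 32-bit entry */
		assert(!(pfn & ~(0xFFFFFFFFull >> PAGE_SHIFT)));
		return (uint32_t)(pfn << PAGE_SHIFT) | PSB_PTE_VALID;
	}
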
diff --git a/drivers/gpu/drm/gma500/intel_bios.c b/drivers/gpu/drm/gma500/intel_bios.c
index d4d0c5b8bf91..973d7f6d66b7 100644
--- a/drivers/gpu/drm/gma500/intel_bios.c
+++ b/drivers/gpu/drm/gma500/intel_bios.c
@@ -26,6 +26,8 @@
26#include "psb_intel_reg.h" 26#include "psb_intel_reg.h"
27#include "intel_bios.h" 27#include "intel_bios.h"
28 28
29#define SLAVE_ADDR1 0x70
30#define SLAVE_ADDR2 0x72
29 31
30static void *find_section(struct bdb_header *bdb, int section_id) 32static void *find_section(struct bdb_header *bdb, int section_id)
31{ 33{
@@ -52,6 +54,16 @@ static void *find_section(struct bdb_header *bdb, int section_id)
52 return NULL; 54 return NULL;
53} 55}
54 56
57static u16
58get_blocksize(void *p)
59{
60 u16 *block_ptr, block_size;
61
62 block_ptr = (u16 *)((char *)p - 2);
63 block_size = *block_ptr;
64 return block_size;
65}
66
55static void fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode, 67static void fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
56 struct lvds_dvo_timing *dvo_timing) 68 struct lvds_dvo_timing *dvo_timing)
57{ 69{
@@ -75,6 +87,16 @@ static void fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
75 panel_fixed_mode->clock = dvo_timing->clock * 10; 87 panel_fixed_mode->clock = dvo_timing->clock * 10;
76 panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED; 88 panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED;
77 89
90 if (dvo_timing->hsync_positive)
91 panel_fixed_mode->flags |= DRM_MODE_FLAG_PHSYNC;
92 else
93 panel_fixed_mode->flags |= DRM_MODE_FLAG_NHSYNC;
94
95 if (dvo_timing->vsync_positive)
96 panel_fixed_mode->flags |= DRM_MODE_FLAG_PVSYNC;
97 else
98 panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC;
99
78 /* Some VBTs have bogus h/vtotal values */ 100 /* Some VBTs have bogus h/vtotal values */
79 if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal) 101 if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal)
80 panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1; 102 panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
@@ -217,6 +239,180 @@ static void parse_general_features(struct drm_psb_private *dev_priv,
217 } 239 }
218} 240}
219 241
242static void
243parse_sdvo_device_mapping(struct drm_psb_private *dev_priv,
244 struct bdb_header *bdb)
245{
246 struct sdvo_device_mapping *p_mapping;
247 struct bdb_general_definitions *p_defs;
248 struct child_device_config *p_child;
249 int i, child_device_num, count;
250 u16 block_size;
251
252 p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
253 if (!p_defs) {
254 DRM_DEBUG_KMS("No general definition block is found, unable to construct sdvo mapping.\n");
255 return;
256 }
 257 /* Check whether the size of the child device entries meets expectations.
 258 * If the child device size obtained from the general definitions block
 259 * differs from sizeof(struct child_device_config), skip the
 260 * parsing of the SDVO device info.
 261 */
262 if (p_defs->child_dev_size != sizeof(*p_child)) {
 263 /* different child dev size. Ignore it */
264 DRM_DEBUG_KMS("different child size is found. Invalid.\n");
265 return;
266 }
267 /* get the block size of general definitions */
268 block_size = get_blocksize(p_defs);
269 /* get the number of child device */
270 child_device_num = (block_size - sizeof(*p_defs)) /
271 sizeof(*p_child);
272 count = 0;
273 for (i = 0; i < child_device_num; i++) {
274 p_child = &(p_defs->devices[i]);
275 if (!p_child->device_type) {
276 /* skip the device block if device type is invalid */
277 continue;
278 }
279 if (p_child->slave_addr != SLAVE_ADDR1 &&
280 p_child->slave_addr != SLAVE_ADDR2) {
281 /*
282 * If the slave address is neither 0x70 nor 0x72,
283 * it is not a SDVO device. Skip it.
284 */
285 continue;
286 }
287 if (p_child->dvo_port != DEVICE_PORT_DVOB &&
288 p_child->dvo_port != DEVICE_PORT_DVOC) {
289 /* skip the incorrect SDVO port */
290 DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n");
291 continue;
292 }
 293 DRM_DEBUG_KMS("SDVO device with slave addr %2x found on"
294 " %s port\n",
295 p_child->slave_addr,
296 (p_child->dvo_port == DEVICE_PORT_DVOB) ?
297 "SDVOB" : "SDVOC");
298 p_mapping = &(dev_priv->sdvo_mappings[p_child->dvo_port - 1]);
299 if (!p_mapping->initialized) {
300 p_mapping->dvo_port = p_child->dvo_port;
301 p_mapping->slave_addr = p_child->slave_addr;
302 p_mapping->dvo_wiring = p_child->dvo_wiring;
303 p_mapping->ddc_pin = p_child->ddc_pin;
304 p_mapping->i2c_pin = p_child->i2c_pin;
305 p_mapping->initialized = 1;
306 DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n",
307 p_mapping->dvo_port,
308 p_mapping->slave_addr,
309 p_mapping->dvo_wiring,
310 p_mapping->ddc_pin,
311 p_mapping->i2c_pin);
312 } else {
313 DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
 314 "two SDVO devices.\n");
315 }
316 if (p_child->slave2_addr) {
317 /* Maybe this is a SDVO device with multiple inputs */
318 /* And the mapping info is not added */
319 DRM_DEBUG_KMS("there exists the slave2_addr. Maybe this"
320 " is a SDVO device with multiple inputs.\n");
321 }
322 count++;
323 }
324
325 if (!count) {
326 /* No SDVO device info is found */
327 DRM_DEBUG_KMS("No SDVO device info is found in VBT\n");
328 }
329 return;
330}
331
332
333static void
334parse_driver_features(struct drm_psb_private *dev_priv,
335 struct bdb_header *bdb)
336{
337 struct bdb_driver_features *driver;
338
339 driver = find_section(bdb, BDB_DRIVER_FEATURES);
340 if (!driver)
341 return;
342
 343 /* This bit selects whether to use 96MHz for DPLL_A */
344 if (driver->primary_lfp_id)
345 dev_priv->dplla_96mhz = true;
346 else
347 dev_priv->dplla_96mhz = false;
348}
349
350static void
351parse_device_mapping(struct drm_psb_private *dev_priv,
352 struct bdb_header *bdb)
353{
354 struct bdb_general_definitions *p_defs;
355 struct child_device_config *p_child, *child_dev_ptr;
356 int i, child_device_num, count;
357 u16 block_size;
358
359 p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
360 if (!p_defs) {
361 DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
362 return;
363 }
 364 /* Check whether the size of the child device entries meets expectations.
 365 * If the child device size obtained from the general definitions block
 366 * differs from sizeof(struct child_device_config), skip the
 367 * parsing of the device info.
 368 */
369 if (p_defs->child_dev_size != sizeof(*p_child)) {
 370 /* different child dev size. Ignore it */
371 DRM_DEBUG_KMS("different child size is found. Invalid.\n");
372 return;
373 }
374 /* get the block size of general definitions */
375 block_size = get_blocksize(p_defs);
376 /* get the number of child device */
377 child_device_num = (block_size - sizeof(*p_defs)) /
378 sizeof(*p_child);
379 count = 0;
380 /* get the number of child devices that are present */
381 for (i = 0; i < child_device_num; i++) {
382 p_child = &(p_defs->devices[i]);
383 if (!p_child->device_type) {
384 /* skip the device block if device type is invalid */
385 continue;
386 }
387 count++;
388 }
389 if (!count) {
390 DRM_DEBUG_KMS("no child dev is parsed from VBT\n");
391 return;
392 }
393 dev_priv->child_dev = kcalloc(count, sizeof(*p_child), GFP_KERNEL);
394 if (!dev_priv->child_dev) {
395 DRM_DEBUG_KMS("No memory space for child devices\n");
396 return;
397 }
398
399 dev_priv->child_dev_num = count;
400 count = 0;
401 for (i = 0; i < child_device_num; i++) {
402 p_child = &(p_defs->devices[i]);
403 if (!p_child->device_type) {
404 /* skip the device block if device type is invalid */
405 continue;
406 }
407 child_dev_ptr = dev_priv->child_dev + count;
408 count++;
409 memcpy((void *)child_dev_ptr, (void *)p_child,
410 sizeof(*p_child));
411 }
412 return;
413}
414
415
220/** 416/**
221 * psb_intel_init_bios - initialize VBIOS settings & find VBT 417 * psb_intel_init_bios - initialize VBIOS settings & find VBT
222 * @dev: DRM device 418 * @dev: DRM device
@@ -236,38 +432,54 @@ bool psb_intel_init_bios(struct drm_device *dev)
236 struct drm_psb_private *dev_priv = dev->dev_private; 432 struct drm_psb_private *dev_priv = dev->dev_private;
237 struct pci_dev *pdev = dev->pdev; 433 struct pci_dev *pdev = dev->pdev;
238 struct vbt_header *vbt = NULL; 434 struct vbt_header *vbt = NULL;
239 struct bdb_header *bdb; 435 struct bdb_header *bdb = NULL;
240 u8 __iomem *bios; 436 u8 __iomem *bios = NULL;
241 size_t size; 437 size_t size;
242 int i; 438 int i;
243 439
244 bios = pci_map_rom(pdev, &size); 440 /* XXX Should this validation be moved to intel_opregion.c? */
245 if (!bios) 441 if (dev_priv->opregion.vbt) {
246 return -1; 442 struct vbt_header *vbt = dev_priv->opregion.vbt;
443 if (memcmp(vbt->signature, "$VBT", 4) == 0) {
444 DRM_DEBUG_KMS("Using VBT from OpRegion: %20s\n",
445 vbt->signature);
446 bdb = (struct bdb_header *)((char *)vbt + vbt->bdb_offset);
447 } else
448 dev_priv->opregion.vbt = NULL;
449 }
247 450
248 /* Scour memory looking for the VBT signature */ 451 if (bdb == NULL) {
249 for (i = 0; i + 4 < size; i++) { 452 bios = pci_map_rom(pdev, &size);
250 if (!memcmp(bios + i, "$VBT", 4)) { 453 if (!bios)
251 vbt = (struct vbt_header *)(bios + i); 454 return -1;
252 break; 455
456 /* Scour memory looking for the VBT signature */
457 for (i = 0; i + 4 < size; i++) {
458 if (!memcmp(bios + i, "$VBT", 4)) {
459 vbt = (struct vbt_header *)(bios + i);
460 break;
461 }
253 } 462 }
254 }
255 463
256 if (!vbt) { 464 if (!vbt) {
257 dev_err(dev->dev, "VBT signature missing\n"); 465 dev_err(dev->dev, "VBT signature missing\n");
258 pci_unmap_rom(pdev, bios); 466 pci_unmap_rom(pdev, bios);
259 return -1; 467 return -1;
468 }
469 bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
260 } 470 }
261 471
262 bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset); 472 /* Grab useful general definitions */
263
264 /* Grab useful general definitions */
265 parse_general_features(dev_priv, bdb); 473 parse_general_features(dev_priv, bdb);
474 parse_driver_features(dev_priv, bdb);
266 parse_lfp_panel_data(dev_priv, bdb); 475 parse_lfp_panel_data(dev_priv, bdb);
267 parse_sdvo_panel_data(dev_priv, bdb); 476 parse_sdvo_panel_data(dev_priv, bdb);
477 parse_sdvo_device_mapping(dev_priv, bdb);
478 parse_device_mapping(dev_priv, bdb);
268 parse_backlight_data(dev_priv, bdb); 479 parse_backlight_data(dev_priv, bdb);
269 480
270 pci_unmap_rom(pdev, bios); 481 if (bios)
482 pci_unmap_rom(pdev, bios);
271 483
272 return 0; 484 return 0;
273} 485}
@@ -278,26 +490,8 @@ bool psb_intel_init_bios(struct drm_device *dev)
278void psb_intel_destroy_bios(struct drm_device *dev) 490void psb_intel_destroy_bios(struct drm_device *dev)
279{ 491{
280 struct drm_psb_private *dev_priv = dev->dev_private; 492 struct drm_psb_private *dev_priv = dev->dev_private;
281 struct drm_display_mode *sdvo_lvds_vbt_mode =
282 dev_priv->sdvo_lvds_vbt_mode;
283 struct drm_display_mode *lfp_lvds_vbt_mode =
284 dev_priv->lfp_lvds_vbt_mode;
285 struct bdb_lvds_backlight *lvds_bl =
286 dev_priv->lvds_bl;
287
288 /*free sdvo panel mode*/
289 if (sdvo_lvds_vbt_mode) {
290 dev_priv->sdvo_lvds_vbt_mode = NULL;
291 kfree(sdvo_lvds_vbt_mode);
292 }
293 493
294 if (lfp_lvds_vbt_mode) { 494 kfree(dev_priv->sdvo_lvds_vbt_mode);
295 dev_priv->lfp_lvds_vbt_mode = NULL; 495 kfree(dev_priv->lfp_lvds_vbt_mode);
296 kfree(lfp_lvds_vbt_mode); 496 kfree(dev_priv->lvds_bl);
297 }
298
299 if (lvds_bl) {
300 dev_priv->lvds_bl = NULL;
301 kfree(lvds_bl);
302 }
303} 497}
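
Both new parsers size the child device array the same way: the u16 stored immediately before the general-definitions payload (read by get_blocksize() above) gives the block size, and the entry count falls out by division. A standalone worked example; the 33-byte child size matches the old u8 dev1[33] arrays this series replaces, while the header size used here is only a stand-in for sizeof(*p_defs).

	#include <stdio.h>

	int main(void)
	{
		unsigned block_size = 22 + 4 * 33;	/* header + 4 children (example) */
		unsigned defs_header = 22;		/* stand-in for sizeof(*p_defs) */
		unsigned child_size = 33;		/* sizeof(child_device_config) */

		/* same formula as parse_device_mapping() above */
		printf("%u child devices\n", (block_size - defs_header) / child_size);
		return 0;
	}
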
diff --git a/drivers/gpu/drm/gma500/intel_bios.h b/drivers/gpu/drm/gma500/intel_bios.h
index 70f1bf018183..0a738663eb5a 100644
--- a/drivers/gpu/drm/gma500/intel_bios.h
+++ b/drivers/gpu/drm/gma500/intel_bios.h
@@ -127,9 +127,93 @@ struct bdb_general_features {
127 /* bits 5 */ 127 /* bits 5 */
128 u8 int_crt_support:1; 128 u8 int_crt_support:1;
129 u8 int_tv_support:1; 129 u8 int_tv_support:1;
130 u8 rsvd11:6; /* finish byte */ 130 u8 int_efp_support:1;
131 u8 dp_ssc_enb:1; /* PCH attached eDP supports SSC */
132 u8 dp_ssc_freq:1; /* SSC freq for PCH attached eDP */
133 u8 rsvd11:3; /* finish byte */
131} __attribute__((packed)); 134} __attribute__((packed));
132 135
136/* pre-915 */
137#define GPIO_PIN_DVI_LVDS 0x03 /* "DVI/LVDS DDC GPIO pins" */
138#define GPIO_PIN_ADD_I2C 0x05 /* "ADDCARD I2C GPIO pins" */
139#define GPIO_PIN_ADD_DDC 0x04 /* "ADDCARD DDC GPIO pins" */
140#define GPIO_PIN_ADD_DDC_I2C 0x06 /* "ADDCARD DDC/I2C GPIO pins" */
141
142/* Pre 915 */
143#define DEVICE_TYPE_NONE 0x00
144#define DEVICE_TYPE_CRT 0x01
145#define DEVICE_TYPE_TV 0x09
146#define DEVICE_TYPE_EFP 0x12
147#define DEVICE_TYPE_LFP 0x22
148/* On 915+ */
149#define DEVICE_TYPE_CRT_DPMS 0x6001
150#define DEVICE_TYPE_CRT_DPMS_HOTPLUG 0x4001
151#define DEVICE_TYPE_TV_COMPOSITE 0x0209
152#define DEVICE_TYPE_TV_MACROVISION 0x0289
153#define DEVICE_TYPE_TV_RF_COMPOSITE 0x020c
154#define DEVICE_TYPE_TV_SVIDEO_COMPOSITE 0x0609
155#define DEVICE_TYPE_TV_SCART 0x0209
156#define DEVICE_TYPE_TV_CODEC_HOTPLUG_PWR 0x6009
157#define DEVICE_TYPE_EFP_HOTPLUG_PWR 0x6012
158#define DEVICE_TYPE_EFP_DVI_HOTPLUG_PWR 0x6052
159#define DEVICE_TYPE_EFP_DVI_I 0x6053
160#define DEVICE_TYPE_EFP_DVI_D_DUAL 0x6152
161#define DEVICE_TYPE_EFP_DVI_D_HDCP 0x60d2
162#define DEVICE_TYPE_OPENLDI_HOTPLUG_PWR 0x6062
163#define DEVICE_TYPE_OPENLDI_DUALPIX 0x6162
164#define DEVICE_TYPE_LFP_PANELLINK 0x5012
165#define DEVICE_TYPE_LFP_CMOS_PWR 0x5042
166#define DEVICE_TYPE_LFP_LVDS_PWR 0x5062
167#define DEVICE_TYPE_LFP_LVDS_DUAL 0x5162
168#define DEVICE_TYPE_LFP_LVDS_DUAL_HDCP 0x51e2
169
170#define DEVICE_CFG_NONE 0x00
171#define DEVICE_CFG_12BIT_DVOB 0x01
172#define DEVICE_CFG_12BIT_DVOC 0x02
173#define DEVICE_CFG_24BIT_DVOBC 0x09
174#define DEVICE_CFG_24BIT_DVOCB 0x0a
175#define DEVICE_CFG_DUAL_DVOB 0x11
176#define DEVICE_CFG_DUAL_DVOC 0x12
177#define DEVICE_CFG_DUAL_DVOBC 0x13
178#define DEVICE_CFG_DUAL_LINK_DVOBC 0x19
179#define DEVICE_CFG_DUAL_LINK_DVOCB 0x1a
180
181#define DEVICE_WIRE_NONE 0x00
182#define DEVICE_WIRE_DVOB 0x01
183#define DEVICE_WIRE_DVOC 0x02
184#define DEVICE_WIRE_DVOBC 0x03
185#define DEVICE_WIRE_DVOBB 0x05
186#define DEVICE_WIRE_DVOCC 0x06
187#define DEVICE_WIRE_DVOB_MASTER 0x0d
188#define DEVICE_WIRE_DVOC_MASTER 0x0e
189
190#define DEVICE_PORT_DVOA 0x00 /* none on 845+ */
191#define DEVICE_PORT_DVOB 0x01
192#define DEVICE_PORT_DVOC 0x02
193
194struct child_device_config {
195 u16 handle;
196 u16 device_type;
197 u8 device_id[10]; /* ascii string */
198 u16 addin_offset;
 199 u8 dvo_port; /* See DEVICE_PORT_* above */
200 u8 i2c_pin;
201 u8 slave_addr;
202 u8 ddc_pin;
203 u16 edid_ptr;
204 u8 dvo_cfg; /* See DEVICE_CFG_* above */
205 u8 dvo2_port;
206 u8 i2c2_pin;
207 u8 slave2_addr;
208 u8 ddc2_pin;
209 u8 capabilities;
210 u8 dvo_wiring;/* See DEVICE_WIRE_* above */
211 u8 dvo2_wiring;
212 u16 extended_type;
213 u8 dvo_function;
214} __attribute__((packed));
215
216
133struct bdb_general_definitions { 217struct bdb_general_definitions {
134 /* DDC GPIO */ 218 /* DDC GPIO */
135 u8 crt_ddc_gmbus_pin; 219 u8 crt_ddc_gmbus_pin;
@@ -144,13 +228,18 @@ struct bdb_general_definitions {
144 u8 boot_display[2]; 228 u8 boot_display[2];
145 u8 child_dev_size; 229 u8 child_dev_size;
146 230
147 /* device info */ 231 /*
148 u8 tv_or_lvds_info[33]; 232 * Device info:
149 u8 dev1[33]; 233 * If TV is present, it'll be at devices[0].
150 u8 dev2[33]; 234 * LVDS will be next, either devices[0] or [1], if present.
151 u8 dev3[33]; 235 * On some platforms the number of devices is 6, but it could be as few as
152 u8 dev4[33]; 236 * 4 if both TV and LVDS are missing.
153 /* may be another device block here on some platforms */ 237 * The device count is derived from the size of the general definition
238 * block. It is obtained by using the following formula:
239 * number = (block_size - sizeof(bdb_general_definitions))/
240 * sizeof(child_device_config);
241 */
242 struct child_device_config devices[0];
154}; 243};
155 244
156struct bdb_lvds_options { 245struct bdb_lvds_options {
@@ -302,6 +391,45 @@ struct bdb_sdvo_lvds_options {
302 u8 panel_misc_bits_4; 391 u8 panel_misc_bits_4;
303} __attribute__((packed)); 392} __attribute__((packed));
304 393
394struct bdb_driver_features {
395 u8 boot_dev_algorithm:1;
396 u8 block_display_switch:1;
397 u8 allow_display_switch:1;
398 u8 hotplug_dvo:1;
399 u8 dual_view_zoom:1;
400 u8 int15h_hook:1;
401 u8 sprite_in_clone:1;
402 u8 primary_lfp_id:1;
403
404 u16 boot_mode_x;
405 u16 boot_mode_y;
406 u8 boot_mode_bpp;
407 u8 boot_mode_refresh;
408
409 u16 enable_lfp_primary:1;
410 u16 selective_mode_pruning:1;
411 u16 dual_frequency:1;
412 u16 render_clock_freq:1; /* 0: high freq; 1: low freq */
413 u16 nt_clone_support:1;
414 u16 power_scheme_ui:1; /* 0: CUI; 1: 3rd party */
415 u16 sprite_display_assign:1; /* 0: secondary; 1: primary */
416 u16 cui_aspect_scaling:1;
417 u16 preserve_aspect_ratio:1;
418 u16 sdvo_device_power_down:1;
419 u16 crt_hotplug:1;
420 u16 lvds_config:2;
421 u16 tv_hotplug:1;
422 u16 hdmi_config:2;
423
424 u8 static_display:1;
425 u8 reserved2:7;
426 u16 legacy_crt_max_x;
427 u16 legacy_crt_max_y;
428 u8 legacy_crt_max_refresh;
429
430 u8 hdmi_termination;
431 u8 custom_vbt_version;
432} __attribute__((packed));
305 433
306extern bool psb_intel_init_bios(struct drm_device *dev); 434extern bool psb_intel_init_bios(struct drm_device *dev);
307extern void psb_intel_destroy_bios(struct drm_device *dev); 435extern void psb_intel_destroy_bios(struct drm_device *dev);
@@ -427,4 +555,21 @@ extern void psb_intel_destroy_bios(struct drm_device *dev);
427#define SWF14_APM_STANDBY 0x1 555#define SWF14_APM_STANDBY 0x1
428#define SWF14_APM_RESTORE 0x0 556#define SWF14_APM_RESTORE 0x0
429 557
558/* Add the device class for LFP, TV, HDMI */
559#define DEVICE_TYPE_INT_LFP 0x1022
560#define DEVICE_TYPE_INT_TV 0x1009
561#define DEVICE_TYPE_HDMI 0x60D2
562#define DEVICE_TYPE_DP 0x68C6
563#define DEVICE_TYPE_eDP 0x78C6
564
565/* define the DVO port for HDMI output type */
566#define DVO_B 1
567#define DVO_C 2
568#define DVO_D 3
569
570/* define the PORT for DP output type */
571#define PORT_IDPB 7
572#define PORT_IDPC 8
573#define PORT_IDPD 9
574
430#endif /* _I830_BIOS_H_ */ 575#endif /* _I830_BIOS_H_ */
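
A rough sketch of how the new device-type and port codes might be consumed when walking the child-device table. It assumes fixed-size entries, whereas a real parser would advance by child_dev_size, and it assumes device_type is the leading u16 of struct child_device_config (that part of the struct is not shown in this hunk):

/* Sketch under stated assumptions: iterate devices[] and classify
 * outputs by the device-type codes defined above. */
static void classify_child_devices(const struct bdb_general_definitions *defs,
				   int count)
{
	int i;

	for (i = 0; i < count; i++) {
		const struct child_device_config *child = &defs->devices[i];

		switch (child->device_type) {
		case DEVICE_TYPE_HDMI:
			/* dvo_port selects DVO_B, DVO_C or DVO_D */
			break;
		case DEVICE_TYPE_DP:
		case DEVICE_TYPE_eDP:
			/* dvo_port selects PORT_IDPB/IDPC/IDPD */
			break;
		default:
			break;
		}
	}
}
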
diff --git a/drivers/gpu/drm/gma500/mdfld_device.c b/drivers/gpu/drm/gma500/mdfld_device.c
index af656787db0f..265ad0de44a6 100644
--- a/drivers/gpu/drm/gma500/mdfld_device.c
+++ b/drivers/gpu/drm/gma500/mdfld_device.c
@@ -163,142 +163,30 @@ struct backlight_device *mdfld_get_backlight_device(void)
163 * 163 *
164 * Notes: FIXME_JLIU7 need to add the support for DPI MIPI & HDMI audio 164 * Notes: FIXME_JLIU7 need to add the support for DPI MIPI & HDMI audio
165 */ 165 */
166static int mdfld_save_display_registers(struct drm_device *dev, int pipe) 166static int mdfld_save_display_registers(struct drm_device *dev, int pipenum)
167{ 167{
168 struct drm_psb_private *dev_priv = dev->dev_private; 168 struct drm_psb_private *dev_priv = dev->dev_private;
169 struct medfield_state *regs = &dev_priv->regs.mdfld; 169 struct medfield_state *regs = &dev_priv->regs.mdfld;
170 struct psb_pipe *pipe = &dev_priv->regs.pipe[pipenum];
171 const struct psb_offset *map = &dev_priv->regmap[pipenum];
170 int i; 172 int i;
173 u32 *mipi_val;
171 174
172 /* register */ 175 /* register */
173 u32 dpll_reg = MRST_DPLL_A;
174 u32 fp_reg = MRST_FPA0;
175 u32 pipeconf_reg = PIPEACONF;
176 u32 htot_reg = HTOTAL_A;
177 u32 hblank_reg = HBLANK_A;
178 u32 hsync_reg = HSYNC_A;
179 u32 vtot_reg = VTOTAL_A;
180 u32 vblank_reg = VBLANK_A;
181 u32 vsync_reg = VSYNC_A;
182 u32 pipesrc_reg = PIPEASRC;
183 u32 dspstride_reg = DSPASTRIDE;
184 u32 dsplinoff_reg = DSPALINOFF;
185 u32 dsptileoff_reg = DSPATILEOFF;
186 u32 dspsize_reg = DSPASIZE;
187 u32 dsppos_reg = DSPAPOS;
188 u32 dspsurf_reg = DSPASURF;
189 u32 mipi_reg = MIPI; 176 u32 mipi_reg = MIPI;
190 u32 dspcntr_reg = DSPACNTR; 177
191 u32 dspstatus_reg = PIPEASTAT; 178 switch (pipenum) {
192 u32 palette_reg = PALETTE_A;
193
194 /* pointer to values */
195 u32 *dpll_val = &regs->saveDPLL_A;
196 u32 *fp_val = &regs->saveFPA0;
197 u32 *pipeconf_val = &regs->savePIPEACONF;
198 u32 *htot_val = &regs->saveHTOTAL_A;
199 u32 *hblank_val = &regs->saveHBLANK_A;
200 u32 *hsync_val = &regs->saveHSYNC_A;
201 u32 *vtot_val = &regs->saveVTOTAL_A;
202 u32 *vblank_val = &regs->saveVBLANK_A;
203 u32 *vsync_val = &regs->saveVSYNC_A;
204 u32 *pipesrc_val = &regs->savePIPEASRC;
205 u32 *dspstride_val = &regs->saveDSPASTRIDE;
206 u32 *dsplinoff_val = &regs->saveDSPALINOFF;
207 u32 *dsptileoff_val = &regs->saveDSPATILEOFF;
208 u32 *dspsize_val = &regs->saveDSPASIZE;
209 u32 *dsppos_val = &regs->saveDSPAPOS;
210 u32 *dspsurf_val = &regs->saveDSPASURF;
211 u32 *mipi_val = &regs->saveMIPI;
212 u32 *dspcntr_val = &regs->saveDSPACNTR;
213 u32 *dspstatus_val = &regs->saveDSPASTATUS;
214 u32 *palette_val = regs->save_palette_a;
215
216 switch (pipe) {
217 case 0: 179 case 0:
180 mipi_val = &regs->saveMIPI;
218 break; 181 break;
219 case 1: 182 case 1:
220 /* regester */ 183 mipi_val = &regs->saveMIPI;
221 dpll_reg = MDFLD_DPLL_B;
222 fp_reg = MDFLD_DPLL_DIV0;
223 pipeconf_reg = PIPEBCONF;
224 htot_reg = HTOTAL_B;
225 hblank_reg = HBLANK_B;
226 hsync_reg = HSYNC_B;
227 vtot_reg = VTOTAL_B;
228 vblank_reg = VBLANK_B;
229 vsync_reg = VSYNC_B;
230 pipesrc_reg = PIPEBSRC;
231 dspstride_reg = DSPBSTRIDE;
232 dsplinoff_reg = DSPBLINOFF;
233 dsptileoff_reg = DSPBTILEOFF;
234 dspsize_reg = DSPBSIZE;
235 dsppos_reg = DSPBPOS;
236 dspsurf_reg = DSPBSURF;
237 dspcntr_reg = DSPBCNTR;
238 dspstatus_reg = PIPEBSTAT;
239 palette_reg = PALETTE_B;
240
241 /* values */
242 dpll_val = &regs->saveDPLL_B;
243 fp_val = &regs->saveFPB0;
244 pipeconf_val = &regs->savePIPEBCONF;
245 htot_val = &regs->saveHTOTAL_B;
246 hblank_val = &regs->saveHBLANK_B;
247 hsync_val = &regs->saveHSYNC_B;
248 vtot_val = &regs->saveVTOTAL_B;
249 vblank_val = &regs->saveVBLANK_B;
250 vsync_val = &regs->saveVSYNC_B;
251 pipesrc_val = &regs->savePIPEBSRC;
252 dspstride_val = &regs->saveDSPBSTRIDE;
253 dsplinoff_val = &regs->saveDSPBLINOFF;
254 dsptileoff_val = &regs->saveDSPBTILEOFF;
255 dspsize_val = &regs->saveDSPBSIZE;
256 dsppos_val = &regs->saveDSPBPOS;
257 dspsurf_val = &regs->saveDSPBSURF;
258 dspcntr_val = &regs->saveDSPBCNTR;
259 dspstatus_val = &regs->saveDSPBSTATUS;
260 palette_val = regs->save_palette_b;
261 break; 184 break;
262 case 2: 185 case 2:
263 /* register */ 186 /* register */
264 pipeconf_reg = PIPECCONF;
265 htot_reg = HTOTAL_C;
266 hblank_reg = HBLANK_C;
267 hsync_reg = HSYNC_C;
268 vtot_reg = VTOTAL_C;
269 vblank_reg = VBLANK_C;
270 vsync_reg = VSYNC_C;
271 pipesrc_reg = PIPECSRC;
272 dspstride_reg = DSPCSTRIDE;
273 dsplinoff_reg = DSPCLINOFF;
274 dsptileoff_reg = DSPCTILEOFF;
275 dspsize_reg = DSPCSIZE;
276 dsppos_reg = DSPCPOS;
277 dspsurf_reg = DSPCSURF;
278 mipi_reg = MIPI_C; 187 mipi_reg = MIPI_C;
279 dspcntr_reg = DSPCCNTR;
280 dspstatus_reg = PIPECSTAT;
281 palette_reg = PALETTE_C;
282
283 /* pointer to values */ 188 /* pointer to values */
284 pipeconf_val = &regs->savePIPECCONF;
285 htot_val = &regs->saveHTOTAL_C;
286 hblank_val = &regs->saveHBLANK_C;
287 hsync_val = &regs->saveHSYNC_C;
288 vtot_val = &regs->saveVTOTAL_C;
289 vblank_val = &regs->saveVBLANK_C;
290 vsync_val = &regs->saveVSYNC_C;
291 pipesrc_val = &regs->savePIPECSRC;
292 dspstride_val = &regs->saveDSPCSTRIDE;
293 dsplinoff_val = &regs->saveDSPCLINOFF;
294 dsptileoff_val = &regs->saveDSPCTILEOFF;
295 dspsize_val = &regs->saveDSPCSIZE;
296 dsppos_val = &regs->saveDSPCPOS;
297 dspsurf_val = &regs->saveDSPCSURF;
298 mipi_val = &regs->saveMIPI_C; 189 mipi_val = &regs->saveMIPI_C;
299 dspcntr_val = &regs->saveDSPCCNTR;
300 dspstatus_val = &regs->saveDSPCSTATUS;
301 palette_val = regs->save_palette_c;
302 break; 190 break;
303 default: 191 default:
304 DRM_ERROR("%s, invalid pipe number.\n", __func__); 192 DRM_ERROR("%s, invalid pipe number.\n", __func__);
@@ -306,30 +194,30 @@ static int mdfld_save_display_registers(struct drm_device *dev, int pipe)
306 } 194 }
307 195
308 /* Pipe & plane A info */ 196 /* Pipe & plane A info */
309 *dpll_val = PSB_RVDC32(dpll_reg); 197 pipe->dpll = PSB_RVDC32(map->dpll);
310 *fp_val = PSB_RVDC32(fp_reg); 198 pipe->fp0 = PSB_RVDC32(map->fp0);
311 *pipeconf_val = PSB_RVDC32(pipeconf_reg); 199 pipe->conf = PSB_RVDC32(map->conf);
312 *htot_val = PSB_RVDC32(htot_reg); 200 pipe->htotal = PSB_RVDC32(map->htotal);
313 *hblank_val = PSB_RVDC32(hblank_reg); 201 pipe->hblank = PSB_RVDC32(map->hblank);
314 *hsync_val = PSB_RVDC32(hsync_reg); 202 pipe->hsync = PSB_RVDC32(map->hsync);
315 *vtot_val = PSB_RVDC32(vtot_reg); 203 pipe->vtotal = PSB_RVDC32(map->vtotal);
316 *vblank_val = PSB_RVDC32(vblank_reg); 204 pipe->vblank = PSB_RVDC32(map->vblank);
317 *vsync_val = PSB_RVDC32(vsync_reg); 205 pipe->vsync = PSB_RVDC32(map->vsync);
318 *pipesrc_val = PSB_RVDC32(pipesrc_reg); 206 pipe->src = PSB_RVDC32(map->src);
319 *dspstride_val = PSB_RVDC32(dspstride_reg); 207 pipe->stride = PSB_RVDC32(map->stride);
320 *dsplinoff_val = PSB_RVDC32(dsplinoff_reg); 208 pipe->linoff = PSB_RVDC32(map->linoff);
321 *dsptileoff_val = PSB_RVDC32(dsptileoff_reg); 209 pipe->tileoff = PSB_RVDC32(map->tileoff);
322 *dspsize_val = PSB_RVDC32(dspsize_reg); 210 pipe->size = PSB_RVDC32(map->size);
323 *dsppos_val = PSB_RVDC32(dsppos_reg); 211 pipe->pos = PSB_RVDC32(map->pos);
324 *dspsurf_val = PSB_RVDC32(dspsurf_reg); 212 pipe->surf = PSB_RVDC32(map->surf);
325 *dspcntr_val = PSB_RVDC32(dspcntr_reg); 213 pipe->cntr = PSB_RVDC32(map->cntr);
326 *dspstatus_val = PSB_RVDC32(dspstatus_reg); 214 pipe->status = PSB_RVDC32(map->status);
327 215
328 /*save palette (gamma) */ 216 /*save palette (gamma) */
329 for (i = 0; i < 256; i++) 217 for (i = 0; i < 256; i++)
330 palette_val[i] = PSB_RVDC32(palette_reg + (i << 2)); 218 pipe->palette[i] = PSB_RVDC32(map->palette + (i << 2));
331 219
332 if (pipe == 1) { 220 if (pipenum == 1) {
333 regs->savePFIT_CONTROL = PSB_RVDC32(PFIT_CONTROL); 221 regs->savePFIT_CONTROL = PSB_RVDC32(PFIT_CONTROL);
334 regs->savePFIT_PGM_RATIOS = PSB_RVDC32(PFIT_PGM_RATIOS); 222 regs->savePFIT_PGM_RATIOS = PSB_RVDC32(PFIT_PGM_RATIOS);
335 223
@@ -349,7 +237,7 @@ static int mdfld_save_display_registers(struct drm_device *dev, int pipe)
349 * 237 *
350 * Notes: FIXME_JLIU7 need to add the support for DPI MIPI & HDMI audio 238 * Notes: FIXME_JLIU7 need to add the support for DPI MIPI & HDMI audio
351 */ 239 */
352static int mdfld_restore_display_registers(struct drm_device *dev, int pipe) 240static int mdfld_restore_display_registers(struct drm_device *dev, int pipenum)
353{ 241{
354 /* To get panel out of ULPS mode. */ 242 /* To get panel out of ULPS mode. */
355 u32 temp = 0; 243 u32 temp = 0;
@@ -357,142 +245,30 @@ static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
357 struct drm_psb_private *dev_priv = dev->dev_private; 245 struct drm_psb_private *dev_priv = dev->dev_private;
358 struct mdfld_dsi_config *dsi_config = NULL; 246 struct mdfld_dsi_config *dsi_config = NULL;
359 struct medfield_state *regs = &dev_priv->regs.mdfld; 247 struct medfield_state *regs = &dev_priv->regs.mdfld;
360 u32 i = 0; 248 struct psb_pipe *pipe = &dev_priv->regs.pipe[pipenum];
361 u32 dpll = 0; 249 const struct psb_offset *map = &dev_priv->regmap[pipenum];
250 u32 i;
251 u32 dpll;
362 u32 timeout = 0; 252 u32 timeout = 0;
363 253
364 /* regester */ 254 /* register */
365 u32 dpll_reg = MRST_DPLL_A;
366 u32 fp_reg = MRST_FPA0;
367 u32 pipeconf_reg = PIPEACONF;
368 u32 htot_reg = HTOTAL_A;
369 u32 hblank_reg = HBLANK_A;
370 u32 hsync_reg = HSYNC_A;
371 u32 vtot_reg = VTOTAL_A;
372 u32 vblank_reg = VBLANK_A;
373 u32 vsync_reg = VSYNC_A;
374 u32 pipesrc_reg = PIPEASRC;
375 u32 dspstride_reg = DSPASTRIDE;
376 u32 dsplinoff_reg = DSPALINOFF;
377 u32 dsptileoff_reg = DSPATILEOFF;
378 u32 dspsize_reg = DSPASIZE;
379 u32 dsppos_reg = DSPAPOS;
380 u32 dspsurf_reg = DSPASURF;
381 u32 dspstatus_reg = PIPEASTAT;
382 u32 mipi_reg = MIPI; 255 u32 mipi_reg = MIPI;
383 u32 dspcntr_reg = DSPACNTR;
384 u32 palette_reg = PALETTE_A;
385 256
386 /* values */ 257 /* values */
387 u32 dpll_val = regs->saveDPLL_A & ~DPLL_VCO_ENABLE; 258 u32 dpll_val = pipe->dpll;
388 u32 fp_val = regs->saveFPA0;
389 u32 pipeconf_val = regs->savePIPEACONF;
390 u32 htot_val = regs->saveHTOTAL_A;
391 u32 hblank_val = regs->saveHBLANK_A;
392 u32 hsync_val = regs->saveHSYNC_A;
393 u32 vtot_val = regs->saveVTOTAL_A;
394 u32 vblank_val = regs->saveVBLANK_A;
395 u32 vsync_val = regs->saveVSYNC_A;
396 u32 pipesrc_val = regs->savePIPEASRC;
397 u32 dspstride_val = regs->saveDSPASTRIDE;
398 u32 dsplinoff_val = regs->saveDSPALINOFF;
399 u32 dsptileoff_val = regs->saveDSPATILEOFF;
400 u32 dspsize_val = regs->saveDSPASIZE;
401 u32 dsppos_val = regs->saveDSPAPOS;
402 u32 dspsurf_val = regs->saveDSPASURF;
403 u32 dspstatus_val = regs->saveDSPASTATUS;
404 u32 mipi_val = regs->saveMIPI; 259 u32 mipi_val = regs->saveMIPI;
405 u32 dspcntr_val = regs->saveDSPACNTR;
406 u32 *palette_val = regs->save_palette_a;
407 260
408 switch (pipe) { 261 switch (pipenum) {
409 case 0: 262 case 0:
263 dpll_val &= ~DPLL_VCO_ENABLE;
410 dsi_config = dev_priv->dsi_configs[0]; 264 dsi_config = dev_priv->dsi_configs[0];
411 break; 265 break;
412 case 1: 266 case 1:
413 /* regester */ 267 dpll_val &= ~DPLL_VCO_ENABLE;
414 dpll_reg = MDFLD_DPLL_B;
415 fp_reg = MDFLD_DPLL_DIV0;
416 pipeconf_reg = PIPEBCONF;
417 htot_reg = HTOTAL_B;
418 hblank_reg = HBLANK_B;
419 hsync_reg = HSYNC_B;
420 vtot_reg = VTOTAL_B;
421 vblank_reg = VBLANK_B;
422 vsync_reg = VSYNC_B;
423 pipesrc_reg = PIPEBSRC;
424 dspstride_reg = DSPBSTRIDE;
425 dsplinoff_reg = DSPBLINOFF;
426 dsptileoff_reg = DSPBTILEOFF;
427 dspsize_reg = DSPBSIZE;
428 dsppos_reg = DSPBPOS;
429 dspsurf_reg = DSPBSURF;
430 dspcntr_reg = DSPBCNTR;
431 dspstatus_reg = PIPEBSTAT;
432 palette_reg = PALETTE_B;
433
434 /* values */
435 dpll_val = regs->saveDPLL_B & ~DPLL_VCO_ENABLE;
436 fp_val = regs->saveFPB0;
437 pipeconf_val = regs->savePIPEBCONF;
438 htot_val = regs->saveHTOTAL_B;
439 hblank_val = regs->saveHBLANK_B;
440 hsync_val = regs->saveHSYNC_B;
441 vtot_val = regs->saveVTOTAL_B;
442 vblank_val = regs->saveVBLANK_B;
443 vsync_val = regs->saveVSYNC_B;
444 pipesrc_val = regs->savePIPEBSRC;
445 dspstride_val = regs->saveDSPBSTRIDE;
446 dsplinoff_val = regs->saveDSPBLINOFF;
447 dsptileoff_val = regs->saveDSPBTILEOFF;
448 dspsize_val = regs->saveDSPBSIZE;
449 dsppos_val = regs->saveDSPBPOS;
450 dspsurf_val = regs->saveDSPBSURF;
451 dspcntr_val = regs->saveDSPBCNTR;
452 dspstatus_val = regs->saveDSPBSTATUS;
453 palette_val = regs->save_palette_b;
454 break; 268 break;
455 case 2: 269 case 2:
456 /* regester */
457 pipeconf_reg = PIPECCONF;
458 htot_reg = HTOTAL_C;
459 hblank_reg = HBLANK_C;
460 hsync_reg = HSYNC_C;
461 vtot_reg = VTOTAL_C;
462 vblank_reg = VBLANK_C;
463 vsync_reg = VSYNC_C;
464 pipesrc_reg = PIPECSRC;
465 dspstride_reg = DSPCSTRIDE;
466 dsplinoff_reg = DSPCLINOFF;
467 dsptileoff_reg = DSPCTILEOFF;
468 dspsize_reg = DSPCSIZE;
469 dsppos_reg = DSPCPOS;
470 dspsurf_reg = DSPCSURF;
471 mipi_reg = MIPI_C; 270 mipi_reg = MIPI_C;
472 dspcntr_reg = DSPCCNTR;
473 dspstatus_reg = PIPECSTAT;
474 palette_reg = PALETTE_C;
475
476 /* values */
477 pipeconf_val = regs->savePIPECCONF;
478 htot_val = regs->saveHTOTAL_C;
479 hblank_val = regs->saveHBLANK_C;
480 hsync_val = regs->saveHSYNC_C;
481 vtot_val = regs->saveVTOTAL_C;
482 vblank_val = regs->saveVBLANK_C;
483 vsync_val = regs->saveVSYNC_C;
484 pipesrc_val = regs->savePIPECSRC;
485 dspstride_val = regs->saveDSPCSTRIDE;
486 dsplinoff_val = regs->saveDSPCLINOFF;
487 dsptileoff_val = regs->saveDSPCTILEOFF;
488 dspsize_val = regs->saveDSPCSIZE;
489 dsppos_val = regs->saveDSPCPOS;
490 dspsurf_val = regs->saveDSPCSURF;
491 mipi_val = regs->saveMIPI_C; 271 mipi_val = regs->saveMIPI_C;
492 dspcntr_val = regs->saveDSPCCNTR;
493 dspstatus_val = regs->saveDSPCSTATUS;
494 palette_val = regs->save_palette_c;
495
496 dsi_config = dev_priv->dsi_configs[1]; 272 dsi_config = dev_priv->dsi_configs[1];
497 break; 273 break;
498 default: 274 default:
@@ -503,14 +279,14 @@ static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
503 /*make sure VGA plane is off. it initializes to on after reset!*/ 279 /*make sure VGA plane is off. it initializes to on after reset!*/
504 PSB_WVDC32(0x80000000, VGACNTRL); 280 PSB_WVDC32(0x80000000, VGACNTRL);
505 281
506 if (pipe == 1) { 282 if (pipenum == 1) {
507 PSB_WVDC32(dpll_val & ~DPLL_VCO_ENABLE, dpll_reg); 283 PSB_WVDC32(dpll_val & ~DPLL_VCO_ENABLE, map->dpll);
508 PSB_RVDC32(dpll_reg); 284 PSB_RVDC32(map->dpll);
509 285
510 PSB_WVDC32(fp_val, fp_reg); 286 PSB_WVDC32(pipe->fp0, map->fp0);
511 } else { 287 } else {
512 288
513 dpll = PSB_RVDC32(dpll_reg); 289 dpll = PSB_RVDC32(map->dpll);
514 290
515 if (!(dpll & DPLL_VCO_ENABLE)) { 291 if (!(dpll & DPLL_VCO_ENABLE)) {
516 292
@@ -518,23 +294,23 @@ static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
518	 before enabling the VCO */				 294	 before enabling the VCO */
519 if (dpll & MDFLD_PWR_GATE_EN) { 295 if (dpll & MDFLD_PWR_GATE_EN) {
520 dpll &= ~MDFLD_PWR_GATE_EN; 296 dpll &= ~MDFLD_PWR_GATE_EN;
521 PSB_WVDC32(dpll, dpll_reg); 297 PSB_WVDC32(dpll, map->dpll);
522 /* FIXME_MDFLD PO - change 500 to 1 after PO */ 298 /* FIXME_MDFLD PO - change 500 to 1 after PO */
523 udelay(500); 299 udelay(500);
524 } 300 }
525 301
526 PSB_WVDC32(fp_val, fp_reg); 302 PSB_WVDC32(pipe->fp0, map->fp0);
527 PSB_WVDC32(dpll_val, dpll_reg); 303 PSB_WVDC32(dpll_val, map->dpll);
528 /* FIXME_MDFLD PO - change 500 to 1 after PO */ 304 /* FIXME_MDFLD PO - change 500 to 1 after PO */
529 udelay(500); 305 udelay(500);
530 306
531 dpll_val |= DPLL_VCO_ENABLE; 307 dpll_val |= DPLL_VCO_ENABLE;
532 PSB_WVDC32(dpll_val, dpll_reg); 308 PSB_WVDC32(dpll_val, map->dpll);
533 PSB_RVDC32(dpll_reg); 309 PSB_RVDC32(map->dpll);
534 310
535 /* wait for DSI PLL to lock */ 311 /* wait for DSI PLL to lock */
536 while (timeout < 20000 && 312 while (timeout < 20000 &&
537 !(PSB_RVDC32(pipeconf_reg) & PIPECONF_DSIPLL_LOCK)) { 313 !(PSB_RVDC32(map->conf) & PIPECONF_DSIPLL_LOCK)) {
538 udelay(150); 314 udelay(150);
539 timeout++; 315 timeout++;
540 } 316 }
@@ -547,28 +323,28 @@ static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
547 } 323 }
548 } 324 }
549 /* Restore mode */ 325 /* Restore mode */
550 PSB_WVDC32(htot_val, htot_reg); 326 PSB_WVDC32(pipe->htotal, map->htotal);
551 PSB_WVDC32(hblank_val, hblank_reg); 327 PSB_WVDC32(pipe->hblank, map->hblank);
552 PSB_WVDC32(hsync_val, hsync_reg); 328 PSB_WVDC32(pipe->hsync, map->hsync);
553 PSB_WVDC32(vtot_val, vtot_reg); 329 PSB_WVDC32(pipe->vtotal, map->vtotal);
554 PSB_WVDC32(vblank_val, vblank_reg); 330 PSB_WVDC32(pipe->vblank, map->vblank);
555 PSB_WVDC32(vsync_val, vsync_reg); 331 PSB_WVDC32(pipe->vsync, map->vsync);
556 PSB_WVDC32(pipesrc_val, pipesrc_reg); 332 PSB_WVDC32(pipe->src, map->src);
557 PSB_WVDC32(dspstatus_val, dspstatus_reg); 333 PSB_WVDC32(pipe->status, map->status);
558 334
559 /*set up the plane*/ 335 /*set up the plane*/
560 PSB_WVDC32(dspstride_val, dspstride_reg); 336 PSB_WVDC32(pipe->stride, map->stride);
561 PSB_WVDC32(dsplinoff_val, dsplinoff_reg); 337 PSB_WVDC32(pipe->linoff, map->linoff);
562 PSB_WVDC32(dsptileoff_val, dsptileoff_reg); 338 PSB_WVDC32(pipe->tileoff, map->tileoff);
563 PSB_WVDC32(dspsize_val, dspsize_reg); 339 PSB_WVDC32(pipe->size, map->size);
564 PSB_WVDC32(dsppos_val, dsppos_reg); 340 PSB_WVDC32(pipe->pos, map->pos);
565 PSB_WVDC32(dspsurf_val, dspsurf_reg); 341 PSB_WVDC32(pipe->surf, map->surf);
566 342
567 if (pipe == 1) { 343 if (pipenum == 1) {
568 /* restore palette (gamma) */ 344 /* restore palette (gamma) */
569 /*DRM_UDELAY(50000); */ 345 /*DRM_UDELAY(50000); */
570 for (i = 0; i < 256; i++) 346 for (i = 0; i < 256; i++)
571 PSB_WVDC32(palette_val[i], palette_reg + (i << 2)); 347 PSB_WVDC32(pipe->palette[i], map->palette + (i << 2));
572 348
573 PSB_WVDC32(regs->savePFIT_CONTROL, PFIT_CONTROL); 349 PSB_WVDC32(regs->savePFIT_CONTROL, PFIT_CONTROL);
574 PSB_WVDC32(regs->savePFIT_PGM_RATIOS, PFIT_PGM_RATIOS); 350 PSB_WVDC32(regs->savePFIT_PGM_RATIOS, PFIT_PGM_RATIOS);
@@ -578,7 +354,7 @@ static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
578 /*TODO: resume pipe*/ 354 /*TODO: resume pipe*/
579 355
580 /*enable the plane*/ 356 /*enable the plane*/
581 PSB_WVDC32(dspcntr_val & ~DISPLAY_PLANE_ENABLE, dspcntr_reg); 357 PSB_WVDC32(pipe->cntr & ~DISPLAY_PLANE_ENABLE, map->cntr);
582 358
583 return 0; 359 return 0;
584 } 360 }
@@ -588,7 +364,7 @@ static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
588 364
589 /*setup MIPI adapter + MIPI IP registers*/ 365 /*setup MIPI adapter + MIPI IP registers*/
590 if (dsi_config) 366 if (dsi_config)
591 mdfld_dsi_controller_init(dsi_config, pipe); 367 mdfld_dsi_controller_init(dsi_config, pipenum);
592 368
593 if (in_atomic() || in_interrupt()) 369 if (in_atomic() || in_interrupt())
594 mdelay(20); 370 mdelay(20);
@@ -596,7 +372,7 @@ static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
596 msleep(20); 372 msleep(20);
597 373
598 /*enable the plane*/ 374 /*enable the plane*/
599 PSB_WVDC32(dspcntr_val, dspcntr_reg); 375 PSB_WVDC32(pipe->cntr, map->cntr);
600 376
601 if (in_atomic() || in_interrupt()) 377 if (in_atomic() || in_interrupt())
602 mdelay(20); 378 mdelay(20);
@@ -625,12 +401,12 @@ static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
625 mdelay(1); 401 mdelay(1);
626 402
627 /*enable the pipe*/ 403 /*enable the pipe*/
628 PSB_WVDC32(pipeconf_val, pipeconf_reg); 404 PSB_WVDC32(pipe->conf, map->conf);
629 405
630 /* restore palette (gamma) */ 406 /* restore palette (gamma) */
631 /*DRM_UDELAY(50000); */ 407 /*DRM_UDELAY(50000); */
632 for (i = 0; i < 256; i++) 408 for (i = 0; i < 256; i++)
633 PSB_WVDC32(palette_val[i], palette_reg + (i << 2)); 409 PSB_WVDC32(pipe->palette[i], map->palette + (i << 2));
634 410
635 return 0; 411 return 0;
636} 412}
@@ -667,14 +443,98 @@ static int mdfld_power_up(struct drm_device *dev)
667 return 0; 443 return 0;
668} 444}
669 445
446/* Medfield */
447static const struct psb_offset mdfld_regmap[3] = {
448 {
449 .fp0 = MRST_FPA0,
450 .fp1 = MRST_FPA1,
451 .cntr = DSPACNTR,
452 .conf = PIPEACONF,
453 .src = PIPEASRC,
454 .dpll = MRST_DPLL_A,
455 .htotal = HTOTAL_A,
456 .hblank = HBLANK_A,
457 .hsync = HSYNC_A,
458 .vtotal = VTOTAL_A,
459 .vblank = VBLANK_A,
460 .vsync = VSYNC_A,
461 .stride = DSPASTRIDE,
462 .size = DSPASIZE,
463 .pos = DSPAPOS,
464 .surf = DSPASURF,
465 .addr = MRST_DSPABASE,
466 .status = PIPEASTAT,
467 .linoff = DSPALINOFF,
468 .tileoff = DSPATILEOFF,
469 .palette = PALETTE_A,
470 },
471 {
472 .fp0 = MDFLD_DPLL_DIV0,
473 .cntr = DSPBCNTR,
474 .conf = PIPEBCONF,
475 .src = PIPEBSRC,
476 .dpll = MDFLD_DPLL_B,
477 .htotal = HTOTAL_B,
478 .hblank = HBLANK_B,
479 .hsync = HSYNC_B,
480 .vtotal = VTOTAL_B,
481 .vblank = VBLANK_B,
482 .vsync = VSYNC_B,
483 .stride = DSPBSTRIDE,
484 .size = DSPBSIZE,
485 .pos = DSPBPOS,
486 .surf = DSPBSURF,
487 .addr = MRST_DSPBBASE,
488 .status = PIPEBSTAT,
489 .linoff = DSPBLINOFF,
490 .tileoff = DSPBTILEOFF,
491 .palette = PALETTE_B,
492 },
493 {
494 .fp0 = MRST_FPA0, /* This is what the old code did ?? */
495 .cntr = DSPCCNTR,
496 .conf = PIPECCONF,
497 .src = PIPECSRC,
498 /* No DPLL_C */
499 .dpll = MRST_DPLL_A,
500 .htotal = HTOTAL_C,
501 .hblank = HBLANK_C,
502 .hsync = HSYNC_C,
503 .vtotal = VTOTAL_C,
504 .vblank = VBLANK_C,
505 .vsync = VSYNC_C,
506 .stride = DSPCSTRIDE,
507	 .size = DSPCSIZE,
508 .pos = DSPCPOS,
509 .surf = DSPCSURF,
510 .addr = MDFLD_DSPCBASE,
511 .status = PIPECSTAT,
512 .linoff = DSPCLINOFF,
513 .tileoff = DSPCTILEOFF,
514 .palette = PALETTE_C,
515 },
516};
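
This table is the heart of the refactor running through the rest of the patch: instead of each function selecting twenty-odd register constants in a per-pipe switch statement, callers index one const array by pipe number. A minimal sketch of the consuming side:

/* One lookup replaces the old switch statements: every per-pipe
 * register is reached through the const psb_offset table. */
static u32 mdfld_pipe_conf(struct drm_device *dev, int pipenum)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	const struct psb_offset *map = &dev_priv->regmap[pipenum];

	return PSB_RVDC32(map->conf);
}

Keeping the table const also lets the compiler place it in read-only data, and it makes the save/restore loops identical for all three pipes.
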
517
518static int mdfld_chip_setup(struct drm_device *dev)
519{
520 struct drm_psb_private *dev_priv = dev->dev_private;
521 if (pci_enable_msi(dev->pdev))
522 dev_warn(dev->dev, "Enabling MSI failed!\n");
523 dev_priv->regmap = mdfld_regmap;
524 return mid_chip_setup(dev);
525}
526
670const struct psb_ops mdfld_chip_ops = { 527const struct psb_ops mdfld_chip_ops = {
671 .name = "mdfld", 528 .name = "mdfld",
672 .accel_2d = 0, 529 .accel_2d = 0,
673 .pipes = 3, 530 .pipes = 3,
674 .crtcs = 3, 531 .crtcs = 3,
532 .lvds_mask = (1 << 1),
533 .hdmi_mask = (1 << 1),
534 .cursor_needs_phys = 0,
675 .sgx_offset = MRST_SGX_OFFSET, 535 .sgx_offset = MRST_SGX_OFFSET,
676 536
677 .chip_setup = mid_chip_setup, 537 .chip_setup = mdfld_chip_setup,
678 .crtc_helper = &mdfld_helper_funcs, 538 .crtc_helper = &mdfld_helper_funcs,
679 .crtc_funcs = &psb_intel_crtc_funcs, 539 .crtc_funcs = &psb_intel_crtc_funcs,
680 540
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
index d52358b744a0..b34ff097b979 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
@@ -869,7 +869,6 @@ void mdfld_dsi_dpi_mode_set(struct drm_encoder *encoder,
869 mdfld_set_pipe_timing(dsi_config, pipe); 869 mdfld_set_pipe_timing(dsi_config, pipe);
870 870
871 REG_WRITE(DSPABASE, 0x00); 871 REG_WRITE(DSPABASE, 0x00);
872 REG_WRITE(DSPASTRIDE, (mode->hdisplay * 4));
873 REG_WRITE(DSPASIZE, 872 REG_WRITE(DSPASIZE,
874 ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1)); 873 ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
875 874
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
index baa0e14165e0..489ffd2c66e5 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
@@ -605,6 +605,8 @@ int mdfld_dsi_pkg_sender_init(struct mdfld_dsi_connector *dsi_connector,
605 struct mdfld_dsi_config *dsi_config = 605 struct mdfld_dsi_config *dsi_config =
606 mdfld_dsi_get_config(dsi_connector); 606 mdfld_dsi_get_config(dsi_connector);
607 struct drm_device *dev = dsi_config->dev; 607 struct drm_device *dev = dsi_config->dev;
608 struct drm_psb_private *dev_priv = dev->dev_private;
609 const struct psb_offset *map = &dev_priv->regmap[pipe];
608 u32 mipi_val = 0; 610 u32 mipi_val = 0;
609 611
610 if (!dsi_connector) { 612 if (!dsi_connector) {
@@ -632,21 +634,13 @@ int mdfld_dsi_pkg_sender_init(struct mdfld_dsi_connector *dsi_connector,
632 pkg_sender->status = MDFLD_DSI_PKG_SENDER_FREE; 634 pkg_sender->status = MDFLD_DSI_PKG_SENDER_FREE;
633 635
634 /*init regs*/ 636 /*init regs*/
635 if (pipe == 0) { 637 /* FIXME: should just copy the regmap ptr ? */
636 pkg_sender->dpll_reg = MRST_DPLL_A; 638 pkg_sender->dpll_reg = map->dpll;
637 pkg_sender->dspcntr_reg = DSPACNTR; 639 pkg_sender->dspcntr_reg = map->cntr;
638 pkg_sender->pipeconf_reg = PIPEACONF; 640 pkg_sender->pipeconf_reg = map->conf;
639 pkg_sender->dsplinoff_reg = DSPALINOFF; 641 pkg_sender->dsplinoff_reg = map->linoff;
640 pkg_sender->dspsurf_reg = DSPASURF; 642 pkg_sender->dspsurf_reg = map->surf;
641 pkg_sender->pipestat_reg = PIPEASTAT; 643 pkg_sender->pipestat_reg = map->status;
642 } else if (pipe == 2) {
643 pkg_sender->dpll_reg = MRST_DPLL_A;
644 pkg_sender->dspcntr_reg = DSPCCNTR;
645 pkg_sender->pipeconf_reg = PIPECCONF;
646 pkg_sender->dsplinoff_reg = DSPCLINOFF;
647 pkg_sender->dspsurf_reg = DSPCSURF;
648 pkg_sender->pipestat_reg = PIPECSTAT;
649 }
650 644
651 pkg_sender->mipi_intr_stat_reg = MIPI_INTR_STAT_REG(pipe); 645 pkg_sender->mipi_intr_stat_reg = MIPI_INTR_STAT_REG(pipe);
652 pkg_sender->mipi_lp_gen_data_reg = MIPI_LP_GEN_DATA_REG(pipe); 646 pkg_sender->mipi_lp_gen_data_reg = MIPI_LP_GEN_DATA_REG(pipe);
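
The FIXME above hints at the next simplification: rather than copying six offsets into the sender, keep one pointer to the per-pipe map. A sketch of that alternative, where the map member is a hypothetical addition to struct mdfld_dsi_pkg_sender:

/* Hypothetical follow-up to the FIXME: store the regmap pointer once. */
pkg_sender->map = &dev_priv->regmap[pipe];
/* ...and later dereference it instead of six cached register fields: */
PSB_WVDC32(0, pkg_sender->map->linoff);
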
diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c
index a35a2921bdf7..3f3cd619c79f 100644
--- a/drivers/gpu/drm/gma500/mdfld_intel_display.c
+++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c
@@ -50,17 +50,14 @@ struct mrst_clock_t {
50 50
51void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe) 51void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe)
52{ 52{
53 struct drm_psb_private *dev_priv = dev->dev_private;
54 const struct psb_offset *map = &dev_priv->regmap[pipe];
53 int count, temp; 55 int count, temp;
54 u32 pipeconf_reg = PIPEACONF;
55 56
56 switch (pipe) { 57 switch (pipe) {
57 case 0: 58 case 0:
58 break;
59 case 1: 59 case 1:
60 pipeconf_reg = PIPEBCONF;
61 break;
62 case 2: 60 case 2:
63 pipeconf_reg = PIPECCONF;
64 break; 61 break;
65 default: 62 default:
66 DRM_ERROR("Illegal Pipe Number.\n"); 63 DRM_ERROR("Illegal Pipe Number.\n");
@@ -73,7 +70,7 @@ void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe)
73 70
74	 /* Wait for the pipe disable to take effect. */	 71	 /* Wait for the pipe disable to take effect. */
75 for (count = 0; count < COUNT_MAX; count++) { 72 for (count = 0; count < COUNT_MAX; count++) {
76 temp = REG_READ(pipeconf_reg); 73 temp = REG_READ(map->conf);
77 if ((temp & PIPEACONF_PIPE_STATE) == 0) 74 if ((temp & PIPEACONF_PIPE_STATE) == 0)
78 break; 75 break;
79 } 76 }
@@ -81,17 +78,14 @@ void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe)
81 78
82void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe) 79void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe)
83{ 80{
81 struct drm_psb_private *dev_priv = dev->dev_private;
82 const struct psb_offset *map = &dev_priv->regmap[pipe];
84 int count, temp; 83 int count, temp;
85 u32 pipeconf_reg = PIPEACONF;
86 84
87 switch (pipe) { 85 switch (pipe) {
88 case 0: 86 case 0:
89 break;
90 case 1: 87 case 1:
91 pipeconf_reg = PIPEBCONF;
92 break;
93 case 2: 88 case 2:
94 pipeconf_reg = PIPECCONF;
95 break; 89 break;
96 default: 90 default:
97 DRM_ERROR("Illegal Pipe Number.\n"); 91 DRM_ERROR("Illegal Pipe Number.\n");
@@ -104,7 +98,7 @@ void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe)
104 98
105	 /* Wait for the pipe enable to take effect. */	 99	 /* Wait for the pipe enable to take effect. */
106 for (count = 0; count < COUNT_MAX; count++) { 100 for (count = 0; count < COUNT_MAX; count++) {
107 temp = REG_READ(pipeconf_reg); 101 temp = REG_READ(map->conf);
108 if ((temp & PIPEACONF_PIPE_STATE) == 1) 102 if ((temp & PIPEACONF_PIPE_STATE) == 1)
109 break; 103 break;
110 } 104 }
@@ -189,15 +183,12 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
189 struct drm_framebuffer *old_fb) 183 struct drm_framebuffer *old_fb)
190{ 184{
191 struct drm_device *dev = crtc->dev; 185 struct drm_device *dev = crtc->dev;
192 /* struct drm_i915_master_private *master_priv; */ 186 struct drm_psb_private *dev_priv = dev->dev_private;
193 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 187 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
194 struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb); 188 struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
195 int pipe = psb_intel_crtc->pipe; 189 int pipe = psb_intel_crtc->pipe;
190 const struct psb_offset *map = &dev_priv->regmap[pipe];
196 unsigned long start, offset; 191 unsigned long start, offset;
197 int dsplinoff = DSPALINOFF;
198 int dspsurf = DSPASURF;
199 int dspstride = DSPASTRIDE;
200 int dspcntr_reg = DSPACNTR;
201 u32 dspcntr; 192 u32 dspcntr;
202 int ret; 193 int ret;
203 194
@@ -215,23 +206,7 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
215 if (ret) 206 if (ret)
216 return ret; 207 return ret;
217 208
218 switch (pipe) { 209 if (pipe > 2) {
219 case 0:
220 dsplinoff = DSPALINOFF;
221 break;
222 case 1:
223 dsplinoff = DSPBLINOFF;
224 dspsurf = DSPBSURF;
225 dspstride = DSPBSTRIDE;
226 dspcntr_reg = DSPBCNTR;
227 break;
228 case 2:
229 dsplinoff = DSPCLINOFF;
230 dspsurf = DSPCSURF;
231 dspstride = DSPCSTRIDE;
232 dspcntr_reg = DSPCCNTR;
233 break;
234 default:
235 DRM_ERROR("Illegal Pipe Number.\n"); 210 DRM_ERROR("Illegal Pipe Number.\n");
236 return -EINVAL; 211 return -EINVAL;
237 } 212 }
@@ -242,8 +217,8 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
242 start = psbfb->gtt->offset; 217 start = psbfb->gtt->offset;
243 offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8); 218 offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
244 219
245 REG_WRITE(dspstride, crtc->fb->pitches[0]); 220 REG_WRITE(map->stride, crtc->fb->pitches[0]);
246 dspcntr = REG_READ(dspcntr_reg); 221 dspcntr = REG_READ(map->cntr);
247 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; 222 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
248 223
249 switch (crtc->fb->bits_per_pixel) { 224 switch (crtc->fb->bits_per_pixel) {
@@ -261,14 +236,14 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
261 dspcntr |= DISPPLANE_32BPP_NO_ALPHA; 236 dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
262 break; 237 break;
263 } 238 }
264 REG_WRITE(dspcntr_reg, dspcntr); 239 REG_WRITE(map->cntr, dspcntr);
265 240
266 dev_dbg(dev->dev, "Writing base %08lX %08lX %d %d\n", 241 dev_dbg(dev->dev, "Writing base %08lX %08lX %d %d\n",
267 start, offset, x, y); 242 start, offset, x, y);
268 REG_WRITE(dsplinoff, offset); 243 REG_WRITE(map->linoff, offset);
269 REG_READ(dsplinoff); 244 REG_READ(map->linoff);
270 REG_WRITE(dspsurf, start); 245 REG_WRITE(map->surf, start);
271 REG_READ(dspsurf); 246 REG_READ(map->surf);
272 247
273 gma_power_end(dev); 248 gma_power_end(dev);
274 249
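
The offset computed just above is the standard linear-framebuffer address calculation. A worked example, assuming a 32bpp surface with a 4096-byte pitch: panning to (x = 8, y = 2) yields 2 * 4096 + 8 * 4 = 8224 bytes past the surface base.

/* Linear framebuffer panning offset, as in the code above. */
static unsigned long fb_pan_offset(int x, int y, unsigned int pitch,
				   unsigned int bits_per_pixel)
{
	return (unsigned long)y * pitch + x * (bits_per_pixel / 8);
}
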
@@ -281,78 +256,56 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
281 */ 256 */
282void mdfld_disable_crtc(struct drm_device *dev, int pipe) 257void mdfld_disable_crtc(struct drm_device *dev, int pipe)
283{ 258{
284 int dpll_reg = MRST_DPLL_A; 259 struct drm_psb_private *dev_priv = dev->dev_private;
285 int dspcntr_reg = DSPACNTR; 260 const struct psb_offset *map = &dev_priv->regmap[pipe];
286 int dspbase_reg = MRST_DSPABASE;
287 int pipeconf_reg = PIPEACONF;
288 u32 temp; 261 u32 temp;
289 262
290 dev_dbg(dev->dev, "pipe = %d\n", pipe); 263 dev_dbg(dev->dev, "pipe = %d\n", pipe);
291 264
292 265
293 switch (pipe) {
294 case 0:
295 break;
296 case 1:
297 dpll_reg = MDFLD_DPLL_B;
298 dspcntr_reg = DSPBCNTR;
299 dspbase_reg = DSPBSURF;
300 pipeconf_reg = PIPEBCONF;
301 break;
302 case 2:
303 dpll_reg = MRST_DPLL_A;
304 dspcntr_reg = DSPCCNTR;
305 dspbase_reg = MDFLD_DSPCBASE;
306 pipeconf_reg = PIPECCONF;
307 break;
308 default:
309 DRM_ERROR("Illegal Pipe Number.\n");
310 return;
311 }
312
313 if (pipe != 1) 266 if (pipe != 1)
314 mdfld_dsi_gen_fifo_ready(dev, MIPI_GEN_FIFO_STAT_REG(pipe), 267 mdfld_dsi_gen_fifo_ready(dev, MIPI_GEN_FIFO_STAT_REG(pipe),
315 HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY); 268 HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
316 269
317 /* Disable display plane */ 270 /* Disable display plane */
318 temp = REG_READ(dspcntr_reg); 271 temp = REG_READ(map->cntr);
319 if ((temp & DISPLAY_PLANE_ENABLE) != 0) { 272 if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
320 REG_WRITE(dspcntr_reg, 273 REG_WRITE(map->cntr,
321 temp & ~DISPLAY_PLANE_ENABLE); 274 temp & ~DISPLAY_PLANE_ENABLE);
322 /* Flush the plane changes */ 275 /* Flush the plane changes */
323 REG_WRITE(dspbase_reg, REG_READ(dspbase_reg)); 276 REG_WRITE(map->base, REG_READ(map->base));
324 REG_READ(dspbase_reg); 277 REG_READ(map->base);
325 } 278 }
326 279
327 /* FIXME_JLIU7 MDFLD_PO revisit */ 280 /* FIXME_JLIU7 MDFLD_PO revisit */
328 281
329 /* Next, disable display pipes */ 282 /* Next, disable display pipes */
330 temp = REG_READ(pipeconf_reg); 283 temp = REG_READ(map->conf);
331 if ((temp & PIPEACONF_ENABLE) != 0) { 284 if ((temp & PIPEACONF_ENABLE) != 0) {
332 temp &= ~PIPEACONF_ENABLE; 285 temp &= ~PIPEACONF_ENABLE;
333 temp |= PIPECONF_PLANE_OFF | PIPECONF_CURSOR_OFF; 286 temp |= PIPECONF_PLANE_OFF | PIPECONF_CURSOR_OFF;
334 REG_WRITE(pipeconf_reg, temp); 287 REG_WRITE(map->conf, temp);
335 REG_READ(pipeconf_reg); 288 REG_READ(map->conf);
336 289
337	 /* Wait for the pipe disable to take effect. */	 290	 /* Wait for the pipe disable to take effect. */
338 mdfldWaitForPipeDisable(dev, pipe); 291 mdfldWaitForPipeDisable(dev, pipe);
339 } 292 }
340 293
341 temp = REG_READ(dpll_reg); 294 temp = REG_READ(map->dpll);
342 if (temp & DPLL_VCO_ENABLE) { 295 if (temp & DPLL_VCO_ENABLE) {
343 if ((pipe != 1 && 296 if ((pipe != 1 &&
344 !((REG_READ(PIPEACONF) | REG_READ(PIPECCONF)) 297 !((REG_READ(PIPEACONF) | REG_READ(PIPECCONF))
345 & PIPEACONF_ENABLE)) || pipe == 1) { 298 & PIPEACONF_ENABLE)) || pipe == 1) {
346 temp &= ~(DPLL_VCO_ENABLE); 299 temp &= ~(DPLL_VCO_ENABLE);
347 REG_WRITE(dpll_reg, temp); 300 REG_WRITE(map->dpll, temp);
348 REG_READ(dpll_reg); 301 REG_READ(map->dpll);
349 /* Wait for the clocks to turn off. */ 302 /* Wait for the clocks to turn off. */
350 /* FIXME_MDFLD PO may need more delay */ 303 /* FIXME_MDFLD PO may need more delay */
351 udelay(500); 304 udelay(500);
352 305
353 if (!(temp & MDFLD_PWR_GATE_EN)) { 306 if (!(temp & MDFLD_PWR_GATE_EN)) {
354 /* gating power of DPLL */ 307 /* gating power of DPLL */
355 REG_WRITE(dpll_reg, temp | MDFLD_PWR_GATE_EN); 308 REG_WRITE(map->dpll, temp | MDFLD_PWR_GATE_EN);
356 /* FIXME_MDFLD PO - change 500 to 1 after PO */ 309 /* FIXME_MDFLD PO - change 500 to 1 after PO */
357 udelay(5000); 310 udelay(5000);
358 } 311 }
@@ -373,41 +326,15 @@ static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode)
373 struct drm_psb_private *dev_priv = dev->dev_private; 326 struct drm_psb_private *dev_priv = dev->dev_private;
374 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 327 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
375 int pipe = psb_intel_crtc->pipe; 328 int pipe = psb_intel_crtc->pipe;
376 int dpll_reg = MRST_DPLL_A; 329 const struct psb_offset *map = &dev_priv->regmap[pipe];
377 int dspcntr_reg = DSPACNTR;
378 int dspbase_reg = MRST_DSPABASE;
379 int pipeconf_reg = PIPEACONF;
380 u32 pipestat_reg = PIPEASTAT;
381 u32 pipeconf = dev_priv->pipeconf[pipe]; 330 u32 pipeconf = dev_priv->pipeconf[pipe];
382 u32 temp; 331 u32 temp;
383 int timeout = 0; 332 int timeout = 0;
384 333
385 dev_dbg(dev->dev, "mode = %d, pipe = %d\n", mode, pipe); 334 dev_dbg(dev->dev, "mode = %d, pipe = %d\n", mode, pipe);
386 335
387/* FIXME_JLIU7 MDFLD_PO replaced w/ the following function */	 336	 /* Note: the old code used the pipe A status register for
388/* mdfld_dbi_dpms (struct drm_device *dev, int pipe, bool enabled) */	 337	 pipe B, but that appears to be a bug */
389
390 switch (pipe) {
391 case 0:
392 break;
393 case 1:
394 dpll_reg = DPLL_B;
395 dspcntr_reg = DSPBCNTR;
396 dspbase_reg = MRST_DSPBBASE;
397 pipeconf_reg = PIPEBCONF;
398 dpll_reg = MDFLD_DPLL_B;
399 break;
400 case 2:
401 dpll_reg = MRST_DPLL_A;
402 dspcntr_reg = DSPCCNTR;
403 dspbase_reg = MDFLD_DSPCBASE;
404 pipeconf_reg = PIPECCONF;
405 pipestat_reg = PIPECSTAT;
406 break;
407 default:
408 DRM_ERROR("Illegal Pipe Number.\n");
409 return;
410 }
411 338
412 if (!gma_power_begin(dev, true)) 339 if (!gma_power_begin(dev, true))
413 return; 340 return;
@@ -420,25 +347,25 @@ static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode)
420 case DRM_MODE_DPMS_STANDBY: 347 case DRM_MODE_DPMS_STANDBY:
421 case DRM_MODE_DPMS_SUSPEND: 348 case DRM_MODE_DPMS_SUSPEND:
422 /* Enable the DPLL */ 349 /* Enable the DPLL */
423 temp = REG_READ(dpll_reg); 350 temp = REG_READ(map->dpll);
424 351
425 if ((temp & DPLL_VCO_ENABLE) == 0) { 352 if ((temp & DPLL_VCO_ENABLE) == 0) {
426 /* When ungating power of DPLL, needs to wait 0.5us 353 /* When ungating power of DPLL, needs to wait 0.5us
427	 before enabling the VCO */				 354	 before enabling the VCO */
428 if (temp & MDFLD_PWR_GATE_EN) { 355 if (temp & MDFLD_PWR_GATE_EN) {
429 temp &= ~MDFLD_PWR_GATE_EN; 356 temp &= ~MDFLD_PWR_GATE_EN;
430 REG_WRITE(dpll_reg, temp); 357 REG_WRITE(map->dpll, temp);
431 /* FIXME_MDFLD PO - change 500 to 1 after PO */ 358 /* FIXME_MDFLD PO - change 500 to 1 after PO */
432 udelay(500); 359 udelay(500);
433 } 360 }
434 361
435 REG_WRITE(dpll_reg, temp); 362 REG_WRITE(map->dpll, temp);
436 REG_READ(dpll_reg); 363 REG_READ(map->dpll);
437 /* FIXME_MDFLD PO - change 500 to 1 after PO */ 364 /* FIXME_MDFLD PO - change 500 to 1 after PO */
438 udelay(500); 365 udelay(500);
439 366
440 REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE); 367 REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
441 REG_READ(dpll_reg); 368 REG_READ(map->dpll);
442 369
443 /** 370 /**
444 * wait for DSI PLL to lock 371 * wait for DSI PLL to lock
@@ -446,25 +373,25 @@ static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode)
446 * since both MIPI pipes share the same PLL. 373 * since both MIPI pipes share the same PLL.
447 */ 374 */
448 while ((pipe != 2) && (timeout < 20000) && 375 while ((pipe != 2) && (timeout < 20000) &&
449 !(REG_READ(pipeconf_reg) & PIPECONF_DSIPLL_LOCK)) { 376 !(REG_READ(map->conf) & PIPECONF_DSIPLL_LOCK)) {
450 udelay(150); 377 udelay(150);
451 timeout++; 378 timeout++;
452 } 379 }
453 } 380 }
454 381
455 /* Enable the plane */ 382 /* Enable the plane */
456 temp = REG_READ(dspcntr_reg); 383 temp = REG_READ(map->cntr);
457 if ((temp & DISPLAY_PLANE_ENABLE) == 0) { 384 if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
458 REG_WRITE(dspcntr_reg, 385 REG_WRITE(map->cntr,
459 temp | DISPLAY_PLANE_ENABLE); 386 temp | DISPLAY_PLANE_ENABLE);
460 /* Flush the plane changes */ 387 /* Flush the plane changes */
461 REG_WRITE(dspbase_reg, REG_READ(dspbase_reg)); 388 REG_WRITE(map->base, REG_READ(map->base));
462 } 389 }
463 390
464 /* Enable the pipe */ 391 /* Enable the pipe */
465 temp = REG_READ(pipeconf_reg); 392 temp = REG_READ(map->conf);
466 if ((temp & PIPEACONF_ENABLE) == 0) { 393 if ((temp & PIPEACONF_ENABLE) == 0) {
467 REG_WRITE(pipeconf_reg, pipeconf); 394 REG_WRITE(map->conf, pipeconf);
468 395
469	 /* Wait for the pipe enable to take effect. */	 396	 /* Wait for the pipe enable to take effect. */
470 mdfldWaitForPipeEnable(dev, pipe); 397 mdfldWaitForPipeEnable(dev, pipe);
@@ -473,39 +400,39 @@ static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode)
473 /*workaround for sighting 3741701 Random X blank display*/ 400 /*workaround for sighting 3741701 Random X blank display*/
474 /*perform w/a in video mode only on pipe A or C*/ 401 /*perform w/a in video mode only on pipe A or C*/
475 if (pipe == 0 || pipe == 2) { 402 if (pipe == 0 || pipe == 2) {
476 REG_WRITE(pipestat_reg, REG_READ(pipestat_reg)); 403 REG_WRITE(map->status, REG_READ(map->status));
477 msleep(100); 404 msleep(100);
478 if (PIPE_VBLANK_STATUS & REG_READ(pipestat_reg)) 405 if (PIPE_VBLANK_STATUS & REG_READ(map->status))
479 dev_dbg(dev->dev, "OK"); 406 dev_dbg(dev->dev, "OK");
480 else { 407 else {
481 dev_dbg(dev->dev, "STUCK!!!!"); 408 dev_dbg(dev->dev, "STUCK!!!!");
482 /*shutdown controller*/ 409 /*shutdown controller*/
483 temp = REG_READ(dspcntr_reg); 410 temp = REG_READ(map->cntr);
484 REG_WRITE(dspcntr_reg, 411 REG_WRITE(map->cntr,
485 temp & ~DISPLAY_PLANE_ENABLE); 412 temp & ~DISPLAY_PLANE_ENABLE);
486 REG_WRITE(dspbase_reg, REG_READ(dspbase_reg)); 413 REG_WRITE(map->base, REG_READ(map->base));
487 /*mdfld_dsi_dpi_shut_down(dev, pipe);*/ 414 /*mdfld_dsi_dpi_shut_down(dev, pipe);*/
488 REG_WRITE(0xb048, 1); 415 REG_WRITE(0xb048, 1);
489 msleep(100); 416 msleep(100);
490 temp = REG_READ(pipeconf_reg); 417 temp = REG_READ(map->conf);
491 temp &= ~PIPEACONF_ENABLE; 418 temp &= ~PIPEACONF_ENABLE;
492 REG_WRITE(pipeconf_reg, temp); 419 REG_WRITE(map->conf, temp);
493 msleep(100); /*wait for pipe disable*/ 420 msleep(100); /*wait for pipe disable*/
494 REG_WRITE(MIPI_DEVICE_READY_REG(pipe), 0); 421 REG_WRITE(MIPI_DEVICE_READY_REG(pipe), 0);
495 msleep(100); 422 msleep(100);
496 REG_WRITE(0xb004, REG_READ(0xb004)); 423 REG_WRITE(0xb004, REG_READ(0xb004));
497 /* try to bring the controller back up again*/ 424 /* try to bring the controller back up again*/
498 REG_WRITE(MIPI_DEVICE_READY_REG(pipe), 1); 425 REG_WRITE(MIPI_DEVICE_READY_REG(pipe), 1);
499 temp = REG_READ(dspcntr_reg); 426 temp = REG_READ(map->cntr);
500 REG_WRITE(dspcntr_reg, 427 REG_WRITE(map->cntr,
501 temp | DISPLAY_PLANE_ENABLE); 428 temp | DISPLAY_PLANE_ENABLE);
502 REG_WRITE(dspbase_reg, REG_READ(dspbase_reg)); 429 REG_WRITE(map->base, REG_READ(map->base));
503 /*mdfld_dsi_dpi_turn_on(dev, pipe);*/ 430 /*mdfld_dsi_dpi_turn_on(dev, pipe);*/
504 REG_WRITE(0xb048, 2); 431 REG_WRITE(0xb048, 2);
505 msleep(100); 432 msleep(100);
506 temp = REG_READ(pipeconf_reg); 433 temp = REG_READ(map->conf);
507 temp |= PIPEACONF_ENABLE; 434 temp |= PIPEACONF_ENABLE;
508 REG_WRITE(pipeconf_reg, temp); 435 REG_WRITE(map->conf, temp);
509 } 436 }
510 } 437 }
511 438
@@ -529,35 +456,35 @@ static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode)
529 REG_WRITE(VGACNTRL, VGA_DISP_DISABLE); 456 REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
530 457
531 /* Disable display plane */ 458 /* Disable display plane */
532 temp = REG_READ(dspcntr_reg); 459 temp = REG_READ(map->cntr);
533 if ((temp & DISPLAY_PLANE_ENABLE) != 0) { 460 if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
534 REG_WRITE(dspcntr_reg, 461 REG_WRITE(map->cntr,
535 temp & ~DISPLAY_PLANE_ENABLE); 462 temp & ~DISPLAY_PLANE_ENABLE);
536 /* Flush the plane changes */ 463 /* Flush the plane changes */
537 REG_WRITE(dspbase_reg, REG_READ(dspbase_reg)); 464 REG_WRITE(map->base, REG_READ(map->base));
538 REG_READ(dspbase_reg); 465 REG_READ(map->base);
539 } 466 }
540 467
541 /* Next, disable display pipes */ 468 /* Next, disable display pipes */
542 temp = REG_READ(pipeconf_reg); 469 temp = REG_READ(map->conf);
543 if ((temp & PIPEACONF_ENABLE) != 0) { 470 if ((temp & PIPEACONF_ENABLE) != 0) {
544 temp &= ~PIPEACONF_ENABLE; 471 temp &= ~PIPEACONF_ENABLE;
545 temp |= PIPECONF_PLANE_OFF | PIPECONF_CURSOR_OFF; 472 temp |= PIPECONF_PLANE_OFF | PIPECONF_CURSOR_OFF;
546 REG_WRITE(pipeconf_reg, temp); 473 REG_WRITE(map->conf, temp);
547 REG_READ(pipeconf_reg); 474 REG_READ(map->conf);
548 475
549	 /* Wait for the pipe disable to take effect. */	 476	 /* Wait for the pipe disable to take effect. */
550 mdfldWaitForPipeDisable(dev, pipe); 477 mdfldWaitForPipeDisable(dev, pipe);
551 } 478 }
552 479
553 temp = REG_READ(dpll_reg); 480 temp = REG_READ(map->dpll);
554 if (temp & DPLL_VCO_ENABLE) { 481 if (temp & DPLL_VCO_ENABLE) {
555 if ((pipe != 1 && !((REG_READ(PIPEACONF) 482 if ((pipe != 1 && !((REG_READ(PIPEACONF)
556 | REG_READ(PIPECCONF)) & PIPEACONF_ENABLE)) 483 | REG_READ(PIPECCONF)) & PIPEACONF_ENABLE))
557 || pipe == 1) { 484 || pipe == 1) {
558 temp &= ~(DPLL_VCO_ENABLE); 485 temp &= ~(DPLL_VCO_ENABLE);
559 REG_WRITE(dpll_reg, temp); 486 REG_WRITE(map->dpll, temp);
560 REG_READ(dpll_reg); 487 REG_READ(map->dpll);
561 /* Wait for the clocks to turn off. */ 488 /* Wait for the clocks to turn off. */
562 /* FIXME_MDFLD PO may need more delay */ 489 /* FIXME_MDFLD PO may need more delay */
563 udelay(500); 490 udelay(500);
@@ -764,21 +691,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
764 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 691 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
765 struct drm_psb_private *dev_priv = dev->dev_private; 692 struct drm_psb_private *dev_priv = dev->dev_private;
766 int pipe = psb_intel_crtc->pipe; 693 int pipe = psb_intel_crtc->pipe;
767 int fp_reg = MRST_FPA0; 694 const struct psb_offset *map = &dev_priv->regmap[pipe];
768 int dpll_reg = MRST_DPLL_A;
769 int dspcntr_reg = DSPACNTR;
770 int pipeconf_reg = PIPEACONF;
771 int htot_reg = HTOTAL_A;
772 int hblank_reg = HBLANK_A;
773 int hsync_reg = HSYNC_A;
774 int vtot_reg = VTOTAL_A;
775 int vblank_reg = VBLANK_A;
776 int vsync_reg = VSYNC_A;
777 int dspsize_reg = DSPASIZE;
778 int dsppos_reg = DSPAPOS;
779 int pipesrc_reg = PIPEASRC;
780 u32 *pipeconf = &dev_priv->pipeconf[pipe];
781 u32 *dspcntr = &dev_priv->dspcntr[pipe];
782 int refclk = 0; 695 int refclk = 0;
783 int clk_n = 0, clk_p2 = 0, clk_byte = 1, clk = 0, m_conv = 0, 696 int clk_n = 0, clk_p2 = 0, clk_byte = 1, clk = 0, m_conv = 0,
784 clk_tmp = 0; 697 clk_tmp = 0;
@@ -806,45 +719,6 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
806 } 719 }
807#endif 720#endif
808 721
809 switch (pipe) {
810 case 0:
811 break;
812 case 1:
813 fp_reg = FPB0;
814 dpll_reg = DPLL_B;
815 dspcntr_reg = DSPBCNTR;
816 pipeconf_reg = PIPEBCONF;
817 htot_reg = HTOTAL_B;
818 hblank_reg = HBLANK_B;
819 hsync_reg = HSYNC_B;
820 vtot_reg = VTOTAL_B;
821 vblank_reg = VBLANK_B;
822 vsync_reg = VSYNC_B;
823 dspsize_reg = DSPBSIZE;
824 dsppos_reg = DSPBPOS;
825 pipesrc_reg = PIPEBSRC;
826 fp_reg = MDFLD_DPLL_DIV0;
827 dpll_reg = MDFLD_DPLL_B;
828 break;
829 case 2:
830 dpll_reg = MRST_DPLL_A;
831 dspcntr_reg = DSPCCNTR;
832 pipeconf_reg = PIPECCONF;
833 htot_reg = HTOTAL_C;
834 hblank_reg = HBLANK_C;
835 hsync_reg = HSYNC_C;
836 vtot_reg = VTOTAL_C;
837 vblank_reg = VBLANK_C;
838 vsync_reg = VSYNC_C;
839 dspsize_reg = DSPCSIZE;
840 dsppos_reg = DSPCPOS;
841 pipesrc_reg = PIPECSRC;
842 break;
843 default:
844 DRM_ERROR("Illegal Pipe Number.\n");
845 return 0;
846 }
847
848 ret = check_fb(crtc->fb); 722 ret = check_fb(crtc->fb);
849 if (ret) 723 if (ret)
850 return ret; 724 return ret;
@@ -929,21 +803,21 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
929 * contained within the displayable area of the screen image 803 * contained within the displayable area of the screen image
930 * (frame buffer). 804 * (frame buffer).
931 */ 805 */
932 REG_WRITE(dspsize_reg, ((min(mode->crtc_vdisplay, adjusted_mode->crtc_vdisplay) - 1) << 16) 806 REG_WRITE(map->size, ((min(mode->crtc_vdisplay, adjusted_mode->crtc_vdisplay) - 1) << 16)
933 | (min(mode->crtc_hdisplay, adjusted_mode->crtc_hdisplay) - 1)); 807 | (min(mode->crtc_hdisplay, adjusted_mode->crtc_hdisplay) - 1));
934 /* Set the CRTC with encoder mode. */ 808 /* Set the CRTC with encoder mode. */
935 REG_WRITE(pipesrc_reg, ((mode->crtc_hdisplay - 1) << 16) 809 REG_WRITE(map->src, ((mode->crtc_hdisplay - 1) << 16)
936 | (mode->crtc_vdisplay - 1)); 810 | (mode->crtc_vdisplay - 1));
937 } else { 811 } else {
938 REG_WRITE(dspsize_reg, 812 REG_WRITE(map->size,
939 ((mode->crtc_vdisplay - 1) << 16) | 813 ((mode->crtc_vdisplay - 1) << 16) |
940 (mode->crtc_hdisplay - 1)); 814 (mode->crtc_hdisplay - 1));
941 REG_WRITE(pipesrc_reg, 815 REG_WRITE(map->src,
942 ((mode->crtc_hdisplay - 1) << 16) | 816 ((mode->crtc_hdisplay - 1) << 16) |
943 (mode->crtc_vdisplay - 1)); 817 (mode->crtc_vdisplay - 1));
944 } 818 }
945 819
946 REG_WRITE(dsppos_reg, 0); 820 REG_WRITE(map->pos, 0);
947 821
948 if (psb_intel_encoder) 822 if (psb_intel_encoder)
949 drm_connector_property_get_value(connector, 823 drm_connector_property_get_value(connector,
@@ -961,34 +835,34 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
961 offsetY = (adjusted_mode->crtc_vdisplay - 835 offsetY = (adjusted_mode->crtc_vdisplay -
962 mode->crtc_vdisplay) / 2; 836 mode->crtc_vdisplay) / 2;
963 837
964 REG_WRITE(htot_reg, (mode->crtc_hdisplay - 1) | 838 REG_WRITE(map->htotal, (mode->crtc_hdisplay - 1) |
965 ((adjusted_mode->crtc_htotal - 1) << 16)); 839 ((adjusted_mode->crtc_htotal - 1) << 16));
966 REG_WRITE(vtot_reg, (mode->crtc_vdisplay - 1) | 840 REG_WRITE(map->vtotal, (mode->crtc_vdisplay - 1) |
967 ((adjusted_mode->crtc_vtotal - 1) << 16)); 841 ((adjusted_mode->crtc_vtotal - 1) << 16));
968 REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 842 REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start -
969 offsetX - 1) | 843 offsetX - 1) |
970 ((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16)); 844 ((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16));
971 REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 845 REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start -
972 offsetX - 1) | 846 offsetX - 1) |
973 ((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16)); 847 ((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16));
974 REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 848 REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start -
975 offsetY - 1) | 849 offsetY - 1) |
976 ((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16)); 850 ((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16));
977 REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 851 REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start -
978 offsetY - 1) | 852 offsetY - 1) |
979 ((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16)); 853 ((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16));
980 } else { 854 } else {
981 REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) | 855 REG_WRITE(map->htotal, (adjusted_mode->crtc_hdisplay - 1) |
982 ((adjusted_mode->crtc_htotal - 1) << 16)); 856 ((adjusted_mode->crtc_htotal - 1) << 16));
983 REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) | 857 REG_WRITE(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) |
984 ((adjusted_mode->crtc_vtotal - 1) << 16)); 858 ((adjusted_mode->crtc_vtotal - 1) << 16));
985 REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) | 859 REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start - 1) |
986 ((adjusted_mode->crtc_hblank_end - 1) << 16)); 860 ((adjusted_mode->crtc_hblank_end - 1) << 16));
987 REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) | 861 REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start - 1) |
988 ((adjusted_mode->crtc_hsync_end - 1) << 16)); 862 ((adjusted_mode->crtc_hsync_end - 1) << 16));
989 REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) | 863 REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start - 1) |
990 ((adjusted_mode->crtc_vblank_end - 1) << 16)); 864 ((adjusted_mode->crtc_vblank_end - 1) << 16));
991 REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) | 865 REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start - 1) |
992 ((adjusted_mode->crtc_vsync_end - 1) << 16)); 866 ((adjusted_mode->crtc_vsync_end - 1) << 16));
993 } 867 }
994 868
@@ -1000,12 +874,12 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
1000 } 874 }
1001 875
1002 /* setup pipeconf */ 876 /* setup pipeconf */
1003 *pipeconf = PIPEACONF_ENABLE; /* FIXME_JLIU7 REG_READ(pipeconf_reg); */ 877 dev_priv->pipeconf[pipe] = PIPEACONF_ENABLE; /* FIXME_JLIU7 REG_READ(pipeconf_reg); */
1004 878
1005 /* Set up the display plane register */ 879 /* Set up the display plane register */
1006 *dspcntr = REG_READ(dspcntr_reg); 880 dev_priv->dspcntr[pipe] = REG_READ(map->cntr);
1007 *dspcntr |= pipe << DISPPLANE_SEL_PIPE_POS; 881 dev_priv->dspcntr[pipe] |= pipe << DISPPLANE_SEL_PIPE_POS;
1008 *dspcntr |= DISPLAY_PLANE_ENABLE; 882 dev_priv->dspcntr[pipe] |= DISPLAY_PLANE_ENABLE;
1009 883
1010 if (is_mipi2) 884 if (is_mipi2)
1011 goto mrst_crtc_mode_set_exit; 885 goto mrst_crtc_mode_set_exit;
@@ -1070,21 +944,21 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
1070 clock.p1, m_conv); 944 clock.p1, m_conv);
1071 } 945 }
1072 946
1073 dpll = REG_READ(dpll_reg); 947 dpll = REG_READ(map->dpll);
1074 948
1075 if (dpll & DPLL_VCO_ENABLE) { 949 if (dpll & DPLL_VCO_ENABLE) {
1076 dpll &= ~DPLL_VCO_ENABLE; 950 dpll &= ~DPLL_VCO_ENABLE;
1077 REG_WRITE(dpll_reg, dpll); 951 REG_WRITE(map->dpll, dpll);
1078 REG_READ(dpll_reg); 952 REG_READ(map->dpll);
1079 953
1080 /* FIXME jliu7 check the DPLL lock bit PIPEACONF[29] */ 954 /* FIXME jliu7 check the DPLL lock bit PIPEACONF[29] */
1081 /* FIXME_MDFLD PO - change 500 to 1 after PO */ 955 /* FIXME_MDFLD PO - change 500 to 1 after PO */
1082 udelay(500); 956 udelay(500);
1083 957
1084 /* reset M1, N1 & P1 */ 958 /* reset M1, N1 & P1 */
1085 REG_WRITE(fp_reg, 0); 959 REG_WRITE(map->fp0, 0);
1086 dpll &= ~MDFLD_P1_MASK; 960 dpll &= ~MDFLD_P1_MASK;
1087 REG_WRITE(dpll_reg, dpll); 961 REG_WRITE(map->dpll, dpll);
1088 /* FIXME_MDFLD PO - change 500 to 1 after PO */ 962 /* FIXME_MDFLD PO - change 500 to 1 after PO */
1089 udelay(500); 963 udelay(500);
1090 } 964 }
@@ -1093,7 +967,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
1093	 * enabling the VCO */					 967	 * enabling the VCO */
1094 if (dpll & MDFLD_PWR_GATE_EN) { 968 if (dpll & MDFLD_PWR_GATE_EN) {
1095 dpll &= ~MDFLD_PWR_GATE_EN; 969 dpll &= ~MDFLD_PWR_GATE_EN;
1096 REG_WRITE(dpll_reg, dpll); 970 REG_WRITE(map->dpll, dpll);
1097 /* FIXME_MDFLD PO - change 500 to 1 after PO */ 971 /* FIXME_MDFLD PO - change 500 to 1 after PO */
1098 udelay(500); 972 udelay(500);
1099 } 973 }
@@ -1134,18 +1008,18 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
1134 fp = 0x000000c1; 1008 fp = 0x000000c1;
1135 } 1009 }
1136 1010
1137 REG_WRITE(fp_reg, fp); 1011 REG_WRITE(map->fp0, fp);
1138 REG_WRITE(dpll_reg, dpll); 1012 REG_WRITE(map->dpll, dpll);
1139 /* FIXME_MDFLD PO - change 500 to 1 after PO */ 1013 /* FIXME_MDFLD PO - change 500 to 1 after PO */
1140 udelay(500); 1014 udelay(500);
1141 1015
1142 dpll |= DPLL_VCO_ENABLE; 1016 dpll |= DPLL_VCO_ENABLE;
1143 REG_WRITE(dpll_reg, dpll); 1017 REG_WRITE(map->dpll, dpll);
1144 REG_READ(dpll_reg); 1018 REG_READ(map->dpll);
1145 1019
1146 /* wait for DSI PLL to lock */ 1020 /* wait for DSI PLL to lock */
1147 while (timeout < 20000 && 1021 while (timeout < 20000 &&
1148 !(REG_READ(pipeconf_reg) & PIPECONF_DSIPLL_LOCK)) { 1022 !(REG_READ(map->conf) & PIPECONF_DSIPLL_LOCK)) {
1149 udelay(150); 1023 udelay(150);
1150 timeout++; 1024 timeout++;
1151 } 1025 }
@@ -1155,11 +1029,11 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
1155 1029
1156 dev_dbg(dev->dev, "is_mipi = 0x%x\n", is_mipi); 1030 dev_dbg(dev->dev, "is_mipi = 0x%x\n", is_mipi);
1157 1031
1158 REG_WRITE(pipeconf_reg, *pipeconf); 1032 REG_WRITE(map->conf, dev_priv->pipeconf[pipe]);
1159 REG_READ(pipeconf_reg); 1033 REG_READ(map->conf);
1160 1034
1161	 /* Wait for the pipe enable to take effect. */	 1035	 /* Wait for the pipe enable to take effect. */
1162 REG_WRITE(dspcntr_reg, *dspcntr); 1036 REG_WRITE(map->cntr, dev_priv->dspcntr[pipe]);
1163 psb_intel_wait_for_vblank(dev); 1037 psb_intel_wait_for_vblank(dev);
1164 1038
1165mrst_crtc_mode_set_exit: 1039mrst_crtc_mode_set_exit:
diff --git a/drivers/gpu/drm/gma500/mid_bios.c b/drivers/gpu/drm/gma500/mid_bios.c
index 5eee9ad80da4..b2a790bd9899 100644
--- a/drivers/gpu/drm/gma500/mid_bios.c
+++ b/drivers/gpu/drm/gma500/mid_bios.c
@@ -118,139 +118,214 @@ static void mid_get_pci_revID(struct drm_psb_private *dev_priv)
118 dev_priv->platform_rev_id); 118 dev_priv->platform_rev_id);
119} 119}
120 120
121struct vbt_header {
122 u32 signature;
123 u8 revision;
124} __packed;
125
126/* The same for r0 and r1 */
127struct vbt_r0 {
128 struct vbt_header vbt_header;
129 u8 size;
130 u8 checksum;
131} __packed;
132
133struct vbt_r10 {
134 struct vbt_header vbt_header;
135 u8 checksum;
136 u16 size;
137 u8 panel_count;
138 u8 primary_panel_idx;
139 u8 secondary_panel_idx;
140 u8 __reserved[5];
141} __packed;
142
143static int read_vbt_r0(u32 addr, struct vbt_r0 *vbt)
144{
145 void __iomem *vbt_virtual;
146
147 vbt_virtual = ioremap(addr, sizeof(*vbt));
148 if (vbt_virtual == NULL)
149 return -1;
150
151 memcpy_fromio(vbt, vbt_virtual, sizeof(*vbt));
152 iounmap(vbt_virtual);
153
154 return 0;
155}
156
157static int read_vbt_r10(u32 addr, struct vbt_r10 *vbt)
158{
159 void __iomem *vbt_virtual;
160
161 vbt_virtual = ioremap(addr, sizeof(*vbt));
162 if (!vbt_virtual)
163 return -1;
164
165 memcpy_fromio(vbt, vbt_virtual, sizeof(*vbt));
166 iounmap(vbt_virtual);
167
168 return 0;
169}
170
171static int mid_get_vbt_data_r0(struct drm_psb_private *dev_priv, u32 addr)
172{
173 struct vbt_r0 vbt;
174 void __iomem *gct_virtual;
175 struct gct_r0 gct;
176 u8 bpi;
177
178 if (read_vbt_r0(addr, &vbt))
179 return -1;
180
181 gct_virtual = ioremap(addr + sizeof(vbt), vbt.size - sizeof(vbt));
182 if (!gct_virtual)
183 return -1;
184 memcpy_fromio(&gct, gct_virtual, sizeof(gct));
185 iounmap(gct_virtual);
186
187 bpi = gct.PD.BootPanelIndex;
188 dev_priv->gct_data.bpi = bpi;
189 dev_priv->gct_data.pt = gct.PD.PanelType;
190 dev_priv->gct_data.DTD = gct.panel[bpi].DTD;
191 dev_priv->gct_data.Panel_Port_Control =
192 gct.panel[bpi].Panel_Port_Control;
193 dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
194 gct.panel[bpi].Panel_MIPI_Display_Descriptor;
195
196 return 0;
197}
198
199static int mid_get_vbt_data_r1(struct drm_psb_private *dev_priv, u32 addr)
200{
201 struct vbt_r0 vbt;
202 void __iomem *gct_virtual;
203 struct gct_r1 gct;
204 u8 bpi;
205
206 if (read_vbt_r0(addr, &vbt))
207 return -1;
208
209 gct_virtual = ioremap(addr + sizeof(vbt), vbt.size - sizeof(vbt));
210 if (!gct_virtual)
211 return -1;
212 memcpy_fromio(&gct, gct_virtual, sizeof(gct));
213 iounmap(gct_virtual);
214
215 bpi = gct.PD.BootPanelIndex;
216 dev_priv->gct_data.bpi = bpi;
217 dev_priv->gct_data.pt = gct.PD.PanelType;
218 dev_priv->gct_data.DTD = gct.panel[bpi].DTD;
219 dev_priv->gct_data.Panel_Port_Control =
220 gct.panel[bpi].Panel_Port_Control;
221 dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
222 gct.panel[bpi].Panel_MIPI_Display_Descriptor;
223
224 return 0;
225}
226
227static int mid_get_vbt_data_r10(struct drm_psb_private *dev_priv, u32 addr)
228{
229 struct vbt_r10 vbt;
230 void __iomem *gct_virtual;
231 struct gct_r10 *gct;
232 struct oaktrail_timing_info *dp_ti = &dev_priv->gct_data.DTD;
233 struct gct_r10_timing_info *ti;
234 int ret = -1;
235
236 if (read_vbt_r10(addr, &vbt))
237 return -1;
238
239 gct = kmalloc(sizeof(*gct) * vbt.panel_count, GFP_KERNEL);
240 if (!gct)
241 return -1;
242
243 gct_virtual = ioremap(addr + sizeof(vbt),
244 sizeof(*gct) * vbt.panel_count);
245 if (!gct_virtual)
246 goto out;
247 memcpy_fromio(gct, gct_virtual, sizeof(*gct));
248 iounmap(gct_virtual);
249
250 dev_priv->gct_data.bpi = vbt.primary_panel_idx;
251 dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
252 gct[vbt.primary_panel_idx].Panel_MIPI_Display_Descriptor;
253
254 ti = &gct[vbt.primary_panel_idx].DTD;
255 dp_ti->pixel_clock = ti->pixel_clock;
256 dp_ti->hactive_hi = ti->hactive_hi;
257 dp_ti->hactive_lo = ti->hactive_lo;
258 dp_ti->hblank_hi = ti->hblank_hi;
259 dp_ti->hblank_lo = ti->hblank_lo;
260 dp_ti->hsync_offset_hi = ti->hsync_offset_hi;
261 dp_ti->hsync_offset_lo = ti->hsync_offset_lo;
262 dp_ti->hsync_pulse_width_hi = ti->hsync_pulse_width_hi;
263 dp_ti->hsync_pulse_width_lo = ti->hsync_pulse_width_lo;
264 dp_ti->vactive_hi = ti->vactive_hi;
265 dp_ti->vactive_lo = ti->vactive_lo;
266 dp_ti->vblank_hi = ti->vblank_hi;
267 dp_ti->vblank_lo = ti->vblank_lo;
268 dp_ti->vsync_offset_hi = ti->vsync_offset_hi;
269 dp_ti->vsync_offset_lo = ti->vsync_offset_lo;
270 dp_ti->vsync_pulse_width_hi = ti->vsync_pulse_width_hi;
271 dp_ti->vsync_pulse_width_lo = ti->vsync_pulse_width_lo;
272
273 ret = 0;
274out:
275 kfree(gct);
276 return ret;
277}
278
121static void mid_get_vbt_data(struct drm_psb_private *dev_priv) 279static void mid_get_vbt_data(struct drm_psb_private *dev_priv)
122{ 280{
123 struct drm_device *dev = dev_priv->dev; 281 struct drm_device *dev = dev_priv->dev;
124 struct oaktrail_vbt *vbt = &dev_priv->vbt_data;
125 u32 addr; 282 u32 addr;
126 u16 new_size; 283 u8 __iomem *vbt_virtual;
127 u8 *vbt_virtual; 284 struct vbt_header vbt_header;
128 u8 bpi;
129 u8 number_desc = 0;
130 struct oaktrail_timing_info *dp_ti = &dev_priv->gct_data.DTD;
131 struct gct_r10_timing_info ti;
132 void *pGCT;
133 struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0)); 285 struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
286 int ret = -1;
134 287
135 /* Get the address of the platform config vbt, B0:D2:F0;0xFC */ 288 /* Get the address of the platform config vbt */
136 pci_read_config_dword(pci_gfx_root, 0xFC, &addr); 289 pci_read_config_dword(pci_gfx_root, 0xFC, &addr);
137 pci_dev_put(pci_gfx_root); 290 pci_dev_put(pci_gfx_root);
138 291
139 dev_dbg(dev->dev, "drm platform config address is %x\n", addr); 292 dev_dbg(dev->dev, "drm platform config address is %x\n", addr);
140 293
141 /* check for platform config address == 0. */ 294 if (!addr)
142 /* this means fw doesn't support vbt */ 295 goto out;
143
144 if (addr == 0) {
145 vbt->size = 0;
146 return;
147 }
148 296
149 /* get the virtual address of the vbt */ 297 /* get the virtual address of the vbt */
150 vbt_virtual = ioremap(addr, sizeof(*vbt)); 298 vbt_virtual = ioremap(addr, sizeof(vbt_header));
151 if (vbt_virtual == NULL) { 299 if (!vbt_virtual)
152 vbt->size = 0; 300 goto out;
153 return;
154 }
155 301
156 memcpy(vbt, vbt_virtual, sizeof(*vbt)); 302 memcpy_fromio(&vbt_header, vbt_virtual, sizeof(vbt_header));
157 iounmap(vbt_virtual); /* Free virtual address space */ 303 iounmap(vbt_virtual);
158 304
159 /* No matching signature don't process the data */ 305 if (memcmp(&vbt_header.signature, "$GCT", 4))
160 if (memcmp(vbt->signature, "$GCT", 4)) { 306 goto out;
161 vbt->size = 0; 307
162 return; 308 dev_dbg(dev->dev, "GCT revision is %02x\n", vbt_header.revision);
163 }
164 309
165 dev_dbg(dev->dev, "GCT revision is %x\n", vbt->revision); 310 switch (vbt_header.revision) {
166 311 case 0x00:
167 switch (vbt->revision) { 312 ret = mid_get_vbt_data_r0(dev_priv, addr);
168 case 0:
169 vbt->oaktrail_gct = ioremap(addr + sizeof(*vbt) - 4,
170 vbt->size - sizeof(*vbt) + 4);
171 pGCT = vbt->oaktrail_gct;
172 bpi = ((struct oaktrail_gct_v1 *)pGCT)->PD.BootPanelIndex;
173 dev_priv->gct_data.bpi = bpi;
174 dev_priv->gct_data.pt =
175 ((struct oaktrail_gct_v1 *)pGCT)->PD.PanelType;
176 memcpy(&dev_priv->gct_data.DTD,
177 &((struct oaktrail_gct_v1 *)pGCT)->panel[bpi].DTD,
178 sizeof(struct oaktrail_timing_info));
179 dev_priv->gct_data.Panel_Port_Control =
180 ((struct oaktrail_gct_v1 *)pGCT)->panel[bpi].Panel_Port_Control;
181 dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
182 ((struct oaktrail_gct_v1 *)pGCT)->panel[bpi].Panel_MIPI_Display_Descriptor;
183 break; 313 break;
184 case 1: 314 case 0x01:
185 vbt->oaktrail_gct = ioremap(addr + sizeof(*vbt) - 4, 315 ret = mid_get_vbt_data_r1(dev_priv, addr);
186 vbt->size - sizeof(*vbt) + 4);
187 pGCT = vbt->oaktrail_gct;
188 bpi = ((struct oaktrail_gct_v2 *)pGCT)->PD.BootPanelIndex;
189 dev_priv->gct_data.bpi = bpi;
190 dev_priv->gct_data.pt =
191 ((struct oaktrail_gct_v2 *)pGCT)->PD.PanelType;
192 memcpy(&dev_priv->gct_data.DTD,
193 &((struct oaktrail_gct_v2 *)pGCT)->panel[bpi].DTD,
194 sizeof(struct oaktrail_timing_info));
195 dev_priv->gct_data.Panel_Port_Control =
196 ((struct oaktrail_gct_v2 *)pGCT)->panel[bpi].Panel_Port_Control;
197 dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
198 ((struct oaktrail_gct_v2 *)pGCT)->panel[bpi].Panel_MIPI_Display_Descriptor;
199 break; 316 break;
200 case 0x10: 317 case 0x10:
201 /*header definition changed from rev 01 (v2) to rev 10h. */ 318 ret = mid_get_vbt_data_r10(dev_priv, addr);
202 /*so, some values have changed location*/
203 new_size = vbt->checksum; /*checksum contains lo size byte*/
204 /*LSB of oaktrail_gct contains hi size byte*/
205 new_size |= ((0xff & (unsigned int)(long)vbt->oaktrail_gct)) << 8;
206
207 vbt->checksum = vbt->size; /*size contains the checksum*/
208 if (new_size > 0xff)
209 vbt->size = 0xff; /*restrict size to 255*/
210 else
211 vbt->size = new_size;
212
213 /* number of descriptors defined in the GCT */
214 number_desc = ((0xff00 & (unsigned int)(long)vbt->oaktrail_gct)) >> 8;
215 bpi = ((0xff0000 & (unsigned int)(long)vbt->oaktrail_gct)) >> 16;
216 vbt->oaktrail_gct = ioremap(addr + GCT_R10_HEADER_SIZE,
217 GCT_R10_DISPLAY_DESC_SIZE * number_desc);
218 pGCT = vbt->oaktrail_gct;
219 pGCT = (u8 *)pGCT + (bpi*GCT_R10_DISPLAY_DESC_SIZE);
220 dev_priv->gct_data.bpi = bpi; /*save boot panel id*/
221
222 /*copy the GCT display timings into a temp structure*/
223 memcpy(&ti, pGCT, sizeof(struct gct_r10_timing_info));
224
225 /*now copy the temp struct into the dev_priv->gct_data*/
226 dp_ti->pixel_clock = ti.pixel_clock;
227 dp_ti->hactive_hi = ti.hactive_hi;
228 dp_ti->hactive_lo = ti.hactive_lo;
229 dp_ti->hblank_hi = ti.hblank_hi;
230 dp_ti->hblank_lo = ti.hblank_lo;
231 dp_ti->hsync_offset_hi = ti.hsync_offset_hi;
232 dp_ti->hsync_offset_lo = ti.hsync_offset_lo;
233 dp_ti->hsync_pulse_width_hi = ti.hsync_pulse_width_hi;
234 dp_ti->hsync_pulse_width_lo = ti.hsync_pulse_width_lo;
235 dp_ti->vactive_hi = ti.vactive_hi;
236 dp_ti->vactive_lo = ti.vactive_lo;
237 dp_ti->vblank_hi = ti.vblank_hi;
238 dp_ti->vblank_lo = ti.vblank_lo;
239 dp_ti->vsync_offset_hi = ti.vsync_offset_hi;
240 dp_ti->vsync_offset_lo = ti.vsync_offset_lo;
241 dp_ti->vsync_pulse_width_hi = ti.vsync_pulse_width_hi;
242 dp_ti->vsync_pulse_width_lo = ti.vsync_pulse_width_lo;
243
244 /* Move the MIPI_Display_Descriptor data from GCT to dev priv */
245 dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
246 *((u8 *)pGCT + 0x0d);
247 dev_priv->gct_data.Panel_MIPI_Display_Descriptor |=
248 (*((u8 *)pGCT + 0x0e)) << 8;
249 break; 319 break;
250 default: 320 default:
251 dev_err(dev->dev, "Unknown revision of GCT!\n"); 321 dev_err(dev->dev, "Unknown revision of GCT!\n");
252 vbt->size = 0;
253 } 322 }
323
324out:
325 if (ret)
326 dev_err(dev->dev, "Unable to read GCT!");
327 else
328 dev_priv->has_gct = true;
254} 329}
255 330
256int mid_chip_setup(struct drm_device *dev) 331int mid_chip_setup(struct drm_device *dev)
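read_vbt_r0() and read_vbt_r10() above both follow the standard recipe for pulling a small firmware table out of physical memory: ioremap() the region, memcpy_fromio() into a local struct, iounmap() immediately. A hedged generic sketch of the same recipe (the helper name and the -ENOMEM return are illustrative; the driver's helpers return -1):

	/* Illustrative: copy a small firmware blob out of physical memory. */
	static int read_fw_blob(u32 phys_addr, void *buf, size_t len)
	{
		void __iomem *virt = ioremap(phys_addr, len);

		if (!virt)
			return -ENOMEM;
		memcpy_fromio(buf, virt, len);
		iounmap(virt);
		return 0;
	}

Copying out and unmapping right away keeps the __iomem pointer's lifetime trivially correct and lets the callers work on plain structs.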
diff --git a/drivers/gpu/drm/gma500/oaktrail.h b/drivers/gpu/drm/gma500/oaktrail.h
index 2da1f368f14e..f2f9f38a5362 100644
--- a/drivers/gpu/drm/gma500/oaktrail.h
+++ b/drivers/gpu/drm/gma500/oaktrail.h
@@ -19,14 +19,6 @@
19 19
20/* MID device specific descriptors */ 20/* MID device specific descriptors */
21 21
22struct oaktrail_vbt {
23 s8 signature[4]; /*4 bytes,"$GCT" */
24 u8 revision;
25 u8 size;
26 u8 checksum;
27 void *oaktrail_gct;
28} __packed;
29
30struct oaktrail_timing_info { 22struct oaktrail_timing_info {
31 u16 pixel_clock; 23 u16 pixel_clock;
32 u8 hactive_lo; 24 u8 hactive_lo;
@@ -161,7 +153,7 @@ union oaktrail_panel_rx {
161 u16 panel_receiver; 153 u16 panel_receiver;
162} __packed; 154} __packed;
163 155
164struct oaktrail_gct_v1 { 156struct gct_r0 {
165 union { /*8 bits,Defined as follows: */ 157 union { /*8 bits,Defined as follows: */
166 struct { 158 struct {
167 u8 PanelType:4; /*4 bits, Bit field for panels*/ 159 u8 PanelType:4; /*4 bits, Bit field for panels*/
@@ -178,7 +170,7 @@ struct oaktrail_gct_v1 {
178 union oaktrail_panel_rx panelrx[4]; /* panel receivers*/ 170 union oaktrail_panel_rx panelrx[4]; /* panel receivers*/
179} __packed; 171} __packed;
180 172
181struct oaktrail_gct_v2 { 173struct gct_r1 {
182 union { /*8 bits,Defined as follows: */ 174 union { /*8 bits,Defined as follows: */
183 struct { 175 struct {
184 u8 PanelType:4; /*4 bits, Bit field for panels*/ 176 u8 PanelType:4; /*4 bits, Bit field for panels*/
@@ -195,6 +187,16 @@ struct oaktrail_gct_v2 {
195 union oaktrail_panel_rx panelrx[4]; /* panel receivers*/ 187 union oaktrail_panel_rx panelrx[4]; /* panel receivers*/
196} __packed; 188} __packed;
197 189
190struct gct_r10 {
191 struct gct_r10_timing_info DTD;
192 u16 Panel_MIPI_Display_Descriptor;
193 u16 Panel_MIPI_Receiver_Descriptor;
194 u16 Panel_Backlight_Inverter_Descriptor;
195 u8 Panel_Initial_Brightness;
196 u32 MIPI_Ctlr_Init_ptr;
197 u32 MIPI_Panel_Init_ptr;
198} __packed;
199
198struct oaktrail_gct_data { 200struct oaktrail_gct_data {
 199 u8 bpi; /* boot panel index, number of the panel used during boot */ 201
200 u8 pt; /* panel type, 4 bit field, 0=lvds, 1=mipi */ 202 u8 pt; /* panel type, 4 bit field, 0=lvds, 1=mipi */
@@ -213,9 +215,6 @@ struct oaktrail_gct_data {
213#define MODE_SETTING_IN_DSR 0x4 215#define MODE_SETTING_IN_DSR 0x4
214#define MODE_SETTING_ENCODER_DONE 0x8 216#define MODE_SETTING_ENCODER_DONE 0x8
215 217
216#define GCT_R10_HEADER_SIZE 16
217#define GCT_R10_DISPLAY_DESC_SIZE 28
218
219/* 218/*
220 * Moorestown HDMI interfaces 219 * Moorestown HDMI interfaces
221 */ 220 */
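Note that the DTD timing fields in oaktrail_timing_info and gct_r10_timing_info are carried as split _lo/_hi parts, following the EDID detailed-timing descriptor layout, which is why mid_get_vbt_data_r10() copies them pairwise rather than as whole values. As a sketch, assuming a plain high-byte/low-byte split (the real structs use EDID's bit widths, so some _hi parts may be 4-bit fields):

	/* Sketch: reassemble one split timing value. */
	static inline u16 gct_timing(u8 hi, u8 lo)
	{
		return ((u16)hi << 8) | lo;
	}

	/* e.g.: hactive = gct_timing(ti->hactive_hi, ti->hactive_lo); */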
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index a39b0d0d680f..f821c835ca90 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -162,12 +162,10 @@ mrstFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
162static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode) 162static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
163{ 163{
164 struct drm_device *dev = crtc->dev; 164 struct drm_device *dev = crtc->dev;
165 struct drm_psb_private *dev_priv = dev->dev_private;
165 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 166 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
166 int pipe = psb_intel_crtc->pipe; 167 int pipe = psb_intel_crtc->pipe;
167 int dpll_reg = (pipe == 0) ? MRST_DPLL_A : DPLL_B; 168 const struct psb_offset *map = &dev_priv->regmap[pipe];
168 int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
169 int dspbase_reg = (pipe == 0) ? MRST_DSPABASE : DSPBBASE;
170 int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
171 u32 temp; 169 u32 temp;
172 170
173 if (!gma_power_begin(dev, true)) 171 if (!gma_power_begin(dev, true))
@@ -181,32 +179,32 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
181 case DRM_MODE_DPMS_STANDBY: 179 case DRM_MODE_DPMS_STANDBY:
182 case DRM_MODE_DPMS_SUSPEND: 180 case DRM_MODE_DPMS_SUSPEND:
183 /* Enable the DPLL */ 181 /* Enable the DPLL */
184 temp = REG_READ(dpll_reg); 182 temp = REG_READ(map->dpll);
185 if ((temp & DPLL_VCO_ENABLE) == 0) { 183 if ((temp & DPLL_VCO_ENABLE) == 0) {
186 REG_WRITE(dpll_reg, temp); 184 REG_WRITE(map->dpll, temp);
187 REG_READ(dpll_reg); 185 REG_READ(map->dpll);
188 /* Wait for the clocks to stabilize. */ 186 /* Wait for the clocks to stabilize. */
189 udelay(150); 187 udelay(150);
190 REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE); 188 REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
191 REG_READ(dpll_reg); 189 REG_READ(map->dpll);
192 /* Wait for the clocks to stabilize. */ 190 /* Wait for the clocks to stabilize. */
193 udelay(150); 191 udelay(150);
194 REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE); 192 REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
195 REG_READ(dpll_reg); 193 REG_READ(map->dpll);
196 /* Wait for the clocks to stabilize. */ 194 /* Wait for the clocks to stabilize. */
197 udelay(150); 195 udelay(150);
198 } 196 }
199 /* Enable the pipe */ 197 /* Enable the pipe */
200 temp = REG_READ(pipeconf_reg); 198 temp = REG_READ(map->conf);
201 if ((temp & PIPEACONF_ENABLE) == 0) 199 if ((temp & PIPEACONF_ENABLE) == 0)
202 REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE); 200 REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);
203 /* Enable the plane */ 201 /* Enable the plane */
204 temp = REG_READ(dspcntr_reg); 202 temp = REG_READ(map->cntr);
205 if ((temp & DISPLAY_PLANE_ENABLE) == 0) { 203 if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
206 REG_WRITE(dspcntr_reg, 204 REG_WRITE(map->cntr,
207 temp | DISPLAY_PLANE_ENABLE); 205 temp | DISPLAY_PLANE_ENABLE);
208 /* Flush the plane changes */ 206 /* Flush the plane changes */
209 REG_WRITE(dspbase_reg, REG_READ(dspbase_reg)); 207 REG_WRITE(map->base, REG_READ(map->base));
210 } 208 }
211 209
212 psb_intel_crtc_load_lut(crtc); 210 psb_intel_crtc_load_lut(crtc);
@@ -223,28 +221,28 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
223 /* Disable the VGA plane that we never use */ 221 /* Disable the VGA plane that we never use */
224 REG_WRITE(VGACNTRL, VGA_DISP_DISABLE); 222 REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
225 /* Disable display plane */ 223 /* Disable display plane */
226 temp = REG_READ(dspcntr_reg); 224 temp = REG_READ(map->cntr);
227 if ((temp & DISPLAY_PLANE_ENABLE) != 0) { 225 if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
228 REG_WRITE(dspcntr_reg, 226 REG_WRITE(map->cntr,
229 temp & ~DISPLAY_PLANE_ENABLE); 227 temp & ~DISPLAY_PLANE_ENABLE);
230 /* Flush the plane changes */ 228 /* Flush the plane changes */
231 REG_WRITE(dspbase_reg, REG_READ(dspbase_reg)); 229 REG_WRITE(map->base, REG_READ(map->base));
232 REG_READ(dspbase_reg); 230 REG_READ(map->base);
233 } 231 }
234 232
235 /* Next, disable display pipes */ 233 /* Next, disable display pipes */
236 temp = REG_READ(pipeconf_reg); 234 temp = REG_READ(map->conf);
237 if ((temp & PIPEACONF_ENABLE) != 0) { 235 if ((temp & PIPEACONF_ENABLE) != 0) {
238 REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE); 236 REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
239 REG_READ(pipeconf_reg); 237 REG_READ(map->conf);
240 } 238 }
 241 /* Wait for the pipe disable to take effect. */ 239
242 psb_intel_wait_for_vblank(dev); 240 psb_intel_wait_for_vblank(dev);
243 241
244 temp = REG_READ(dpll_reg); 242 temp = REG_READ(map->dpll);
245 if ((temp & DPLL_VCO_ENABLE) != 0) { 243 if ((temp & DPLL_VCO_ENABLE) != 0) {
246 REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE); 244 REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
247 REG_READ(dpll_reg); 245 REG_READ(map->dpll);
248 } 246 }
249 247
250 /* Wait for the clocks to turn off. */ 248 /* Wait for the clocks to turn off. */
@@ -292,17 +290,7 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
292 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 290 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
293 struct drm_psb_private *dev_priv = dev->dev_private; 291 struct drm_psb_private *dev_priv = dev->dev_private;
294 int pipe = psb_intel_crtc->pipe; 292 int pipe = psb_intel_crtc->pipe;
295 int fp_reg = (pipe == 0) ? MRST_FPA0 : FPB0; 293 const struct psb_offset *map = &dev_priv->regmap[pipe];
296 int dpll_reg = (pipe == 0) ? MRST_DPLL_A : DPLL_B;
297 int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
298 int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
299 int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
300 int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
301 int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
302 int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
303 int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
304 int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
305 int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
306 int refclk = 0; 294 int refclk = 0;
307 struct oaktrail_clock_t clock; 295 struct oaktrail_clock_t clock;
308 u32 dpll = 0, fp = 0, dspcntr, pipeconf; 296 u32 dpll = 0, fp = 0, dspcntr, pipeconf;
@@ -350,7 +338,7 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
350 if (oaktrail_panel_fitter_pipe(dev) == pipe) 338 if (oaktrail_panel_fitter_pipe(dev) == pipe)
351 REG_WRITE(PFIT_CONTROL, 0); 339 REG_WRITE(PFIT_CONTROL, 0);
352 340
353 REG_WRITE(pipesrc_reg, 341 REG_WRITE(map->src,
354 ((mode->crtc_hdisplay - 1) << 16) | 342 ((mode->crtc_hdisplay - 1) << 16) |
355 (mode->crtc_vdisplay - 1)); 343 (mode->crtc_vdisplay - 1));
356 344
@@ -369,34 +357,34 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
369 offsetY = (adjusted_mode->crtc_vdisplay - 357 offsetY = (adjusted_mode->crtc_vdisplay -
370 mode->crtc_vdisplay) / 2; 358 mode->crtc_vdisplay) / 2;
371 359
372 REG_WRITE(htot_reg, (mode->crtc_hdisplay - 1) | 360 REG_WRITE(map->htotal, (mode->crtc_hdisplay - 1) |
373 ((adjusted_mode->crtc_htotal - 1) << 16)); 361 ((adjusted_mode->crtc_htotal - 1) << 16));
374 REG_WRITE(vtot_reg, (mode->crtc_vdisplay - 1) | 362 REG_WRITE(map->vtotal, (mode->crtc_vdisplay - 1) |
375 ((adjusted_mode->crtc_vtotal - 1) << 16)); 363 ((adjusted_mode->crtc_vtotal - 1) << 16));
376 REG_WRITE(hblank_reg, 364 REG_WRITE(map->hblank,
377 (adjusted_mode->crtc_hblank_start - offsetX - 1) | 365 (adjusted_mode->crtc_hblank_start - offsetX - 1) |
378 ((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16)); 366 ((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16));
379 REG_WRITE(hsync_reg, 367 REG_WRITE(map->hsync,
380 (adjusted_mode->crtc_hsync_start - offsetX - 1) | 368 (adjusted_mode->crtc_hsync_start - offsetX - 1) |
381 ((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16)); 369 ((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16));
382 REG_WRITE(vblank_reg, 370 REG_WRITE(map->vblank,
383 (adjusted_mode->crtc_vblank_start - offsetY - 1) | 371 (adjusted_mode->crtc_vblank_start - offsetY - 1) |
384 ((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16)); 372 ((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16));
385 REG_WRITE(vsync_reg, 373 REG_WRITE(map->vsync,
386 (adjusted_mode->crtc_vsync_start - offsetY - 1) | 374 (adjusted_mode->crtc_vsync_start - offsetY - 1) |
387 ((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16)); 375 ((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16));
388 } else { 376 } else {
389 REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) | 377 REG_WRITE(map->htotal, (adjusted_mode->crtc_hdisplay - 1) |
390 ((adjusted_mode->crtc_htotal - 1) << 16)); 378 ((adjusted_mode->crtc_htotal - 1) << 16));
391 REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) | 379 REG_WRITE(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) |
392 ((adjusted_mode->crtc_vtotal - 1) << 16)); 380 ((adjusted_mode->crtc_vtotal - 1) << 16));
393 REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) | 381 REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start - 1) |
394 ((adjusted_mode->crtc_hblank_end - 1) << 16)); 382 ((adjusted_mode->crtc_hblank_end - 1) << 16));
395 REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) | 383 REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start - 1) |
396 ((adjusted_mode->crtc_hsync_end - 1) << 16)); 384 ((adjusted_mode->crtc_hsync_end - 1) << 16));
397 REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) | 385 REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start - 1) |
398 ((adjusted_mode->crtc_vblank_end - 1) << 16)); 386 ((adjusted_mode->crtc_vblank_end - 1) << 16));
399 REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) | 387 REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start - 1) |
400 ((adjusted_mode->crtc_vsync_end - 1) << 16)); 388 ((adjusted_mode->crtc_vsync_end - 1) << 16));
401 } 389 }
402 390
@@ -408,10 +396,10 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
408 } 396 }
409 397
410 /* setup pipeconf */ 398 /* setup pipeconf */
411 pipeconf = REG_READ(pipeconf_reg); 399 pipeconf = REG_READ(map->conf);
412 400
413 /* Set up the display plane register */ 401 /* Set up the display plane register */
414 dspcntr = REG_READ(dspcntr_reg); 402 dspcntr = REG_READ(map->cntr);
415 dspcntr |= DISPPLANE_GAMMA_ENABLE; 403 dspcntr |= DISPPLANE_GAMMA_ENABLE;
416 404
417 if (pipe == 0) 405 if (pipe == 0)
@@ -467,30 +455,30 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
467 mrstPrintPll("chosen", &clock); 455 mrstPrintPll("chosen", &clock);
468 456
469 if (dpll & DPLL_VCO_ENABLE) { 457 if (dpll & DPLL_VCO_ENABLE) {
470 REG_WRITE(fp_reg, fp); 458 REG_WRITE(map->fp0, fp);
471 REG_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE); 459 REG_WRITE(map->dpll, dpll & ~DPLL_VCO_ENABLE);
472 REG_READ(dpll_reg); 460 REG_READ(map->dpll);
473 /* Check the DPLLA lock bit PIPEACONF[29] */ 461 /* Check the DPLLA lock bit PIPEACONF[29] */
474 udelay(150); 462 udelay(150);
475 } 463 }
476 464
477 REG_WRITE(fp_reg, fp); 465 REG_WRITE(map->fp0, fp);
478 REG_WRITE(dpll_reg, dpll); 466 REG_WRITE(map->dpll, dpll);
479 REG_READ(dpll_reg); 467 REG_READ(map->dpll);
480 /* Wait for the clocks to stabilize. */ 468 /* Wait for the clocks to stabilize. */
481 udelay(150); 469 udelay(150);
482 470
483 /* write it again -- the BIOS does, after all */ 471 /* write it again -- the BIOS does, after all */
484 REG_WRITE(dpll_reg, dpll); 472 REG_WRITE(map->dpll, dpll);
485 REG_READ(dpll_reg); 473 REG_READ(map->dpll);
486 /* Wait for the clocks to stabilize. */ 474 /* Wait for the clocks to stabilize. */
487 udelay(150); 475 udelay(150);
488 476
489 REG_WRITE(pipeconf_reg, pipeconf); 477 REG_WRITE(map->conf, pipeconf);
490 REG_READ(pipeconf_reg); 478 REG_READ(map->conf);
491 psb_intel_wait_for_vblank(dev); 479 psb_intel_wait_for_vblank(dev);
492 480
493 REG_WRITE(dspcntr_reg, dspcntr); 481 REG_WRITE(map->cntr, dspcntr);
494 psb_intel_wait_for_vblank(dev); 482 psb_intel_wait_for_vblank(dev);
495 483
496oaktrail_crtc_mode_set_exit: 484oaktrail_crtc_mode_set_exit:
@@ -509,15 +497,13 @@ static int oaktrail_pipe_set_base(struct drm_crtc *crtc,
509 int x, int y, struct drm_framebuffer *old_fb) 497 int x, int y, struct drm_framebuffer *old_fb)
510{ 498{
511 struct drm_device *dev = crtc->dev; 499 struct drm_device *dev = crtc->dev;
500 struct drm_psb_private *dev_priv = dev->dev_private;
512 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 501 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
513 struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb); 502 struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
514 int pipe = psb_intel_crtc->pipe; 503 int pipe = psb_intel_crtc->pipe;
504 const struct psb_offset *map = &dev_priv->regmap[pipe];
515 unsigned long start, offset; 505 unsigned long start, offset;
516 506
517 int dspbase = (pipe == 0 ? DSPALINOFF : DSPBBASE);
518 int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
519 int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
520 int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
521 u32 dspcntr; 507 u32 dspcntr;
522 int ret = 0; 508 int ret = 0;
523 509
@@ -533,9 +519,9 @@ static int oaktrail_pipe_set_base(struct drm_crtc *crtc,
533 start = psbfb->gtt->offset; 519 start = psbfb->gtt->offset;
534 offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8); 520 offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
535 521
536 REG_WRITE(dspstride, crtc->fb->pitches[0]); 522 REG_WRITE(map->stride, crtc->fb->pitches[0]);
537 523
538 dspcntr = REG_READ(dspcntr_reg); 524 dspcntr = REG_READ(map->cntr);
539 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; 525 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
540 526
541 switch (crtc->fb->bits_per_pixel) { 527 switch (crtc->fb->bits_per_pixel) {
@@ -557,12 +543,12 @@ static int oaktrail_pipe_set_base(struct drm_crtc *crtc,
557 ret = -EINVAL; 543 ret = -EINVAL;
558 goto pipe_set_base_exit; 544 goto pipe_set_base_exit;
559 } 545 }
560 REG_WRITE(dspcntr_reg, dspcntr); 546 REG_WRITE(map->cntr, dspcntr);
561 547
562 REG_WRITE(dspbase, offset); 548 REG_WRITE(map->base, offset);
563 REG_READ(dspbase); 549 REG_READ(map->base);
564 REG_WRITE(dspsurf, start); 550 REG_WRITE(map->surf, start);
565 REG_READ(dspsurf); 551 REG_READ(map->surf);
566 552
567pipe_set_base_exit: 553pipe_set_base_exit:
568 gma_power_end(dev); 554 gma_power_end(dev);
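oaktrail_pipe_set_base() derives the scanout byte offset from the panning position with the usual linear-framebuffer arithmetic, offset = y * pitch + x * cpp. A quick worked example with made-up but typical numbers:

	/* Worked example: XRGB8888 (4 bytes per pixel), pitch 4096 bytes. */
	unsigned int x = 8, y = 2;
	unsigned long offset = y * 4096 + x * (32 / 8);	/* 8192 + 32 = 8224 */

The base register then takes this offset while the surface register takes the buffer's GTT start, matching the map->base and map->surf writes above.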
diff --git a/drivers/gpu/drm/gma500/oaktrail_device.c b/drivers/gpu/drm/gma500/oaktrail_device.c
index 41d1924ea31e..0f9b7db80f6b 100644
--- a/drivers/gpu/drm/gma500/oaktrail_device.c
+++ b/drivers/gpu/drm/gma500/oaktrail_device.c
@@ -187,6 +187,7 @@ static int oaktrail_save_display_registers(struct drm_device *dev)
187{ 187{
188 struct drm_psb_private *dev_priv = dev->dev_private; 188 struct drm_psb_private *dev_priv = dev->dev_private;
189 struct psb_save_area *regs = &dev_priv->regs; 189 struct psb_save_area *regs = &dev_priv->regs;
190 struct psb_pipe *p = &regs->pipe[0];
190 int i; 191 int i;
191 u32 pp_stat; 192 u32 pp_stat;
192 193
@@ -201,24 +202,24 @@ static int oaktrail_save_display_registers(struct drm_device *dev)
201 regs->psb.saveCHICKENBIT = PSB_RVDC32(DSPCHICKENBIT); 202 regs->psb.saveCHICKENBIT = PSB_RVDC32(DSPCHICKENBIT);
202 203
203 /* Pipe & plane A info */ 204 /* Pipe & plane A info */
204 regs->psb.savePIPEACONF = PSB_RVDC32(PIPEACONF); 205 p->conf = PSB_RVDC32(PIPEACONF);
205 regs->psb.savePIPEASRC = PSB_RVDC32(PIPEASRC); 206 p->src = PSB_RVDC32(PIPEASRC);
206 regs->psb.saveFPA0 = PSB_RVDC32(MRST_FPA0); 207 p->fp0 = PSB_RVDC32(MRST_FPA0);
207 regs->psb.saveFPA1 = PSB_RVDC32(MRST_FPA1); 208 p->fp1 = PSB_RVDC32(MRST_FPA1);
208 regs->psb.saveDPLL_A = PSB_RVDC32(MRST_DPLL_A); 209 p->dpll = PSB_RVDC32(MRST_DPLL_A);
209 regs->psb.saveHTOTAL_A = PSB_RVDC32(HTOTAL_A); 210 p->htotal = PSB_RVDC32(HTOTAL_A);
210 regs->psb.saveHBLANK_A = PSB_RVDC32(HBLANK_A); 211 p->hblank = PSB_RVDC32(HBLANK_A);
211 regs->psb.saveHSYNC_A = PSB_RVDC32(HSYNC_A); 212 p->hsync = PSB_RVDC32(HSYNC_A);
212 regs->psb.saveVTOTAL_A = PSB_RVDC32(VTOTAL_A); 213 p->vtotal = PSB_RVDC32(VTOTAL_A);
213 regs->psb.saveVBLANK_A = PSB_RVDC32(VBLANK_A); 214 p->vblank = PSB_RVDC32(VBLANK_A);
214 regs->psb.saveVSYNC_A = PSB_RVDC32(VSYNC_A); 215 p->vsync = PSB_RVDC32(VSYNC_A);
215 regs->psb.saveBCLRPAT_A = PSB_RVDC32(BCLRPAT_A); 216 regs->psb.saveBCLRPAT_A = PSB_RVDC32(BCLRPAT_A);
216 regs->psb.saveDSPACNTR = PSB_RVDC32(DSPACNTR); 217 p->cntr = PSB_RVDC32(DSPACNTR);
217 regs->psb.saveDSPASTRIDE = PSB_RVDC32(DSPASTRIDE); 218 p->stride = PSB_RVDC32(DSPASTRIDE);
218 regs->psb.saveDSPAADDR = PSB_RVDC32(DSPABASE); 219 p->addr = PSB_RVDC32(DSPABASE);
219 regs->psb.saveDSPASURF = PSB_RVDC32(DSPASURF); 220 p->surf = PSB_RVDC32(DSPASURF);
220 regs->psb.saveDSPALINOFF = PSB_RVDC32(DSPALINOFF); 221 p->linoff = PSB_RVDC32(DSPALINOFF);
221 regs->psb.saveDSPATILEOFF = PSB_RVDC32(DSPATILEOFF); 222 p->tileoff = PSB_RVDC32(DSPATILEOFF);
222 223
223 /* Save cursor regs */ 224 /* Save cursor regs */
224 regs->psb.saveDSPACURSOR_CTRL = PSB_RVDC32(CURACNTR); 225 regs->psb.saveDSPACURSOR_CTRL = PSB_RVDC32(CURACNTR);
@@ -227,7 +228,7 @@ static int oaktrail_save_display_registers(struct drm_device *dev)
227 228
228 /* Save palette (gamma) */ 229 /* Save palette (gamma) */
229 for (i = 0; i < 256; i++) 230 for (i = 0; i < 256; i++)
230 regs->psb.save_palette_a[i] = PSB_RVDC32(PALETTE_A + (i << 2)); 231 p->palette[i] = PSB_RVDC32(PALETTE_A + (i << 2));
231 232
232 if (dev_priv->hdmi_priv) 233 if (dev_priv->hdmi_priv)
233 oaktrail_hdmi_save(dev); 234 oaktrail_hdmi_save(dev);
@@ -300,6 +301,7 @@ static int oaktrail_restore_display_registers(struct drm_device *dev)
300{ 301{
301 struct drm_psb_private *dev_priv = dev->dev_private; 302 struct drm_psb_private *dev_priv = dev->dev_private;
302 struct psb_save_area *regs = &dev_priv->regs; 303 struct psb_save_area *regs = &dev_priv->regs;
304 struct psb_pipe *p = &regs->pipe[0];
303 u32 pp_stat; 305 u32 pp_stat;
304 int i; 306 int i;
305 307
@@ -317,21 +319,21 @@ static int oaktrail_restore_display_registers(struct drm_device *dev)
317 PSB_WVDC32(0x80000000, VGACNTRL); 319 PSB_WVDC32(0x80000000, VGACNTRL);
318 320
319 /* set the plls */ 321 /* set the plls */
320 PSB_WVDC32(regs->psb.saveFPA0, MRST_FPA0); 322 PSB_WVDC32(p->fp0, MRST_FPA0);
321 PSB_WVDC32(regs->psb.saveFPA1, MRST_FPA1); 323 PSB_WVDC32(p->fp1, MRST_FPA1);
322 324
323 /* Actually enable it */ 325 /* Actually enable it */
324 PSB_WVDC32(regs->psb.saveDPLL_A, MRST_DPLL_A); 326 PSB_WVDC32(p->dpll, MRST_DPLL_A);
325 DRM_UDELAY(150); 327 DRM_UDELAY(150);
326 328
327 /* Restore mode */ 329 /* Restore mode */
328 PSB_WVDC32(regs->psb.saveHTOTAL_A, HTOTAL_A); 330 PSB_WVDC32(p->htotal, HTOTAL_A);
329 PSB_WVDC32(regs->psb.saveHBLANK_A, HBLANK_A); 331 PSB_WVDC32(p->hblank, HBLANK_A);
330 PSB_WVDC32(regs->psb.saveHSYNC_A, HSYNC_A); 332 PSB_WVDC32(p->hsync, HSYNC_A);
331 PSB_WVDC32(regs->psb.saveVTOTAL_A, VTOTAL_A); 333 PSB_WVDC32(p->vtotal, VTOTAL_A);
332 PSB_WVDC32(regs->psb.saveVBLANK_A, VBLANK_A); 334 PSB_WVDC32(p->vblank, VBLANK_A);
333 PSB_WVDC32(regs->psb.saveVSYNC_A, VSYNC_A); 335 PSB_WVDC32(p->vsync, VSYNC_A);
334 PSB_WVDC32(regs->psb.savePIPEASRC, PIPEASRC); 336 PSB_WVDC32(p->src, PIPEASRC);
335 PSB_WVDC32(regs->psb.saveBCLRPAT_A, BCLRPAT_A); 337 PSB_WVDC32(regs->psb.saveBCLRPAT_A, BCLRPAT_A);
336 338
337 /* Restore performance mode*/ 339 /* Restore performance mode*/
@@ -339,16 +341,16 @@ static int oaktrail_restore_display_registers(struct drm_device *dev)
339 341
340 /* Enable the pipe*/ 342 /* Enable the pipe*/
341 if (dev_priv->iLVDS_enable) 343 if (dev_priv->iLVDS_enable)
342 PSB_WVDC32(regs->psb.savePIPEACONF, PIPEACONF); 344 PSB_WVDC32(p->conf, PIPEACONF);
343 345
344 /* Set up the plane*/ 346 /* Set up the plane*/
345 PSB_WVDC32(regs->psb.saveDSPALINOFF, DSPALINOFF); 347 PSB_WVDC32(p->linoff, DSPALINOFF);
346 PSB_WVDC32(regs->psb.saveDSPASTRIDE, DSPASTRIDE); 348 PSB_WVDC32(p->stride, DSPASTRIDE);
347 PSB_WVDC32(regs->psb.saveDSPATILEOFF, DSPATILEOFF); 349 PSB_WVDC32(p->tileoff, DSPATILEOFF);
348 350
349 /* Enable the plane */ 351 /* Enable the plane */
350 PSB_WVDC32(regs->psb.saveDSPACNTR, DSPACNTR); 352 PSB_WVDC32(p->cntr, DSPACNTR);
351 PSB_WVDC32(regs->psb.saveDSPASURF, DSPASURF); 353 PSB_WVDC32(p->surf, DSPASURF);
352 354
353 /* Enable Cursor A */ 355 /* Enable Cursor A */
354 PSB_WVDC32(regs->psb.saveDSPACURSOR_CTRL, CURACNTR); 356 PSB_WVDC32(regs->psb.saveDSPACURSOR_CTRL, CURACNTR);
@@ -357,7 +359,7 @@ static int oaktrail_restore_display_registers(struct drm_device *dev)
357 359
358 /* Restore palette (gamma) */ 360 /* Restore palette (gamma) */
359 for (i = 0; i < 256; i++) 361 for (i = 0; i < 256; i++)
360 PSB_WVDC32(regs->psb.save_palette_a[i], PALETTE_A + (i << 2)); 362 PSB_WVDC32(p->palette[i], PALETTE_A + (i << 2));
361 363
362 if (dev_priv->hdmi_priv) 364 if (dev_priv->hdmi_priv)
363 oaktrail_hdmi_restore(dev); 365 oaktrail_hdmi_restore(dev);
@@ -454,31 +456,84 @@ static int oaktrail_power_up(struct drm_device *dev)
454 return 0; 456 return 0;
455} 457}
456 458
459/* Oaktrail */
460static const struct psb_offset oaktrail_regmap[2] = {
461 {
462 .fp0 = MRST_FPA0,
463 .fp1 = MRST_FPA1,
464 .cntr = DSPACNTR,
465 .conf = PIPEACONF,
466 .src = PIPEASRC,
467 .dpll = MRST_DPLL_A,
468 .htotal = HTOTAL_A,
469 .hblank = HBLANK_A,
470 .hsync = HSYNC_A,
471 .vtotal = VTOTAL_A,
472 .vblank = VBLANK_A,
473 .vsync = VSYNC_A,
474 .stride = DSPASTRIDE,
475 .size = DSPASIZE,
476 .pos = DSPAPOS,
477 .surf = DSPASURF,
478 .addr = MRST_DSPABASE,
479 .status = PIPEASTAT,
480 .linoff = DSPALINOFF,
481 .tileoff = DSPATILEOFF,
482 .palette = PALETTE_A,
483 },
484 {
485 .fp0 = FPB0,
486 .fp1 = FPB1,
487 .cntr = DSPBCNTR,
488 .conf = PIPEBCONF,
489 .src = PIPEBSRC,
490 .dpll = DPLL_B,
491 .htotal = HTOTAL_B,
492 .hblank = HBLANK_B,
493 .hsync = HSYNC_B,
494 .vtotal = VTOTAL_B,
495 .vblank = VBLANK_B,
496 .vsync = VSYNC_B,
497 .stride = DSPBSTRIDE,
498 .size = DSPBSIZE,
499 .pos = DSPBPOS,
500 .surf = DSPBSURF,
501 .addr = DSPBBASE,
502 .status = PIPEBSTAT,
503 .linoff = DSPBLINOFF,
504 .tileoff = DSPBTILEOFF,
505 .palette = PALETTE_B,
506 },
507};
457 508
458static int oaktrail_chip_setup(struct drm_device *dev) 509static int oaktrail_chip_setup(struct drm_device *dev)
459{ 510{
460 struct drm_psb_private *dev_priv = dev->dev_private; 511 struct drm_psb_private *dev_priv = dev->dev_private;
461 struct oaktrail_vbt *vbt = &dev_priv->vbt_data;
462 int ret; 512 int ret;
463 513
514 if (pci_enable_msi(dev->pdev))
515 dev_warn(dev->dev, "Enabling MSI failed!\n");
516
517 dev_priv->regmap = oaktrail_regmap;
518
464 ret = mid_chip_setup(dev); 519 ret = mid_chip_setup(dev);
465 if (ret < 0) 520 if (ret < 0)
466 return ret; 521 return ret;
467 if (vbt->size == 0) { 522 if (!dev_priv->has_gct) {
468 /* Now pull the BIOS data */ 523 /* Now pull the BIOS data */
469 gma_intel_opregion_init(dev); 524 psb_intel_opregion_init(dev);
470 psb_intel_init_bios(dev); 525 psb_intel_init_bios(dev);
471 } 526 }
527 oaktrail_hdmi_setup(dev);
472 return 0; 528 return 0;
473} 529}
474 530
475static void oaktrail_teardown(struct drm_device *dev) 531static void oaktrail_teardown(struct drm_device *dev)
476{ 532{
477 struct drm_psb_private *dev_priv = dev->dev_private; 533 struct drm_psb_private *dev_priv = dev->dev_private;
478 struct oaktrail_vbt *vbt = &dev_priv->vbt_data;
479 534
480 oaktrail_hdmi_teardown(dev); 535 oaktrail_hdmi_teardown(dev);
481 if (vbt->size == 0) 536 if (!dev_priv->has_gct)
482 psb_intel_destroy_bios(dev); 537 psb_intel_destroy_bios(dev);
483} 538}
484 539
@@ -487,6 +542,9 @@ const struct psb_ops oaktrail_chip_ops = {
487 .accel_2d = 1, 542 .accel_2d = 1,
488 .pipes = 2, 543 .pipes = 2,
489 .crtcs = 2, 544 .crtcs = 2,
545 .hdmi_mask = (1 << 0),
546 .lvds_mask = (1 << 0),
547 .cursor_needs_phys = 0,
490 .sgx_offset = MRST_SGX_OFFSET, 548 .sgx_offset = MRST_SGX_OFFSET,
491 549
492 .chip_setup = oaktrail_chip_setup, 550 .chip_setup = oaktrail_chip_setup,
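The save/restore paths above stop using individually named regs->psb.saveXXX_A fields and instead fill regs->pipe[0], so suspend/resume state is indexed the same way as the new register map. A sketch of the consolidated container, with the field list abridged from the assignments in this hunk:

	/* Sketch: per-pipe saved display state (abridged). */
	struct psb_pipe {
		u32 fp0, fp1, dpll;
		u32 conf, src, cntr, stride, addr, surf;
		u32 htotal, hblank, hsync, vtotal, vblank, vsync;
		u32 linoff, tileoff;
		u32 palette[256];
	};

Cursor state and a few one-off registers (BCLRPAT_A, CURACNTR) stay in the chip-specific psb_state, since they were not made per-pipe here.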
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index f8b367b45f66..c10899c953b9 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -179,7 +179,6 @@ static void oaktrail_hdmi_dpms(struct drm_encoder *encoder, int mode)
179static int oaktrail_hdmi_mode_valid(struct drm_connector *connector, 179static int oaktrail_hdmi_mode_valid(struct drm_connector *connector,
180 struct drm_display_mode *mode) 180 struct drm_display_mode *mode)
181{ 181{
182 struct drm_psb_private *dev_priv = connector->dev->dev_private;
183 if (mode->clock > 165000) 182 if (mode->clock > 165000)
184 return MODE_CLOCK_HIGH; 183 return MODE_CLOCK_HIGH;
185 if (mode->clock < 20000) 184 if (mode->clock < 20000)
@@ -188,11 +187,6 @@ static int oaktrail_hdmi_mode_valid(struct drm_connector *connector,
188 if (mode->flags & DRM_MODE_FLAG_DBLSCAN) 187 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
189 return MODE_NO_DBLESCAN; 188 return MODE_NO_DBLESCAN;
190 189
191 /* We assume worst case scenario of 32 bpp here, since we don't know */
192 if ((ALIGN(mode->hdisplay * 4, 64) * mode->vdisplay) >
193 dev_priv->vram_stolen_size)
194 return MODE_MEM;
195
196 return MODE_OK; 190 return MODE_OK;
197} 191}
198 192
@@ -440,6 +434,7 @@ void oaktrail_hdmi_save(struct drm_device *dev)
440 struct drm_psb_private *dev_priv = dev->dev_private; 434 struct drm_psb_private *dev_priv = dev->dev_private;
441 struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv; 435 struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
442 struct psb_state *regs = &dev_priv->regs.psb; 436 struct psb_state *regs = &dev_priv->regs.psb;
437 struct psb_pipe *pipeb = &dev_priv->regs.pipe[1];
443 int i; 438 int i;
444 439
445 /* dpll */ 440 /* dpll */
@@ -450,14 +445,14 @@ void oaktrail_hdmi_save(struct drm_device *dev)
450 hdmi_dev->saveDPLL_CLK_ENABLE = PSB_RVDC32(DPLL_CLK_ENABLE); 445 hdmi_dev->saveDPLL_CLK_ENABLE = PSB_RVDC32(DPLL_CLK_ENABLE);
451 446
452 /* pipe B */ 447 /* pipe B */
453 regs->savePIPEBCONF = PSB_RVDC32(PIPEBCONF); 448 pipeb->conf = PSB_RVDC32(PIPEBCONF);
454 regs->savePIPEBSRC = PSB_RVDC32(PIPEBSRC); 449 pipeb->src = PSB_RVDC32(PIPEBSRC);
455 regs->saveHTOTAL_B = PSB_RVDC32(HTOTAL_B); 450 pipeb->htotal = PSB_RVDC32(HTOTAL_B);
456 regs->saveHBLANK_B = PSB_RVDC32(HBLANK_B); 451 pipeb->hblank = PSB_RVDC32(HBLANK_B);
457 regs->saveHSYNC_B = PSB_RVDC32(HSYNC_B); 452 pipeb->hsync = PSB_RVDC32(HSYNC_B);
458 regs->saveVTOTAL_B = PSB_RVDC32(VTOTAL_B); 453 pipeb->vtotal = PSB_RVDC32(VTOTAL_B);
459 regs->saveVBLANK_B = PSB_RVDC32(VBLANK_B); 454 pipeb->vblank = PSB_RVDC32(VBLANK_B);
460 regs->saveVSYNC_B = PSB_RVDC32(VSYNC_B); 455 pipeb->vsync = PSB_RVDC32(VSYNC_B);
461 456
462 hdmi_dev->savePCH_PIPEBCONF = PSB_RVDC32(PCH_PIPEBCONF); 457 hdmi_dev->savePCH_PIPEBCONF = PSB_RVDC32(PCH_PIPEBCONF);
463 hdmi_dev->savePCH_PIPEBSRC = PSB_RVDC32(PCH_PIPEBSRC); 458 hdmi_dev->savePCH_PIPEBSRC = PSB_RVDC32(PCH_PIPEBSRC);
@@ -469,12 +464,12 @@ void oaktrail_hdmi_save(struct drm_device *dev)
469 hdmi_dev->savePCH_VSYNC_B = PSB_RVDC32(PCH_VSYNC_B); 464 hdmi_dev->savePCH_VSYNC_B = PSB_RVDC32(PCH_VSYNC_B);
470 465
471 /* plane */ 466 /* plane */
472 regs->saveDSPBCNTR = PSB_RVDC32(DSPBCNTR); 467 pipeb->cntr = PSB_RVDC32(DSPBCNTR);
473 regs->saveDSPBSTRIDE = PSB_RVDC32(DSPBSTRIDE); 468 pipeb->stride = PSB_RVDC32(DSPBSTRIDE);
474 regs->saveDSPBADDR = PSB_RVDC32(DSPBBASE); 469 pipeb->addr = PSB_RVDC32(DSPBBASE);
475 regs->saveDSPBSURF = PSB_RVDC32(DSPBSURF); 470 pipeb->surf = PSB_RVDC32(DSPBSURF);
476 regs->saveDSPBLINOFF = PSB_RVDC32(DSPBLINOFF); 471 pipeb->linoff = PSB_RVDC32(DSPBLINOFF);
477 regs->saveDSPBTILEOFF = PSB_RVDC32(DSPBTILEOFF); 472 pipeb->tileoff = PSB_RVDC32(DSPBTILEOFF);
478 473
479 /* cursor B */ 474 /* cursor B */
480 regs->saveDSPBCURSOR_CTRL = PSB_RVDC32(CURBCNTR); 475 regs->saveDSPBCURSOR_CTRL = PSB_RVDC32(CURBCNTR);
@@ -483,7 +478,7 @@ void oaktrail_hdmi_save(struct drm_device *dev)
483 478
484 /* save palette */ 479 /* save palette */
485 for (i = 0; i < 256; i++) 480 for (i = 0; i < 256; i++)
486 regs->save_palette_b[i] = PSB_RVDC32(PALETTE_B + (i << 2)); 481 pipeb->palette[i] = PSB_RVDC32(PALETTE_B + (i << 2));
487} 482}
488 483
489/* restore HDMI register state */ 484/* restore HDMI register state */
@@ -492,6 +487,7 @@ void oaktrail_hdmi_restore(struct drm_device *dev)
492 struct drm_psb_private *dev_priv = dev->dev_private; 487 struct drm_psb_private *dev_priv = dev->dev_private;
493 struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv; 488 struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
494 struct psb_state *regs = &dev_priv->regs.psb; 489 struct psb_state *regs = &dev_priv->regs.psb;
490 struct psb_pipe *pipeb = &dev_priv->regs.pipe[1];
495 int i; 491 int i;
496 492
497 /* dpll */ 493 /* dpll */
@@ -503,13 +499,13 @@ void oaktrail_hdmi_restore(struct drm_device *dev)
503 DRM_UDELAY(150); 499 DRM_UDELAY(150);
504 500
505 /* pipe */ 501 /* pipe */
506 PSB_WVDC32(regs->savePIPEBSRC, PIPEBSRC); 502 PSB_WVDC32(pipeb->src, PIPEBSRC);
507 PSB_WVDC32(regs->saveHTOTAL_B, HTOTAL_B); 503 PSB_WVDC32(pipeb->htotal, HTOTAL_B);
508 PSB_WVDC32(regs->saveHBLANK_B, HBLANK_B); 504 PSB_WVDC32(pipeb->hblank, HBLANK_B);
509 PSB_WVDC32(regs->saveHSYNC_B, HSYNC_B); 505 PSB_WVDC32(pipeb->hsync, HSYNC_B);
510 PSB_WVDC32(regs->saveVTOTAL_B, VTOTAL_B); 506 PSB_WVDC32(pipeb->vtotal, VTOTAL_B);
511 PSB_WVDC32(regs->saveVBLANK_B, VBLANK_B); 507 PSB_WVDC32(pipeb->vblank, VBLANK_B);
512 PSB_WVDC32(regs->saveVSYNC_B, VSYNC_B); 508 PSB_WVDC32(pipeb->vsync, VSYNC_B);
513 509
514 PSB_WVDC32(hdmi_dev->savePCH_PIPEBSRC, PCH_PIPEBSRC); 510 PSB_WVDC32(hdmi_dev->savePCH_PIPEBSRC, PCH_PIPEBSRC);
515 PSB_WVDC32(hdmi_dev->savePCH_HTOTAL_B, PCH_HTOTAL_B); 511 PSB_WVDC32(hdmi_dev->savePCH_HTOTAL_B, PCH_HTOTAL_B);
@@ -519,15 +515,15 @@ void oaktrail_hdmi_restore(struct drm_device *dev)
519 PSB_WVDC32(hdmi_dev->savePCH_VBLANK_B, PCH_VBLANK_B); 515 PSB_WVDC32(hdmi_dev->savePCH_VBLANK_B, PCH_VBLANK_B);
520 PSB_WVDC32(hdmi_dev->savePCH_VSYNC_B, PCH_VSYNC_B); 516 PSB_WVDC32(hdmi_dev->savePCH_VSYNC_B, PCH_VSYNC_B);
521 517
522 PSB_WVDC32(regs->savePIPEBCONF, PIPEBCONF); 518 PSB_WVDC32(pipeb->conf, PIPEBCONF);
523 PSB_WVDC32(hdmi_dev->savePCH_PIPEBCONF, PCH_PIPEBCONF); 519 PSB_WVDC32(hdmi_dev->savePCH_PIPEBCONF, PCH_PIPEBCONF);
524 520
525 /* plane */ 521 /* plane */
526 PSB_WVDC32(regs->saveDSPBLINOFF, DSPBLINOFF); 522 PSB_WVDC32(pipeb->linoff, DSPBLINOFF);
527 PSB_WVDC32(regs->saveDSPBSTRIDE, DSPBSTRIDE); 523 PSB_WVDC32(pipeb->stride, DSPBSTRIDE);
528 PSB_WVDC32(regs->saveDSPBTILEOFF, DSPBTILEOFF); 524 PSB_WVDC32(pipeb->tileoff, DSPBTILEOFF);
529 PSB_WVDC32(regs->saveDSPBCNTR, DSPBCNTR); 525 PSB_WVDC32(pipeb->cntr, DSPBCNTR);
530 PSB_WVDC32(regs->saveDSPBSURF, DSPBSURF); 526 PSB_WVDC32(pipeb->surf, DSPBSURF);
531 527
532 /* cursor B */ 528 /* cursor B */
533 PSB_WVDC32(regs->saveDSPBCURSOR_CTRL, CURBCNTR); 529 PSB_WVDC32(regs->saveDSPBCURSOR_CTRL, CURBCNTR);
@@ -536,5 +532,5 @@ void oaktrail_hdmi_restore(struct drm_device *dev)
536 532
537 /* restore palette */ 533 /* restore palette */
538 for (i = 0; i < 256; i++) 534 for (i = 0; i < 256; i++)
539 PSB_WVDC32(regs->save_palette_b[i], PALETTE_B + (i << 2)); 535 PSB_WVDC32(pipeb->palette[i], PALETTE_B + (i << 2));
540} 536}
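In the oaktrail_hdmi.c hunks, mode_valid now rejects modes purely on pixel clock; the worst-case stolen-memory estimate is dropped. The remaining filter, for reference (165 MHz is commonly cited as the single-link TMDS ceiling, which fits the upper bound used here):

	/* Resulting mode filter: clock bounds only, in kHz. */
	if (mode->clock > 165000)
		return MODE_CLOCK_HIGH;	/* above single-link TMDS rates */
	if (mode->clock < 20000)
		return MODE_CLOCK_LOW;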
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
index 5e84fbde749b..88627e3ba1e3 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
@@ -250,7 +250,7 @@ static irqreturn_t oaktrail_hdmi_i2c_handler(int this_irq, void *dev)
250 */ 250 */
251static void oaktrail_hdmi_i2c_gpio_fix(void) 251static void oaktrail_hdmi_i2c_gpio_fix(void)
252{ 252{
253 void *base; 253 void __iomem *base;
254 unsigned int gpio_base = 0xff12c000; 254 unsigned int gpio_base = 0xff12c000;
255 int gpio_len = 0x1000; 255 int gpio_len = 0x1000;
256 u32 temp; 256 u32 temp;
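The one-line fix above gives base the __iomem annotation so sparse can flag any plain dereference; such pointers should only be touched through the MMIO accessors. A minimal sketch of that discipline (the 0x44 offset is invented for illustration; the base address and length are the ones from this function):

	/* __iomem pointers: ioremap/readl/writel/iounmap only. */
	void __iomem *base = ioremap(0xff12c000, 0x1000);
	u32 temp;

	if (base) {
		temp = readl(base + 0x44);
		writel(temp | 0x1, base + 0x44);
		iounmap(base);
	}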
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index 654f32b22b21..558c77fb55ec 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -257,7 +257,7 @@ static void oaktrail_lvds_get_configuration_mode(struct drm_device *dev,
257 mode_dev->panel_fixed_mode = NULL; 257 mode_dev->panel_fixed_mode = NULL;
258 258
259 /* Use the firmware provided data on Moorestown */ 259 /* Use the firmware provided data on Moorestown */
260 if (dev_priv->vbt_data.size != 0x00) { /*if non-zero, then use vbt*/ 260 if (dev_priv->has_gct) {
261 mode = kzalloc(sizeof(*mode), GFP_KERNEL); 261 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
262 if (!mode) 262 if (!mode)
263 return; 263 return;
@@ -371,7 +371,7 @@ void oaktrail_lvds_init(struct drm_device *dev,
371 BRIGHTNESS_MAX_LEVEL); 371 BRIGHTNESS_MAX_LEVEL);
372 372
373 mode_dev->panel_wants_dither = false; 373 mode_dev->panel_wants_dither = false;
374 if (dev_priv->vbt_data.size != 0x00) 374 if (dev_priv->has_gct)
375 mode_dev->panel_wants_dither = (dev_priv->gct_data. 375 mode_dev->panel_wants_dither = (dev_priv->gct_data.
376 Panel_Port_Control & MRST_PANEL_8TO6_DITHER_ENABLE); 376 Panel_Port_Control & MRST_PANEL_8TO6_DITHER_ENABLE);
377 if (dev_priv->lvds_dither) 377 if (dev_priv->lvds_dither)
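Both LVDS call sites switch from testing vbt_data.size against zero to the single has_gct flag that mid_get_vbt_data() now sets, so "firmware data present" is stated once instead of being inferred from a size field. The consumer pattern, as a sketch:

	/* Sketch: consumers gate on the flag, then read gct_data. */
	if (dev_priv->has_gct)
		mode_dev->panel_wants_dither =
			(dev_priv->gct_data.Panel_Port_Control &
			 MRST_PANEL_8TO6_DITHER_ENABLE);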
diff --git a/drivers/gpu/drm/gma500/opregion.c b/drivers/gpu/drm/gma500/opregion.c
new file mode 100644
index 000000000000..4f186eca3a30
--- /dev/null
+++ b/drivers/gpu/drm/gma500/opregion.c
@@ -0,0 +1,344 @@
1/*
2 * Copyright 2011 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 */
24#include <linux/acpi.h>
25#include <linux/acpi_io.h>
26#include "psb_drv.h"
27#include "psb_intel_reg.h"
28
29#define PCI_ASLE 0xe4
30#define PCI_ASLS 0xfc
31
32#define OPREGION_HEADER_OFFSET 0
33#define OPREGION_ACPI_OFFSET 0x100
34#define ACPI_CLID 0x01ac /* current lid state indicator */
35#define ACPI_CDCK 0x01b0 /* current docking state indicator */
36#define OPREGION_SWSCI_OFFSET 0x200
37#define OPREGION_ASLE_OFFSET 0x300
38#define OPREGION_VBT_OFFSET 0x400
39
40#define OPREGION_SIGNATURE "IntelGraphicsMem"
41#define MBOX_ACPI (1<<0)
42#define MBOX_SWSCI (1<<1)
43#define MBOX_ASLE (1<<2)
44
45struct opregion_header {
46 u8 signature[16];
47 u32 size;
48 u32 opregion_ver;
49 u8 bios_ver[32];
50 u8 vbios_ver[16];
51 u8 driver_ver[16];
52 u32 mboxes;
53 u8 reserved[164];
54} __packed;
55
56/* OpRegion mailbox #1: public ACPI methods */
57struct opregion_acpi {
58 u32 drdy; /* driver readiness */
59 u32 csts; /* notification status */
60 u32 cevt; /* current event */
61 u8 rsvd1[20];
62 u32 didl[8]; /* supported display devices ID list */
63 u32 cpdl[8]; /* currently presented display list */
64 u32 cadl[8]; /* currently active display list */
65 u32 nadl[8]; /* next active devices list */
66 u32 aslp; /* ASL sleep time-out */
67 u32 tidx; /* toggle table index */
68 u32 chpd; /* current hotplug enable indicator */
69 u32 clid; /* current lid state*/
70 u32 cdck; /* current docking state */
71 u32 sxsw; /* Sx state resume */
72 u32 evts; /* ASL supported events */
73 u32 cnot; /* current OS notification */
74 u32 nrdy; /* driver status */
75 u8 rsvd2[60];
76} __packed;
77
78/* OpRegion mailbox #2: SWSCI */
79struct opregion_swsci {
80 /*FIXME: add it later*/
81} __packed;
82
83/* OpRegion mailbox #3: ASLE */
84struct opregion_asle {
85 u32 ardy; /* driver readiness */
86 u32 aslc; /* ASLE interrupt command */
87 u32 tche; /* technology enabled indicator */
88 u32 alsi; /* current ALS illuminance reading */
89 u32 bclp; /* backlight brightness to set */
90 u32 pfit; /* panel fitting state */
91 u32 cblv; /* current brightness level */
92 u16 bclm[20]; /* backlight level duty cycle mapping table */
93 u32 cpfm; /* current panel fitting mode */
94 u32 epfm; /* enabled panel fitting modes */
95 u8 plut[74]; /* panel LUT and identifier */
96 u32 pfmb; /* PWM freq and min brightness */
97 u8 rsvd[102];
98} __packed;
99
100/* ASLE irq request bits */
101#define ASLE_SET_ALS_ILLUM (1 << 0)
102#define ASLE_SET_BACKLIGHT (1 << 1)
103#define ASLE_SET_PFIT (1 << 2)
104#define ASLE_SET_PWM_FREQ (1 << 3)
105#define ASLE_REQ_MSK 0xf
106
107/* response bits of ASLE irq request */
108#define ASLE_ALS_ILLUM_FAILED (1<<10)
109#define ASLE_BACKLIGHT_FAILED (1<<12)
110#define ASLE_PFIT_FAILED (1<<14)
111#define ASLE_PWM_FREQ_FAILED (1<<16)
112
113/* ASLE backlight brightness to set */
114#define ASLE_BCLP_VALID (1<<31)
115#define ASLE_BCLP_MSK (~(1<<31))
116
117/* ASLE panel fitting request */
118#define ASLE_PFIT_VALID (1<<31)
119#define ASLE_PFIT_CENTER (1<<0)
120#define ASLE_PFIT_STRETCH_TEXT (1<<1)
121#define ASLE_PFIT_STRETCH_GFX (1<<2)
122
123/* response bits of ASLE irq request */
124#define ASLE_ALS_ILLUM_FAILED (1<<10)
125#define ASLE_BACKLIGHT_FAILED (1<<12)
126#define ASLE_PFIT_FAILED (1<<14)
127#define ASLE_PWM_FREQ_FAILED (1<<16)
128
129/* ASLE backlight brightness to set */
130#define ASLE_BCLP_VALID (1<<31)
131#define ASLE_BCLP_MSK (~(1<<31))
132
133/* ASLE panel fitting request */
134#define ASLE_PFIT_VALID (1<<31)
135#define ASLE_PFIT_CENTER (1<<0)
136#define ASLE_PFIT_STRETCH_TEXT (1<<1)
137#define ASLE_PFIT_STRETCH_GFX (1<<2)
138
139/* PWM frequency and minimum brightness */
140#define ASLE_PFMB_BRIGHTNESS_MASK (0xff)
141#define ASLE_PFMB_BRIGHTNESS_VALID (1<<8)
142#define ASLE_PFMB_PWM_MASK (0x7ffffe00)
143#define ASLE_PFMB_PWM_VALID (1<<31)
144
145#define ASLE_CBLV_VALID (1<<31)
146
147static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
148{
149 struct drm_psb_private *dev_priv = dev->dev_private;
150 struct opregion_asle *asle = dev_priv->opregion.asle;
151 struct backlight_device *bd = dev_priv->backlight_device;
152
153 DRM_DEBUG_DRIVER("asle set backlight %x\n", bclp);
154
155 if (!(bclp & ASLE_BCLP_VALID))
156 return ASLE_BACKLIGHT_FAILED;
157
158 if (bd == NULL)
159 return ASLE_BACKLIGHT_FAILED;
160
161 bclp &= ASLE_BCLP_MSK;
162 if (bclp > 255)
163 return ASLE_BACKLIGHT_FAILED;
164
165 if (config_enabled(CONFIG_BACKLIGHT_CLASS_DEVICE)) {
166 int max = bd->props.max_brightness;
167 bd->props.brightness = bclp * max / 255;
168 backlight_update_status(bd);
169 }
170
171 asle->cblv = (bclp * 0x64) / 0xff | ASLE_CBLV_VALID;
172
173 return 0;
174}
175
176void psb_intel_opregion_asle_intr(struct drm_device *dev)
177{
178 struct drm_psb_private *dev_priv = dev->dev_private;
179 struct opregion_asle *asle = dev_priv->opregion.asle;
180 u32 asle_stat = 0;
181 u32 asle_req;
182
183 if (!asle)
184 return;
185
186 asle_req = asle->aslc & ASLE_REQ_MSK;
187 if (!asle_req) {
188 DRM_DEBUG_DRIVER("non asle set request??\n");
189 return;
190 }
191
192 if (asle_req & ASLE_SET_BACKLIGHT)
193 asle_stat |= asle_set_backlight(dev, asle->bclp);
194
195 asle->aslc = asle_stat;
196}
197
198#define ASLE_ALS_EN (1<<0)
199#define ASLE_BLC_EN (1<<1)
200#define ASLE_PFIT_EN (1<<2)
201#define ASLE_PFMB_EN (1<<3)
202
203void psb_intel_opregion_enable_asle(struct drm_device *dev)
204{
205 struct drm_psb_private *dev_priv = dev->dev_private;
206 struct opregion_asle *asle = dev_priv->opregion.asle;
207
208 if (asle) {
209 /* Don't do this on Medfield or other non PC like devices, they
210 use the bit for something different altogether */
211 psb_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);
212 psb_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE);
213
214 asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN
215 | ASLE_PFMB_EN;
216 asle->ardy = 1;
217 }
218}
219
220#define ACPI_EV_DISPLAY_SWITCH (1<<0)
221#define ACPI_EV_LID (1<<1)
222#define ACPI_EV_DOCK (1<<2)
223
224static struct psb_intel_opregion *system_opregion;
225
226static int psb_intel_opregion_video_event(struct notifier_block *nb,
227 unsigned long val, void *data)
228{
229 /* The only video events relevant to opregion are 0x80. These indicate
230 either a docking event, lid switch or display switch request. In
231 Linux, these are handled by the dock, button and video drivers.
232 We might want to fix the video driver to be opregion-aware in
233 future, but right now we just indicate to the firmware that the
234 request has been handled */
235
236 struct opregion_acpi *acpi;
237
238 if (!system_opregion)
239 return NOTIFY_DONE;
240
241 acpi = system_opregion->acpi;
242 acpi->csts = 0;
243
244 return NOTIFY_OK;
245}
246
247static struct notifier_block psb_intel_opregion_notifier = {
248 .notifier_call = psb_intel_opregion_video_event,
249};
250
251void psb_intel_opregion_init(struct drm_device *dev)
252{
253 struct drm_psb_private *dev_priv = dev->dev_private;
254 struct psb_intel_opregion *opregion = &dev_priv->opregion;
255
256 if (!opregion->header)
257 return;
258
259 if (opregion->acpi) {
260 /* Notify BIOS we are ready to handle ACPI video ext notifs.
261 * Right now, all the events are handled by the ACPI video
262 * module. We don't actually need to do anything with them. */
263 opregion->acpi->csts = 0;
264 opregion->acpi->drdy = 1;
265
266 system_opregion = opregion;
267 register_acpi_notifier(&psb_intel_opregion_notifier);
268 }
269
270 if (opregion->asle)
271 psb_intel_opregion_enable_asle(dev);
272}
273
274void psb_intel_opregion_fini(struct drm_device *dev)
275{
276 struct drm_psb_private *dev_priv = dev->dev_private;
277 struct psb_intel_opregion *opregion = &dev_priv->opregion;
278
279 if (!opregion->header)
280 return;
281
282 if (opregion->acpi) {
283 opregion->acpi->drdy = 0;
284
285 system_opregion = NULL;
286 unregister_acpi_notifier(&psb_intel_opregion_notifier);
287 }
288
289 /* just clear all opregion memory pointers now */
290 iounmap(opregion->header);
291 opregion->header = NULL;
292 opregion->acpi = NULL;
293 opregion->swsci = NULL;
294 opregion->asle = NULL;
295 opregion->vbt = NULL;
296}
297
298int psb_intel_opregion_setup(struct drm_device *dev)
299{
300 struct drm_psb_private *dev_priv = dev->dev_private;
301 struct psb_intel_opregion *opregion = &dev_priv->opregion;
302 u32 opregion_phy, mboxes;
303 void __iomem *base;
304 int err = 0;
305
306 pci_read_config_dword(dev->pdev, PCI_ASLS, &opregion_phy);
307 if (opregion_phy == 0) {
308 DRM_DEBUG_DRIVER("ACPI Opregion not supported\n");
309 return -ENOTSUPP;
310 }
311 DRM_DEBUG("OpRegion detected at 0x%8x\n", opregion_phy);
312 base = acpi_os_ioremap(opregion_phy, 8*1024);
313 if (!base)
314 return -ENOMEM;
315
316 if (memcmp(base, OPREGION_SIGNATURE, 16)) {
317 DRM_DEBUG_DRIVER("opregion signature mismatch\n");
318 err = -EINVAL;
319 goto err_out;
320 }
321
322 opregion->header = base;
323 opregion->vbt = base + OPREGION_VBT_OFFSET;
324
325 opregion->lid_state = base + ACPI_CLID;
326
327 mboxes = opregion->header->mboxes;
328 if (mboxes & MBOX_ACPI) {
329 DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
330 opregion->acpi = base + OPREGION_ACPI_OFFSET;
331 }
332
333 if (mboxes & MBOX_ASLE) {
334 DRM_DEBUG_DRIVER("ASLE supported\n");
335 opregion->asle = base + OPREGION_ASLE_OFFSET;
336 }
337
338 return 0;
339
340err_out:
341 iounmap(base);
342 return err;
343}
344
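asle_set_backlight() in the new opregion.c rescales the firmware's 0-255 request twice: into the backlight device's own range for backlight_update_status(), and into the 0-100 value reported back through cblv. A worked example of both computations (the max_brightness of 4648 is a made-up but plausible value):

	/* Worked example: bclp = 128 after masking ASLE_BCLP_VALID off. */
	u32 bclp = 128;
	int max = 4648;				/* bd->props.max_brightness */
	int level = bclp * max / 255;		/* = 2333 */
	u32 cblv = (bclp * 0x64) / 0xff;	/* = 50, i.e. 50 percent */

	cblv |= ASLE_CBLV_VALID;		/* bit 31: value is valid */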
diff --git a/drivers/gpu/drm/gma500/intel_opregion.c b/drivers/gpu/drm/gma500/opregion.h
index d946bc1b17bf..72dc6b921265 100644
--- a/drivers/gpu/drm/gma500/intel_opregion.c
+++ b/drivers/gpu/drm/gma500/opregion.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2010 Intel Corporation 2 * Copyright 2012 Intel Corporation
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -20,62 +20,30 @@
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE. 21 * DEALINGS IN THE SOFTWARE.
22 * 22 *
23 * FIXME: resolve with the i915 version
24 */ 23 */
25 24
26#include "psb_drv.h" 25#if defined(CONFIG_ACPI)
26extern void psb_intel_opregion_asle_intr(struct drm_device *dev);
27extern void psb_intel_opregion_init(struct drm_device *dev);
28extern void psb_intel_opregion_fini(struct drm_device *dev);
29extern int psb_intel_opregion_setup(struct drm_device *dev);
27 30
28struct opregion_header { 31#else
29 u8 signature[16];
30 u32 size;
31 u32 opregion_ver;
32 u8 bios_ver[32];
33 u8 vbios_ver[16];
34 u8 driver_ver[16];
35 u32 mboxes;
36 u8 reserved[164];
37} __packed;
38 32
39struct opregion_apci { 33extern inline void psb_intel_opregion_asle_intr(struct drm_device *dev)
40 /*FIXME: add it later*/
41} __packed;
42
43struct opregion_swsci {
44 /*FIXME: add it later*/
45} __packed;
46
47struct opregion_acpi {
48 /*FIXME: add it later*/
49} __packed;
50
51int gma_intel_opregion_init(struct drm_device *dev)
52{ 34{
53 struct drm_psb_private *dev_priv = dev->dev_private; 35}
54 u32 opregion_phy;
55 void *base;
56 u32 *lid_state;
57
58 dev_priv->lid_state = NULL;
59
60 pci_read_config_dword(dev->pdev, 0xfc, &opregion_phy);
61 if (opregion_phy == 0)
62 return -ENOTSUPP;
63
64 base = ioremap(opregion_phy, 8*1024);
65 if (!base)
66 return -ENOMEM;
67 36
68 lid_state = base + 0x01ac; 37extern inline void psb_intel_opregion_init(struct drm_device *dev)
38{
39}
69 40
70 dev_priv->lid_state = lid_state; 41extern inline void psb_intel_opregion_fini(struct drm_device *dev)
71 dev_priv->lid_last_state = readl(lid_state); 42{
72 return 0;
73} 43}
74 44
75int gma_intel_opregion_exit(struct drm_device *dev) 45extern inline int psb_intel_opregion_setup(struct drm_device *dev)
76{ 46{
77 struct drm_psb_private *dev_priv = dev->dev_private;
78 if (dev_priv->lid_state)
79 iounmap(dev_priv->lid_state);
80 return 0; 47 return 0;
81} 48}
49#endif
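
The replacement header uses the usual conditional-stub pattern: real prototypes when CONFIG_ACPI is set, empty inline bodies otherwise, so callers in psb_drv.c need no #ifdefs of their own. A minimal sketch of the shape with a hypothetical feature name (the common kernel idiom is static inline for the stubs, rather than the extern inline used above):

/* Hypothetical feature "foo": the stub pattern in isolation */
#if defined(CONFIG_ACPI)
extern int foo_setup(struct drm_device *dev);	/* real code in foo.c */
extern void foo_fini(struct drm_device *dev);
#else
static inline int foo_setup(struct drm_device *dev)
{
	return 0;	/* report success; the feature is compiled out */
}
static inline void foo_fini(struct drm_device *dev)
{
}
#endif
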
diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
index 95d163e4f1f4..eff039bf92d4 100644
--- a/drivers/gpu/drm/gma500/psb_device.c
+++ b/drivers/gpu/drm/gma500/psb_device.c
@@ -197,7 +197,8 @@ static int psb_save_display_registers(struct drm_device *dev)
197 } 197 }
198 198
199 list_for_each_entry(connector, &dev->mode_config.connector_list, head) 199 list_for_each_entry(connector, &dev->mode_config.connector_list, head)
200 connector->funcs->save(connector); 200 if (connector->funcs->save)
201 connector->funcs->save(connector);
201 202
202 mutex_unlock(&dev->mode_config.mutex); 203 mutex_unlock(&dev->mode_config.mutex);
203 return 0; 204 return 0;
@@ -235,7 +236,8 @@ static int psb_restore_display_registers(struct drm_device *dev)
235 crtc->funcs->restore(crtc); 236 crtc->funcs->restore(crtc);
236 237
237 list_for_each_entry(connector, &dev->mode_config.connector_list, head) 238 list_for_each_entry(connector, &dev->mode_config.connector_list, head)
238 connector->funcs->restore(connector); 239 if (connector->funcs->restore)
240 connector->funcs->restore(connector);
239 241
240 mutex_unlock(&dev->mode_config.mutex); 242 mutex_unlock(&dev->mode_config.mutex);
241 return 0; 243 return 0;
@@ -289,17 +291,80 @@ static void psb_get_core_freq(struct drm_device *dev)
289 } 291 }
290} 292}
291 293
294/* Poulsbo */
295static const struct psb_offset psb_regmap[2] = {
296 {
297 .fp0 = FPA0,
298 .fp1 = FPA1,
299 .cntr = DSPACNTR,
300 .conf = PIPEACONF,
301 .src = PIPEASRC,
302 .dpll = DPLL_A,
303 .htotal = HTOTAL_A,
304 .hblank = HBLANK_A,
305 .hsync = HSYNC_A,
306 .vtotal = VTOTAL_A,
307 .vblank = VBLANK_A,
308 .vsync = VSYNC_A,
309 .stride = DSPASTRIDE,
310 .size = DSPASIZE,
311 .pos = DSPAPOS,
312 .base = DSPABASE,
313 .surf = DSPASURF,
314 .addr = DSPABASE,
315 .status = PIPEASTAT,
316 .linoff = DSPALINOFF,
317 .tileoff = DSPATILEOFF,
318 .palette = PALETTE_A,
319 },
320 {
321 .fp0 = FPB0,
322 .fp1 = FPB1,
323 .cntr = DSPBCNTR,
324 .conf = PIPEBCONF,
325 .src = PIPEBSRC,
326 .dpll = DPLL_B,
327 .htotal = HTOTAL_B,
328 .hblank = HBLANK_B,
329 .hsync = HSYNC_B,
330 .vtotal = VTOTAL_B,
331 .vblank = VBLANK_B,
332 .vsync = VSYNC_B,
333 .stride = DSPBSTRIDE,
334 .size = DSPBSIZE,
335 .pos = DSPBPOS,
336 .base = DSPBBASE,
337 .surf = DSPBSURF,
338 .addr = DSPBBASE,
339 .status = PIPEBSTAT,
340 .linoff = DSPBLINOFF,
341 .tileoff = DSPBTILEOFF,
342 .palette = PALETTE_B,
343 }
344};
345
292static int psb_chip_setup(struct drm_device *dev) 346static int psb_chip_setup(struct drm_device *dev)
293{ 347{
348 struct drm_psb_private *dev_priv = dev->dev_private;
349 dev_priv->regmap = psb_regmap;
294 psb_get_core_freq(dev); 350 psb_get_core_freq(dev);
295 gma_intel_setup_gmbus(dev); 351 gma_intel_setup_gmbus(dev);
296 gma_intel_opregion_init(dev); 352 psb_intel_opregion_init(dev);
297 psb_intel_init_bios(dev); 353 psb_intel_init_bios(dev);
298 return 0; 354 return 0;
299} 355}
300 356
357/* Not exactly an erratum, more an irritation */
358static void psb_chip_errata(struct drm_device *dev)
359{
360 struct drm_psb_private *dev_priv = dev->dev_private;
361 psb_lid_timer_init(dev_priv);
362}
363
301static void psb_chip_teardown(struct drm_device *dev) 364static void psb_chip_teardown(struct drm_device *dev)
302{ 365{
366 struct drm_psb_private *dev_priv = dev->dev_private;
367 psb_lid_timer_takedown(dev_priv);
303 gma_intel_teardown_gmbus(dev); 368 gma_intel_teardown_gmbus(dev);
304} 369}
305 370
@@ -308,9 +373,13 @@ const struct psb_ops psb_chip_ops = {
308 .accel_2d = 1, 373 .accel_2d = 1,
309 .pipes = 2, 374 .pipes = 2,
310 .crtcs = 2, 375 .crtcs = 2,
376 .hdmi_mask = (1 << 0),
377 .lvds_mask = (1 << 1),
378 .cursor_needs_phys = 1,
311 .sgx_offset = PSB_SGX_OFFSET, 379 .sgx_offset = PSB_SGX_OFFSET,
312 .chip_setup = psb_chip_setup, 380 .chip_setup = psb_chip_setup,
313 .chip_teardown = psb_chip_teardown, 381 .chip_teardown = psb_chip_teardown,
382 .errata = psb_chip_errata,
314 383
315 .crtc_helper = &psb_intel_helper_funcs, 384 .crtc_helper = &psb_intel_helper_funcs,
316 .crtc_funcs = &psb_intel_crtc_funcs, 385 .crtc_funcs = &psb_intel_crtc_funcs,
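
The psb_regmap table above is the core idea of this series: each chip publishes one struct psb_offset per pipe, and the display code performs a single table lookup instead of repeating (pipe == 0) ? REG_A : REG_B at every register access. A before/after sketch using the driver's REG_READ/REG_WRITE accessors:

/* Old style: one ternary per register, at every call site */
static void enable_dpll_old(struct drm_device *dev, int pipe)
{
	int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;

	REG_WRITE(dpll_reg, REG_READ(dpll_reg) | DPLL_VCO_ENABLE);
}

/* Regmap style: one lookup per function, then named fields */
static void enable_dpll_new(struct drm_device *dev, int pipe)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	const struct psb_offset *map = &dev_priv->regmap[pipe];

	REG_WRITE(map->dpll, REG_READ(map->dpll) | DPLL_VCO_ENABLE);
}
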
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index c34adf9d910a..caba6e08693c 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -79,6 +79,14 @@ static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
79 { 0x8086, 0x0be5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, 79 { 0x8086, 0x0be5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
80 { 0x8086, 0x0be6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, 80 { 0x8086, 0x0be6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
81 { 0x8086, 0x0be7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, 81 { 0x8086, 0x0be7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
82 { 0x8086, 0x0be8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
83 { 0x8086, 0x0be9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
84 { 0x8086, 0x0bea, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
85 { 0x8086, 0x0beb, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
86 { 0x8086, 0x0bec, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
87 { 0x8086, 0x0bed, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
88 { 0x8086, 0x0bee, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
89 { 0x8086, 0x0bef, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
82#endif 90#endif
83 { 0, } 91 { 0, }
84}; 92};
@@ -144,10 +152,6 @@ static void psb_lastclose(struct drm_device *dev)
144 return; 152 return;
145} 153}
146 154
147static void psb_do_takedown(struct drm_device *dev)
148{
149}
150
151static int psb_do_init(struct drm_device *dev) 155static int psb_do_init(struct drm_device *dev)
152{ 156{
153 struct drm_psb_private *dev_priv = dev->dev_private; 157 struct drm_psb_private *dev_priv = dev->dev_private;
@@ -172,24 +176,6 @@ static int psb_do_init(struct drm_device *dev)
172 dev_priv->gatt_free_offset = pg->mmu_gatt_start + 176 dev_priv->gatt_free_offset = pg->mmu_gatt_start +
173 (stolen_gtt << PAGE_SHIFT) * 1024; 177 (stolen_gtt << PAGE_SHIFT) * 1024;
174 178
175 if (1 || drm_debug) {
176 uint32_t core_id = PSB_RSGX32(PSB_CR_CORE_ID);
177 uint32_t core_rev = PSB_RSGX32(PSB_CR_CORE_REVISION);
178 DRM_INFO("SGX core id = 0x%08x\n", core_id);
179 DRM_INFO("SGX core rev major = 0x%02x, minor = 0x%02x\n",
180 (core_rev & _PSB_CC_REVISION_MAJOR_MASK) >>
181 _PSB_CC_REVISION_MAJOR_SHIFT,
182 (core_rev & _PSB_CC_REVISION_MINOR_MASK) >>
183 _PSB_CC_REVISION_MINOR_SHIFT);
184 DRM_INFO
185 ("SGX core rev maintenance = 0x%02x, designer = 0x%02x\n",
186 (core_rev & _PSB_CC_REVISION_MAINTENANCE_MASK) >>
187 _PSB_CC_REVISION_MAINTENANCE_SHIFT,
188 (core_rev & _PSB_CC_REVISION_DESIGNER_MASK) >>
189 _PSB_CC_REVISION_DESIGNER_SHIFT);
190 }
191
192
193 spin_lock_init(&dev_priv->irqmask_lock); 179 spin_lock_init(&dev_priv->irqmask_lock);
194 spin_lock_init(&dev_priv->lock_2d); 180 spin_lock_init(&dev_priv->lock_2d);
195 181
@@ -204,7 +190,6 @@ static int psb_do_init(struct drm_device *dev)
204 PSB_WSGX32(pg->gatt_start, PSB_CR_BIF_TWOD_REQ_BASE); 190 PSB_WSGX32(pg->gatt_start, PSB_CR_BIF_TWOD_REQ_BASE);
205 return 0; 191 return 0;
206out_err: 192out_err:
207 psb_do_takedown(dev);
208 return ret; 193 return ret;
209} 194}
210 195
@@ -214,18 +199,16 @@ static int psb_driver_unload(struct drm_device *dev)
214 199
215 /* Kill vblank etc here */ 200 /* Kill vblank etc here */
216 201
217 gma_backlight_exit(dev);
218
219 psb_modeset_cleanup(dev);
220 202
221 if (dev_priv) { 203 if (dev_priv) {
222 psb_lid_timer_takedown(dev_priv); 204 if (dev_priv->backlight_device)
223 gma_intel_opregion_exit(dev); 205 gma_backlight_exit(dev);
206 psb_modeset_cleanup(dev);
224 207
225 if (dev_priv->ops->chip_teardown) 208 if (dev_priv->ops->chip_teardown)
226 dev_priv->ops->chip_teardown(dev); 209 dev_priv->ops->chip_teardown(dev);
227 psb_do_takedown(dev);
228 210
211 psb_intel_opregion_fini(dev);
229 212
230 if (dev_priv->pf_pd) { 213 if (dev_priv->pf_pd) {
231 psb_mmu_free_pagedir(dev_priv->pf_pd); 214 psb_mmu_free_pagedir(dev_priv->pf_pd);
@@ -246,6 +229,7 @@ static int psb_driver_unload(struct drm_device *dev)
246 } 229 }
247 psb_gtt_takedown(dev); 230 psb_gtt_takedown(dev);
248 if (dev_priv->scratch_page) { 231 if (dev_priv->scratch_page) {
232 set_pages_wb(dev_priv->scratch_page, 1);
249 __free_page(dev_priv->scratch_page); 233 __free_page(dev_priv->scratch_page);
250 dev_priv->scratch_page = NULL; 234 dev_priv->scratch_page = NULL;
251 } 235 }
@@ -258,15 +242,13 @@ static int psb_driver_unload(struct drm_device *dev)
258 dev_priv->sgx_reg = NULL; 242 dev_priv->sgx_reg = NULL;
259 } 243 }
260 244
245 /* Destroy VBT data */
246 psb_intel_destroy_bios(dev);
247
261 kfree(dev_priv); 248 kfree(dev_priv);
262 dev->dev_private = NULL; 249 dev->dev_private = NULL;
263
264 /*destroy VBT data*/
265 psb_intel_destroy_bios(dev);
266 } 250 }
267
268 gma_power_uninit(dev); 251 gma_power_uninit(dev);
269
270 return 0; 252 return 0;
271} 253}
272 254
@@ -290,11 +272,6 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
290 272
291 pci_set_master(dev->pdev); 273 pci_set_master(dev->pdev);
292 274
293 if (!IS_PSB(dev)) {
294 if (pci_enable_msi(dev->pdev))
295 dev_warn(dev->dev, "Enabling MSI failed!\n");
296 }
297
298 dev_priv->num_pipe = dev_priv->ops->pipes; 275 dev_priv->num_pipe = dev_priv->ops->pipes;
299 276
300 resource_start = pci_resource_start(dev->pdev, PSB_MMIO_RESOURCE); 277 resource_start = pci_resource_start(dev->pdev, PSB_MMIO_RESOURCE);
@@ -309,6 +286,8 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
309 if (!dev_priv->sgx_reg) 286 if (!dev_priv->sgx_reg)
310 goto out_err; 287 goto out_err;
311 288
289 psb_intel_opregion_setup(dev);
290
312 ret = dev_priv->ops->chip_setup(dev); 291 ret = dev_priv->ops->chip_setup(dev);
313 if (ret) 292 if (ret)
314 goto out_err; 293 goto out_err;
@@ -348,10 +327,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
348 PSB_WSGX32(0x20000000, PSB_CR_PDS_EXEC_BASE); 327 PSB_WSGX32(0x20000000, PSB_CR_PDS_EXEC_BASE);
349 PSB_WSGX32(0x30000000, PSB_CR_BIF_3D_REQ_BASE); 328 PSB_WSGX32(0x30000000, PSB_CR_BIF_3D_REQ_BASE);
350 329
351/* igd_opregion_init(&dev_priv->opregion_dev); */
352 acpi_video_register(); 330 acpi_video_register();
353 if (dev_priv->lid_state)
354 psb_lid_timer_init(dev_priv);
355 331
356 ret = drm_vblank_init(dev, dev_priv->num_pipe); 332 ret = drm_vblank_init(dev, dev_priv->num_pipe);
357 if (ret) 333 if (ret)
@@ -370,8 +346,8 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
370 PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R); 346 PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R);
371 PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R); 347 PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R);
372 spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags); 348 spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
373 if (IS_PSB(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) 349
374 drm_irq_install(dev); 350 drm_irq_install(dev);
375 351
376 dev->vblank_disable_allowed = 1; 352 dev->vblank_disable_allowed = 1;
377 353
@@ -619,7 +595,7 @@ static const struct dev_pm_ops psb_pm_ops = {
619 .runtime_idle = psb_runtime_idle, 595 .runtime_idle = psb_runtime_idle,
620}; 596};
621 597
622static struct vm_operations_struct psb_gem_vm_ops = { 598static const struct vm_operations_struct psb_gem_vm_ops = {
623 .fault = psb_gem_fault, 599 .fault = psb_gem_fault,
624 .open = drm_gem_vm_open, 600 .open = drm_gem_vm_open,
625 .close = drm_gem_vm_close, 601 .close = drm_gem_vm_close,
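
One subtle teardown fix in this file: set_pages_wb() now runs on the scratch page before __free_page(). GTT scratch pages are switched to uncached while the hardware points at them; restoring write-back caching before the free keeps an uncached page off the allocator's free lists. A sketch of the lifecycle, assuming the x86 set_memory helpers and that the page was made uncached at setup:

/* Sketch: why teardown restores caching before freeing the page */
static struct page *scratch_alloc(void)
{
	struct page *page = alloc_page(GFP_DMA32 | __GFP_ZERO);

	if (page)
		set_pages_uc(page, 1);	/* uncached while the GTT uses it */
	return page;
}

static void scratch_free(struct page *page)
{
	set_pages_wb(page, 1);	/* back to write-back caching first... */
	__free_page(page);	/* ...so the allocator never sees a UC page */
}
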
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 40ce2c9bc2e4..1bd115ecefe1 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -30,6 +30,7 @@
30#include "psb_intel_drv.h" 30#include "psb_intel_drv.h"
31#include "gtt.h" 31#include "gtt.h"
32#include "power.h" 32#include "power.h"
33#include "opregion.h"
33#include "oaktrail.h" 34#include "oaktrail.h"
34 35
35/* Append new drm mode definition here, align with libdrm definition */ 36/* Append new drm mode definition here, align with libdrm definition */
@@ -120,6 +121,7 @@ enum {
120#define PSB_HWSTAM 0x2098 121#define PSB_HWSTAM 0x2098
121#define PSB_INSTPM 0x20C0 122#define PSB_INSTPM 0x20C0
122#define PSB_INT_IDENTITY_R 0x20A4 123#define PSB_INT_IDENTITY_R 0x20A4
124#define _PSB_IRQ_ASLE (1<<0)
123#define _MDFLD_PIPEC_EVENT_FLAG (1<<2) 125#define _MDFLD_PIPEC_EVENT_FLAG (1<<2)
124#define _MDFLD_PIPEC_VBLANK_FLAG (1<<3) 126#define _MDFLD_PIPEC_VBLANK_FLAG (1<<3)
125#define _PSB_DPST_PIPEB_FLAG (1<<4) 127#define _PSB_DPST_PIPEB_FLAG (1<<4)
@@ -130,6 +132,7 @@ enum {
130#define _PSB_VSYNC_PIPEA_FLAG (1<<7) 132#define _PSB_VSYNC_PIPEA_FLAG (1<<7)
131#define _MDFLD_MIPIA_FLAG (1<<16) 133#define _MDFLD_MIPIA_FLAG (1<<16)
132#define _MDFLD_MIPIC_FLAG (1<<17) 134#define _MDFLD_MIPIC_FLAG (1<<17)
135#define _PSB_IRQ_DISP_HOTSYNC (1<<17)
133#define _PSB_IRQ_SGX_FLAG (1<<18) 136#define _PSB_IRQ_SGX_FLAG (1<<18)
134#define _PSB_IRQ_MSVDX_FLAG (1<<19) 137#define _PSB_IRQ_MSVDX_FLAG (1<<19)
135#define _LNC_IRQ_TOPAZ_FLAG (1<<20) 138#define _LNC_IRQ_TOPAZ_FLAG (1<<20)
@@ -257,7 +260,8 @@ struct psb_intel_opregion {
257 struct opregion_acpi *acpi; 260 struct opregion_acpi *acpi;
258 struct opregion_swsci *swsci; 261 struct opregion_swsci *swsci;
259 struct opregion_asle *asle; 262 struct opregion_asle *asle;
260 int enabled; 263 void *vbt;
264 u32 __iomem *lid_state;
261}; 265};
262 266
263struct sdvo_device_mapping { 267struct sdvo_device_mapping {
@@ -277,50 +281,72 @@ struct intel_gmbus {
277}; 281};
278 282
279/* 283/*
284 * Register offset maps
285 */
286
287struct psb_offset {
288 u32 fp0;
289 u32 fp1;
290 u32 cntr;
291 u32 conf;
292 u32 src;
293 u32 dpll;
294 u32 dpll_md;
295 u32 htotal;
296 u32 hblank;
297 u32 hsync;
298 u32 vtotal;
299 u32 vblank;
300 u32 vsync;
301 u32 stride;
302 u32 size;
303 u32 pos;
304 u32 surf;
305 u32 addr;
306 u32 base;
307 u32 status;
308 u32 linoff;
309 u32 tileoff;
310 u32 palette;
311};
312
313/*
280 * Register save state. This is used to hold the context when the 314 * Register save state. This is used to hold the context when the
281 * device is powered off. In the case of Oaktrail this can (but does not 315 * device is powered off. In the case of Oaktrail this can (but does not
282 * yet) include screen blank. Operations occurring during the save 316 * yet) include screen blank. Operations occurring during the save
283 * update the register cache instead. 317 * update the register cache instead.
284 */ 318 */
319
320/*
321 * Common status for pipes.
322 */
323struct psb_pipe {
324 u32 fp0;
325 u32 fp1;
326 u32 cntr;
327 u32 conf;
328 u32 src;
329 u32 dpll;
330 u32 dpll_md;
331 u32 htotal;
332 u32 hblank;
333 u32 hsync;
334 u32 vtotal;
335 u32 vblank;
336 u32 vsync;
337 u32 stride;
338 u32 size;
339 u32 pos;
340 u32 base;
341 u32 surf;
342 u32 addr;
343 u32 status;
344 u32 linoff;
345 u32 tileoff;
346 u32 palette[256];
347};
348
285struct psb_state { 349struct psb_state {
286 uint32_t saveDSPACNTR;
287 uint32_t saveDSPBCNTR;
288 uint32_t savePIPEACONF;
289 uint32_t savePIPEBCONF;
290 uint32_t savePIPEASRC;
291 uint32_t savePIPEBSRC;
292 uint32_t saveFPA0;
293 uint32_t saveFPA1;
294 uint32_t saveDPLL_A;
295 uint32_t saveDPLL_A_MD;
296 uint32_t saveHTOTAL_A;
297 uint32_t saveHBLANK_A;
298 uint32_t saveHSYNC_A;
299 uint32_t saveVTOTAL_A;
300 uint32_t saveVBLANK_A;
301 uint32_t saveVSYNC_A;
302 uint32_t saveDSPASTRIDE;
303 uint32_t saveDSPASIZE;
304 uint32_t saveDSPAPOS;
305 uint32_t saveDSPABASE;
306 uint32_t saveDSPASURF;
307 uint32_t saveDSPASTATUS;
308 uint32_t saveFPB0;
309 uint32_t saveFPB1;
310 uint32_t saveDPLL_B;
311 uint32_t saveDPLL_B_MD;
312 uint32_t saveHTOTAL_B;
313 uint32_t saveHBLANK_B;
314 uint32_t saveHSYNC_B;
315 uint32_t saveVTOTAL_B;
316 uint32_t saveVBLANK_B;
317 uint32_t saveVSYNC_B;
318 uint32_t saveDSPBSTRIDE;
319 uint32_t saveDSPBSIZE;
320 uint32_t saveDSPBPOS;
321 uint32_t saveDSPBBASE;
322 uint32_t saveDSPBSURF;
323 uint32_t saveDSPBSTATUS;
324 uint32_t saveVCLK_DIVISOR_VGA0; 350 uint32_t saveVCLK_DIVISOR_VGA0;
325 uint32_t saveVCLK_DIVISOR_VGA1; 351 uint32_t saveVCLK_DIVISOR_VGA1;
326 uint32_t saveVCLK_POST_DIV; 352 uint32_t saveVCLK_POST_DIV;
@@ -335,14 +361,8 @@ struct psb_state {
335 uint32_t savePP_CONTROL; 361 uint32_t savePP_CONTROL;
336 uint32_t savePP_CYCLE; 362 uint32_t savePP_CYCLE;
337 uint32_t savePFIT_CONTROL; 363 uint32_t savePFIT_CONTROL;
338 uint32_t savePaletteA[256];
339 uint32_t savePaletteB[256];
340 uint32_t saveCLOCKGATING; 364 uint32_t saveCLOCKGATING;
341 uint32_t saveDSPARB; 365 uint32_t saveDSPARB;
342 uint32_t saveDSPATILEOFF;
343 uint32_t saveDSPBTILEOFF;
344 uint32_t saveDSPAADDR;
345 uint32_t saveDSPBADDR;
346 uint32_t savePFIT_AUTO_RATIOS; 366 uint32_t savePFIT_AUTO_RATIOS;
347 uint32_t savePFIT_PGM_RATIOS; 367 uint32_t savePFIT_PGM_RATIOS;
348 uint32_t savePP_ON_DELAYS; 368 uint32_t savePP_ON_DELAYS;
@@ -350,8 +370,6 @@ struct psb_state {
350 uint32_t savePP_DIVISOR; 370 uint32_t savePP_DIVISOR;
351 uint32_t saveBCLRPAT_A; 371 uint32_t saveBCLRPAT_A;
352 uint32_t saveBCLRPAT_B; 372 uint32_t saveBCLRPAT_B;
353 uint32_t saveDSPALINOFF;
354 uint32_t saveDSPBLINOFF;
355 uint32_t savePERF_MODE; 373 uint32_t savePERF_MODE;
356 uint32_t saveDSPFW1; 374 uint32_t saveDSPFW1;
357 uint32_t saveDSPFW2; 375 uint32_t saveDSPFW2;
@@ -366,8 +384,6 @@ struct psb_state {
366 uint32_t saveDSPBCURSOR_BASE; 384 uint32_t saveDSPBCURSOR_BASE;
367 uint32_t saveDSPACURSOR_POS; 385 uint32_t saveDSPACURSOR_POS;
368 uint32_t saveDSPBCURSOR_POS; 386 uint32_t saveDSPBCURSOR_POS;
369 uint32_t save_palette_a[256];
370 uint32_t save_palette_b[256];
371 uint32_t saveOV_OVADD; 387 uint32_t saveOV_OVADD;
372 uint32_t saveOV_OGAMC0; 388 uint32_t saveOV_OGAMC0;
373 uint32_t saveOV_OGAMC1; 389 uint32_t saveOV_OGAMC1;
@@ -390,64 +406,7 @@ struct psb_state {
390}; 406};
391 407
392struct medfield_state { 408struct medfield_state {
393 uint32_t saveDPLL_A;
394 uint32_t saveFPA0;
395 uint32_t savePIPEACONF;
396 uint32_t saveHTOTAL_A;
397 uint32_t saveHBLANK_A;
398 uint32_t saveHSYNC_A;
399 uint32_t saveVTOTAL_A;
400 uint32_t saveVBLANK_A;
401 uint32_t saveVSYNC_A;
402 uint32_t savePIPEASRC;
403 uint32_t saveDSPASTRIDE;
404 uint32_t saveDSPALINOFF;
405 uint32_t saveDSPATILEOFF;
406 uint32_t saveDSPASIZE;
407 uint32_t saveDSPAPOS;
408 uint32_t saveDSPASURF;
409 uint32_t saveDSPACNTR;
410 uint32_t saveDSPASTATUS;
411 uint32_t save_palette_a[256];
412 uint32_t saveMIPI; 409 uint32_t saveMIPI;
413
414 uint32_t saveDPLL_B;
415 uint32_t saveFPB0;
416 uint32_t savePIPEBCONF;
417 uint32_t saveHTOTAL_B;
418 uint32_t saveHBLANK_B;
419 uint32_t saveHSYNC_B;
420 uint32_t saveVTOTAL_B;
421 uint32_t saveVBLANK_B;
422 uint32_t saveVSYNC_B;
423 uint32_t savePIPEBSRC;
424 uint32_t saveDSPBSTRIDE;
425 uint32_t saveDSPBLINOFF;
426 uint32_t saveDSPBTILEOFF;
427 uint32_t saveDSPBSIZE;
428 uint32_t saveDSPBPOS;
429 uint32_t saveDSPBSURF;
430 uint32_t saveDSPBCNTR;
431 uint32_t saveDSPBSTATUS;
432 uint32_t save_palette_b[256];
433
434 uint32_t savePIPECCONF;
435 uint32_t saveHTOTAL_C;
436 uint32_t saveHBLANK_C;
437 uint32_t saveHSYNC_C;
438 uint32_t saveVTOTAL_C;
439 uint32_t saveVBLANK_C;
440 uint32_t saveVSYNC_C;
441 uint32_t savePIPECSRC;
442 uint32_t saveDSPCSTRIDE;
443 uint32_t saveDSPCLINOFF;
444 uint32_t saveDSPCTILEOFF;
445 uint32_t saveDSPCSIZE;
446 uint32_t saveDSPCPOS;
447 uint32_t saveDSPCSURF;
448 uint32_t saveDSPCCNTR;
449 uint32_t saveDSPCSTATUS;
450 uint32_t save_palette_c[256];
451 uint32_t saveMIPI_C; 410 uint32_t saveMIPI_C;
452 411
453 uint32_t savePFIT_CONTROL; 412 uint32_t savePFIT_CONTROL;
@@ -476,6 +435,7 @@ struct cdv_state {
476}; 435};
477 436
478struct psb_save_area { 437struct psb_save_area {
438 struct psb_pipe pipe[3];
479 uint32_t saveBSM; 439 uint32_t saveBSM;
480 uint32_t saveVBT; 440 uint32_t saveVBT;
481 union { 441 union {
@@ -494,15 +454,19 @@ struct psb_ops;
494struct drm_psb_private { 454struct drm_psb_private {
495 struct drm_device *dev; 455 struct drm_device *dev;
496 const struct psb_ops *ops; 456 const struct psb_ops *ops;
457 const struct psb_offset *regmap;
458
459 struct child_device_config *child_dev;
460 int child_dev_num;
497 461
498 struct psb_gtt gtt; 462 struct psb_gtt gtt;
499 463
500 /* GTT Memory manager */ 464 /* GTT Memory manager */
501 struct psb_gtt_mm *gtt_mm; 465 struct psb_gtt_mm *gtt_mm;
502 struct page *scratch_page; 466 struct page *scratch_page;
503 u32 *gtt_map; 467 u32 __iomem *gtt_map;
504 uint32_t stolen_base; 468 uint32_t stolen_base;
505 void *vram_addr; 469 u8 __iomem *vram_addr;
506 unsigned long vram_stolen_size; 470 unsigned long vram_stolen_size;
507 int gtt_initialized; 471 int gtt_initialized;
508 u16 gmch_ctrl; /* Saved GTT setup */ 472 u16 gmch_ctrl; /* Saved GTT setup */
@@ -518,8 +482,8 @@ struct drm_psb_private {
518 * Register base 482 * Register base
519 */ 483 */
520 484
521 uint8_t *sgx_reg; 485 uint8_t __iomem *sgx_reg;
522 uint8_t *vdc_reg; 486 uint8_t __iomem *vdc_reg;
523 uint32_t gatt_free_offset; 487 uint32_t gatt_free_offset;
524 488
525 /* 489 /*
@@ -543,6 +507,7 @@ struct drm_psb_private {
543 * Modesetting 507 * Modesetting
544 */ 508 */
545 struct psb_intel_mode_device mode_dev; 509 struct psb_intel_mode_device mode_dev;
510 bool modeset; /* true if we have done the mode_device setup */
546 511
547 struct drm_crtc *plane_to_crtc_mapping[PSB_NUM_PIPE]; 512 struct drm_crtc *plane_to_crtc_mapping[PSB_NUM_PIPE];
548 struct drm_crtc *pipe_to_crtc_mapping[PSB_NUM_PIPE]; 513 struct drm_crtc *pipe_to_crtc_mapping[PSB_NUM_PIPE];
@@ -605,7 +570,7 @@ struct drm_psb_private {
605 int rpm_enabled; 570 int rpm_enabled;
606 571
607 /* MID specific */ 572 /* MID specific */
608 struct oaktrail_vbt vbt_data; 573 bool has_gct;
609 struct oaktrail_gct_data gct_data; 574 struct oaktrail_gct_data gct_data;
610 575
611 /* Oaktrail HDMI state */ 576 /* Oaktrail HDMI state */
@@ -621,6 +586,11 @@ struct drm_psb_private {
621 uint32_t msi_addr; 586 uint32_t msi_addr;
622 uint32_t msi_data; 587 uint32_t msi_data;
623 588
589 /*
590 * Hotplug handling
591 */
592
593 struct work_struct hotplug_work;
624 594
625 /* 595 /*
626 * LID-Switch 596 * LID-Switch
@@ -628,7 +598,6 @@ struct drm_psb_private {
628 spinlock_t lid_lock; 598 spinlock_t lid_lock;
629 struct timer_list lid_timer; 599 struct timer_list lid_timer;
630 struct psb_intel_opregion opregion; 600 struct psb_intel_opregion opregion;
631 u32 *lid_state;
632 u32 lid_last_state; 601 u32 lid_last_state;
633 602
634 /* 603 /*
@@ -669,6 +638,8 @@ struct drm_psb_private {
669 u32 dspcntr[3]; 638 u32 dspcntr[3];
670 639
671 int mdfld_panel_id; 640 int mdfld_panel_id;
641
642 bool dplla_96mhz; /* DPLL data from the VBT */
672}; 643};
673 644
674 645
@@ -682,6 +653,9 @@ struct psb_ops {
682 int pipes; /* Number of output pipes */ 653 int pipes; /* Number of output pipes */
683 int crtcs; /* Number of CRTCs */ 654 int crtcs; /* Number of CRTCs */
684 int sgx_offset; /* Base offset of SGX device */ 655 int sgx_offset; /* Base offset of SGX device */
656 int hdmi_mask; /* Mask of HDMI CRTCs */
657 int lvds_mask; /* Mask of LVDS CRTCs */
658 int cursor_needs_phys; /* If cursor base reg needs a physical address */
685 659
686 /* Sub functions */ 660 /* Sub functions */
687 struct drm_crtc_helper_funcs const *crtc_helper; 661 struct drm_crtc_helper_funcs const *crtc_helper;
@@ -690,9 +664,13 @@ struct psb_ops {
690 /* Setup hooks */ 664 /* Setup hooks */
691 int (*chip_setup)(struct drm_device *dev); 665 int (*chip_setup)(struct drm_device *dev);
692 void (*chip_teardown)(struct drm_device *dev); 666 void (*chip_teardown)(struct drm_device *dev);
667 /* Optional helper called after modeset */
668 void (*errata)(struct drm_device *dev);
693 669
694 /* Display management hooks */ 670 /* Display management hooks */
695 int (*output_init)(struct drm_device *dev); 671 int (*output_init)(struct drm_device *dev);
672 int (*hotplug)(struct drm_device *dev);
673 void (*hotplug_enable)(struct drm_device *dev, bool on);
696 /* Power management hooks */ 674 /* Power management hooks */
697 void (*init_pm)(struct drm_device *dev); 675 void (*init_pm)(struct drm_device *dev);
698 int (*save_regs)(struct drm_device *dev); 676 int (*save_regs)(struct drm_device *dev);
@@ -789,12 +767,6 @@ psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
789extern u32 psb_get_vblank_counter(struct drm_device *dev, int crtc); 767extern u32 psb_get_vblank_counter(struct drm_device *dev, int crtc);
790 768
791/* 769/*
792 * intel_opregion.c
793 */
794extern int gma_intel_opregion_init(struct drm_device *dev);
795extern int gma_intel_opregion_exit(struct drm_device *dev);
796
797/*
798 * framebuffer.c 770 * framebuffer.c
799 */ 771 */
800extern int psbfb_probed(struct drm_device *dev); 772extern int psbfb_probed(struct drm_device *dev);
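
The save-state side mirrors the regmap change: the long per-pipe lists of saveFOO_A/saveFOO_B (and _C on Medfield) collapse into the psb_pipe array inside psb_save_area, indexed by pipe number. A sketch of what a save routine looks like on the new layout (hypothetical helper; the real work happens in the per-chip save_regs hooks):

static void save_pipe_timings(struct drm_device *dev, int pipe)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	const struct psb_offset *map = &dev_priv->regmap[pipe];
	struct psb_pipe *p = &dev_priv->regs.pipe[pipe];

	p->dpll = REG_READ(map->dpll);
	p->conf = REG_READ(map->conf);
	p->src = REG_READ(map->src);
	p->htotal = REG_READ(map->htotal);
	p->vtotal = REG_READ(map->vtotal);
	/* ...one assignment per field, no A/B/C special-casing */
}
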
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 2616558457c8..36c3c99612f6 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -337,15 +337,12 @@ static int psb_intel_pipe_set_base(struct drm_crtc *crtc,
337 int x, int y, struct drm_framebuffer *old_fb) 337 int x, int y, struct drm_framebuffer *old_fb)
338{ 338{
339 struct drm_device *dev = crtc->dev; 339 struct drm_device *dev = crtc->dev;
340 /* struct drm_i915_master_private *master_priv; */ 340 struct drm_psb_private *dev_priv = dev->dev_private;
341 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 341 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
342 struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb); 342 struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
343 int pipe = psb_intel_crtc->pipe; 343 int pipe = psb_intel_crtc->pipe;
344 const struct psb_offset *map = &dev_priv->regmap[pipe];
344 unsigned long start, offset; 345 unsigned long start, offset;
345 int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE);
346 int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
347 int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
348 int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
349 u32 dspcntr; 346 u32 dspcntr;
350 int ret = 0; 347 int ret = 0;
351 348
@@ -367,9 +364,9 @@ static int psb_intel_pipe_set_base(struct drm_crtc *crtc,
367 364
368 offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8); 365 offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
369 366
370 REG_WRITE(dspstride, crtc->fb->pitches[0]); 367 REG_WRITE(map->stride, crtc->fb->pitches[0]);
371 368
372 dspcntr = REG_READ(dspcntr_reg); 369 dspcntr = REG_READ(map->cntr);
373 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; 370 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
374 371
375 switch (crtc->fb->bits_per_pixel) { 372 switch (crtc->fb->bits_per_pixel) {
@@ -392,18 +389,10 @@ static int psb_intel_pipe_set_base(struct drm_crtc *crtc,
392 psb_gtt_unpin(psbfb->gtt); 389 psb_gtt_unpin(psbfb->gtt);
393 goto psb_intel_pipe_set_base_exit; 390 goto psb_intel_pipe_set_base_exit;
394 } 391 }
395 REG_WRITE(dspcntr_reg, dspcntr); 392 REG_WRITE(map->cntr, dspcntr);
396
397 393
398 if (0 /* FIXMEAC - check what PSB needs */) { 394 REG_WRITE(map->base, start + offset);
399 REG_WRITE(dspbase, offset); 395 REG_READ(map->base);
400 REG_READ(dspbase);
401 REG_WRITE(dspsurf, start);
402 REG_READ(dspsurf);
403 } else {
404 REG_WRITE(dspbase, start + offset);
405 REG_READ(dspbase);
406 }
407 396
408psb_intel_pipe_cleaner: 397psb_intel_pipe_cleaner:
409 /* If there was a previous display we can now unpin it */ 398 /* If there was a previous display we can now unpin it */
@@ -424,14 +413,10 @@ psb_intel_pipe_set_base_exit:
424static void psb_intel_crtc_dpms(struct drm_crtc *crtc, int mode) 413static void psb_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
425{ 414{
426 struct drm_device *dev = crtc->dev; 415 struct drm_device *dev = crtc->dev;
427 /* struct drm_i915_master_private *master_priv; */ 416 struct drm_psb_private *dev_priv = dev->dev_private;
428 /* struct drm_i915_private *dev_priv = dev->dev_private; */
429 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 417 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
430 int pipe = psb_intel_crtc->pipe; 418 int pipe = psb_intel_crtc->pipe;
431 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; 419 const struct psb_offset *map = &dev_priv->regmap[pipe];
432 int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
433 int dspbase_reg = (pipe == 0) ? DSPABASE : DSPBBASE;
434 int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
435 u32 temp; 420 u32 temp;
436 421
437 /* XXX: When our outputs are all unaware of DPMS modes other than off 422 /* XXX: When our outputs are all unaware of DPMS modes other than off
@@ -442,34 +427,34 @@ static void psb_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
442 case DRM_MODE_DPMS_STANDBY: 427 case DRM_MODE_DPMS_STANDBY:
443 case DRM_MODE_DPMS_SUSPEND: 428 case DRM_MODE_DPMS_SUSPEND:
444 /* Enable the DPLL */ 429 /* Enable the DPLL */
445 temp = REG_READ(dpll_reg); 430 temp = REG_READ(map->dpll);
446 if ((temp & DPLL_VCO_ENABLE) == 0) { 431 if ((temp & DPLL_VCO_ENABLE) == 0) {
447 REG_WRITE(dpll_reg, temp); 432 REG_WRITE(map->dpll, temp);
448 REG_READ(dpll_reg); 433 REG_READ(map->dpll);
449 /* Wait for the clocks to stabilize. */ 434 /* Wait for the clocks to stabilize. */
450 udelay(150); 435 udelay(150);
451 REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE); 436 REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
452 REG_READ(dpll_reg); 437 REG_READ(map->dpll);
453 /* Wait for the clocks to stabilize. */ 438 /* Wait for the clocks to stabilize. */
454 udelay(150); 439 udelay(150);
455 REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE); 440 REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
456 REG_READ(dpll_reg); 441 REG_READ(map->dpll);
457 /* Wait for the clocks to stabilize. */ 442 /* Wait for the clocks to stabilize. */
458 udelay(150); 443 udelay(150);
459 } 444 }
460 445
461 /* Enable the pipe */ 446 /* Enable the pipe */
462 temp = REG_READ(pipeconf_reg); 447 temp = REG_READ(map->conf);
463 if ((temp & PIPEACONF_ENABLE) == 0) 448 if ((temp & PIPEACONF_ENABLE) == 0)
464 REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE); 449 REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);
465 450
466 /* Enable the plane */ 451 /* Enable the plane */
467 temp = REG_READ(dspcntr_reg); 452 temp = REG_READ(map->cntr);
468 if ((temp & DISPLAY_PLANE_ENABLE) == 0) { 453 if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
469 REG_WRITE(dspcntr_reg, 454 REG_WRITE(map->cntr,
470 temp | DISPLAY_PLANE_ENABLE); 455 temp | DISPLAY_PLANE_ENABLE);
471 /* Flush the plane changes */ 456 /* Flush the plane changes */
472 REG_WRITE(dspbase_reg, REG_READ(dspbase_reg)); 457 REG_WRITE(map->base, REG_READ(map->base));
473 } 458 }
474 459
475 psb_intel_crtc_load_lut(crtc); 460 psb_intel_crtc_load_lut(crtc);
@@ -487,29 +472,29 @@ static void psb_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
487 REG_WRITE(VGACNTRL, VGA_DISP_DISABLE); 472 REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
488 473
489 /* Disable display plane */ 474 /* Disable display plane */
490 temp = REG_READ(dspcntr_reg); 475 temp = REG_READ(map->cntr);
491 if ((temp & DISPLAY_PLANE_ENABLE) != 0) { 476 if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
492 REG_WRITE(dspcntr_reg, 477 REG_WRITE(map->cntr,
493 temp & ~DISPLAY_PLANE_ENABLE); 478 temp & ~DISPLAY_PLANE_ENABLE);
494 /* Flush the plane changes */ 479 /* Flush the plane changes */
495 REG_WRITE(dspbase_reg, REG_READ(dspbase_reg)); 480 REG_WRITE(map->base, REG_READ(map->base));
496 REG_READ(dspbase_reg); 481 REG_READ(map->base);
497 } 482 }
498 483
499 /* Next, disable display pipes */ 484 /* Next, disable display pipes */
500 temp = REG_READ(pipeconf_reg); 485 temp = REG_READ(map->conf);
501 if ((temp & PIPEACONF_ENABLE) != 0) { 486 if ((temp & PIPEACONF_ENABLE) != 0) {
502 REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE); 487 REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
503 REG_READ(pipeconf_reg); 488 REG_READ(map->conf);
504 } 489 }
505 490
506 /* Wait for vblank for the disable to take effect. */ 491 /* Wait for vblank for the disable to take effect. */
507 psb_intel_wait_for_vblank(dev); 492 psb_intel_wait_for_vblank(dev);
508 493
509 temp = REG_READ(dpll_reg); 494 temp = REG_READ(map->dpll);
510 if ((temp & DPLL_VCO_ENABLE) != 0) { 495 if ((temp & DPLL_VCO_ENABLE) != 0) {
511 REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE); 496 REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
512 REG_READ(dpll_reg); 497 REG_READ(map->dpll);
513 } 498 }
514 499
515 /* Wait for the clocks to turn off. */ 500 /* Wait for the clocks to turn off. */
@@ -589,22 +574,11 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
589 struct drm_framebuffer *old_fb) 574 struct drm_framebuffer *old_fb)
590{ 575{
591 struct drm_device *dev = crtc->dev; 576 struct drm_device *dev = crtc->dev;
577 struct drm_psb_private *dev_priv = dev->dev_private;
592 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 578 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
593 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; 579 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
594 int pipe = psb_intel_crtc->pipe; 580 int pipe = psb_intel_crtc->pipe;
595 int fp_reg = (pipe == 0) ? FPA0 : FPB0; 581 const struct psb_offset *map = &dev_priv->regmap[pipe];
596 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
597 int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
598 int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
599 int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
600 int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
601 int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
602 int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
603 int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
604 int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
605 int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
606 int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
607 int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
608 int refclk; 582 int refclk;
609 struct psb_intel_clock_t clock; 583 struct psb_intel_clock_t clock;
610 u32 dpll = 0, fp = 0, dspcntr, pipeconf; 584 u32 dpll = 0, fp = 0, dspcntr, pipeconf;
@@ -690,7 +664,7 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
690 dpll |= PLL_REF_INPUT_DREFCLK; 664 dpll |= PLL_REF_INPUT_DREFCLK;
691 665
692 /* setup pipeconf */ 666 /* setup pipeconf */
693 pipeconf = REG_READ(pipeconf_reg); 667 pipeconf = REG_READ(map->conf);
694 668
695 /* Set up the display plane register */ 669 /* Set up the display plane register */
696 dspcntr = DISPPLANE_GAMMA_ENABLE; 670 dspcntr = DISPPLANE_GAMMA_ENABLE;
@@ -712,9 +686,9 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
712 drm_mode_debug_printmodeline(mode); 686 drm_mode_debug_printmodeline(mode);
713 687
714 if (dpll & DPLL_VCO_ENABLE) { 688 if (dpll & DPLL_VCO_ENABLE) {
715 REG_WRITE(fp_reg, fp); 689 REG_WRITE(map->fp0, fp);
716 REG_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE); 690 REG_WRITE(map->dpll, dpll & ~DPLL_VCO_ENABLE);
717 REG_READ(dpll_reg); 691 REG_READ(map->dpll);
718 udelay(150); 692 udelay(150);
719 } 693 }
720 694
@@ -747,45 +721,45 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
747 REG_READ(LVDS); 721 REG_READ(LVDS);
748 } 722 }
749 723
750 REG_WRITE(fp_reg, fp); 724 REG_WRITE(map->fp0, fp);
751 REG_WRITE(dpll_reg, dpll); 725 REG_WRITE(map->dpll, dpll);
752 REG_READ(dpll_reg); 726 REG_READ(map->dpll);
753 /* Wait for the clocks to stabilize. */ 727 /* Wait for the clocks to stabilize. */
754 udelay(150); 728 udelay(150);
755 729
756 /* write it again -- the BIOS does, after all */ 730 /* write it again -- the BIOS does, after all */
757 REG_WRITE(dpll_reg, dpll); 731 REG_WRITE(map->dpll, dpll);
758 732
759 REG_READ(dpll_reg); 733 REG_READ(map->dpll);
760 /* Wait for the clocks to stabilize. */ 734 /* Wait for the clocks to stabilize. */
761 udelay(150); 735 udelay(150);
762 736
763 REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) | 737 REG_WRITE(map->htotal, (adjusted_mode->crtc_hdisplay - 1) |
764 ((adjusted_mode->crtc_htotal - 1) << 16)); 738 ((adjusted_mode->crtc_htotal - 1) << 16));
765 REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) | 739 REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start - 1) |
766 ((adjusted_mode->crtc_hblank_end - 1) << 16)); 740 ((adjusted_mode->crtc_hblank_end - 1) << 16));
767 REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) | 741 REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start - 1) |
768 ((adjusted_mode->crtc_hsync_end - 1) << 16)); 742 ((adjusted_mode->crtc_hsync_end - 1) << 16));
769 REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) | 743 REG_WRITE(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) |
770 ((adjusted_mode->crtc_vtotal - 1) << 16)); 744 ((adjusted_mode->crtc_vtotal - 1) << 16));
771 REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) | 745 REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start - 1) |
772 ((adjusted_mode->crtc_vblank_end - 1) << 16)); 746 ((adjusted_mode->crtc_vblank_end - 1) << 16));
773 REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) | 747 REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start - 1) |
774 ((adjusted_mode->crtc_vsync_end - 1) << 16)); 748 ((adjusted_mode->crtc_vsync_end - 1) << 16));
775 /* pipesrc and dspsize control the size that is scaled from, 749 /* pipesrc and dspsize control the size that is scaled from,
776 * which should always be the user's requested size. 750 * which should always be the user's requested size.
777 */ 751 */
778 REG_WRITE(dspsize_reg, 752 REG_WRITE(map->size,
779 ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1)); 753 ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
780 REG_WRITE(dsppos_reg, 0); 754 REG_WRITE(map->pos, 0);
781 REG_WRITE(pipesrc_reg, 755 REG_WRITE(map->src,
782 ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); 756 ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
783 REG_WRITE(pipeconf_reg, pipeconf); 757 REG_WRITE(map->conf, pipeconf);
784 REG_READ(pipeconf_reg); 758 REG_READ(map->conf);
785 759
786 psb_intel_wait_for_vblank(dev); 760 psb_intel_wait_for_vblank(dev);
787 761
788 REG_WRITE(dspcntr_reg, dspcntr); 762 REG_WRITE(map->cntr, dspcntr);
789 763
790 /* Flush the plane changes */ 764 /* Flush the plane changes */
791 crtc_funcs->mode_set_base(crtc, x, y, old_fb); 765 crtc_funcs->mode_set_base(crtc, x, y, old_fb);
@@ -799,10 +773,10 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
799void psb_intel_crtc_load_lut(struct drm_crtc *crtc) 773void psb_intel_crtc_load_lut(struct drm_crtc *crtc)
800{ 774{
801 struct drm_device *dev = crtc->dev; 775 struct drm_device *dev = crtc->dev;
802 struct drm_psb_private *dev_priv = 776 struct drm_psb_private *dev_priv = dev->dev_private;
803 (struct drm_psb_private *)dev->dev_private;
804 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 777 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
805 int palreg = PALETTE_A; 778 const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
779 int palreg = map->palette;
806 int i; 780 int i;
807 781
808 /* The clocks have to be on to load the palette. */ 782 /* The clocks have to be on to load the palette. */
@@ -811,12 +785,7 @@ void psb_intel_crtc_load_lut(struct drm_crtc *crtc)
811 785
812 switch (psb_intel_crtc->pipe) { 786 switch (psb_intel_crtc->pipe) {
813 case 0: 787 case 0:
814 break;
815 case 1: 788 case 1:
816 palreg = PALETTE_B;
817 break;
818 case 2:
819 palreg = PALETTE_C;
820 break; 789 break;
821 default: 790 default:
822 dev_err(dev->dev, "Illegal Pipe Number.\n"); 791 dev_err(dev->dev, "Illegal Pipe Number.\n");
@@ -836,7 +805,7 @@ void psb_intel_crtc_load_lut(struct drm_crtc *crtc)
836 gma_power_end(dev); 805 gma_power_end(dev);
837 } else { 806 } else {
838 for (i = 0; i < 256; i++) { 807 for (i = 0; i < 256; i++) {
839 dev_priv->regs.psb.save_palette_a[i] = 808 dev_priv->regs.pipe[0].palette[i] =
840 ((psb_intel_crtc->lut_r[i] + 809 ((psb_intel_crtc->lut_r[i] +
841 psb_intel_crtc->lut_adj[i]) << 16) | 810 psb_intel_crtc->lut_adj[i]) << 16) |
842 ((psb_intel_crtc->lut_g[i] + 811 ((psb_intel_crtc->lut_g[i] +
@@ -854,11 +823,10 @@ void psb_intel_crtc_load_lut(struct drm_crtc *crtc)
854static void psb_intel_crtc_save(struct drm_crtc *crtc) 823static void psb_intel_crtc_save(struct drm_crtc *crtc)
855{ 824{
856 struct drm_device *dev = crtc->dev; 825 struct drm_device *dev = crtc->dev;
857 /* struct drm_psb_private *dev_priv = 826 struct drm_psb_private *dev_priv = dev->dev_private;
858 (struct drm_psb_private *)dev->dev_private; */
859 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 827 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
860 struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state; 828 struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
861 int pipeA = (psb_intel_crtc->pipe == 0); 829 const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
862 uint32_t paletteReg; 830 uint32_t paletteReg;
863 int i; 831 int i;
864 832
@@ -867,27 +835,27 @@ static void psb_intel_crtc_save(struct drm_crtc *crtc)
867 return; 835 return;
868 } 836 }
869 837
870 crtc_state->saveDSPCNTR = REG_READ(pipeA ? DSPACNTR : DSPBCNTR); 838 crtc_state->saveDSPCNTR = REG_READ(map->cntr);
871 crtc_state->savePIPECONF = REG_READ(pipeA ? PIPEACONF : PIPEBCONF); 839 crtc_state->savePIPECONF = REG_READ(map->conf);
872 crtc_state->savePIPESRC = REG_READ(pipeA ? PIPEASRC : PIPEBSRC); 840 crtc_state->savePIPESRC = REG_READ(map->src);
873 crtc_state->saveFP0 = REG_READ(pipeA ? FPA0 : FPB0); 841 crtc_state->saveFP0 = REG_READ(map->fp0);
874 crtc_state->saveFP1 = REG_READ(pipeA ? FPA1 : FPB1); 842 crtc_state->saveFP1 = REG_READ(map->fp1);
875 crtc_state->saveDPLL = REG_READ(pipeA ? DPLL_A : DPLL_B); 843 crtc_state->saveDPLL = REG_READ(map->dpll);
876 crtc_state->saveHTOTAL = REG_READ(pipeA ? HTOTAL_A : HTOTAL_B); 844 crtc_state->saveHTOTAL = REG_READ(map->htotal);
877 crtc_state->saveHBLANK = REG_READ(pipeA ? HBLANK_A : HBLANK_B); 845 crtc_state->saveHBLANK = REG_READ(map->hblank);
878 crtc_state->saveHSYNC = REG_READ(pipeA ? HSYNC_A : HSYNC_B); 846 crtc_state->saveHSYNC = REG_READ(map->hsync);
879 crtc_state->saveVTOTAL = REG_READ(pipeA ? VTOTAL_A : VTOTAL_B); 847 crtc_state->saveVTOTAL = REG_READ(map->vtotal);
880 crtc_state->saveVBLANK = REG_READ(pipeA ? VBLANK_A : VBLANK_B); 848 crtc_state->saveVBLANK = REG_READ(map->vblank);
881 crtc_state->saveVSYNC = REG_READ(pipeA ? VSYNC_A : VSYNC_B); 849 crtc_state->saveVSYNC = REG_READ(map->vsync);
882 crtc_state->saveDSPSTRIDE = REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE); 850 crtc_state->saveDSPSTRIDE = REG_READ(map->stride);
883 851
884 /*NOTE: DSPSIZE DSPPOS only for psb*/ 852 /*NOTE: DSPSIZE DSPPOS only for psb*/
885 crtc_state->saveDSPSIZE = REG_READ(pipeA ? DSPASIZE : DSPBSIZE); 853 crtc_state->saveDSPSIZE = REG_READ(map->size);
886 crtc_state->saveDSPPOS = REG_READ(pipeA ? DSPAPOS : DSPBPOS); 854 crtc_state->saveDSPPOS = REG_READ(map->pos);
887 855
888 crtc_state->saveDSPBASE = REG_READ(pipeA ? DSPABASE : DSPBBASE); 856 crtc_state->saveDSPBASE = REG_READ(map->base);
889 857
890 paletteReg = pipeA ? PALETTE_A : PALETTE_B; 858 paletteReg = map->palette;
891 for (i = 0; i < 256; ++i) 859 for (i = 0; i < 256; ++i)
892 crtc_state->savePalette[i] = REG_READ(paletteReg + (i << 2)); 860 crtc_state->savePalette[i] = REG_READ(paletteReg + (i << 2));
893} 861}
@@ -898,12 +866,10 @@ static void psb_intel_crtc_save(struct drm_crtc *crtc)
898static void psb_intel_crtc_restore(struct drm_crtc *crtc) 866static void psb_intel_crtc_restore(struct drm_crtc *crtc)
899{ 867{
900 struct drm_device *dev = crtc->dev; 868 struct drm_device *dev = crtc->dev;
901 /* struct drm_psb_private * dev_priv = 869 struct drm_psb_private *dev_priv = dev->dev_private;
902 (struct drm_psb_private *)dev->dev_private; */
903 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 870 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
904 struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state; 871 struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
905 /* struct drm_crtc_helper_funcs * crtc_funcs = crtc->helper_private; */ 872 const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
906 int pipeA = (psb_intel_crtc->pipe == 0);
907 uint32_t paletteReg; 873 uint32_t paletteReg;
908 int i; 874 int i;
909 875
@@ -913,45 +879,45 @@ static void psb_intel_crtc_restore(struct drm_crtc *crtc)
913 } 879 }
914 880
915 if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) { 881 if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
916 REG_WRITE(pipeA ? DPLL_A : DPLL_B, 882 REG_WRITE(map->dpll,
917 crtc_state->saveDPLL & ~DPLL_VCO_ENABLE); 883 crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
918 REG_READ(pipeA ? DPLL_A : DPLL_B); 884 REG_READ(map->dpll);
919 udelay(150); 885 udelay(150);
920 } 886 }
921 887
922 REG_WRITE(pipeA ? FPA0 : FPB0, crtc_state->saveFP0); 888 REG_WRITE(map->fp0, crtc_state->saveFP0);
923 REG_READ(pipeA ? FPA0 : FPB0); 889 REG_READ(map->fp0);
924 890
925 REG_WRITE(pipeA ? FPA1 : FPB1, crtc_state->saveFP1); 891 REG_WRITE(map->fp1, crtc_state->saveFP1);
926 REG_READ(pipeA ? FPA1 : FPB1); 892 REG_READ(map->fp1);
927 893
928 REG_WRITE(pipeA ? DPLL_A : DPLL_B, crtc_state->saveDPLL); 894 REG_WRITE(map->dpll, crtc_state->saveDPLL);
929 REG_READ(pipeA ? DPLL_A : DPLL_B); 895 REG_READ(map->dpll);
930 udelay(150); 896 udelay(150);
931 897
932 REG_WRITE(pipeA ? HTOTAL_A : HTOTAL_B, crtc_state->saveHTOTAL); 898 REG_WRITE(map->htotal, crtc_state->saveHTOTAL);
933 REG_WRITE(pipeA ? HBLANK_A : HBLANK_B, crtc_state->saveHBLANK); 899 REG_WRITE(map->hblank, crtc_state->saveHBLANK);
934 REG_WRITE(pipeA ? HSYNC_A : HSYNC_B, crtc_state->saveHSYNC); 900 REG_WRITE(map->hsync, crtc_state->saveHSYNC);
935 REG_WRITE(pipeA ? VTOTAL_A : VTOTAL_B, crtc_state->saveVTOTAL); 901 REG_WRITE(map->vtotal, crtc_state->saveVTOTAL);
936 REG_WRITE(pipeA ? VBLANK_A : VBLANK_B, crtc_state->saveVBLANK); 902 REG_WRITE(map->vblank, crtc_state->saveVBLANK);
937 REG_WRITE(pipeA ? VSYNC_A : VSYNC_B, crtc_state->saveVSYNC); 903 REG_WRITE(map->vsync, crtc_state->saveVSYNC);
938 REG_WRITE(pipeA ? DSPASTRIDE : DSPBSTRIDE, crtc_state->saveDSPSTRIDE); 904 REG_WRITE(map->stride, crtc_state->saveDSPSTRIDE);
939 905
940 REG_WRITE(pipeA ? DSPASIZE : DSPBSIZE, crtc_state->saveDSPSIZE); 906 REG_WRITE(map->size, crtc_state->saveDSPSIZE);
941 REG_WRITE(pipeA ? DSPAPOS : DSPBPOS, crtc_state->saveDSPPOS); 907 REG_WRITE(map->pos, crtc_state->saveDSPPOS);
942 908
943 REG_WRITE(pipeA ? PIPEASRC : PIPEBSRC, crtc_state->savePIPESRC); 909 REG_WRITE(map->src, crtc_state->savePIPESRC);
944 REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE); 910 REG_WRITE(map->base, crtc_state->saveDSPBASE);
945 REG_WRITE(pipeA ? PIPEACONF : PIPEBCONF, crtc_state->savePIPECONF); 911 REG_WRITE(map->conf, crtc_state->savePIPECONF);
946 912
947 psb_intel_wait_for_vblank(dev); 913 psb_intel_wait_for_vblank(dev);
948 914
949 REG_WRITE(pipeA ? DSPACNTR : DSPBCNTR, crtc_state->saveDSPCNTR); 915 REG_WRITE(map->cntr, crtc_state->saveDSPCNTR);
950 REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE); 916 REG_WRITE(map->base, crtc_state->saveDSPBASE);
951 917
952 psb_intel_wait_for_vblank(dev); 918 psb_intel_wait_for_vblank(dev);
953 919
954 paletteReg = pipeA ? PALETTE_A : PALETTE_B; 920 paletteReg = map->palette;
955 for (i = 0; i < 256; ++i) 921 for (i = 0; i < 256; ++i)
956 REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]); 922 REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]);
957} 923}
@@ -962,6 +928,7 @@ static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
962 uint32_t width, uint32_t height) 928 uint32_t width, uint32_t height)
963{ 929{
964 struct drm_device *dev = crtc->dev; 930 struct drm_device *dev = crtc->dev;
931 struct drm_psb_private *dev_priv = dev->dev_private;
965 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 932 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
966 int pipe = psb_intel_crtc->pipe; 933 int pipe = psb_intel_crtc->pipe;
967 uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR; 934 uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
@@ -969,8 +936,10 @@ static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
969 uint32_t temp; 936 uint32_t temp;
970 size_t addr = 0; 937 size_t addr = 0;
971 struct gtt_range *gt; 938 struct gtt_range *gt;
939 struct gtt_range *cursor_gt = psb_intel_crtc->cursor_gt;
972 struct drm_gem_object *obj; 940 struct drm_gem_object *obj;
973 int ret; 941 void *tmp_dst, *tmp_src;
942 int ret, i, cursor_pages;
974 943
975 /* if we want to turn off the cursor, ignore width and height */ 944 /* if we want to turn off the cursor, ignore width and height */
976 if (!handle) { 945 if (!handle) {
@@ -1019,10 +988,32 @@ static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
1019 return ret; 988 return ret;
1020 } 989 }
1021 990
991 if (dev_priv->ops->cursor_needs_phys) {
992 if (cursor_gt == NULL) {
993 dev_err(dev->dev, "No hardware cursor mem available");
994 return -ENOMEM;
995 }
1022 996
1023 addr = gt->offset; /* Or resource.start ??? */ 997 /* Prevent overflow */
998 if (gt->npage > 4)
999 cursor_pages = 4;
1000 else
1001 cursor_pages = gt->npage;
1002
1003 /* Copy the cursor to cursor mem */
1004 tmp_dst = dev_priv->vram_addr + cursor_gt->offset;
1005 for (i = 0; i < cursor_pages; i++) {
1006 tmp_src = kmap(gt->pages[i]);
1007 memcpy(tmp_dst, tmp_src, PAGE_SIZE);
1008 kunmap(gt->pages[i]);
1009 tmp_dst += PAGE_SIZE;
1010 }
1024 1011
1025 psb_intel_crtc->cursor_addr = addr; 1012 addr = psb_intel_crtc->cursor_addr;
1013 } else {
1014 addr = gt->offset; /* Or resource.start ??? */
1015 psb_intel_crtc->cursor_addr = addr;
1016 }
1026 1017
1027 temp = 0; 1018 temp = 0;
1028 /* set the pipe for the cursor */ 1019 /* set the pipe for the cursor */
@@ -1115,34 +1106,30 @@ static int psb_intel_crtc_clock_get(struct drm_device *dev,
1115 struct drm_crtc *crtc) 1106 struct drm_crtc *crtc)
1116{ 1107{
1117 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 1108 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
1109 struct drm_psb_private *dev_priv = dev->dev_private;
1118 int pipe = psb_intel_crtc->pipe; 1110 int pipe = psb_intel_crtc->pipe;
1111 const struct psb_offset *map = &dev_priv->regmap[pipe];
1119 u32 dpll; 1112 u32 dpll;
1120 u32 fp; 1113 u32 fp;
1121 struct psb_intel_clock_t clock; 1114 struct psb_intel_clock_t clock;
1122 bool is_lvds; 1115 bool is_lvds;
1123 struct drm_psb_private *dev_priv = dev->dev_private; 1116 struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
1124 1117
1125 if (gma_power_begin(dev, false)) { 1118 if (gma_power_begin(dev, false)) {
1126 dpll = REG_READ((pipe == 0) ? DPLL_A : DPLL_B); 1119 dpll = REG_READ(map->dpll);
1127 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) 1120 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
1128 fp = REG_READ((pipe == 0) ? FPA0 : FPB0); 1121 fp = REG_READ(map->fp0);
1129 else 1122 else
1130 fp = REG_READ((pipe == 0) ? FPA1 : FPB1); 1123 fp = REG_READ(map->fp1);
1131 is_lvds = (pipe == 1) && (REG_READ(LVDS) & LVDS_PORT_EN); 1124 is_lvds = (pipe == 1) && (REG_READ(LVDS) & LVDS_PORT_EN);
1132 gma_power_end(dev); 1125 gma_power_end(dev);
1133 } else { 1126 } else {
1134 dpll = (pipe == 0) ? 1127 dpll = p->dpll;
1135 dev_priv->regs.psb.saveDPLL_A :
1136 dev_priv->regs.psb.saveDPLL_B;
1137 1128
1138 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) 1129 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
1139 fp = (pipe == 0) ? 1130 fp = p->fp0;
1140 dev_priv->regs.psb.saveFPA0 :
1141 dev_priv->regs.psb.saveFPB0;
1142 else 1131 else
1143 fp = (pipe == 0) ? 1132 fp = p->fp1;
1144 dev_priv->regs.psb.saveFPA1 :
1145 dev_priv->regs.psb.saveFPB1;
1146 1133
1147 is_lvds = (pipe == 1) && (dev_priv->regs.psb.saveLVDS & 1134 is_lvds = (pipe == 1) && (dev_priv->regs.psb.saveLVDS &
1148 LVDS_PORT_EN); 1135 LVDS_PORT_EN);
@@ -1202,26 +1189,20 @@ struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
1202 int vtot; 1189 int vtot;
1203 int vsync; 1190 int vsync;
1204 struct drm_psb_private *dev_priv = dev->dev_private; 1191 struct drm_psb_private *dev_priv = dev->dev_private;
1192 struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
1193 const struct psb_offset *map = &dev_priv->regmap[pipe];
1205 1194
1206 if (gma_power_begin(dev, false)) { 1195 if (gma_power_begin(dev, false)) {
1207 htot = REG_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B); 1196 htot = REG_READ(map->htotal);
1208 hsync = REG_READ((pipe == 0) ? HSYNC_A : HSYNC_B); 1197 hsync = REG_READ(map->hsync);
1209 vtot = REG_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B); 1198 vtot = REG_READ(map->vtotal);
1210 vsync = REG_READ((pipe == 0) ? VSYNC_A : VSYNC_B); 1199 vsync = REG_READ(map->vsync);
1211 gma_power_end(dev); 1200 gma_power_end(dev);
1212 } else { 1201 } else {
1213 htot = (pipe == 0) ? 1202 htot = p->htotal;
1214 dev_priv->regs.psb.saveHTOTAL_A : 1203 hsync = p->hsync;
1215 dev_priv->regs.psb.saveHTOTAL_B; 1204 vtot = p->vtotal;
1216 hsync = (pipe == 0) ? 1205 vsync = p->vsync;
1217 dev_priv->regs.psb.saveHSYNC_A :
1218 dev_priv->regs.psb.saveHSYNC_B;
1219 vtot = (pipe == 0) ?
1220 dev_priv->regs.psb.saveVTOTAL_A :
1221 dev_priv->regs.psb.saveVTOTAL_B;
1222 vsync = (pipe == 0) ?
1223 dev_priv->regs.psb.saveVSYNC_A :
1224 dev_priv->regs.psb.saveVSYNC_B;
1225 } 1206 }
1226 1207
1227 mode = kzalloc(sizeof(*mode), GFP_KERNEL); 1208 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
@@ -1257,6 +1238,9 @@ void psb_intel_crtc_destroy(struct drm_crtc *crtc)
1257 drm_gem_object_unreference(psb_intel_crtc->cursor_obj); 1238 drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
1258 psb_intel_crtc->cursor_obj = NULL; 1239 psb_intel_crtc->cursor_obj = NULL;
1259 } 1240 }
1241
1242 if (psb_intel_crtc->cursor_gt != NULL)
1243 psb_gtt_free_range(crtc->dev, psb_intel_crtc->cursor_gt);
1260 kfree(psb_intel_crtc->crtc_state); 1244 kfree(psb_intel_crtc->crtc_state);
1261 drm_crtc_cleanup(crtc); 1245 drm_crtc_cleanup(crtc);
1262 kfree(psb_intel_crtc); 1246 kfree(psb_intel_crtc);
@@ -1285,13 +1269,33 @@ const struct drm_crtc_funcs psb_intel_crtc_funcs = {
1285 * Set the default value of cursor control and base register 1269 * Set the default value of cursor control and base register
1286 * to zero. This is a workaround for h/w defect on Oaktrail 1270 * to zero. This is a workaround for h/w defect on Oaktrail
1287 */ 1271 */
1288static void psb_intel_cursor_init(struct drm_device *dev, int pipe) 1272static void psb_intel_cursor_init(struct drm_device *dev,
1273 struct psb_intel_crtc *psb_intel_crtc)
1289{ 1274{
1275 struct drm_psb_private *dev_priv = dev->dev_private;
1290 u32 control[3] = { CURACNTR, CURBCNTR, CURCCNTR }; 1276 u32 control[3] = { CURACNTR, CURBCNTR, CURCCNTR };
1291 u32 base[3] = { CURABASE, CURBBASE, CURCBASE }; 1277 u32 base[3] = { CURABASE, CURBBASE, CURCBASE };
1278 struct gtt_range *cursor_gt;
1279
1280 if (dev_priv->ops->cursor_needs_phys) {
1281 /* Allocate 4 pages of stolen mem for a hardware cursor. That
1282 * is enough for the 64 x 64 ARGB cursors we support.
1283 */
1284 cursor_gt = psb_gtt_alloc_range(dev, 4 * PAGE_SIZE, "cursor", 1);
1285 if (!cursor_gt) {
1286 psb_intel_crtc->cursor_gt = NULL;
1287 goto out;
1288 }
1289 psb_intel_crtc->cursor_gt = cursor_gt;
1290 psb_intel_crtc->cursor_addr = dev_priv->stolen_base +
1291 cursor_gt->offset;
1292 } else {
1293 psb_intel_crtc->cursor_gt = NULL;
1294 }
1292 1295
1293 REG_WRITE(control[pipe], 0); 1296out:
1294 REG_WRITE(base[pipe], 0); 1297 REG_WRITE(control[psb_intel_crtc->pipe], 0);
1298 REG_WRITE(base[psb_intel_crtc->pipe], 0);
1295} 1299}
1296 1300
1297void psb_intel_crtc_init(struct drm_device *dev, int pipe, 1301void psb_intel_crtc_init(struct drm_device *dev, int pipe,
@@ -1357,7 +1361,7 @@ void psb_intel_crtc_init(struct drm_device *dev, int pipe,
1357 psb_intel_crtc->mode_set.connectors = 1361 psb_intel_crtc->mode_set.connectors =
1358 (struct drm_connector **) (psb_intel_crtc + 1); 1362 (struct drm_connector **) (psb_intel_crtc + 1);
1359 psb_intel_crtc->mode_set.num_connectors = 0; 1363 psb_intel_crtc->mode_set.num_connectors = 0;
1360 psb_intel_cursor_init(dev, pipe); 1364 psb_intel_cursor_init(dev, psb_intel_crtc);
1361} 1365}
1362 1366
1363int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, 1367int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
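A sizing note on the psb_intel_cursor_init() change above: a 64 x 64 ARGB cursor occupies 64 * 64 * 4 = 16384 bytes, which is exactly the 4 * PAGE_SIZE of stolen memory the new code reserves. A standalone sketch of that arithmetic (not driver code; PAGE_SIZE is hard-coded to the 4 KiB the driver assumes):

    #include <stdio.h>

    #define PAGE_SIZE 4096u /* assumption: 4 KiB pages */

    /* Round a byte count up to whole pages, as a GTT range allocation must. */
    static unsigned int pages_needed(unsigned int bytes)
    {
            return (bytes + PAGE_SIZE - 1) / PAGE_SIZE;
    }

    int main(void)
    {
            unsigned int cursor_bytes = 64 * 64 * 4; /* 64x64 pixels, 4-byte ARGB */

            printf("cursor needs %u bytes = %u pages\n",
                   cursor_bytes, pages_needed(cursor_bytes));
            return 0; /* prints: cursor needs 16384 bytes = 4 pages */
    }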
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index f40535e56689..2515f83248cb 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -106,11 +106,6 @@ struct psb_intel_mode_device {
106 size_t(*bo_offset) (struct drm_device *dev, void *bo); 106 size_t(*bo_offset) (struct drm_device *dev, void *bo);
107 107
108 /* 108 /*
109 * Cursor (Can go ?)
110 */
111 int cursor_needs_physical;
112
113 /*
114 * LVDS info 109 * LVDS info
115 */ 110 */
116 int backlight_duty_cycle; /* restore backlight to this value */ 111 int backlight_duty_cycle; /* restore backlight to this value */
@@ -176,6 +171,7 @@ struct psb_intel_crtc {
176 int pipe; 171 int pipe;
177 int plane; 172 int plane;
178 uint32_t cursor_addr; 173 uint32_t cursor_addr;
174 struct gtt_range *cursor_gt;
179 u8 lut_r[256], lut_g[256], lut_b[256]; 175 u8 lut_r[256], lut_g[256], lut_b[256];
180 u8 lut_adj[256]; 176 u8 lut_adj[256];
181 struct psb_intel_framebuffer *fbdev_fb; 177 struct psb_intel_framebuffer *fbdev_fb;
@@ -193,6 +189,9 @@ struct psb_intel_crtc {
193 /*crtc mode setting flags*/ 189 /*crtc mode setting flags*/
194 u32 mode_flags; 190 u32 mode_flags;
195 191
192 bool active;
193 bool crtc_enable;
194
196 /* Saved Crtc HW states */ 195 /* Saved Crtc HW states */
197 struct psb_intel_crtc_state *crtc_state; 196 struct psb_intel_crtc_state *crtc_state;
198}; 197};
diff --git a/drivers/gpu/drm/gma500/psb_intel_reg.h b/drivers/gpu/drm/gma500/psb_intel_reg.h
index e89d3a2e8fdc..8e8c8efb0a89 100644
--- a/drivers/gpu/drm/gma500/psb_intel_reg.h
+++ b/drivers/gpu/drm/gma500/psb_intel_reg.h
@@ -91,6 +91,9 @@
91 91
92#define BLC_PWM_CTL 0x61254 92#define BLC_PWM_CTL 0x61254
93#define BLC_PWM_CTL2 0x61250 93#define BLC_PWM_CTL2 0x61250
94#define PWM_ENABLE (1 << 31)
95#define PWM_LEGACY_MODE (1 << 30)
96#define PWM_PIPE_B (1 << 29)
94#define BLC_PWM_CTL_C 0x62254 97#define BLC_PWM_CTL_C 0x62254
95#define BLC_PWM_CTL2_C 0x62250 98#define BLC_PWM_CTL2_C 0x62250
96#define BACKLIGHT_MODULATION_FREQ_SHIFT (17) 99#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
@@ -216,7 +219,7 @@
216#define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */ 219#define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */
217#define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */ 220#define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
218#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */ 221#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
219#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */ 222#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
220#define DPLL_LOCK (1 << 15) /* CDV */ 223#define DPLL_LOCK (1 << 15) /* CDV */
221 224
222/* 225/*
@@ -343,6 +346,9 @@
343#define FP_M2_DIV_SHIFT 0 346#define FP_M2_DIV_SHIFT 0
344 347
345#define PORT_HOTPLUG_EN 0x61110 348#define PORT_HOTPLUG_EN 0x61110
349#define HDMIB_HOTPLUG_INT_EN (1 << 29)
350#define HDMIC_HOTPLUG_INT_EN (1 << 28)
351#define HDMID_HOTPLUG_INT_EN (1 << 27)
346#define SDVOB_HOTPLUG_INT_EN (1 << 26) 352#define SDVOB_HOTPLUG_INT_EN (1 << 26)
347#define SDVOC_HOTPLUG_INT_EN (1 << 25) 353#define SDVOC_HOTPLUG_INT_EN (1 << 25)
348#define TV_HOTPLUG_INT_EN (1 << 18) 354#define TV_HOTPLUG_INT_EN (1 << 18)
@@ -501,10 +507,12 @@
501#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL << 17) 507#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL << 17)
502#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL << 18) 508#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL << 18)
503#define PIPE_TE_ENABLE (1UL << 22) 509#define PIPE_TE_ENABLE (1UL << 22)
510#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL << 22)
504#define PIPE_DPST_EVENT_ENABLE (1UL << 23) 511#define PIPE_DPST_EVENT_ENABLE (1UL << 23)
505#define PIPE_VSYNC_ENABL (1UL << 25) 512#define PIPE_VSYNC_ENABL (1UL << 25)
506#define PIPE_HDMI_AUDIO_UNDERRUN (1UL << 26) 513#define PIPE_HDMI_AUDIO_UNDERRUN (1UL << 26)
507#define PIPE_HDMI_AUDIO_BUFFER_DONE (1UL << 27) 514#define PIPE_HDMI_AUDIO_BUFFER_DONE (1UL << 27)
515#define PIPE_FIFO_UNDERRUN (1UL << 31)
508#define PIPE_HDMI_AUDIO_INT_MASK (PIPE_HDMI_AUDIO_UNDERRUN | \ 516#define PIPE_HDMI_AUDIO_INT_MASK (PIPE_HDMI_AUDIO_UNDERRUN | \
509 PIPE_HDMI_AUDIO_BUFFER_DONE) 517 PIPE_HDMI_AUDIO_BUFFER_DONE)
510#define PIPE_EVENT_MASK ((1 << 29)|(1 << 28)|(1 << 27)|(1 << 26)|(1 << 24)|(1 << 23)|(1 << 22)|(1 << 21)|(1 << 20)|(1 << 16)) 518#define PIPE_EVENT_MASK ((1 << 29)|(1 << 28)|(1 << 27)|(1 << 26)|(1 << 24)|(1 << 23)|(1 << 22)|(1 << 21)|(1 << 20)|(1 << 16))
@@ -569,12 +577,27 @@ struct dpst_guardband {
569#define PIPE_PIXEL_MASK 0x00ffffff 577#define PIPE_PIXEL_MASK 0x00ffffff
570#define PIPE_PIXEL_SHIFT 0 578#define PIPE_PIXEL_SHIFT 0
571 579
580#define FW_BLC_SELF 0x20e0
581#define FW_BLC_SELF_EN (1<<15)
582
572#define DSPARB 0x70030 583#define DSPARB 0x70030
573#define DSPFW1 0x70034 584#define DSPFW1 0x70034
585#define DSP_FIFO_SR_WM_MASK 0xFF800000
586#define DSP_FIFO_SR_WM_SHIFT 23
587#define CURSOR_B_FIFO_WM_MASK 0x003F0000
588#define CURSOR_B_FIFO_WM_SHIFT 16
574#define DSPFW2 0x70038 589#define DSPFW2 0x70038
590#define CURSOR_A_FIFO_WM_MASK 0x3F00
591#define CURSOR_A_FIFO_WM_SHIFT 8
592#define DSP_PLANE_C_FIFO_WM_MASK 0x7F
593#define DSP_PLANE_C_FIFO_WM_SHIFT 0
575#define DSPFW3 0x7003c 594#define DSPFW3 0x7003c
576#define DSPFW4 0x70050 595#define DSPFW4 0x70050
577#define DSPFW5 0x70054 596#define DSPFW5 0x70054
597#define DSP_PLANE_B_FIFO_WM1_SHIFT 24
598#define DSP_PLANE_A_FIFO_WM1_SHIFT 16
599#define CURSOR_B_FIFO_WM1_SHIFT 8
600#define CURSOR_FIFO_SR_WM1_SHIFT 0
578#define DSPFW6 0x70058 601#define DSPFW6 0x70058
579#define DSPCHICKENBIT 0x70400 602#define DSPCHICKENBIT 0x70400
580#define DSPACNTR 0x70180 603#define DSPACNTR 0x70180
@@ -1290,6 +1313,15 @@ No status bits are changed.
1290#define SB_N_CB_TUNE_MASK PSB_MASK(25, 24) 1313#define SB_N_CB_TUNE_MASK PSB_MASK(25, 24)
1291#define SB_N_CB_TUNE_SHIFT 24 1314#define SB_N_CB_TUNE_SHIFT 24
1292 1315
1316/* bits 14:13 select the reference clock for pipe A/B */
1317#define SB_REF_DPLLA 0x8010
1318#define SB_REF_DPLLB 0x8030
1319#define REF_CLK_MASK (0x3 << 13)
1320#define REF_CLK_CORE (0 << 13)
1321#define REF_CLK_DPLL (1 << 13)
1322#define REF_CLK_DPLLA (2 << 13)
1323/* DPLL B uses the reference clock from DPLL A when (2 << 13) is selected */
1324
1293#define _SB_REF_A 0x8018 1325#define _SB_REF_A 0x8018
1294#define _SB_REF_B 0x8038 1326#define _SB_REF_B 0x8038
1295#define SB_REF_SFR(pipe) _PIPE(pipe, _SB_REF_A, _SB_REF_B) 1327#define SB_REF_SFR(pipe) _PIPE(pipe, _SB_REF_A, _SB_REF_B)
@@ -1313,6 +1345,7 @@ No status bits are changed.
1313 1345
1314#define LANE_PLL_MASK (0x7 << 20) 1346#define LANE_PLL_MASK (0x7 << 20)
1315#define LANE_PLL_ENABLE (0x3 << 20) 1347#define LANE_PLL_ENABLE (0x3 << 20)
1348#define LANE_PLL_PIPE(p) (((p) == 0) ? (1 << 21) : (0 << 21))
1316 1349
1317 1350
1318#endif 1351#endif
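The new DSPFW watermark definitions above follow the usual mask/shift convention: a field is read by masking and shifting down, and written by clearing the masked bits before OR-ing in the shifted value. A minimal sketch using two of the masks added above (get_field/set_field are illustrative helpers, not functions from this driver):

    #include <stdio.h>
    #include <stdint.h>

    /* Two of the field definitions added above. */
    #define DSP_FIFO_SR_WM_MASK 0xFF800000u
    #define DSP_FIFO_SR_WM_SHIFT 23
    #define CURSOR_B_FIFO_WM_MASK 0x003F0000u
    #define CURSOR_B_FIFO_WM_SHIFT 16

    /* Read a field: mask first, then shift down. */
    static uint32_t get_field(uint32_t reg, uint32_t mask, int shift)
    {
            return (reg & mask) >> shift;
    }

    /* Write a field: clear the masked bits, then OR in the shifted value. */
    static uint32_t set_field(uint32_t reg, uint32_t mask, int shift, uint32_t val)
    {
            return (reg & ~mask) | ((val << shift) & mask);
    }

    int main(void)
    {
            uint32_t dspfw1 = 0;

            dspfw1 = set_field(dspfw1, DSP_FIFO_SR_WM_MASK, DSP_FIFO_SR_WM_SHIFT, 0x40);
            dspfw1 = set_field(dspfw1, CURSOR_B_FIFO_WM_MASK, CURSOR_B_FIFO_WM_SHIFT, 0x10);
            printf("SR WM = 0x%x, cursor B WM = 0x%x\n",
                   get_field(dspfw1, DSP_FIFO_SR_WM_MASK, DSP_FIFO_SR_WM_SHIFT),
                   get_field(dspfw1, CURSOR_B_FIFO_WM_MASK, CURSOR_B_FIFO_WM_SHIFT));
            return 0; /* SR WM = 0x40, cursor B WM = 0x10 */
    }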
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index 36330cabcea2..d39b15be7649 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -1141,7 +1141,6 @@ static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
1141static int psb_intel_sdvo_mode_valid(struct drm_connector *connector, 1141static int psb_intel_sdvo_mode_valid(struct drm_connector *connector,
1142 struct drm_display_mode *mode) 1142 struct drm_display_mode *mode)
1143{ 1143{
1144 struct drm_psb_private *dev_priv = connector->dev->dev_private;
1145 struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector); 1144 struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
1146 1145
1147 if (mode->flags & DRM_MODE_FLAG_DBLSCAN) 1146 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -1161,11 +1160,6 @@ static int psb_intel_sdvo_mode_valid(struct drm_connector *connector,
1161 return MODE_PANEL; 1160 return MODE_PANEL;
1162 } 1161 }
1163 1162
1164 /* We assume worst case scenario of 32 bpp here, since we don't know */
1165 if ((ALIGN(mode->hdisplay * 4, 64) * mode->vdisplay) >
1166 dev_priv->vram_stolen_size)
1167 return MODE_MEM;
1168
1169 return MODE_OK; 1163 return MODE_OK;
1170} 1164}
1171 1165
@@ -2044,8 +2038,7 @@ psb_intel_sdvo_add_hdmi_properties(struct psb_intel_sdvo_connector *connector)
2044 struct drm_device *dev = connector->base.base.dev; 2038 struct drm_device *dev = connector->base.base.dev;
2045 2039
2046 intel_attach_force_audio_property(&connector->base.base); 2040 intel_attach_force_audio_property(&connector->base.base);
2047 if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev)) 2041 intel_attach_broadcast_rgb_property(&connector->base.base);
2048 intel_attach_broadcast_rgb_property(&connector->base.base);
2049 */ 2042 */
2050} 2043}
2051 2044
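For context on the hunk above that drops the MODE_MEM test: the removed code estimated the worst-case framebuffer footprint at 32 bpp, a 64-byte-aligned stride times the vertical resolution, and rejected any mode that would not fit in stolen memory. A standalone rendering of that arithmetic (ALIGN_UP stands in for the kernel's ALIGN macro):

    #include <stdio.h>

    /* Round x up to a multiple of a (a power of two). */
    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
            unsigned int hdisplay = 1920, vdisplay = 1080;

            /* Worst case assumed 32 bpp: 4 bytes per pixel, stride rounded
             * up to 64 bytes, times the number of lines. */
            unsigned long fb_bytes =
                    (unsigned long)ALIGN_UP(hdisplay * 4, 64) * vdisplay;

            printf("1920x1080 @ 32bpp needs %lu bytes (%lu MiB rounded down)\n",
                   fb_bytes, fb_bytes >> 20);
            return 0; /* 8294400 bytes, just under 8 MiB */
    }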
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
index 1869586457b1..8652cdf3f03f 100644
--- a/drivers/gpu/drm/gma500/psb_irq.c
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -190,6 +190,9 @@ static void mid_pipe_event_handler(struct drm_device *dev, int pipe)
190 */ 190 */
191static void psb_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat) 191static void psb_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat)
192{ 192{
193 if (vdc_stat & _PSB_IRQ_ASLE)
194 psb_intel_opregion_asle_intr(dev);
195
193 if (vdc_stat & _PSB_VSYNC_PIPEA_FLAG) 196 if (vdc_stat & _PSB_VSYNC_PIPEA_FLAG)
194 mid_pipe_event_handler(dev, 0); 197 mid_pipe_event_handler(dev, 0);
195 198
@@ -199,11 +202,9 @@ static void psb_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat)
199 202
200irqreturn_t psb_irq_handler(DRM_IRQ_ARGS) 203irqreturn_t psb_irq_handler(DRM_IRQ_ARGS)
201{ 204{
202 struct drm_device *dev = (struct drm_device *) arg; 205 struct drm_device *dev = arg;
203 struct drm_psb_private *dev_priv = 206 struct drm_psb_private *dev_priv = dev->dev_private;
204 (struct drm_psb_private *) dev->dev_private; 207 uint32_t vdc_stat, dsp_int = 0, sgx_int = 0, hotplug_int = 0;
205
206 uint32_t vdc_stat, dsp_int = 0, sgx_int = 0;
207 int handled = 0; 208 int handled = 0;
208 209
209 spin_lock(&dev_priv->irqmask_lock); 210 spin_lock(&dev_priv->irqmask_lock);
@@ -220,6 +221,8 @@ irqreturn_t psb_irq_handler(DRM_IRQ_ARGS)
220 221
221 if (vdc_stat & _PSB_IRQ_SGX_FLAG) 222 if (vdc_stat & _PSB_IRQ_SGX_FLAG)
222 sgx_int = 1; 223 sgx_int = 1;
224 if (vdc_stat & _PSB_IRQ_DISP_HOTSYNC)
225 hotplug_int = 1;
223 226
224 vdc_stat &= dev_priv->vdc_irq_mask; 227 vdc_stat &= dev_priv->vdc_irq_mask;
225 spin_unlock(&dev_priv->irqmask_lock); 228 spin_unlock(&dev_priv->irqmask_lock);
@@ -241,6 +244,13 @@ irqreturn_t psb_irq_handler(DRM_IRQ_ARGS)
241 handled = 1; 244 handled = 1;
242 } 245 }
243 246
247 /* Note: this bit has other meanings on some devices, so we will
248 need to address that later if it ever matters */
249 if (hotplug_int && dev_priv->ops->hotplug) {
250 handled = dev_priv->ops->hotplug(dev);
251 REG_WRITE(PORT_HOTPLUG_STAT, REG_READ(PORT_HOTPLUG_STAT));
252 }
253
244 PSB_WVDC32(vdc_stat, PSB_INT_IDENTITY_R); 254 PSB_WVDC32(vdc_stat, PSB_INT_IDENTITY_R);
245 (void) PSB_RVDC32(PSB_INT_IDENTITY_R); 255 (void) PSB_RVDC32(PSB_INT_IDENTITY_R);
246 DRM_READMEMORYBARRIER(); 256 DRM_READMEMORYBARRIER();
@@ -273,6 +283,11 @@ void psb_irq_preinstall(struct drm_device *dev)
273 dev_priv->vdc_irq_mask |= _MDFLD_PIPEC_EVENT_FLAG; 283 dev_priv->vdc_irq_mask |= _MDFLD_PIPEC_EVENT_FLAG;
274 */ 284 */
275 285
286 /* Revisit this area - want per device masks ? */
287 if (dev_priv->ops->hotplug)
288 dev_priv->vdc_irq_mask |= _PSB_IRQ_DISP_HOTSYNC;
289 dev_priv->vdc_irq_mask |= _PSB_IRQ_ASLE;
290
276 /* This register is safe even if display island is off */ 291 /* This register is safe even if display island is off */
277 PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R); 292 PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
278 spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags); 293 spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
@@ -305,18 +320,23 @@ int psb_irq_postinstall(struct drm_device *dev)
305 else 320 else
306 psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE); 321 psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
307 322
323 if (dev_priv->ops->hotplug_enable)
324 dev_priv->ops->hotplug_enable(dev, true);
325
308 spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags); 326 spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
309 return 0; 327 return 0;
310} 328}
311 329
312void psb_irq_uninstall(struct drm_device *dev) 330void psb_irq_uninstall(struct drm_device *dev)
313{ 331{
314 struct drm_psb_private *dev_priv = 332 struct drm_psb_private *dev_priv = dev->dev_private;
315 (struct drm_psb_private *) dev->dev_private;
316 unsigned long irqflags; 333 unsigned long irqflags;
317 334
318 spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags); 335 spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
319 336
337 if (dev_priv->ops->hotplug_enable)
338 dev_priv->ops->hotplug_enable(dev, false);
339
320 PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM); 340 PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
321 341
322 if (dev->vblank_enabled[0]) 342 if (dev->vblank_enabled[0])
@@ -406,7 +426,7 @@ void psb_irq_turn_off_dpst(struct drm_device *dev)
406 psb_disable_pipestat(dev_priv, 0, PIPE_DPST_EVENT_ENABLE); 426 psb_disable_pipestat(dev_priv, 0, PIPE_DPST_EVENT_ENABLE);
407 427
408 pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC); 428 pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
409 PSB_WVDC32(pwm_reg & !(PWM_PHASEIN_INT_ENABLE), 429 PSB_WVDC32(pwm_reg & ~PWM_PHASEIN_INT_ENABLE,
410 PWM_CONTROL_LOGIC); 430 PWM_CONTROL_LOGIC);
411 pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC); 431 pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
412 432
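The psb_irq_turn_off_dpst() fix above replaces a logical NOT with a bitwise NOT. !(bit) evaluates to 0 for any non-zero constant, so the buggy read-modify-write cleared the entire register instead of just the interrupt-enable bit. A two-line demonstration (the bit position here is a placeholder, not the real PWM_CONTROL_LOGIC layout):

    #include <stdio.h>
    #include <stdint.h>

    /* Placeholder bit position; the real value lives in the driver headers. */
    #define PWM_PHASEIN_INT_ENABLE (1u << 25)

    int main(void)
    {
            uint32_t pwm_reg = 0xffffffffu;

            /* Buggy: logical NOT of a non-zero constant is 0, so the AND
             * wipes every bit that was set in the register. */
            printf("reg & !(bit) = 0x%08x\n", pwm_reg & !(PWM_PHASEIN_INT_ENABLE));

            /* Fixed: bitwise NOT clears only the interrupt-enable bit. */
            printf("reg & ~bit   = 0x%08x\n", pwm_reg & ~PWM_PHASEIN_INT_ENABLE);
            return 0; /* prints 0x00000000, then 0xfdffffff */
    }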
diff --git a/drivers/gpu/drm/gma500/psb_lid.c b/drivers/gpu/drm/gma500/psb_lid.c
index b867aabe6bf3..1d2ebb5e530f 100644
--- a/drivers/gpu/drm/gma500/psb_lid.c
+++ b/drivers/gpu/drm/gma500/psb_lid.c
@@ -29,7 +29,7 @@ static void psb_lid_timer_func(unsigned long data)
29 struct drm_device *dev = (struct drm_device *)dev_priv->dev; 29 struct drm_device *dev = (struct drm_device *)dev_priv->dev;
30 struct timer_list *lid_timer = &dev_priv->lid_timer; 30 struct timer_list *lid_timer = &dev_priv->lid_timer;
31 unsigned long irq_flags; 31 unsigned long irq_flags;
32 u32 *lid_state = dev_priv->lid_state; 32 u32 __iomem *lid_state = dev_priv->opregion.lid_state;
33 u32 pp_status; 33 u32 pp_status;
34 34
35 if (readl(lid_state) == dev_priv->lid_last_state) 35 if (readl(lid_state) == dev_priv->lid_last_state)
@@ -40,10 +40,16 @@ static void psb_lid_timer_func(unsigned long data)
40 REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) | POWER_TARGET_ON); 40 REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) | POWER_TARGET_ON);
41 do { 41 do {
42 pp_status = REG_READ(PP_STATUS); 42 pp_status = REG_READ(PP_STATUS);
43 } while ((pp_status & PP_ON) == 0); 43 } while ((pp_status & PP_ON) == 0 &&
44 (pp_status & PP_SEQUENCE_MASK) != 0);
44 45
45 /*FIXME: should be backlight level before*/ 46 if (REG_READ(PP_STATUS) & PP_ON) {
46 psb_intel_lvds_set_brightness(dev, 100); 47 /*FIXME: should be backlight level before*/
48 psb_intel_lvds_set_brightness(dev, 100);
49 } else {
50 DRM_DEBUG("LVDS panel never powered up");
51 return;
52 }
47 } else { 53 } else {
48 psb_intel_lvds_set_brightness(dev, 0); 54 psb_intel_lvds_set_brightness(dev, 0);
49 55
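On the psb_lid.c hunk above: the old loop spun unconditionally until PP_ON was set, so a panel that never powered up hung the lid timer. The fixed condition also exits once the power sequencer goes idle, after which the code re-checks PP_ON and bails out with the debug message. A self-contained model of that exit condition (the register bits are placeholders with the same shape as the driver's PP_STATUS fields):

    #include <stdio.h>
    #include <stdint.h>

    /* Placeholder bit layout shaped like PP_STATUS. */
    #define PP_ON (1u << 31)
    #define PP_SEQUENCE_MASK (3u << 28)

    /* Simulated register: the panel never reaches PP_ON, and the power
     * sequencer goes idle after a couple of reads. */
    static uint32_t read_pp_status(void)
    {
            static int reads;
            return (++reads < 3) ? (1u << 28) : 0;
    }

    int main(void)
    {
            uint32_t pp_status;

            /* As in the fix: stop when the panel turns on *or* the power
             * sequencer idles, instead of spinning forever. */
            do {
                    pp_status = read_pp_status();
            } while ((pp_status & PP_ON) == 0 &&
                     (pp_status & PP_SEQUENCE_MASK) != 0);

            puts(pp_status & PP_ON ? "panel on" : "panel never powered up");
            return 0;
    }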
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index ce7fc77678b4..2e9268da58d8 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -11,17 +11,21 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
11 i915_gem_evict.o \ 11 i915_gem_evict.o \
12 i915_gem_execbuffer.o \ 12 i915_gem_execbuffer.o \
13 i915_gem_gtt.o \ 13 i915_gem_gtt.o \
14 i915_gem_stolen.o \
14 i915_gem_tiling.o \ 15 i915_gem_tiling.o \
16 i915_sysfs.o \
15 i915_trace_points.o \ 17 i915_trace_points.o \
16 intel_display.o \ 18 intel_display.o \
17 intel_crt.o \ 19 intel_crt.o \
18 intel_lvds.o \ 20 intel_lvds.o \
19 intel_bios.o \ 21 intel_bios.o \
22 intel_ddi.o \
20 intel_dp.o \ 23 intel_dp.o \
21 intel_hdmi.o \ 24 intel_hdmi.o \
22 intel_sdvo.o \ 25 intel_sdvo.o \
23 intel_modes.o \ 26 intel_modes.o \
24 intel_panel.o \ 27 intel_panel.o \
28 intel_pm.o \
25 intel_i2c.o \ 29 intel_i2c.o \
26 intel_fb.o \ 30 intel_fb.o \
27 intel_tv.o \ 31 intel_tv.o \
@@ -34,7 +38,8 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
34 dvo_ch7017.o \ 38 dvo_ch7017.o \
35 dvo_ivch.o \ 39 dvo_ivch.o \
36 dvo_tfp410.o \ 40 dvo_tfp410.o \
37 dvo_sil164.o 41 dvo_sil164.o \
42 i915_gem_dmabuf.o
38 43
39i915-$(CONFIG_COMPAT) += i915_ioc32.o 44i915-$(CONFIG_COMPAT) += i915_ioc32.o
40 45
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index e6162a1681f0..eb2b3c25b9e1 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -47,7 +47,6 @@ enum {
47 FLUSHING_LIST, 47 FLUSHING_LIST,
48 INACTIVE_LIST, 48 INACTIVE_LIST,
49 PINNED_LIST, 49 PINNED_LIST,
50 DEFERRED_FREE_LIST,
51}; 50};
52 51
53static const char *yesno(int v) 52static const char *yesno(int v)
@@ -178,18 +177,10 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
178 seq_printf(m, "Inactive:\n"); 177 seq_printf(m, "Inactive:\n");
179 head = &dev_priv->mm.inactive_list; 178 head = &dev_priv->mm.inactive_list;
180 break; 179 break;
181 case PINNED_LIST:
182 seq_printf(m, "Pinned:\n");
183 head = &dev_priv->mm.pinned_list;
184 break;
185 case FLUSHING_LIST: 180 case FLUSHING_LIST:
186 seq_printf(m, "Flushing:\n"); 181 seq_printf(m, "Flushing:\n");
187 head = &dev_priv->mm.flushing_list; 182 head = &dev_priv->mm.flushing_list;
188 break; 183 break;
189 case DEFERRED_FREE_LIST:
190 seq_printf(m, "Deferred free:\n");
191 head = &dev_priv->mm.deferred_free_list;
192 break;
193 default: 184 default:
194 mutex_unlock(&dev->struct_mutex); 185 mutex_unlock(&dev->struct_mutex);
195 return -EINVAL; 186 return -EINVAL;
@@ -252,21 +243,11 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
252 count, mappable_count, size, mappable_size); 243 count, mappable_count, size, mappable_size);
253 244
254 size = count = mappable_size = mappable_count = 0; 245 size = count = mappable_size = mappable_count = 0;
255 count_objects(&dev_priv->mm.pinned_list, mm_list);
256 seq_printf(m, " %u [%u] pinned objects, %zu [%zu] bytes\n",
257 count, mappable_count, size, mappable_size);
258
259 size = count = mappable_size = mappable_count = 0;
260 count_objects(&dev_priv->mm.inactive_list, mm_list); 246 count_objects(&dev_priv->mm.inactive_list, mm_list);
261 seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n", 247 seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
262 count, mappable_count, size, mappable_size); 248 count, mappable_count, size, mappable_size);
263 249
264 size = count = mappable_size = mappable_count = 0; 250 size = count = mappable_size = mappable_count = 0;
265 count_objects(&dev_priv->mm.deferred_free_list, mm_list);
266 seq_printf(m, " %u [%u] freed objects, %zu [%zu] bytes\n",
267 count, mappable_count, size, mappable_size);
268
269 size = count = mappable_size = mappable_count = 0;
270 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { 251 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
271 if (obj->fault_mappable) { 252 if (obj->fault_mappable) {
272 size += obj->gtt_space->size; 253 size += obj->gtt_space->size;
@@ -294,6 +275,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data)
294{ 275{
295 struct drm_info_node *node = (struct drm_info_node *) m->private; 276 struct drm_info_node *node = (struct drm_info_node *) m->private;
296 struct drm_device *dev = node->minor->dev; 277 struct drm_device *dev = node->minor->dev;
278 uintptr_t list = (uintptr_t) node->info_ent->data;
297 struct drm_i915_private *dev_priv = dev->dev_private; 279 struct drm_i915_private *dev_priv = dev->dev_private;
298 struct drm_i915_gem_object *obj; 280 struct drm_i915_gem_object *obj;
299 size_t total_obj_size, total_gtt_size; 281 size_t total_obj_size, total_gtt_size;
@@ -305,6 +287,9 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data)
305 287
306 total_obj_size = total_gtt_size = count = 0; 288 total_obj_size = total_gtt_size = count = 0;
307 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { 289 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
290 if (list == PINNED_LIST && obj->pin_count == 0)
291 continue;
292
308 seq_printf(m, " "); 293 seq_printf(m, " ");
309 describe_obj(m, obj); 294 describe_obj(m, obj);
310 seq_printf(m, "\n"); 295 seq_printf(m, "\n");
@@ -321,7 +306,6 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data)
321 return 0; 306 return 0;
322} 307}
323 308
324
325static int i915_gem_pageflip_info(struct seq_file *m, void *data) 309static int i915_gem_pageflip_info(struct seq_file *m, void *data)
326{ 310{
327 struct drm_info_node *node = (struct drm_info_node *) m->private; 311 struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -430,10 +414,6 @@ static void i915_ring_seqno_info(struct seq_file *m,
430 if (ring->get_seqno) { 414 if (ring->get_seqno) {
431 seq_printf(m, "Current sequence (%s): %d\n", 415 seq_printf(m, "Current sequence (%s): %d\n",
432 ring->name, ring->get_seqno(ring)); 416 ring->name, ring->get_seqno(ring));
433 seq_printf(m, "Waiter sequence (%s): %d\n",
434 ring->name, ring->waiting_seqno);
435 seq_printf(m, "IRQ sequence (%s): %d\n",
436 ring->name, ring->irq_seqno);
437 } 417 }
438} 418}
439 419
@@ -468,7 +448,45 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
468 if (ret) 448 if (ret)
469 return ret; 449 return ret;
470 450
471 if (!HAS_PCH_SPLIT(dev)) { 451 if (IS_VALLEYVIEW(dev)) {
452 seq_printf(m, "Display IER:\t%08x\n",
453 I915_READ(VLV_IER));
454 seq_printf(m, "Display IIR:\t%08x\n",
455 I915_READ(VLV_IIR));
456 seq_printf(m, "Display IIR_RW:\t%08x\n",
457 I915_READ(VLV_IIR_RW));
458 seq_printf(m, "Display IMR:\t%08x\n",
459 I915_READ(VLV_IMR));
460 for_each_pipe(pipe)
461 seq_printf(m, "Pipe %c stat:\t%08x\n",
462 pipe_name(pipe),
463 I915_READ(PIPESTAT(pipe)));
464
465 seq_printf(m, "Master IER:\t%08x\n",
466 I915_READ(VLV_MASTER_IER));
467
468 seq_printf(m, "Render IER:\t%08x\n",
469 I915_READ(GTIER));
470 seq_printf(m, "Render IIR:\t%08x\n",
471 I915_READ(GTIIR));
472 seq_printf(m, "Render IMR:\t%08x\n",
473 I915_READ(GTIMR));
474
475 seq_printf(m, "PM IER:\t\t%08x\n",
476 I915_READ(GEN6_PMIER));
477 seq_printf(m, "PM IIR:\t\t%08x\n",
478 I915_READ(GEN6_PMIIR));
479 seq_printf(m, "PM IMR:\t\t%08x\n",
480 I915_READ(GEN6_PMIMR));
481
482 seq_printf(m, "Port hotplug:\t%08x\n",
483 I915_READ(PORT_HOTPLUG_EN));
484 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
485 I915_READ(VLV_DPFLIPSTAT));
486 seq_printf(m, "DPINVGTT:\t%08x\n",
487 I915_READ(DPINVGTT));
488
489 } else if (!HAS_PCH_SPLIT(dev)) {
472 seq_printf(m, "Interrupt enable: %08x\n", 490 seq_printf(m, "Interrupt enable: %08x\n",
473 I915_READ(IER)); 491 I915_READ(IER));
474 seq_printf(m, "Interrupt identity: %08x\n", 492 seq_printf(m, "Interrupt identity: %08x\n",
@@ -564,69 +582,6 @@ static int i915_hws_info(struct seq_file *m, void *data)
564 return 0; 582 return 0;
565} 583}
566 584
567static int i915_ringbuffer_data(struct seq_file *m, void *data)
568{
569 struct drm_info_node *node = (struct drm_info_node *) m->private;
570 struct drm_device *dev = node->minor->dev;
571 drm_i915_private_t *dev_priv = dev->dev_private;
572 struct intel_ring_buffer *ring;
573 int ret;
574
575 ret = mutex_lock_interruptible(&dev->struct_mutex);
576 if (ret)
577 return ret;
578
579 ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
580 if (!ring->obj) {
581 seq_printf(m, "No ringbuffer setup\n");
582 } else {
583 const u8 __iomem *virt = ring->virtual_start;
584 uint32_t off;
585
586 for (off = 0; off < ring->size; off += 4) {
587 uint32_t *ptr = (uint32_t *)(virt + off);
588 seq_printf(m, "%08x : %08x\n", off, *ptr);
589 }
590 }
591 mutex_unlock(&dev->struct_mutex);
592
593 return 0;
594}
595
596static int i915_ringbuffer_info(struct seq_file *m, void *data)
597{
598 struct drm_info_node *node = (struct drm_info_node *) m->private;
599 struct drm_device *dev = node->minor->dev;
600 drm_i915_private_t *dev_priv = dev->dev_private;
601 struct intel_ring_buffer *ring;
602 int ret;
603
604 ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
605 if (ring->size == 0)
606 return 0;
607
608 ret = mutex_lock_interruptible(&dev->struct_mutex);
609 if (ret)
610 return ret;
611
612 seq_printf(m, "Ring %s:\n", ring->name);
613 seq_printf(m, " Head : %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR);
614 seq_printf(m, " Tail : %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR);
615 seq_printf(m, " Size : %08x\n", ring->size);
616 seq_printf(m, " Active : %08x\n", intel_ring_get_active_head(ring));
617 seq_printf(m, " NOPID : %08x\n", I915_READ_NOPID(ring));
618 if (IS_GEN6(dev) || IS_GEN7(dev)) {
619 seq_printf(m, " Sync 0 : %08x\n", I915_READ_SYNC_0(ring));
620 seq_printf(m, " Sync 1 : %08x\n", I915_READ_SYNC_1(ring));
621 }
622 seq_printf(m, " Control : %08x\n", I915_READ_CTL(ring));
623 seq_printf(m, " Start : %08x\n", I915_READ_START(ring));
624
625 mutex_unlock(&dev->struct_mutex);
626
627 return 0;
628}
629
630static const char *ring_str(int ring) 585static const char *ring_str(int ring)
631{ 586{
632 switch (ring) { 587 switch (ring) {
@@ -704,6 +659,7 @@ static void i915_ring_error_state(struct seq_file *m,
704 struct drm_i915_error_state *error, 659 struct drm_i915_error_state *error,
705 unsigned ring) 660 unsigned ring)
706{ 661{
662 BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */
707 seq_printf(m, "%s command stream:\n", ring_str(ring)); 663 seq_printf(m, "%s command stream:\n", ring_str(ring));
708 seq_printf(m, " HEAD: 0x%08x\n", error->head[ring]); 664 seq_printf(m, " HEAD: 0x%08x\n", error->head[ring]);
709 seq_printf(m, " TAIL: 0x%08x\n", error->tail[ring]); 665 seq_printf(m, " TAIL: 0x%08x\n", error->tail[ring]);
@@ -718,8 +674,8 @@ static void i915_ring_error_state(struct seq_file *m,
718 if (INTEL_INFO(dev)->gen >= 4) 674 if (INTEL_INFO(dev)->gen >= 4)
719 seq_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]); 675 seq_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]);
720 seq_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]); 676 seq_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
677 seq_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]);
721 if (INTEL_INFO(dev)->gen >= 6) { 678 if (INTEL_INFO(dev)->gen >= 6) {
722 seq_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]);
723 seq_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]); 679 seq_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
724 seq_printf(m, " SYNC_0: 0x%08x\n", 680 seq_printf(m, " SYNC_0: 0x%08x\n",
725 error->semaphore_mboxes[ring][0]); 681 error->semaphore_mboxes[ring][0]);
@@ -727,31 +683,35 @@ static void i915_ring_error_state(struct seq_file *m,
727 error->semaphore_mboxes[ring][1]); 683 error->semaphore_mboxes[ring][1]);
728 } 684 }
729 seq_printf(m, " seqno: 0x%08x\n", error->seqno[ring]); 685 seq_printf(m, " seqno: 0x%08x\n", error->seqno[ring]);
686 seq_printf(m, " waiting: %s\n", yesno(error->waiting[ring]));
730 seq_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]); 687 seq_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
731 seq_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]); 688 seq_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
732} 689}
733 690
691struct i915_error_state_file_priv {
692 struct drm_device *dev;
693 struct drm_i915_error_state *error;
694};
695
734static int i915_error_state(struct seq_file *m, void *unused) 696static int i915_error_state(struct seq_file *m, void *unused)
735{ 697{
736 struct drm_info_node *node = (struct drm_info_node *) m->private; 698 struct i915_error_state_file_priv *error_priv = m->private;
737 struct drm_device *dev = node->minor->dev; 699 struct drm_device *dev = error_priv->dev;
738 drm_i915_private_t *dev_priv = dev->dev_private; 700 drm_i915_private_t *dev_priv = dev->dev_private;
739 struct drm_i915_error_state *error; 701 struct drm_i915_error_state *error = error_priv->error;
740 unsigned long flags; 702 struct intel_ring_buffer *ring;
741 int i, j, page, offset, elt; 703 int i, j, page, offset, elt;
742 704
743 spin_lock_irqsave(&dev_priv->error_lock, flags); 705 if (!error) {
744 if (!dev_priv->first_error) {
745 seq_printf(m, "no error state collected\n"); 706 seq_printf(m, "no error state collected\n");
746 goto out; 707 return 0;
747 } 708 }
748 709
749 error = dev_priv->first_error;
750
751 seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec, 710 seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
752 error->time.tv_usec); 711 error->time.tv_usec);
753 seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device); 712 seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
754 seq_printf(m, "EIR: 0x%08x\n", error->eir); 713 seq_printf(m, "EIR: 0x%08x\n", error->eir);
714 seq_printf(m, "IER: 0x%08x\n", error->ier);
755 seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er); 715 seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
756 716
757 for (i = 0; i < dev_priv->num_fence_regs; i++) 717 for (i = 0; i < dev_priv->num_fence_regs; i++)
@@ -762,11 +722,8 @@ static int i915_error_state(struct seq_file *m, void *unused)
762 seq_printf(m, "DONE_REG: 0x%08x\n", error->done_reg); 722 seq_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
763 } 723 }
764 724
765 i915_ring_error_state(m, dev, error, RCS); 725 for_each_ring(ring, dev_priv, i)
766 if (HAS_BLT(dev)) 726 i915_ring_error_state(m, dev, error, i);
767 i915_ring_error_state(m, dev, error, BCS);
768 if (HAS_BSD(dev))
769 i915_ring_error_state(m, dev, error, VCS);
770 727
771 if (error->active_bo) 728 if (error->active_bo)
772 print_error_buffers(m, "Active", 729 print_error_buffers(m, "Active",
@@ -828,12 +785,71 @@ static int i915_error_state(struct seq_file *m, void *unused)
828 if (error->display) 785 if (error->display)
829 intel_display_print_error_state(m, dev, error->display); 786 intel_display_print_error_state(m, dev, error->display);
830 787
831out: 788 return 0;
789}
790
791static ssize_t
792i915_error_state_write(struct file *filp,
793 const char __user *ubuf,
794 size_t cnt,
795 loff_t *ppos)
796{
797 struct seq_file *m = filp->private_data;
798 struct i915_error_state_file_priv *error_priv = m->private;
799 struct drm_device *dev = error_priv->dev;
800
801 DRM_DEBUG_DRIVER("Resetting error state\n");
802
803 mutex_lock(&dev->struct_mutex);
804 i915_destroy_error_state(dev);
805 mutex_unlock(&dev->struct_mutex);
806
807 return cnt;
808}
809
810static int i915_error_state_open(struct inode *inode, struct file *file)
811{
812 struct drm_device *dev = inode->i_private;
813 drm_i915_private_t *dev_priv = dev->dev_private;
814 struct i915_error_state_file_priv *error_priv;
815 unsigned long flags;
816
817 error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
818 if (!error_priv)
819 return -ENOMEM;
820
821 error_priv->dev = dev;
822
823 spin_lock_irqsave(&dev_priv->error_lock, flags);
824 error_priv->error = dev_priv->first_error;
825 if (error_priv->error)
826 kref_get(&error_priv->error->ref);
832 spin_unlock_irqrestore(&dev_priv->error_lock, flags); 827 spin_unlock_irqrestore(&dev_priv->error_lock, flags);
833 828
834 return 0; 829 return single_open(file, i915_error_state, error_priv);
830}
831
832static int i915_error_state_release(struct inode *inode, struct file *file)
833{
834 struct seq_file *m = file->private_data;
835 struct i915_error_state_file_priv *error_priv = m->private;
836
837 if (error_priv->error)
838 kref_put(&error_priv->error->ref, i915_error_state_free);
839 kfree(error_priv);
840
841 return single_release(inode, file);
835} 842}
836 843
844static const struct file_operations i915_error_state_fops = {
845 .owner = THIS_MODULE,
846 .open = i915_error_state_open,
847 .read = seq_read,
848 .write = i915_error_state_write,
849 .llseek = default_llseek,
850 .release = i915_error_state_release,
851};
852
837static int i915_rstdby_delays(struct seq_file *m, void *unused) 853static int i915_rstdby_delays(struct seq_file *m, void *unused)
838{ 854{
839 struct drm_info_node *node = (struct drm_info_node *) m->private; 855 struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -1132,6 +1148,17 @@ static int gen6_drpc_info(struct seq_file *m)
1132 1148
1133 seq_printf(m, "Core Power Down: %s\n", 1149 seq_printf(m, "Core Power Down: %s\n",
1134 yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK)); 1150 yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
1151
1152 /* Not exactly sure what this is */
1153 seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
1154 I915_READ(GEN6_GT_GFX_RC6_LOCKED));
1155 seq_printf(m, "RC6 residency since boot: %u\n",
1156 I915_READ(GEN6_GT_GFX_RC6));
1157 seq_printf(m, "RC6+ residency since boot: %u\n",
1158 I915_READ(GEN6_GT_GFX_RC6p));
1159 seq_printf(m, "RC6++ residency since boot: %u\n",
1160 I915_READ(GEN6_GT_GFX_RC6pp));
1161
1135 return 0; 1162 return 0;
1136} 1163}
1137 1164
@@ -1306,17 +1333,25 @@ static int i915_opregion(struct seq_file *m, void *unused)
1306 struct drm_device *dev = node->minor->dev; 1333 struct drm_device *dev = node->minor->dev;
1307 drm_i915_private_t *dev_priv = dev->dev_private; 1334 drm_i915_private_t *dev_priv = dev->dev_private;
1308 struct intel_opregion *opregion = &dev_priv->opregion; 1335 struct intel_opregion *opregion = &dev_priv->opregion;
1336 void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL);
1309 int ret; 1337 int ret;
1310 1338
1339 if (data == NULL)
1340 return -ENOMEM;
1341
1311 ret = mutex_lock_interruptible(&dev->struct_mutex); 1342 ret = mutex_lock_interruptible(&dev->struct_mutex);
1312 if (ret) 1343 if (ret)
1313 return ret; 1344 goto out;
1314 1345
1315 if (opregion->header) 1346 if (opregion->header) {
1316 seq_write(m, opregion->header, OPREGION_SIZE); 1347 memcpy_fromio(data, opregion->header, OPREGION_SIZE);
1348 seq_write(m, data, OPREGION_SIZE);
1349 }
1317 1350
1318 mutex_unlock(&dev->struct_mutex); 1351 mutex_unlock(&dev->struct_mutex);
1319 1352
1353out:
1354 kfree(data);
1320 return 0; 1355 return 0;
1321} 1356}
1322 1357
@@ -1505,6 +1540,53 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
1505 return 0; 1540 return 0;
1506} 1541}
1507 1542
1543static int i915_dpio_info(struct seq_file *m, void *data)
1544{
1545 struct drm_info_node *node = (struct drm_info_node *) m->private;
1546 struct drm_device *dev = node->minor->dev;
1547 struct drm_i915_private *dev_priv = dev->dev_private;
1548 int ret;
1549
1550
1551 if (!IS_VALLEYVIEW(dev)) {
1552 seq_printf(m, "unsupported\n");
1553 return 0;
1554 }
1555
1556 ret = mutex_lock_interruptible(&dev->mode_config.mutex);
1557 if (ret)
1558 return ret;
1559
1560 seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL));
1561
1562 seq_printf(m, "DPIO_DIV_A: 0x%08x\n",
1563 intel_dpio_read(dev_priv, _DPIO_DIV_A));
1564 seq_printf(m, "DPIO_DIV_B: 0x%08x\n",
1565 intel_dpio_read(dev_priv, _DPIO_DIV_B));
1566
1567 seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n",
1568 intel_dpio_read(dev_priv, _DPIO_REFSFR_A));
1569 seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n",
1570 intel_dpio_read(dev_priv, _DPIO_REFSFR_B));
1571
1572 seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n",
1573 intel_dpio_read(dev_priv, _DPIO_CORE_CLK_A));
1574 seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n",
1575 intel_dpio_read(dev_priv, _DPIO_CORE_CLK_B));
1576
1577 seq_printf(m, "DPIO_LFP_COEFF_A: 0x%08x\n",
1578 intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_A));
1579 seq_printf(m, "DPIO_LFP_COEFF_B: 0x%08x\n",
1580 intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_B));
1581
1582 seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
1583 intel_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE));
1584
1585 mutex_unlock(&dev->mode_config.mutex);
1586
1587 return 0;
1588}
1589
1508static ssize_t 1590static ssize_t
1509i915_wedged_read(struct file *filp, 1591i915_wedged_read(struct file *filp,
1510 char __user *ubuf, 1592 char __user *ubuf,
@@ -1562,6 +1644,65 @@ static const struct file_operations i915_wedged_fops = {
1562}; 1644};
1563 1645
1564static ssize_t 1646static ssize_t
1647i915_ring_stop_read(struct file *filp,
1648 char __user *ubuf,
1649 size_t max,
1650 loff_t *ppos)
1651{
1652 struct drm_device *dev = filp->private_data;
1653 drm_i915_private_t *dev_priv = dev->dev_private;
1654 char buf[20];
1655 int len;
1656
1657 len = snprintf(buf, sizeof(buf),
1658 "0x%08x\n", dev_priv->stop_rings);
1659
1660 if (len > sizeof(buf))
1661 len = sizeof(buf);
1662
1663 return simple_read_from_buffer(ubuf, max, ppos, buf, len);
1664}
1665
1666static ssize_t
1667i915_ring_stop_write(struct file *filp,
1668 const char __user *ubuf,
1669 size_t cnt,
1670 loff_t *ppos)
1671{
1672 struct drm_device *dev = filp->private_data;
1673 struct drm_i915_private *dev_priv = dev->dev_private;
1674 char buf[20];
1675 int val = 0;
1676
1677 if (cnt > 0) {
1678 if (cnt > sizeof(buf) - 1)
1679 return -EINVAL;
1680
1681 if (copy_from_user(buf, ubuf, cnt))
1682 return -EFAULT;
1683 buf[cnt] = 0;
1684
1685 val = simple_strtoul(buf, NULL, 0);
1686 }
1687
1688 DRM_DEBUG_DRIVER("Stopping rings 0x%08x\n", val);
1689
1690 mutex_lock(&dev->struct_mutex);
1691 dev_priv->stop_rings = val;
1692 mutex_unlock(&dev->struct_mutex);
1693
1694 return cnt;
1695}
1696
1697static const struct file_operations i915_ring_stop_fops = {
1698 .owner = THIS_MODULE,
1699 .open = simple_open,
1700 .read = i915_ring_stop_read,
1701 .write = i915_ring_stop_write,
1702 .llseek = default_llseek,
1703};
1704
1705static ssize_t
1565i915_max_freq_read(struct file *filp, 1706i915_max_freq_read(struct file *filp,
1566 char __user *ubuf, 1707 char __user *ubuf,
1567 size_t max, 1708 size_t max,
@@ -1738,7 +1879,7 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
1738 return 0; 1879 return 0;
1739} 1880}
1740 1881
1741int i915_forcewake_release(struct inode *inode, struct file *file) 1882static int i915_forcewake_release(struct inode *inode, struct file *file)
1742{ 1883{
1743 struct drm_device *dev = inode->i_private; 1884 struct drm_device *dev = inode->i_private;
1744 struct drm_i915_private *dev_priv = dev->dev_private; 1885 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1803,11 +1944,10 @@ static struct drm_info_list i915_debugfs_list[] = {
1803 {"i915_capabilities", i915_capabilities, 0}, 1944 {"i915_capabilities", i915_capabilities, 0},
1804 {"i915_gem_objects", i915_gem_object_info, 0}, 1945 {"i915_gem_objects", i915_gem_object_info, 0},
1805 {"i915_gem_gtt", i915_gem_gtt_info, 0}, 1946 {"i915_gem_gtt", i915_gem_gtt_info, 0},
1947 {"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
1806 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, 1948 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
1807 {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST}, 1949 {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
1808 {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, 1950 {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
1809 {"i915_gem_pinned", i915_gem_object_list_info, 0, (void *) PINNED_LIST},
1810 {"i915_gem_deferred_free", i915_gem_object_list_info, 0, (void *) DEFERRED_FREE_LIST},
1811 {"i915_gem_pageflip", i915_gem_pageflip_info, 0}, 1951 {"i915_gem_pageflip", i915_gem_pageflip_info, 0},
1812 {"i915_gem_request", i915_gem_request_info, 0}, 1952 {"i915_gem_request", i915_gem_request_info, 0},
1813 {"i915_gem_seqno", i915_gem_seqno_info, 0}, 1953 {"i915_gem_seqno", i915_gem_seqno_info, 0},
@@ -1816,13 +1956,6 @@ static struct drm_info_list i915_debugfs_list[] = {
1816 {"i915_gem_hws", i915_hws_info, 0, (void *)RCS}, 1956 {"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
1817 {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS}, 1957 {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
1818 {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS}, 1958 {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
1819 {"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RCS},
1820 {"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RCS},
1821 {"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)VCS},
1822 {"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)VCS},
1823 {"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BCS},
1824 {"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BCS},
1825 {"i915_error_state", i915_error_state, 0},
1826 {"i915_rstdby_delays", i915_rstdby_delays, 0}, 1959 {"i915_rstdby_delays", i915_rstdby_delays, 0},
1827 {"i915_cur_delayinfo", i915_cur_delayinfo, 0}, 1960 {"i915_cur_delayinfo", i915_cur_delayinfo, 0},
1828 {"i915_delayfreq_table", i915_delayfreq_table, 0}, 1961 {"i915_delayfreq_table", i915_delayfreq_table, 0},
@@ -1839,6 +1972,7 @@ static struct drm_info_list i915_debugfs_list[] = {
1839 {"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0}, 1972 {"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
1840 {"i915_swizzle_info", i915_swizzle_info, 0}, 1973 {"i915_swizzle_info", i915_swizzle_info, 0},
1841 {"i915_ppgtt_info", i915_ppgtt_info, 0}, 1974 {"i915_ppgtt_info", i915_ppgtt_info, 0},
1975 {"i915_dpio", i915_dpio_info, 0},
1842}; 1976};
1843#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) 1977#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
1844 1978
@@ -1867,6 +2001,17 @@ int i915_debugfs_init(struct drm_minor *minor)
1867 &i915_cache_sharing_fops); 2001 &i915_cache_sharing_fops);
1868 if (ret) 2002 if (ret)
1869 return ret; 2003 return ret;
2004 ret = i915_debugfs_create(minor->debugfs_root, minor,
2005 "i915_ring_stop",
2006 &i915_ring_stop_fops);
2007 if (ret)
2008 return ret;
2009
2010 ret = i915_debugfs_create(minor->debugfs_root, minor,
2011 "i915_error_state",
2012 &i915_error_state_fops);
2013 if (ret)
2014 return ret;
1870 2015
1871 return drm_debugfs_create_files(i915_debugfs_list, 2016 return drm_debugfs_create_files(i915_debugfs_list,
1872 I915_DEBUGFS_ENTRIES, 2017 I915_DEBUGFS_ENTRIES,
@@ -1885,6 +2030,8 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
1885 1, minor); 2030 1, minor);
1886 drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops, 2031 drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops,
1887 1, minor); 2032 1, minor);
2033 drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops,
2034 1, minor);
1888} 2035}
1889 2036
1890#endif /* CONFIG_DEBUG_FS */ 2037#endif /* CONFIG_DEBUG_FS */
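The debugfs rework above turns i915_error_state from a drm_info_list entry into its own file_operations so that open() can pin the captured error record with a kref; the record then stays readable even if a write() to the same file resets dev_priv->first_error underneath it. A single-threaded toy of that ownership rule (a plain int stands in for struct kref, and there is no locking, unlike the real code):

    #include <stdio.h>
    #include <stdlib.h>

    struct error_state {
            int refcount;
            const char *data;
    };

    static void error_get(struct error_state *e)
    {
            e->refcount++;
    }

    static void error_put(struct error_state *e)
    {
            if (--e->refcount == 0) {
                    printf("freeing error state\n");
                    free(e);
            }
    }

    int main(void)
    {
            struct error_state *e = malloc(sizeof(*e));

            e->refcount = 1; /* reference owned by the driver */
            e->data = "GPU hang";

            error_get(e); /* debugfs open() pins the record */
            error_put(e); /* a write() resets the driver's copy */
            printf("reader still sees: %s\n", e->data); /* record survives */
            error_put(e); /* debugfs release() drops the last reference */
            return 0;
    }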
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index ba60f3c8f911..f94792626b94 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -26,6 +26,8 @@
26 * 26 *
27 */ 27 */
28 28
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
29#include "drmP.h" 31#include "drmP.h"
30#include "drm.h" 32#include "drm.h"
31#include "drm_crtc_helper.h" 33#include "drm_crtc_helper.h"
@@ -34,15 +36,62 @@
34#include "i915_drm.h" 36#include "i915_drm.h"
35#include "i915_drv.h" 37#include "i915_drv.h"
36#include "i915_trace.h" 38#include "i915_trace.h"
37#include "../../../platform/x86/intel_ips.h"
38#include <linux/pci.h> 39#include <linux/pci.h>
39#include <linux/vgaarb.h> 40#include <linux/vgaarb.h>
40#include <linux/acpi.h> 41#include <linux/acpi.h>
41#include <linux/pnp.h> 42#include <linux/pnp.h>
42#include <linux/vga_switcheroo.h> 43#include <linux/vga_switcheroo.h>
43#include <linux/slab.h> 44#include <linux/slab.h>
44#include <linux/module.h>
45#include <acpi/video.h> 45#include <acpi/video.h>
46#include <asm/pat.h>
47
48#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
49
50#define BEGIN_LP_RING(n) \
51 intel_ring_begin(LP_RING(dev_priv), (n))
52
53#define OUT_RING(x) \
54 intel_ring_emit(LP_RING(dev_priv), x)
55
56#define ADVANCE_LP_RING() \
57 intel_ring_advance(LP_RING(dev_priv))
58
59/**
60 * Lock test for when it's just for synchronization of ring access.
61 *
62 * In that case the test can be skipped once GEM is initialized, as nobody
63 * else has access to the ring.
64 */
65#define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \
66 if (LP_RING(dev->dev_private)->obj == NULL) \
67 LOCK_TEST_WITH_RETURN(dev, file); \
68} while (0)
69
70static inline u32
71intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg)
72{
73 if (I915_NEED_GFX_HWS(dev_priv->dev))
74 return ioread32(dev_priv->dri1.gfx_hws_cpu_addr + reg);
75 else
76 return intel_read_status_page(LP_RING(dev_priv), reg);
77}
78
79#define READ_HWSP(dev_priv, reg) intel_read_legacy_status_page(dev_priv, reg)
80#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
81#define I915_BREADCRUMB_INDEX 0x21
82
83void i915_update_dri1_breadcrumb(struct drm_device *dev)
84{
85 drm_i915_private_t *dev_priv = dev->dev_private;
86 struct drm_i915_master_private *master_priv;
87
88 if (dev->primary->master) {
89 master_priv = dev->primary->master->driver_priv;
90 if (master_priv->sarea_priv)
91 master_priv->sarea_priv->last_dispatch =
92 READ_BREADCRUMB(dev_priv);
93 }
94}
46 95
47static void i915_write_hws_pga(struct drm_device *dev) 96static void i915_write_hws_pga(struct drm_device *dev)
48{ 97{
@@ -97,7 +146,7 @@ static void i915_free_hws(struct drm_device *dev)
97 146
98 if (ring->status_page.gfx_addr) { 147 if (ring->status_page.gfx_addr) {
99 ring->status_page.gfx_addr = 0; 148 ring->status_page.gfx_addr = 0;
100 drm_core_ioremapfree(&dev_priv->hws_map, dev); 149 iounmap(dev_priv->dri1.gfx_hws_cpu_addr);
101 } 150 }
102 151
103 /* Need to rewrite hardware status page */ 152 /* Need to rewrite hardware status page */
@@ -195,7 +244,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
195 244
196 /* Allow hardware batchbuffers unless told otherwise. 245 /* Allow hardware batchbuffers unless told otherwise.
197 */ 246 */
198 dev_priv->allow_batchbuffer = 1; 247 dev_priv->dri1.allow_batchbuffer = 1;
199 248
200 return 0; 249 return 0;
201} 250}
@@ -207,7 +256,7 @@ static int i915_dma_resume(struct drm_device * dev)
207 256
208 DRM_DEBUG_DRIVER("%s\n", __func__); 257 DRM_DEBUG_DRIVER("%s\n", __func__);
209 258
210 if (ring->map.handle == NULL) { 259 if (ring->virtual_start == NULL) {
211 DRM_ERROR("can not ioremap virtual address for" 260 DRM_ERROR("can not ioremap virtual address for"
212 " ring buffer\n"); 261 " ring buffer\n");
213 return -ENOMEM; 262 return -ENOMEM;
@@ -236,6 +285,9 @@ static int i915_dma_init(struct drm_device *dev, void *data,
236 drm_i915_init_t *init = data; 285 drm_i915_init_t *init = data;
237 int retcode = 0; 286 int retcode = 0;
238 287
288 if (drm_core_check_feature(dev, DRIVER_MODESET))
289 return -ENODEV;
290
239 switch (init->func) { 291 switch (init->func) {
240 case I915_INIT_DMA: 292 case I915_INIT_DMA:
241 retcode = i915_initialize(dev, init); 293 retcode = i915_initialize(dev, init);
@@ -578,6 +630,9 @@ static int i915_flush_ioctl(struct drm_device *dev, void *data,
578{ 630{
579 int ret; 631 int ret;
580 632
633 if (drm_core_check_feature(dev, DRIVER_MODESET))
634 return -ENODEV;
635
581 RING_LOCK_TEST_WITH_RETURN(dev, file_priv); 636 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
582 637
583 mutex_lock(&dev->struct_mutex); 638 mutex_lock(&dev->struct_mutex);
@@ -598,7 +653,10 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
598 int ret; 653 int ret;
599 struct drm_clip_rect *cliprects = NULL; 654 struct drm_clip_rect *cliprects = NULL;
600 655
601 if (!dev_priv->allow_batchbuffer) { 656 if (drm_core_check_feature(dev, DRIVER_MODESET))
657 return -ENODEV;
658
659 if (!dev_priv->dri1.allow_batchbuffer) {
602 DRM_ERROR("Batchbuffer ioctl disabled\n"); 660 DRM_ERROR("Batchbuffer ioctl disabled\n");
603 return -EINVAL; 661 return -EINVAL;
604 } 662 }
@@ -655,6 +713,9 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
655 DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n", 713 DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
656 cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects); 714 cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
657 715
716 if (drm_core_check_feature(dev, DRIVER_MODESET))
717 return -ENODEV;
718
658 RING_LOCK_TEST_WITH_RETURN(dev, file_priv); 719 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
659 720
660 if (cmdbuf->num_cliprects < 0) 721 if (cmdbuf->num_cliprects < 0)
@@ -706,11 +767,166 @@ fail_batch_free:
706 return ret; 767 return ret;
707} 768}
708 769
770static int i915_emit_irq(struct drm_device * dev)
771{
772 drm_i915_private_t *dev_priv = dev->dev_private;
773 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
774
775 i915_kernel_lost_context(dev);
776
777 DRM_DEBUG_DRIVER("\n");
778
779 dev_priv->counter++;
780 if (dev_priv->counter > 0x7FFFFFFFUL)
781 dev_priv->counter = 1;
782 if (master_priv->sarea_priv)
783 master_priv->sarea_priv->last_enqueue = dev_priv->counter;
784
785 if (BEGIN_LP_RING(4) == 0) {
786 OUT_RING(MI_STORE_DWORD_INDEX);
787 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
788 OUT_RING(dev_priv->counter);
789 OUT_RING(MI_USER_INTERRUPT);
790 ADVANCE_LP_RING();
791 }
792
793 return dev_priv->counter;
794}
795
796static int i915_wait_irq(struct drm_device * dev, int irq_nr)
797{
798 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
799 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
800 int ret = 0;
801 struct intel_ring_buffer *ring = LP_RING(dev_priv);
802
803 DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
804 READ_BREADCRUMB(dev_priv));
805
806 if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
807 if (master_priv->sarea_priv)
808 master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
809 return 0;
810 }
811
812 if (master_priv->sarea_priv)
813 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
814
815 if (ring->irq_get(ring)) {
816 DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
817 READ_BREADCRUMB(dev_priv) >= irq_nr);
818 ring->irq_put(ring);
819 } else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
820 ret = -EBUSY;
821
822 if (ret == -EBUSY) {
823 DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
824 READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
825 }
826
827 return ret;
828}
829
830/* Needs the lock as it touches the ring.
831 */
832static int i915_irq_emit(struct drm_device *dev, void *data,
833 struct drm_file *file_priv)
834{
835 drm_i915_private_t *dev_priv = dev->dev_private;
836 drm_i915_irq_emit_t *emit = data;
837 int result;
838
839 if (drm_core_check_feature(dev, DRIVER_MODESET))
840 return -ENODEV;
841
842 if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
843 DRM_ERROR("called with no initialization\n");
844 return -EINVAL;
845 }
846
847 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
848
849 mutex_lock(&dev->struct_mutex);
850 result = i915_emit_irq(dev);
851 mutex_unlock(&dev->struct_mutex);
852
853 if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
854 DRM_ERROR("copy_to_user\n");
855 return -EFAULT;
856 }
857
858 return 0;
859}
860
861/* Doesn't need the hardware lock.
862 */
863static int i915_irq_wait(struct drm_device *dev, void *data,
864 struct drm_file *file_priv)
865{
866 drm_i915_private_t *dev_priv = dev->dev_private;
867 drm_i915_irq_wait_t *irqwait = data;
868
869 if (drm_core_check_feature(dev, DRIVER_MODESET))
870 return -ENODEV;
871
872 if (!dev_priv) {
873 DRM_ERROR("called with no initialization\n");
874 return -EINVAL;
875 }
876
877 return i915_wait_irq(dev, irqwait->irq_seq);
878}
879
880static int i915_vblank_pipe_get(struct drm_device *dev, void *data,
881 struct drm_file *file_priv)
882{
883 drm_i915_private_t *dev_priv = dev->dev_private;
884 drm_i915_vblank_pipe_t *pipe = data;
885
886 if (drm_core_check_feature(dev, DRIVER_MODESET))
887 return -ENODEV;
888
889 if (!dev_priv) {
890 DRM_ERROR("called with no initialization\n");
891 return -EINVAL;
892 }
893
894 pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
895
896 return 0;
897}
898
899/**
900 * Schedule buffer swap at given vertical blank.
901 */
902static int i915_vblank_swap(struct drm_device *dev, void *data,
903 struct drm_file *file_priv)
904{
905 /* The delayed swap mechanism was fundamentally racy, and has been
906 * removed. The model was that the client requested a delayed flip/swap
907 * from the kernel, then waited for vblank before continuing to perform
908 * rendering. The problem was that the kernel might wake the client
909 * up before it dispatched the vblank swap (since the lock has to be
910 * held while touching the ringbuffer), in which case the client would
911 * clear and start the next frame before the swap occurred, and
912 * flicker would occur in addition to likely missing the vblank.
913 *
914 * In the absence of this ioctl, userland falls back to a correct path
915 * of waiting for a vblank, then dispatching the swap on its own.
916 * Context switching to userland and back is plenty fast enough for
917 * meeting the requirements of vblank swapping.
918 */
919 return -EINVAL;
920}
921
709static int i915_flip_bufs(struct drm_device *dev, void *data, 922static int i915_flip_bufs(struct drm_device *dev, void *data,
710 struct drm_file *file_priv) 923 struct drm_file *file_priv)
711{ 924{
712 int ret; 925 int ret;
713 926
927 if (drm_core_check_feature(dev, DRIVER_MODESET))
928 return -ENODEV;
929
714 DRM_DEBUG_DRIVER("%s\n", __func__); 930 DRM_DEBUG_DRIVER("%s\n", __func__);
715 931
716 RING_LOCK_TEST_WITH_RETURN(dev, file_priv); 932 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
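The DRI1 code moved into i915_dma.c above implements a simple breadcrumb handshake: i915_emit_irq() bumps a counter, stores it at I915_BREADCRUMB_INDEX in the status page and queues MI_USER_INTERRUPT, while i915_wait_irq() completes once the stored value reaches the requested sequence number. A userspace toy of just that protocol (an array stands in for the hardware status page):

    #include <stdio.h>

    #define I915_BREADCRUMB_INDEX 0x21

    /* An array stands in for the shared hardware status page. */
    static unsigned int status_page[0x40];
    static unsigned int counter;

    /* Emit side: bump the counter and store it in the breadcrumb slot; the
     * real code also queues MI_USER_INTERRUPT behind it on the ring. */
    static unsigned int emit_breadcrumb(void)
    {
            if (++counter > 0x7FFFFFFFu) /* wrap as i915_emit_irq() does */
                    counter = 1;
            status_page[I915_BREADCRUMB_INDEX] = counter;
            return counter;
    }

    /* Wait side: complete once the slot has reached the target seqno. */
    static int breadcrumb_passed(unsigned int irq_nr)
    {
            return status_page[I915_BREADCRUMB_INDEX] >= irq_nr;
    }

    int main(void)
    {
            unsigned int seq = emit_breadcrumb();

            printf("seq %u passed? %s\n", seq,
                   breadcrumb_passed(seq) ? "yes" : "no");
            return 0; /* prints: seq 1 passed? yes */
    }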
@@ -739,7 +955,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
739 value = dev->pdev->irq ? 1 : 0; 955 value = dev->pdev->irq ? 1 : 0;
740 break; 956 break;
741 case I915_PARAM_ALLOW_BATCHBUFFER: 957 case I915_PARAM_ALLOW_BATCHBUFFER:
742 value = dev_priv->allow_batchbuffer ? 1 : 0; 958 value = dev_priv->dri1.allow_batchbuffer ? 1 : 0;
743 break; 959 break;
744 case I915_PARAM_LAST_DISPATCH: 960 case I915_PARAM_LAST_DISPATCH:
745 value = READ_BREADCRUMB(dev_priv); 961 value = READ_BREADCRUMB(dev_priv);
@@ -748,7 +964,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
748 value = dev->pci_device; 964 value = dev->pci_device;
749 break; 965 break;
750 case I915_PARAM_HAS_GEM: 966 case I915_PARAM_HAS_GEM:
751 value = dev_priv->has_gem; 967 value = 1;
752 break; 968 break;
753 case I915_PARAM_NUM_FENCES_AVAIL: 969 case I915_PARAM_NUM_FENCES_AVAIL:
754 value = dev_priv->num_fence_regs - dev_priv->fence_reg_start; 970 value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
@@ -761,13 +977,13 @@ static int i915_getparam(struct drm_device *dev, void *data,
761 break; 977 break;
762 case I915_PARAM_HAS_EXECBUF2: 978 case I915_PARAM_HAS_EXECBUF2:
763 /* depends on GEM */ 979 /* depends on GEM */
764 value = dev_priv->has_gem; 980 value = 1;
765 break; 981 break;
766 case I915_PARAM_HAS_BSD: 982 case I915_PARAM_HAS_BSD:
767 value = HAS_BSD(dev); 983 value = intel_ring_initialized(&dev_priv->ring[VCS]);
768 break; 984 break;
769 case I915_PARAM_HAS_BLT: 985 case I915_PARAM_HAS_BLT:
770 value = HAS_BLT(dev); 986 value = intel_ring_initialized(&dev_priv->ring[BCS]);
771 break; 987 break;
772 case I915_PARAM_HAS_RELAXED_FENCING: 988 case I915_PARAM_HAS_RELAXED_FENCING:
773 value = 1; 989 value = 1;
@@ -787,6 +1003,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
787 case I915_PARAM_HAS_LLC: 1003 case I915_PARAM_HAS_LLC:
788 value = HAS_LLC(dev); 1004 value = HAS_LLC(dev);
789 break; 1005 break;
1006 case I915_PARAM_HAS_ALIASING_PPGTT:
1007 value = dev_priv->mm.aliasing_ppgtt ? 1 : 0;
1008 break;
790 default: 1009 default:
791 DRM_DEBUG_DRIVER("Unknown parameter %d\n", 1010 DRM_DEBUG_DRIVER("Unknown parameter %d\n",
792 param->param); 1011 param->param);
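Userspace discovers the new I915_PARAM_HAS_ALIASING_PPGTT (and the ring-presence parameters now derived from intel_ring_initialized()) through the usual GETPARAM round trip. A minimal sketch; treating ioctl failure as "not supported" keeps it safe on older kernels:

#include <sys/ioctl.h>
#include <i915_drm.h>

static int has_aliasing_ppgtt(int fd)
{
        int value = 0;
        drm_i915_getparam_t gp = {
                .param = I915_PARAM_HAS_ALIASING_PPGTT,
                .value = &value,
        };

        if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
                return 0;       /* old kernel: parameter unknown */
        return value;
}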
@@ -816,10 +1035,9 @@ static int i915_setparam(struct drm_device *dev, void *data,
816 case I915_SETPARAM_USE_MI_BATCHBUFFER_START: 1035 case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
817 break; 1036 break;
818 case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY: 1037 case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
819 dev_priv->tex_lru_log_granularity = param->value;
820 break; 1038 break;
821 case I915_SETPARAM_ALLOW_BATCHBUFFER: 1039 case I915_SETPARAM_ALLOW_BATCHBUFFER:
822 dev_priv->allow_batchbuffer = param->value; 1040 dev_priv->dri1.allow_batchbuffer = param->value ? 1 : 0;
823 break; 1041 break;
824 case I915_SETPARAM_NUM_USED_FENCES: 1042 case I915_SETPARAM_NUM_USED_FENCES:
825 if (param->value > dev_priv->num_fence_regs || 1043 if (param->value > dev_priv->num_fence_regs ||
@@ -844,6 +1062,9 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
844 drm_i915_hws_addr_t *hws = data; 1062 drm_i915_hws_addr_t *hws = data;
845 struct intel_ring_buffer *ring = LP_RING(dev_priv); 1063 struct intel_ring_buffer *ring = LP_RING(dev_priv);
846 1064
1065 if (drm_core_check_feature(dev, DRIVER_MODESET))
1066 return -ENODEV;
1067
847 if (!I915_NEED_GFX_HWS(dev)) 1068 if (!I915_NEED_GFX_HWS(dev))
848 return -EINVAL; 1069 return -EINVAL;
849 1070
@@ -861,23 +1082,17 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
861 1082
862 ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12); 1083 ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
863 1084
864 dev_priv->hws_map.offset = dev->agp->base + hws->addr; 1085 dev_priv->dri1.gfx_hws_cpu_addr = ioremap_wc(dev->agp->base + hws->addr,
865 dev_priv->hws_map.size = 4*1024; 1086 4096);
866 dev_priv->hws_map.type = 0; 1087 if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) {
867 dev_priv->hws_map.flags = 0;
868 dev_priv->hws_map.mtrr = 0;
869
870 drm_core_ioremap_wc(&dev_priv->hws_map, dev);
871 if (dev_priv->hws_map.handle == NULL) {
872 i915_dma_cleanup(dev); 1088 i915_dma_cleanup(dev);
873 ring->status_page.gfx_addr = 0; 1089 ring->status_page.gfx_addr = 0;
874 DRM_ERROR("can not ioremap virtual address for" 1090 DRM_ERROR("can not ioremap virtual address for"
875 " G33 hw status page\n"); 1091 " G33 hw status page\n");
876 return -ENOMEM; 1092 return -ENOMEM;
877 } 1093 }
878 ring->status_page.page_addr = 1094
879 (void __force __iomem *)dev_priv->hws_map.handle; 1095 memset_io(dev_priv->dri1.gfx_hws_cpu_addr, 0, PAGE_SIZE);
880 memset_io(ring->status_page.page_addr, 0, PAGE_SIZE);
881 I915_WRITE(HWS_PGA, ring->status_page.gfx_addr); 1096 I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
882 1097
883 DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n", 1098 DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
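The conversion above replaces the drm_map bookkeeping with a direct ioremap_wc() of the status page, which is the whole pattern in four lines. A reduced kernel-side sketch (map_hw_status_page is a made-up helper name; the caller must iounmap() on teardown, as the real cleanup path does):

static void __iomem *map_hw_status_page(struct drm_device *dev,
                                        unsigned long offset)
{
        void __iomem *hws;

        hws = ioremap_wc(dev->agp->base + offset, 4096);
        if (hws)
                memset_io(hws, 0, PAGE_SIZE);   /* start from a clean page */
        return hws;     /* NULL means -ENOMEM for the caller */
}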
@@ -1013,133 +1228,6 @@ intel_teardown_mchbar(struct drm_device *dev)
1013 release_resource(&dev_priv->mch_res); 1228 release_resource(&dev_priv->mch_res);
1014} 1229}
1015 1230
1016#define PTE_ADDRESS_MASK 0xfffff000
1017#define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */
1018#define PTE_MAPPING_TYPE_UNCACHED (0 << 1)
1019#define PTE_MAPPING_TYPE_DCACHE (1 << 1) /* i830 only */
1020#define PTE_MAPPING_TYPE_CACHED (3 << 1)
1021#define PTE_MAPPING_TYPE_MASK (3 << 1)
1022#define PTE_VALID (1 << 0)
1023
1024/**
1025 * i915_stolen_to_phys - take an offset into stolen memory and turn it into
1026 * a physical one
1027 * @dev: drm device
1028 * @offset: address to translate
1029 *
1030 * Some chip functions require allocations from stolen space and need the
1031 * physical address of the memory in question.
1032 */
1033static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset)
1034{
1035 struct drm_i915_private *dev_priv = dev->dev_private;
1036 struct pci_dev *pdev = dev_priv->bridge_dev;
1037 u32 base;
1038
1039#if 0
1040 /* On the machines I have tested the Graphics Base of Stolen Memory
1041 * is unreliable, so compute the base by subtracting the stolen memory
1042 * from the Top of Low Usable DRAM which is where the BIOS places
1043 * the graphics stolen memory.
1044 */
1045 if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
1046 /* top 32bits are reserved = 0 */
1047 pci_read_config_dword(pdev, 0xA4, &base);
1048 } else {
1049 /* XXX presume 8xx is the same as i915 */
1050 pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base);
1051 }
1052#else
1053 if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
1054 u16 val;
1055 pci_read_config_word(pdev, 0xb0, &val);
1056 base = val >> 4 << 20;
1057 } else {
1058 u8 val;
1059 pci_read_config_byte(pdev, 0x9c, &val);
1060 base = val >> 3 << 27;
1061 }
1062 base -= dev_priv->mm.gtt->stolen_size;
1063#endif
1064
1065 return base + offset;
1066}
1067
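For concreteness, the decode above with made-up numbers: on a gen4+ part a config word of 0x0800 at 0xb0 yields 0x0800 >> 4 << 20 = 128 MiB; subtracting, say, 8 MiB of stolen memory puts the stolen base at 120 MiB, so stolen offset 0x1000 translates to physical 120 MiB + 0x1000.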
1068static void i915_warn_stolen(struct drm_device *dev)
1069{
1070 DRM_ERROR("not enough stolen space for compressed buffer, disabling\n");
1071 DRM_ERROR("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
1072}
1073
1074static void i915_setup_compression(struct drm_device *dev, int size)
1075{
1076 struct drm_i915_private *dev_priv = dev->dev_private;
1077 struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
1078 unsigned long cfb_base;
1079 unsigned long ll_base = 0;
1080
1081 /* Just in case the BIOS is doing something questionable. */
1082 intel_disable_fbc(dev);
1083
1084 compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
1085 if (compressed_fb)
1086 compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
1087 if (!compressed_fb)
1088 goto err;
1089
1090 cfb_base = i915_stolen_to_phys(dev, compressed_fb->start);
1091 if (!cfb_base)
1092 goto err_fb;
1093
1094 if (!(IS_GM45(dev) || HAS_PCH_SPLIT(dev))) {
1095 compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
1096 4096, 4096, 0);
1097 if (compressed_llb)
1098 compressed_llb = drm_mm_get_block(compressed_llb,
1099 4096, 4096);
1100 if (!compressed_llb)
1101 goto err_fb;
1102
1103 ll_base = i915_stolen_to_phys(dev, compressed_llb->start);
1104 if (!ll_base)
1105 goto err_llb;
1106 }
1107
1108 dev_priv->cfb_size = size;
1109
1110 dev_priv->compressed_fb = compressed_fb;
1111 if (HAS_PCH_SPLIT(dev))
1112 I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
1113 else if (IS_GM45(dev)) {
1114 I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
1115 } else {
1116 I915_WRITE(FBC_CFB_BASE, cfb_base);
1117 I915_WRITE(FBC_LL_BASE, ll_base);
1118 dev_priv->compressed_llb = compressed_llb;
1119 }
1120
1121 DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n",
1122 cfb_base, ll_base, size >> 20);
1123 return;
1124
1125err_llb:
1126 drm_mm_put_block(compressed_llb);
1127err_fb:
1128 drm_mm_put_block(compressed_fb);
1129err:
1130 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
1131 i915_warn_stolen(dev);
1132}
1133
1134static void i915_cleanup_compression(struct drm_device *dev)
1135{
1136 struct drm_i915_private *dev_priv = dev->dev_private;
1137
1138 drm_mm_put_block(dev_priv->compressed_fb);
1139 if (dev_priv->compressed_llb)
1140 drm_mm_put_block(dev_priv->compressed_llb);
1141}
1142
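The removed allocator calls show the old two-step drm_mm idiom: search for a fitting hole, then claim it; both steps can fail. A minimal sketch of that idiom in isolation (stolen_alloc is a placeholder name):

static struct drm_mm_node *stolen_alloc(struct drm_mm *mm,
                                        unsigned long size)
{
        struct drm_mm_node *node;

        node = drm_mm_search_free(mm, size, 4096, 0);  /* 4k aligned */
        if (node)
                node = drm_mm_get_block(node, size, 4096);
        return node;    /* release with drm_mm_put_block() */
}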
1143/* true = enable decode, false = disable decoder */ 1231/* true = enable decode, false = disable decoder */
1144static unsigned int i915_vga_set_decode(void *cookie, bool state) 1232static unsigned int i915_vga_set_decode(void *cookie, bool state)
1145{ 1233{
@@ -1158,14 +1246,14 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
1158 struct drm_device *dev = pci_get_drvdata(pdev); 1246 struct drm_device *dev = pci_get_drvdata(pdev);
1159 pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; 1247 pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
1160 if (state == VGA_SWITCHEROO_ON) { 1248 if (state == VGA_SWITCHEROO_ON) {
1161 printk(KERN_INFO "i915: switched on\n"); 1249 pr_info("switched on\n");
1162 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 1250 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1163 /* i915 resume handler doesn't set to D0 */ 1251 /* i915 resume handler doesn't set to D0 */
1164 pci_set_power_state(dev->pdev, PCI_D0); 1252 pci_set_power_state(dev->pdev, PCI_D0);
1165 i915_resume(dev); 1253 i915_resume(dev);
1166 dev->switch_power_state = DRM_SWITCH_POWER_ON; 1254 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1167 } else { 1255 } else {
1168 printk(KERN_ERR "i915: switched off\n"); 1256 pr_err("switched off\n");
1169 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 1257 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1170 i915_suspend(dev, pmm); 1258 i915_suspend(dev, pmm);
1171 dev->switch_power_state = DRM_SWITCH_POWER_OFF; 1259 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
@@ -1183,88 +1271,11 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
1183 return can_switch; 1271 return can_switch;
1184} 1272}
1185 1273
1186static bool 1274static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
1187intel_enable_ppgtt(struct drm_device *dev) 1275 .set_gpu_state = i915_switcheroo_set_state,
1188{ 1276 .reprobe = NULL,
1189 if (i915_enable_ppgtt >= 0) 1277 .can_switch = i915_switcheroo_can_switch,
1190 return i915_enable_ppgtt; 1278};
1191
1192#ifdef CONFIG_INTEL_IOMMU
1193 /* Disable ppgtt on SNB if VT-d is on. */
1194 if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
1195 return false;
1196#endif
1197
1198 return true;
1199}
1200
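intel_enable_ppgtt() (removed here, with the logic folded into the GEM init path) is an instance of the common tri-state module-option idiom: a negative value means "auto", anything else forces the answer. The idiom in miniature, with placeholder names:

static bool feature_enabled(int param, bool hw_default)
{
        if (param >= 0)
                return param;      /* user forced it on or off */
        return hw_default;         /* per-hardware heuristic */
}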
1201static int i915_load_gem_init(struct drm_device *dev)
1202{
1203 struct drm_i915_private *dev_priv = dev->dev_private;
1204 unsigned long prealloc_size, gtt_size, mappable_size;
1205 int ret;
1206
1207 prealloc_size = dev_priv->mm.gtt->stolen_size;
1208 gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
1209 mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
1210
1211 /* Basic memrange allocator for stolen space */
1212 drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
1213
1214 mutex_lock(&dev->struct_mutex);
1215 if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
1216 /* PPGTT pdes are stolen from global gtt ptes, so shrink the
1217 * aperture accordingly when using aliasing ppgtt. */
1218 gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
1219 /* For paranoia keep the guard page in between. */
1220 gtt_size -= PAGE_SIZE;
1221
1222 i915_gem_do_init(dev, 0, mappable_size, gtt_size);
1223
1224 ret = i915_gem_init_aliasing_ppgtt(dev);
1225 if (ret) {
1226 mutex_unlock(&dev->struct_mutex);
1227 return ret;
1228 }
1229 } else {
1230 /* Let GEM Manage all of the aperture.
1231 *
1232 * However, leave one page at the end still bound to the scratch
1233 * page. There are a number of places where the hardware
1234 * apparently prefetches past the end of the object, and we've
1235 * seen multiple hangs with the GPU head pointer stuck in a
1236 * batchbuffer bound at the last page of the aperture. One page
1237 * should be enough to keep any prefetching inside of the
1238 * aperture.
1239 */
1240 i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE);
1241 }
1242
1243 ret = i915_gem_init_hw(dev);
1244 mutex_unlock(&dev->struct_mutex);
1245 if (ret) {
1246 i915_gem_cleanup_aliasing_ppgtt(dev);
1247 return ret;
1248 }
1249
1250 /* Try to set up FBC with a reasonable compressed buffer size */
1251 if (I915_HAS_FBC(dev) && i915_powersave) {
1252 int cfb_size;
1253
1254 /* Leave 1M for line length buffer & misc. */
1255
1256 /* Try to get a 32M buffer... */
1257 if (prealloc_size > (36*1024*1024))
1258 cfb_size = 32*1024*1024;
1259 else /* fall back to 7/8 of the stolen space */
1260 cfb_size = prealloc_size * 7 / 8;
1261 i915_setup_compression(dev, cfb_size);
1262 }
1263
1264 /* Allow hardware batchbuffers unless told otherwise. */
1265 dev_priv->allow_batchbuffer = 1;
1266 return 0;
1267}
1268 1279
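The FBC sizing heuristic being deleted is worth a worked example: with 64 MiB of stolen memory (> 36 MiB) the compressed buffer is capped at 32 MiB; with 16 MiB of stolen it falls back to 7/8 of that, i.e. 14 MiB, leaving the remainder for the line-length buffer and miscellany.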
1269static int i915_load_modeset_init(struct drm_device *dev) 1280static int i915_load_modeset_init(struct drm_device *dev)
1270{ 1281{
@@ -1288,22 +1299,22 @@ static int i915_load_modeset_init(struct drm_device *dev)
1288 1299
1289 intel_register_dsm_handler(); 1300 intel_register_dsm_handler();
1290 1301
1291 ret = vga_switcheroo_register_client(dev->pdev, 1302 ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops);
1292 i915_switcheroo_set_state,
1293 NULL,
1294 i915_switcheroo_can_switch);
1295 if (ret) 1303 if (ret)
1296 goto cleanup_vga_client; 1304 goto cleanup_vga_client;
1297 1305
1298 /* IIR "flip pending" bit means done if this bit is set */ 1306 /* Initialise stolen first so that we may reserve preallocated
1299 if (IS_GEN3(dev) && (I915_READ(ECOSKPD) & ECO_FLIP_DONE)) 1307 * objects for the BIOS to KMS transition.
1300 dev_priv->flip_pending_is_done = true; 1308 */
1309 ret = i915_gem_init_stolen(dev);
1310 if (ret)
1311 goto cleanup_vga_switcheroo;
1301 1312
1302 intel_modeset_init(dev); 1313 intel_modeset_init(dev);
1303 1314
1304 ret = i915_load_gem_init(dev); 1315 ret = i915_gem_init(dev);
1305 if (ret) 1316 if (ret)
1306 goto cleanup_vga_switcheroo; 1317 goto cleanup_gem_stolen;
1307 1318
1308 intel_modeset_gem_init(dev); 1319 intel_modeset_gem_init(dev);
1309 1320
@@ -1333,6 +1344,8 @@ cleanup_gem:
1333 i915_gem_cleanup_ringbuffer(dev); 1344 i915_gem_cleanup_ringbuffer(dev);
1334 mutex_unlock(&dev->struct_mutex); 1345 mutex_unlock(&dev->struct_mutex);
1335 i915_gem_cleanup_aliasing_ppgtt(dev); 1346 i915_gem_cleanup_aliasing_ppgtt(dev);
1347cleanup_gem_stolen:
1348 i915_gem_cleanup_stolen(dev);
1336cleanup_vga_switcheroo: 1349cleanup_vga_switcheroo:
1337 vga_switcheroo_unregister_client(dev->pdev); 1350 vga_switcheroo_unregister_client(dev->pdev);
1338cleanup_vga_client: 1351cleanup_vga_client:
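The new cleanup_gem_stolen label slots into the usual kernel unwind ladder: each failure jumps to the label that undoes everything initialised so far, and the labels run in reverse order of setup. The idiom in miniature (setup_a/setup_b/undo_a are stubs for illustration):

static int setup_a(void) { return 0; }
static int setup_b(void) { return -ENOMEM; }
static void undo_a(void) { }

static int load_example(void)
{
        int ret;

        ret = setup_a();
        if (ret)
                return ret;

        ret = setup_b();
        if (ret)
                goto cleanup_a;         /* unwind in reverse order */
        return 0;

cleanup_a:
        undo_a();
        return ret;
}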
@@ -1365,572 +1378,26 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
1365 master->driver_priv = NULL; 1378 master->driver_priv = NULL;
1366} 1379}
1367 1380
1368static void i915_pineview_get_mem_freq(struct drm_device *dev) 1381static void
1369{ 1382i915_mtrr_setup(struct drm_i915_private *dev_priv, unsigned long base,
1370 drm_i915_private_t *dev_priv = dev->dev_private; 1383 unsigned long size)
1371 u32 tmp;
1372
1373 tmp = I915_READ(CLKCFG);
1374
1375 switch (tmp & CLKCFG_FSB_MASK) {
1376 case CLKCFG_FSB_533:
1377 dev_priv->fsb_freq = 533; /* 133*4 */
1378 break;
1379 case CLKCFG_FSB_800:
1380 dev_priv->fsb_freq = 800; /* 200*4 */
1381 break;
1382 case CLKCFG_FSB_667:
1383 dev_priv->fsb_freq = 667; /* 167*4 */
1384 break;
1385 case CLKCFG_FSB_400:
1386 dev_priv->fsb_freq = 400; /* 100*4 */
1387 break;
1388 }
1389
1390 switch (tmp & CLKCFG_MEM_MASK) {
1391 case CLKCFG_MEM_533:
1392 dev_priv->mem_freq = 533;
1393 break;
1394 case CLKCFG_MEM_667:
1395 dev_priv->mem_freq = 667;
1396 break;
1397 case CLKCFG_MEM_800:
1398 dev_priv->mem_freq = 800;
1399 break;
1400 }
1401
1402 /* detect pineview DDR3 setting */
1403 tmp = I915_READ(CSHRDDR3CTL);
1404 dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
1405}
1406
1407static void i915_ironlake_get_mem_freq(struct drm_device *dev)
1408{
1409 drm_i915_private_t *dev_priv = dev->dev_private;
1410 u16 ddrpll, csipll;
1411
1412 ddrpll = I915_READ16(DDRMPLL1);
1413 csipll = I915_READ16(CSIPLL0);
1414
1415 switch (ddrpll & 0xff) {
1416 case 0xc:
1417 dev_priv->mem_freq = 800;
1418 break;
1419 case 0x10:
1420 dev_priv->mem_freq = 1066;
1421 break;
1422 case 0x14:
1423 dev_priv->mem_freq = 1333;
1424 break;
1425 case 0x18:
1426 dev_priv->mem_freq = 1600;
1427 break;
1428 default:
1429 DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
1430 ddrpll & 0xff);
1431 dev_priv->mem_freq = 0;
1432 break;
1433 }
1434
1435 dev_priv->r_t = dev_priv->mem_freq;
1436
1437 switch (csipll & 0x3ff) {
1438 case 0x00c:
1439 dev_priv->fsb_freq = 3200;
1440 break;
1441 case 0x00e:
1442 dev_priv->fsb_freq = 3733;
1443 break;
1444 case 0x010:
1445 dev_priv->fsb_freq = 4266;
1446 break;
1447 case 0x012:
1448 dev_priv->fsb_freq = 4800;
1449 break;
1450 case 0x014:
1451 dev_priv->fsb_freq = 5333;
1452 break;
1453 case 0x016:
1454 dev_priv->fsb_freq = 5866;
1455 break;
1456 case 0x018:
1457 dev_priv->fsb_freq = 6400;
1458 break;
1459 default:
1460 DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
1461 csipll & 0x3ff);
1462 dev_priv->fsb_freq = 0;
1463 break;
1464 }
1465
1466 if (dev_priv->fsb_freq == 3200) {
1467 dev_priv->c_m = 0;
1468 } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
1469 dev_priv->c_m = 1;
1470 } else {
1471 dev_priv->c_m = 2;
1472 }
1473}
1474
1475static const struct cparams {
1476 u16 i;
1477 u16 t;
1478 u16 m;
1479 u16 c;
1480} cparams[] = {
1481 { 1, 1333, 301, 28664 },
1482 { 1, 1066, 294, 24460 },
1483 { 1, 800, 294, 25192 },
1484 { 0, 1333, 276, 27605 },
1485 { 0, 1066, 276, 27605 },
1486 { 0, 800, 231, 23784 },
1487};
1488
1489unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
1490{
1491 u64 total_count, diff, ret;
1492 u32 count1, count2, count3, m = 0, c = 0;
1493 unsigned long now = jiffies_to_msecs(jiffies), diff1;
1494 int i;
1495
1496 diff1 = now - dev_priv->last_time1;
1497
1498 /* Prevent division-by-zero if we are asking too fast.
1499 * Also, we don't get interesting results if we are polling
1500 * faster than once in 10ms, so just return the saved value
1501 * in such cases.
1502 */
1503 if (diff1 <= 10)
1504 return dev_priv->chipset_power;
1505
1506 count1 = I915_READ(DMIEC);
1507 count2 = I915_READ(DDREC);
1508 count3 = I915_READ(CSIEC);
1509
1510 total_count = count1 + count2 + count3;
1511
1512 /* FIXME: handle per-counter overflow */
1513 if (total_count < dev_priv->last_count1) {
1514 diff = ~0UL - dev_priv->last_count1;
1515 diff += total_count;
1516 } else {
1517 diff = total_count - dev_priv->last_count1;
1518 }
1519
1520 for (i = 0; i < ARRAY_SIZE(cparams); i++) {
1521 if (cparams[i].i == dev_priv->c_m &&
1522 cparams[i].t == dev_priv->r_t) {
1523 m = cparams[i].m;
1524 c = cparams[i].c;
1525 break;
1526 }
1527 }
1528
1529 diff = div_u64(diff, diff1);
1530 ret = ((m * diff) + c);
1531 ret = div_u64(ret, 10);
1532
1533 dev_priv->last_count1 = total_count;
1534 dev_priv->last_time1 = now;
1535
1536 dev_priv->chipset_power = ret;
1537
1538 return ret;
1539}
1540
1541unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
1542{
1543 unsigned long m, x, b;
1544 u32 tsfs;
1545
1546 tsfs = I915_READ(TSFS);
1547
1548 m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
1549 x = I915_READ8(TR1);
1550
1551 b = tsfs & TSFS_INTR_MASK;
1552
1553 return ((m * x) / 127) - b;
1554}
1555
1556static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
1557{
1558 static const struct v_table {
1559 u16 vd; /* in .1 mil */
1560 u16 vm; /* in .1 mil */
1561 } v_table[] = {
1562 { 0, 0, },
1563 { 375, 0, },
1564 { 500, 0, },
1565 { 625, 0, },
1566 { 750, 0, },
1567 { 875, 0, },
1568 { 1000, 0, },
1569 { 1125, 0, },
1570 { 4125, 3000, },
1571 { 4125, 3000, },
1572 { 4125, 3000, },
1573 { 4125, 3000, },
1574 { 4125, 3000, },
1575 { 4125, 3000, },
1576 { 4125, 3000, },
1577 { 4125, 3000, },
1578 { 4125, 3000, },
1579 { 4125, 3000, },
1580 { 4125, 3000, },
1581 { 4125, 3000, },
1582 { 4125, 3000, },
1583 { 4125, 3000, },
1584 { 4125, 3000, },
1585 { 4125, 3000, },
1586 { 4125, 3000, },
1587 { 4125, 3000, },
1588 { 4125, 3000, },
1589 { 4125, 3000, },
1590 { 4125, 3000, },
1591 { 4125, 3000, },
1592 { 4125, 3000, },
1593 { 4125, 3000, },
1594 { 4250, 3125, },
1595 { 4375, 3250, },
1596 { 4500, 3375, },
1597 { 4625, 3500, },
1598 { 4750, 3625, },
1599 { 4875, 3750, },
1600 { 5000, 3875, },
1601 { 5125, 4000, },
1602 { 5250, 4125, },
1603 { 5375, 4250, },
1604 { 5500, 4375, },
1605 { 5625, 4500, },
1606 { 5750, 4625, },
1607 { 5875, 4750, },
1608 { 6000, 4875, },
1609 { 6125, 5000, },
1610 { 6250, 5125, },
1611 { 6375, 5250, },
1612 { 6500, 5375, },
1613 { 6625, 5500, },
1614 { 6750, 5625, },
1615 { 6875, 5750, },
1616 { 7000, 5875, },
1617 { 7125, 6000, },
1618 { 7250, 6125, },
1619 { 7375, 6250, },
1620 { 7500, 6375, },
1621 { 7625, 6500, },
1622 { 7750, 6625, },
1623 { 7875, 6750, },
1624 { 8000, 6875, },
1625 { 8125, 7000, },
1626 { 8250, 7125, },
1627 { 8375, 7250, },
1628 { 8500, 7375, },
1629 { 8625, 7500, },
1630 { 8750, 7625, },
1631 { 8875, 7750, },
1632 { 9000, 7875, },
1633 { 9125, 8000, },
1634 { 9250, 8125, },
1635 { 9375, 8250, },
1636 { 9500, 8375, },
1637 { 9625, 8500, },
1638 { 9750, 8625, },
1639 { 9875, 8750, },
1640 { 10000, 8875, },
1641 { 10125, 9000, },
1642 { 10250, 9125, },
1643 { 10375, 9250, },
1644 { 10500, 9375, },
1645 { 10625, 9500, },
1646 { 10750, 9625, },
1647 { 10875, 9750, },
1648 { 11000, 9875, },
1649 { 11125, 10000, },
1650 { 11250, 10125, },
1651 { 11375, 10250, },
1652 { 11500, 10375, },
1653 { 11625, 10500, },
1654 { 11750, 10625, },
1655 { 11875, 10750, },
1656 { 12000, 10875, },
1657 { 12125, 11000, },
1658 { 12250, 11125, },
1659 { 12375, 11250, },
1660 { 12500, 11375, },
1661 { 12625, 11500, },
1662 { 12750, 11625, },
1663 { 12875, 11750, },
1664 { 13000, 11875, },
1665 { 13125, 12000, },
1666 { 13250, 12125, },
1667 { 13375, 12250, },
1668 { 13500, 12375, },
1669 { 13625, 12500, },
1670 { 13750, 12625, },
1671 { 13875, 12750, },
1672 { 14000, 12875, },
1673 { 14125, 13000, },
1674 { 14250, 13125, },
1675 { 14375, 13250, },
1676 { 14500, 13375, },
1677 { 14625, 13500, },
1678 { 14750, 13625, },
1679 { 14875, 13750, },
1680 { 15000, 13875, },
1681 { 15125, 14000, },
1682 { 15250, 14125, },
1683 { 15375, 14250, },
1684 { 15500, 14375, },
1685 { 15625, 14500, },
1686 { 15750, 14625, },
1687 { 15875, 14750, },
1688 { 16000, 14875, },
1689 { 16125, 15000, },
1690 };
1691 if (dev_priv->info->is_mobile)
1692 return v_table[pxvid].vm;
1693 else
1694 return v_table[pxvid].vd;
1695}
1696
1697void i915_update_gfx_val(struct drm_i915_private *dev_priv)
1698{ 1384{
1699 struct timespec now, diff1; 1385 dev_priv->mm.gtt_mtrr = -1;
1700 u64 diff;
1701 unsigned long diffms;
1702 u32 count;
1703
1704 if (dev_priv->info->gen != 5)
1705 return;
1706
1707 getrawmonotonic(&now);
1708 diff1 = timespec_sub(now, dev_priv->last_time2);
1709 1386
1710 /* Don't divide by 0 */ 1387#if defined(CONFIG_X86_PAT)
1711 diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000; 1388 if (cpu_has_pat)
1712 if (!diffms)
1713 return; 1389 return;
1390#endif
1714 1391
1715 count = I915_READ(GFXEC); 1392 /* Set up a WC MTRR for non-PAT systems. This is more common than
1716 1393 * one would think, because the kernel disables PAT on first
1717 if (count < dev_priv->last_count2) { 1394 * generation Core chips because WC PAT gets overridden by a UC
1718 diff = ~0UL - dev_priv->last_count2; 1395 * MTRR if present. Even if a UC MTRR isn't present.
1719 diff += count; 1396 */
1720 } else { 1397 dev_priv->mm.gtt_mtrr = mtrr_add(base, size, MTRR_TYPE_WRCOMB, 1);
1721 diff = count - dev_priv->last_count2; 1398 if (dev_priv->mm.gtt_mtrr < 0) {
1722 } 1399 DRM_INFO("MTRR allocation failed. Graphics "
1723 1400 "performance may suffer.\n");
1724 dev_priv->last_count2 = count;
1725 dev_priv->last_time2 = now;
1726
1727 /* More magic constants... */
1728 diff = diff * 1181;
1729 diff = div_u64(diff, diffms * 10);
1730 dev_priv->gfx_power = diff;
1731}
1732
1733unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
1734{
1735 unsigned long t, corr, state1, corr2, state2;
1736 u32 pxvid, ext_v;
1737
1738 pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
1739 pxvid = (pxvid >> 24) & 0x7f;
1740 ext_v = pvid_to_extvid(dev_priv, pxvid);
1741
1742 state1 = ext_v;
1743
1744 t = i915_mch_val(dev_priv);
1745
1746 /* Revel in the empirically derived constants */
1747
1748 /* Correction factor in 1/100000 units */
1749 if (t > 80)
1750 corr = ((t * 2349) + 135940);
1751 else if (t >= 50)
1752 corr = ((t * 964) + 29317);
1753 else /* < 50 */
1754 corr = ((t * 301) + 1004);
1755
1756 corr = corr * ((150142 * state1) / 10000 - 78642);
1757 corr /= 100000;
1758 corr2 = (corr * dev_priv->corr);
1759
1760 state2 = (corr2 * state1) / 10000;
1761 state2 /= 100; /* convert to mW */
1762
1763 i915_update_gfx_val(dev_priv);
1764
1765 return dev_priv->gfx_power + state2;
1766}
1767
1768/* Global for IPS driver to get at the current i915 device */
1769static struct drm_i915_private *i915_mch_dev;
1770/*
1771 * Lock protecting IPS related data structures
1772 * - i915_mch_dev
1773 * - dev_priv->max_delay
1774 * - dev_priv->min_delay
1775 * - dev_priv->fmax
1776 * - dev_priv->gpu_busy
1777 */
1778static DEFINE_SPINLOCK(mchdev_lock);
1779
1780/**
1781 * i915_read_mch_val - return value for IPS use
1782 *
1783 * Calculate and return a value for the IPS driver to use when deciding whether
1784 * we have thermal and power headroom to increase CPU or GPU power budget.
1785 */
1786unsigned long i915_read_mch_val(void)
1787{
1788 struct drm_i915_private *dev_priv;
1789 unsigned long chipset_val, graphics_val, ret = 0;
1790
1791 spin_lock(&mchdev_lock);
1792 if (!i915_mch_dev)
1793 goto out_unlock;
1794 dev_priv = i915_mch_dev;
1795
1796 chipset_val = i915_chipset_val(dev_priv);
1797 graphics_val = i915_gfx_val(dev_priv);
1798
1799 ret = chipset_val + graphics_val;
1800
1801out_unlock:
1802 spin_unlock(&mchdev_lock);
1803
1804 return ret;
1805}
1806EXPORT_SYMBOL_GPL(i915_read_mch_val);
1807
1808/**
1809 * i915_gpu_raise - raise GPU frequency limit
1810 *
1811 * Raise the limit; IPS indicates we have thermal headroom.
1812 */
1813bool i915_gpu_raise(void)
1814{
1815 struct drm_i915_private *dev_priv;
1816 bool ret = true;
1817
1818 spin_lock(&mchdev_lock);
1819 if (!i915_mch_dev) {
1820 ret = false;
1821 goto out_unlock;
1822 }
1823 dev_priv = i915_mch_dev;
1824
1825 if (dev_priv->max_delay > dev_priv->fmax)
1826 dev_priv->max_delay--;
1827
1828out_unlock:
1829 spin_unlock(&mchdev_lock);
1830
1831 return ret;
1832}
1833EXPORT_SYMBOL_GPL(i915_gpu_raise);
1834
1835/**
1836 * i915_gpu_lower - lower GPU frequency limit
1837 *
1838 * IPS indicates we're close to a thermal limit, so throttle back the GPU
1839 * frequency maximum.
1840 */
1841bool i915_gpu_lower(void)
1842{
1843 struct drm_i915_private *dev_priv;
1844 bool ret = true;
1845
1846 spin_lock(&mchdev_lock);
1847 if (!i915_mch_dev) {
1848 ret = false;
1849 goto out_unlock;
1850 }
1851 dev_priv = i915_mch_dev;
1852
1853 if (dev_priv->max_delay < dev_priv->min_delay)
1854 dev_priv->max_delay++;
1855
1856out_unlock:
1857 spin_unlock(&mchdev_lock);
1858
1859 return ret;
1860}
1861EXPORT_SYMBOL_GPL(i915_gpu_lower);
1862
1863/**
1864 * i915_gpu_busy - indicate GPU busy status to IPS
1865 *
1866 * Tell the IPS driver whether or not the GPU is busy.
1867 */
1868bool i915_gpu_busy(void)
1869{
1870 struct drm_i915_private *dev_priv;
1871 bool ret = false;
1872
1873 spin_lock(&mchdev_lock);
1874 if (!i915_mch_dev)
1875 goto out_unlock;
1876 dev_priv = i915_mch_dev;
1877
1878 ret = dev_priv->busy;
1879
1880out_unlock:
1881 spin_unlock(&mchdev_lock);
1882
1883 return ret;
1884}
1885EXPORT_SYMBOL_GPL(i915_gpu_busy);
1886
1887/**
1888 * i915_gpu_turbo_disable - disable graphics turbo
1889 *
1890 * Disable graphics turbo by resetting the max frequency and setting the
1891 * current frequency to the default.
1892 */
1893bool i915_gpu_turbo_disable(void)
1894{
1895 struct drm_i915_private *dev_priv;
1896 bool ret = true;
1897
1898 spin_lock(&mchdev_lock);
1899 if (!i915_mch_dev) {
1900 ret = false;
1901 goto out_unlock;
1902 }
1903 dev_priv = i915_mch_dev;
1904
1905 dev_priv->max_delay = dev_priv->fstart;
1906
1907 if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart))
1908 ret = false;
1909
1910out_unlock:
1911 spin_unlock(&mchdev_lock);
1912
1913 return ret;
1914}
1915EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
1916
1917/**
1918 * Tells the intel_ips driver that the i915 driver is now loaded, if
1919 * IPS got loaded first.
1920 *
1921 * This awkward dance is so that neither module has to depend on the
1922 * other in order for IPS to do the appropriate communication of
1923 * GPU turbo limits to i915.
1924 */
1925static void
1926ips_ping_for_i915_load(void)
1927{
1928 void (*link)(void);
1929
1930 link = symbol_get(ips_link_to_i915_driver);
1931 if (link) {
1932 link();
1933 symbol_put(ips_link_to_i915_driver);
1934 } 1401 }
1935} 1402}
1936 1403
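symbol_get() is what lets this dance stay loose: it returns NULL if intel_ips isn't loaded and otherwise takes a reference on the exporting module, which symbol_put() drops, so neither module needs a link-time dependency on the other.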
@@ -1948,8 +1415,16 @@ ips_ping_for_i915_load(void)
1948int i915_driver_load(struct drm_device *dev, unsigned long flags) 1415int i915_driver_load(struct drm_device *dev, unsigned long flags)
1949{ 1416{
1950 struct drm_i915_private *dev_priv; 1417 struct drm_i915_private *dev_priv;
1418 struct intel_device_info *info;
1951 int ret = 0, mmio_bar; 1419 int ret = 0, mmio_bar;
1952 uint32_t agp_size; 1420 uint32_t aperture_size;
1421
1422 info = (struct intel_device_info *) flags;
1423
1424 /* Refuse to load on gen6+ without kms enabled. */
1425 if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET))
1426 return -ENODEV;
1427
1953 1428
1954 /* i915 has 4 more counters */ 1429 /* i915 has 4 more counters */
1955 dev->counters += 4; 1430 dev->counters += 4;
@@ -1964,7 +1439,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1964 1439
1965 dev->dev_private = (void *)dev_priv; 1440 dev->dev_private = (void *)dev_priv;
1966 dev_priv->dev = dev; 1441 dev_priv->dev = dev;
1967 dev_priv->info = (struct intel_device_info *) flags; 1442 dev_priv->info = info;
1968 1443
1969 if (i915_get_bridge_dev(dev)) { 1444 if (i915_get_bridge_dev(dev)) {
1970 ret = -EIO; 1445 ret = -EIO;
@@ -2003,27 +1478,16 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
2003 goto out_rmmap; 1478 goto out_rmmap;
2004 } 1479 }
2005 1480
2006 agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; 1481 aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
2007 1482
2008 dev_priv->mm.gtt_mapping = 1483 dev_priv->mm.gtt_mapping =
2009 io_mapping_create_wc(dev->agp->base, agp_size); 1484 io_mapping_create_wc(dev->agp->base, aperture_size);
2010 if (dev_priv->mm.gtt_mapping == NULL) { 1485 if (dev_priv->mm.gtt_mapping == NULL) {
2011 ret = -EIO; 1486 ret = -EIO;
2012 goto out_rmmap; 1487 goto out_rmmap;
2013 } 1488 }
2014 1489
2015 /* Set up a WC MTRR for non-PAT systems. This is more common than 1490 i915_mtrr_setup(dev_priv, dev->agp->base, aperture_size);
2016 * one would think, because the kernel disables PAT on first
2017 * generation Core chips because WC PAT gets overridden by a UC
2018 * MTRR if present. Even if a UC MTRR isn't present.
2019 */
2020 dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base,
2021 agp_size,
2022 MTRR_TYPE_WRCOMB, 1);
2023 if (dev_priv->mm.gtt_mtrr < 0) {
2024 DRM_INFO("MTRR allocation failed. Graphics "
2025 "performance may suffer.\n");
2026 }
2027 1491
2028 /* The i915 workqueue is primarily used for batched retirement of 1492 /* The i915 workqueue is primarily used for batched retirement of
2029 * requests (and thus managing bo) once the task has been completed 1493 * requests (and thus managing bo) once the task has been completed
@@ -2047,9 +1511,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
2047 goto out_mtrrfree; 1511 goto out_mtrrfree;
2048 } 1512 }
2049 1513
2050 /* enable GEM by default */
2051 dev_priv->has_gem = 1;
2052
2053 intel_irq_init(dev); 1514 intel_irq_init(dev);
2054 1515
2055 /* Try to make sure MCHBAR is enabled before poking at it */ 1516 /* Try to make sure MCHBAR is enabled before poking at it */
@@ -2069,11 +1530,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
2069 goto out_gem_unload; 1530 goto out_gem_unload;
2070 } 1531 }
2071 1532
2072 if (IS_PINEVIEW(dev))
2073 i915_pineview_get_mem_freq(dev);
2074 else if (IS_GEN5(dev))
2075 i915_ironlake_get_mem_freq(dev);
2076
2077 /* On the 945G/GM, the chipset reports the MSI capability on the 1533 /* On the 945G/GM, the chipset reports the MSI capability on the
2078 * integrated graphics even though the support isn't actually there 1534 * integrated graphics even though the support isn't actually there
2079 * according to the published specs. It doesn't appear to function 1535 * according to the published specs. It doesn't appear to function
@@ -2093,7 +1549,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
2093 spin_lock_init(&dev_priv->error_lock); 1549 spin_lock_init(&dev_priv->error_lock);
2094 spin_lock_init(&dev_priv->rps_lock); 1550 spin_lock_init(&dev_priv->rps_lock);
2095 1551
2096 if (IS_IVYBRIDGE(dev)) 1552 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
2097 dev_priv->num_pipe = 3; 1553 dev_priv->num_pipe = 3;
2098 else if (IS_MOBILE(dev) || !IS_GEN2(dev)) 1554 else if (IS_MOBILE(dev) || !IS_GEN2(dev))
2099 dev_priv->num_pipe = 2; 1555 dev_priv->num_pipe = 2;
@@ -2117,6 +1573,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
2117 } 1573 }
2118 } 1574 }
2119 1575
1576 i915_setup_sysfs(dev);
1577
2120 /* Must be done after probing outputs */ 1578 /* Must be done after probing outputs */
2121 intel_opregion_init(dev); 1579 intel_opregion_init(dev);
2122 acpi_video_register(); 1580 acpi_video_register();
@@ -2124,14 +1582,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
2124 setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed, 1582 setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
2125 (unsigned long) dev); 1583 (unsigned long) dev);
2126 1584
2127 if (IS_GEN5(dev)) { 1585 if (IS_GEN5(dev))
2128 spin_lock(&mchdev_lock); 1586 intel_gpu_ips_init(dev_priv);
2129 i915_mch_dev = dev_priv;
2130 dev_priv->mchdev_lock = &mchdev_lock;
2131 spin_unlock(&mchdev_lock);
2132
2133 ips_ping_for_i915_load();
2134 }
2135 1587
2136 return 0; 1588 return 0;
2137 1589
@@ -2166,17 +1618,18 @@ int i915_driver_unload(struct drm_device *dev)
2166 struct drm_i915_private *dev_priv = dev->dev_private; 1618 struct drm_i915_private *dev_priv = dev->dev_private;
2167 int ret; 1619 int ret;
2168 1620
2169 spin_lock(&mchdev_lock); 1621 intel_gpu_ips_teardown();
2170 i915_mch_dev = NULL; 1622
2171 spin_unlock(&mchdev_lock); 1623 i915_teardown_sysfs(dev);
2172 1624
2173 if (dev_priv->mm.inactive_shrinker.shrink) 1625 if (dev_priv->mm.inactive_shrinker.shrink)
2174 unregister_shrinker(&dev_priv->mm.inactive_shrinker); 1626 unregister_shrinker(&dev_priv->mm.inactive_shrinker);
2175 1627
2176 mutex_lock(&dev->struct_mutex); 1628 mutex_lock(&dev->struct_mutex);
2177 ret = i915_gpu_idle(dev, true); 1629 ret = i915_gpu_idle(dev);
2178 if (ret) 1630 if (ret)
2179 DRM_ERROR("failed to idle hardware: %d\n", ret); 1631 DRM_ERROR("failed to idle hardware: %d\n", ret);
1632 i915_gem_retire_requests(dev);
2180 mutex_unlock(&dev->struct_mutex); 1633 mutex_unlock(&dev->struct_mutex);
2181 1634
2182 /* Cancel the retire work handler, which should be idle now. */ 1635 /* Cancel the retire work handler, which should be idle now. */
@@ -2228,8 +1681,7 @@ int i915_driver_unload(struct drm_device *dev)
2228 i915_gem_cleanup_ringbuffer(dev); 1681 i915_gem_cleanup_ringbuffer(dev);
2229 mutex_unlock(&dev->struct_mutex); 1682 mutex_unlock(&dev->struct_mutex);
2230 i915_gem_cleanup_aliasing_ppgtt(dev); 1683 i915_gem_cleanup_aliasing_ppgtt(dev);
2231 if (I915_HAS_FBC(dev) && i915_powersave) 1684 i915_gem_cleanup_stolen(dev);
2232 i915_cleanup_compression(dev);
2233 drm_mm_takedown(&dev_priv->mm.stolen); 1685 drm_mm_takedown(&dev_priv->mm.stolen);
2234 1686
2235 intel_cleanup_overlay(dev); 1687 intel_cleanup_overlay(dev);
@@ -2277,7 +1729,7 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file)
2277 * mode setting case, we want to restore the kernel's initial mode (just 1729 * mode setting case, we want to restore the kernel's initial mode (just
2278 * in case the last client left us in a bad state). 1730 * in case the last client left us in a bad state).
2279 * 1731 *
2280 * Additionally, in the non-mode setting case, we'll tear down the AGP 1732 * Additionally, in the non-mode setting case, we'll tear down the GTT
2281 * and DMA structures, since the kernel won't be using them, and clean 1733
2282 * up any GEM state. 1734 * up any GEM state.
2283 */ 1735 */
@@ -2322,7 +1774,7 @@ struct drm_ioctl_desc i915_ioctls[] = {
2322 DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1774 DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2323 DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH), 1775 DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
2324 DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1776 DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2325 DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1777 DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2326 DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH), 1778 DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
2327 DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH), 1779 DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
2328 DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1780 DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -2355,16 +1807,10 @@ struct drm_ioctl_desc i915_ioctls[] = {
2355 1807
2356int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); 1808int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
2357 1809
2358/** 1810/*
2359 * Determine if the device really is AGP or not. 1811 * This is really ugly: Because old userspace abused the linux agp interface to
2360 * 1812 * manage the gtt, we need to claim that all intel devices are agp.
2361 * All Intel graphics chipsets are treated as AGP, even if they are really 1813 * Otherwise the drm core refuses to initialize the agp support code.
2362 * PCI-e.
2363 *
2364 * \param dev The device to be tested.
2365 *
2366 * \returns
2367 * A value of 1 is always returned to indicate every i9x5 is AGP.
2368 */ 1814 */
2369int i915_driver_device_is_agp(struct drm_device * dev) 1815int i915_driver_device_is_agp(struct drm_device * dev)
2370{ 1816{
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index ae8a64f9f845..238a52165833 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -84,6 +84,12 @@ MODULE_PARM_DESC(lvds_downclock,
84 "Use panel (LVDS/eDP) downclocking for power savings " 84 "Use panel (LVDS/eDP) downclocking for power savings "
85 "(default: false)"); 85 "(default: false)");
86 86
87int i915_lvds_channel_mode __read_mostly;
88module_param_named(lvds_channel_mode, i915_lvds_channel_mode, int, 0600);
89MODULE_PARM_DESC(lvds_channel_mode,
90 "Specify LVDS channel mode "
91 "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");
92
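With perms 0600 the new option is settable at boot (i915.lvds_channel_mode=2 on the kernel command line, or as a modprobe option) and writable afterwards by root through /sys/module/i915/parameters/lvds_channel_mode.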
87int i915_panel_use_ssc __read_mostly = -1; 93int i915_panel_use_ssc __read_mostly = -1;
88module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600); 94module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
89MODULE_PARM_DESC(lvds_use_ssc, 95MODULE_PARM_DESC(lvds_use_ssc,
@@ -93,8 +99,8 @@ MODULE_PARM_DESC(lvds_use_ssc,
93int i915_vbt_sdvo_panel_type __read_mostly = -1; 99int i915_vbt_sdvo_panel_type __read_mostly = -1;
94module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600); 100module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600);
95MODULE_PARM_DESC(vbt_sdvo_panel_type, 101MODULE_PARM_DESC(vbt_sdvo_panel_type,
96 "Override selection of SDVO panel mode in the VBT " 102 "Override/Ignore selection of SDVO panel mode in the VBT "
97 "(default: auto)"); 103 "(-2=ignore, -1=auto [default], index in VBT BIOS table)");
98 104
99static bool i915_try_reset __read_mostly = true; 105static bool i915_try_reset __read_mostly = true;
100module_param_named(reset, i915_try_reset, bool, 0600); 106module_param_named(reset, i915_try_reset, bool, 0600);
@@ -209,6 +215,7 @@ static const struct intel_device_info intel_ironlake_d_info = {
209 .gen = 5, 215 .gen = 5,
210 .need_gfx_hws = 1, .has_hotplug = 1, 216 .need_gfx_hws = 1, .has_hotplug = 1,
211 .has_bsd_ring = 1, 217 .has_bsd_ring = 1,
218 .has_pch_split = 1,
212}; 219};
213 220
214static const struct intel_device_info intel_ironlake_m_info = { 221static const struct intel_device_info intel_ironlake_m_info = {
@@ -216,6 +223,7 @@ static const struct intel_device_info intel_ironlake_m_info = {
216 .need_gfx_hws = 1, .has_hotplug = 1, 223 .need_gfx_hws = 1, .has_hotplug = 1,
217 .has_fbc = 1, 224 .has_fbc = 1,
218 .has_bsd_ring = 1, 225 .has_bsd_ring = 1,
226 .has_pch_split = 1,
219}; 227};
220 228
221static const struct intel_device_info intel_sandybridge_d_info = { 229static const struct intel_device_info intel_sandybridge_d_info = {
@@ -224,6 +232,7 @@ static const struct intel_device_info intel_sandybridge_d_info = {
224 .has_bsd_ring = 1, 232 .has_bsd_ring = 1,
225 .has_blt_ring = 1, 233 .has_blt_ring = 1,
226 .has_llc = 1, 234 .has_llc = 1,
235 .has_pch_split = 1,
227}; 236};
228 237
229static const struct intel_device_info intel_sandybridge_m_info = { 238static const struct intel_device_info intel_sandybridge_m_info = {
@@ -233,6 +242,7 @@ static const struct intel_device_info intel_sandybridge_m_info = {
233 .has_bsd_ring = 1, 242 .has_bsd_ring = 1,
234 .has_blt_ring = 1, 243 .has_blt_ring = 1,
235 .has_llc = 1, 244 .has_llc = 1,
245 .has_pch_split = 1,
236}; 246};
237 247
238static const struct intel_device_info intel_ivybridge_d_info = { 248static const struct intel_device_info intel_ivybridge_d_info = {
@@ -241,6 +251,7 @@ static const struct intel_device_info intel_ivybridge_d_info = {
241 .has_bsd_ring = 1, 251 .has_bsd_ring = 1,
242 .has_blt_ring = 1, 252 .has_blt_ring = 1,
243 .has_llc = 1, 253 .has_llc = 1,
254 .has_pch_split = 1,
244}; 255};
245 256
246static const struct intel_device_info intel_ivybridge_m_info = { 257static const struct intel_device_info intel_ivybridge_m_info = {
@@ -250,6 +261,43 @@ static const struct intel_device_info intel_ivybridge_m_info = {
250 .has_bsd_ring = 1, 261 .has_bsd_ring = 1,
251 .has_blt_ring = 1, 262 .has_blt_ring = 1,
252 .has_llc = 1, 263 .has_llc = 1,
264 .has_pch_split = 1,
265};
266
267static const struct intel_device_info intel_valleyview_m_info = {
268 .gen = 7, .is_mobile = 1,
269 .need_gfx_hws = 1, .has_hotplug = 1,
270 .has_fbc = 0,
271 .has_bsd_ring = 1,
272 .has_blt_ring = 1,
273 .is_valleyview = 1,
274};
275
276static const struct intel_device_info intel_valleyview_d_info = {
277 .gen = 7,
278 .need_gfx_hws = 1, .has_hotplug = 1,
279 .has_fbc = 0,
280 .has_bsd_ring = 1,
281 .has_blt_ring = 1,
282 .is_valleyview = 1,
283};
284
285static const struct intel_device_info intel_haswell_d_info = {
286 .is_haswell = 1, .gen = 7,
287 .need_gfx_hws = 1, .has_hotplug = 1,
288 .has_bsd_ring = 1,
289 .has_blt_ring = 1,
290 .has_llc = 1,
291 .has_pch_split = 1,
292};
293
294static const struct intel_device_info intel_haswell_m_info = {
295 .is_haswell = 1, .gen = 7, .is_mobile = 1,
296 .need_gfx_hws = 1, .has_hotplug = 1,
297 .has_bsd_ring = 1,
298 .has_blt_ring = 1,
299 .has_llc = 1,
300 .has_pch_split = 1,
253}; 301};
254 302
255static const struct pci_device_id pciidlist[] = { /* aka */ 303static const struct pci_device_id pciidlist[] = { /* aka */
@@ -297,6 +345,13 @@ static const struct pci_device_id pciidlist[] = { /* aka */
297 INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */ 345 INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
298 INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */ 346 INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
299 INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */ 347 INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
348 INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */
349 INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */
350 INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */
351 INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */
352 INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */
353 INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */
354 INTEL_VGA_DEVICE(0x0c16, &intel_haswell_d_info), /* SDV */
300 {0, 0, 0} 355 {0, 0, 0}
301}; 356};
302 357
@@ -308,6 +363,7 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
308#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 363#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
309#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 364#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
310#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00 365#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
366#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
311 367
312void intel_detect_pch(struct drm_device *dev) 368void intel_detect_pch(struct drm_device *dev)
313{ 369{
@@ -328,20 +384,45 @@ void intel_detect_pch(struct drm_device *dev)
328 384
329 if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) { 385 if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
330 dev_priv->pch_type = PCH_IBX; 386 dev_priv->pch_type = PCH_IBX;
387 dev_priv->num_pch_pll = 2;
331 DRM_DEBUG_KMS("Found Ibex Peak PCH\n"); 388 DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
332 } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) { 389 } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
333 dev_priv->pch_type = PCH_CPT; 390 dev_priv->pch_type = PCH_CPT;
391 dev_priv->num_pch_pll = 2;
334 DRM_DEBUG_KMS("Found CougarPoint PCH\n"); 392 DRM_DEBUG_KMS("Found CougarPoint PCH\n");
335 } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) { 393 } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
336 /* PantherPoint is CPT compatible */ 394 /* PantherPoint is CPT compatible */
337 dev_priv->pch_type = PCH_CPT; 395 dev_priv->pch_type = PCH_CPT;
396 dev_priv->num_pch_pll = 2;
338 DRM_DEBUG_KMS("Found PantherPoint PCH\n"); 397
398 } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
399 dev_priv->pch_type = PCH_LPT;
400 dev_priv->num_pch_pll = 0;
401 DRM_DEBUG_KMS("Found LynxPoint PCH\n");
339 } 402 }
403 BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS);
340 } 404 }
341 pci_dev_put(pch); 405 pci_dev_put(pch);
342 } 406 }
343} 407}
344 408
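The ID_TYPE constants above are PCI device ids with the SKU byte masked off, which is how one ISA-bridge lookup covers every variant of a PCH generation. A sketch of the match (is_lpt is a placeholder name):

static bool is_lpt(struct pci_dev *pch)
{
        unsigned short id = pch->device & 0xff00;   /* drop SKU bits */

        return id == INTEL_PCH_LPT_DEVICE_ID_TYPE;  /* 0x8c00 */
}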
409bool i915_semaphore_is_enabled(struct drm_device *dev)
410{
411 if (INTEL_INFO(dev)->gen < 6)
412 return 0;
413
414 if (i915_semaphores >= 0)
415 return i915_semaphores;
416
417#ifdef CONFIG_INTEL_IOMMU
418 /* Disable semaphores on SNB when IO remapping is on */
419 if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
420 return false;
421#endif
422
423 return 1;
424}
425
345void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) 426void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
346{ 427{
347 int count; 428 int count;
@@ -366,7 +447,7 @@ void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
366 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1)) 447 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1))
367 udelay(10); 448 udelay(10);
368 449
369 I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 1); 450 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1));
370 POSTING_READ(FORCEWAKE_MT); 451 POSTING_READ(FORCEWAKE_MT);
371 452
372 count = 0; 453 count = 0;
@@ -408,7 +489,7 @@ void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
408 489
409void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv) 490void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
410{ 491{
411 I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 0); 492 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1));
412 /* The below doubles as a POSTING_READ */ 493 /* The below doubles as a POSTING_READ */
413 gen6_gt_check_fifodbg(dev_priv); 494 gen6_gt_check_fifodbg(dev_priv);
414} 495}
@@ -446,6 +527,31 @@ int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
446 return ret; 527 return ret;
447} 528}
448 529
530void vlv_force_wake_get(struct drm_i915_private *dev_priv)
531{
532 int count;
533
534 count = 0;
535
536 /* Already awake? */
537 if ((I915_READ(0x130094) & 0xa1) == 0xa1)
538 return;
539
540 I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffffffff);
541 POSTING_READ(FORCEWAKE_VLV);
542
543 count = 0;
544 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0)
545 udelay(10);
546}
547
548void vlv_force_wake_put(struct drm_i915_private *dev_priv)
549{
550 I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffff0000);
551 /* FIXME: confirm VLV behavior with Punit folks */
552 POSTING_READ(FORCEWAKE_VLV);
553}
554
449static int i915_drm_freeze(struct drm_device *dev) 555static int i915_drm_freeze(struct drm_device *dev)
450{ 556{
451 struct drm_i915_private *dev_priv = dev->dev_private; 557 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -525,15 +631,16 @@ static int i915_drm_thaw(struct drm_device *dev)
525 631
526 /* KMS EnterVT equivalent */ 632 /* KMS EnterVT equivalent */
527 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 633 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
634 if (HAS_PCH_SPLIT(dev))
635 ironlake_init_pch_refclk(dev);
636
528 mutex_lock(&dev->struct_mutex); 637 mutex_lock(&dev->struct_mutex);
529 dev_priv->mm.suspended = 0; 638 dev_priv->mm.suspended = 0;
530 639
531 error = i915_gem_init_hw(dev); 640 error = i915_gem_init_hw(dev);
532 mutex_unlock(&dev->struct_mutex); 641 mutex_unlock(&dev->struct_mutex);
533 642
534 if (HAS_PCH_SPLIT(dev)) 643 intel_modeset_init_hw(dev);
535 ironlake_init_pch_refclk(dev);
536
537 drm_mode_config_reset(dev); 644 drm_mode_config_reset(dev);
538 drm_irq_install(dev); 645 drm_irq_install(dev);
539 646
@@ -541,9 +648,6 @@ static int i915_drm_thaw(struct drm_device *dev)
541 mutex_lock(&dev->mode_config.mutex); 648 mutex_lock(&dev->mode_config.mutex);
542 drm_helper_resume_force_mode(dev); 649 drm_helper_resume_force_mode(dev);
543 mutex_unlock(&dev->mode_config.mutex); 650 mutex_unlock(&dev->mode_config.mutex);
544
545 if (IS_IRONLAKE_M(dev))
546 ironlake_enable_rc6(dev);
547 } 651 }
548 652
549 intel_opregion_init(dev); 653 intel_opregion_init(dev);
@@ -576,7 +680,7 @@ int i915_resume(struct drm_device *dev)
576 return 0; 680 return 0;
577} 681}
578 682
579static int i8xx_do_reset(struct drm_device *dev, u8 flags) 683static int i8xx_do_reset(struct drm_device *dev)
580{ 684{
581 struct drm_i915_private *dev_priv = dev->dev_private; 685 struct drm_i915_private *dev_priv = dev->dev_private;
582 686
@@ -610,11 +714,12 @@ static int i965_reset_complete(struct drm_device *dev)
610{ 714{
611 u8 gdrst; 715 u8 gdrst;
612 pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst); 716 pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
613 return gdrst & 0x1; 717 return (gdrst & GRDOM_RESET_ENABLE) == 0;
614} 718}
615 719
616static int i965_do_reset(struct drm_device *dev, u8 flags) 720static int i965_do_reset(struct drm_device *dev)
617{ 721{
722 int ret;
618 u8 gdrst; 723 u8 gdrst;
619 724
620 /* 725 /*
@@ -623,20 +728,43 @@ static int i965_do_reset(struct drm_device *dev, u8 flags)
623 * triggers the reset; when done, the hardware will clear it. 728 * triggers the reset; when done, the hardware will clear it.
624 */ 729 */
625 pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst); 730 pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
626 pci_write_config_byte(dev->pdev, I965_GDRST, gdrst | flags | 0x1); 731 pci_write_config_byte(dev->pdev, I965_GDRST,
732 gdrst | GRDOM_RENDER |
733 GRDOM_RESET_ENABLE);
734 ret = wait_for(i965_reset_complete(dev), 500);
735 if (ret)
736 return ret;
737
738 /* We can't reset render&media without also resetting display ... */
739 pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
740 pci_write_config_byte(dev->pdev, I965_GDRST,
741 gdrst | GRDOM_MEDIA |
742 GRDOM_RESET_ENABLE);
627 743
628 return wait_for(i965_reset_complete(dev), 500); 744 return wait_for(i965_reset_complete(dev), 500);
629} 745}
630 746
631static int ironlake_do_reset(struct drm_device *dev, u8 flags) 747static int ironlake_do_reset(struct drm_device *dev)
632{ 748{
633 struct drm_i915_private *dev_priv = dev->dev_private; 749 struct drm_i915_private *dev_priv = dev->dev_private;
634 u32 gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR); 750 u32 gdrst;
635 I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, gdrst | flags | 0x1); 751 int ret;
752
753 gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
754 I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
755 gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
756 ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
757 if (ret)
758 return ret;
759
760 /* We can't reset render&media without also resetting display ... */
761 gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
762 I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
763 gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
636 return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500); 764 return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
637} 765}
638 766
639static int gen6_do_reset(struct drm_device *dev, u8 flags) 767static int gen6_do_reset(struct drm_device *dev)
640{ 768{
641 struct drm_i915_private *dev_priv = dev->dev_private; 769 struct drm_i915_private *dev_priv = dev->dev_private;
642 int ret; 770 int ret;
@@ -671,10 +799,44 @@ static int gen6_do_reset(struct drm_device *dev, u8 flags)
671 return ret; 799 return ret;
672} 800}
673 801
802static int intel_gpu_reset(struct drm_device *dev)
803{
804 struct drm_i915_private *dev_priv = dev->dev_private;
805 int ret = -ENODEV;
806
807 switch (INTEL_INFO(dev)->gen) {
808 case 7:
809 case 6:
810 ret = gen6_do_reset(dev);
811 break;
812 case 5:
813 ret = ironlake_do_reset(dev);
814 break;
815 case 4:
816 ret = i965_do_reset(dev);
817 break;
818 case 2:
819 ret = i8xx_do_reset(dev);
820 break;
821 }
822
823 /* Also reset the gpu hangman. */
824 if (dev_priv->stop_rings) {
825 DRM_DEBUG("Simulated gpu hang, resetting stop_rings\n");
826 dev_priv->stop_rings = 0;
827 if (ret == -ENODEV) {
828 DRM_ERROR("Reset not implemented, but ignoring "
829 "error for simulated gpu hangs\n");
830 ret = 0;
831 }
832 }
833
834 return ret;
835}
836
 /**
  * i915_reset - reset chip after a hang
  * @dev: drm device to reset
- * @flags: reset domains
  *
  * Reset the chip.  Useful if a hang is detected. Returns zero on successful
  * reset or otherwise an error code.
@@ -687,14 +849,9 @@ static int gen6_do_reset(struct drm_device *dev, u8 flags)
  * - re-init interrupt state
  * - re-init display
  */
-int i915_reset(struct drm_device *dev, u8 flags)
+int i915_reset(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	/*
-	 * We really should only reset the display subsystem if we actually
-	 * need to
-	 */
-	bool need_display = true;
 	int ret;
 
 	if (!i915_try_reset)
@@ -703,26 +860,16 @@ int i915_reset(struct drm_device *dev, u8 flags)
 	if (!mutex_trylock(&dev->struct_mutex))
 		return -EBUSY;
 
+	dev_priv->stop_rings = 0;
+
 	i915_gem_reset(dev);
 
 	ret = -ENODEV;
-	if (get_seconds() - dev_priv->last_gpu_reset < 5) {
+	if (get_seconds() - dev_priv->last_gpu_reset < 5)
 		DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
-	} else switch (INTEL_INFO(dev)->gen) {
-	case 7:
-	case 6:
-		ret = gen6_do_reset(dev, flags);
-		break;
-	case 5:
-		ret = ironlake_do_reset(dev, flags);
-		break;
-	case 4:
-		ret = i965_do_reset(dev, flags);
-		break;
-	case 2:
-		ret = i8xx_do_reset(dev, flags);
-		break;
-	}
+	else
+		ret = intel_gpu_reset(dev);
+
 	dev_priv->last_gpu_reset = get_seconds();
 	if (ret) {
 		DRM_ERROR("Failed to reset chip.\n");
@@ -746,36 +893,27 @@ int i915_reset(struct drm_device *dev, u8 flags)
 	 */
 	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
 			!dev_priv->mm.suspended) {
+		struct intel_ring_buffer *ring;
+		int i;
+
 		dev_priv->mm.suspended = 0;
 
 		i915_gem_init_swizzling(dev);
 
-		dev_priv->ring[RCS].init(&dev_priv->ring[RCS]);
-		if (HAS_BSD(dev))
-			dev_priv->ring[VCS].init(&dev_priv->ring[VCS]);
-		if (HAS_BLT(dev))
-			dev_priv->ring[BCS].init(&dev_priv->ring[BCS]);
+		for_each_ring(ring, dev_priv, i)
+			ring->init(ring);
 
 		i915_gem_init_ppgtt(dev);
 
 		mutex_unlock(&dev->struct_mutex);
-		drm_irq_uninstall(dev);
-		drm_mode_config_reset(dev);
-		drm_irq_install(dev);
-		mutex_lock(&dev->struct_mutex);
-	}
 
-	mutex_unlock(&dev->struct_mutex);
+		if (drm_core_check_feature(dev, DRIVER_MODESET))
+			intel_modeset_init_hw(dev);
 
-	/*
-	 * Perform a full modeset as on later generations, e.g. Ironlake, we may
-	 * need to retrain the display link and cannot just restore the register
-	 * values.
-	 */
-	if (need_display) {
-		mutex_lock(&dev->mode_config.mutex);
-		drm_helper_resume_force_mode(dev);
-		mutex_unlock(&dev->mode_config.mutex);
+		drm_irq_uninstall(dev);
+		drm_irq_install(dev);
+	} else {
+		mutex_unlock(&dev->struct_mutex);
 	}
 
 	return 0;
@@ -874,7 +1012,7 @@ static const struct dev_pm_ops i915_pm_ops = {
 	.restore = i915_pm_resume,
 };
 
-static struct vm_operations_struct i915_gem_vm_ops = {
+static const struct vm_operations_struct i915_gem_vm_ops = {
 	.fault = i915_gem_fault,
 	.open = drm_gem_vm_open,
 	.close = drm_gem_vm_close,
@@ -901,7 +1039,7 @@ static struct drm_driver driver = {
 	 */
 	.driver_features =
 	    DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
-	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM,
+	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME,
 	.load = i915_driver_load,
 	.unload = i915_driver_unload,
 	.open = i915_driver_open,
@@ -924,6 +1062,12 @@ static struct drm_driver driver = {
 	.gem_init_object = i915_gem_init_object,
 	.gem_free_object = i915_gem_free_object,
 	.gem_vm_ops = &i915_gem_vm_ops,
+
+	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+	.gem_prime_export = i915_gem_prime_export,
+	.gem_prime_import = i915_gem_prime_import,
+
 	.dumb_create = i915_gem_dumb_create,
 	.dumb_map_offset = i915_gem_mmap_gtt,
 	.dumb_destroy = i915_gem_dumb_destroy,
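With DRIVER_PRIME and the four hooks above wired up, userspace can turn GEM handles into shareable dma-buf file descriptors. A hedged userspace sketch using the generic PRIME ioctl — the header may live at <libdrm/drm.h> on some systems, error handling is trimmed, and the GEM handle is a placeholder for one created earlier (e.g. via a dumb-buffer ioctl):

#include <stdio.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

static int export_gem_handle(int drm_fd, unsigned int handle)
{
	struct drm_prime_handle args = {
		.handle = handle,
		.flags = DRM_CLOEXEC,
		.fd = -1,
	};

	if (ioctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args))
		return -1;
	return args.fd;	/* pass to another driver or process */
}

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);
	if (fd < 0)
		return 1;
	/* handle 1 is a placeholder for a previously created GEM object */
	printf("dma-buf fd: %d\n", export_gem_handle(fd, 1));
	return 0;
}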
@@ -993,6 +1137,13 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_DESC);
 MODULE_LICENSE("GPL and additional rights");
 
+/* We give fast paths for the really cool registers */
+#define NEEDS_FORCE_WAKE(dev_priv, reg) \
+	(((dev_priv)->info->gen >= 6) && \
+	 ((reg) < 0x40000) && \
+	 ((reg) != FORCEWAKE)) && \
+	(!IS_VALLEYVIEW((dev_priv)->dev))
+
 #define __i915_read(x, y) \
 u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
 	u##x val = 0; \
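The NEEDS_FORCE_WAKE() macro above gates the slow wake-up dance to GT registers on gen6+, excluding the FORCEWAKE register itself (reading it must not recurse) and Valleyview, which gets its own vlv_force_wake_get/put path elsewhere in this series. A standalone toy of the predicate, with illustrative constants rather than the hardware's:

#include <stdio.h>

#define FORCEWAKE	0xa18c
#define GT_RANGE_END	0x40000

static int needs_force_wake(int gen, unsigned int reg, int is_vlv)
{
	return gen >= 6 && reg < GT_RANGE_END && reg != FORCEWAKE && !is_vlv;
}

int main(void)
{
	unsigned int regs[] = { 0x2030, FORCEWAKE, 0x70000 };
	for (int i = 0; i < 3; i++)
		printf("reg %#x: %s\n", regs[i],
		       needs_force_wake(6, regs[i], 0) ? "wake" : "direct");
	return 0;
}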
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 5fabc6c31fec..377c21f531e4 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -38,6 +38,8 @@
 #include <linux/i2c-algo-bit.h>
 #include <drm/intel-gtt.h>
 #include <linux/backlight.h>
+#include <linux/intel-iommu.h>
+#include <linux/kref.h>
 
 /* General customization:
  */
@@ -63,10 +65,30 @@ enum plane {
 };
 #define plane_name(p) ((p) + 'A')
 
+enum port {
+	PORT_A = 0,
+	PORT_B,
+	PORT_C,
+	PORT_D,
+	PORT_E,
+	I915_MAX_PORTS
+};
+#define port_name(p) ((p) + 'A')
+
 #define I915_GEM_GPU_DOMAINS	(~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
 
 #define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++)
 
+struct intel_pch_pll {
+	int refcount; /* count of number of CRTCs sharing this PLL */
+	int active; /* count of number of active CRTCs (i.e. DPMS on) */
+	bool on; /* is the PLL actually active? Disabled during modeset */
+	int pll_reg;
+	int fp0_reg;
+	int fp1_reg;
+};
+#define I915_NUM_PLLS 2
+
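intel_pch_pll turns the PCH PLLs into refcounted, shareable resources: several CRTCs may reference one PLL, and only the last put may switch the hardware off. The following sketch of a find-free-slot/get/put discipline is illustrative, not the driver's code:

#include <stdio.h>

#define NUM_PLLS 2

struct pch_pll {
	int refcount;	/* CRTCs sharing this PLL */
	int active;	/* of those, how many are lit */
};

static struct pch_pll plls[NUM_PLLS];

static struct pch_pll *pll_get(void)
{
	for (int i = 0; i < NUM_PLLS; i++)
		if (plls[i].refcount == 0) {	/* free slot */
			plls[i].refcount = 1;
			return &plls[i];
		}
	return NULL;	/* all shared out; caller must share or fail */
}

static void pll_put(struct pch_pll *pll)
{
	if (--pll->refcount == 0)
		pll->active = 0;	/* safe to disable hardware */
}

int main(void)
{
	struct pch_pll *a = pll_get(), *b = pll_get();
	printf("got %p %p, third: %p\n", (void *)a, (void *)b,
	       (void *)pll_get());
	pll_put(a);
	pll_put(b);
	return 0;
}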
 /* Interface history:
  *
  * 1.1: Original.
@@ -111,11 +133,11 @@ struct opregion_asle;
 struct drm_i915_private;
 
 struct intel_opregion {
-	struct opregion_header *header;
-	struct opregion_acpi *acpi;
-	struct opregion_swsci *swsci;
-	struct opregion_asle *asle;
-	void *vbt;
+	struct opregion_header __iomem *header;
+	struct opregion_acpi __iomem *acpi;
+	struct opregion_swsci __iomem *swsci;
+	struct opregion_asle __iomem *asle;
+	void __iomem *vbt;
 	u32 __iomem *lid_state;
 };
 #define OPREGION_SIZE            (8*1024)
@@ -135,7 +157,6 @@ struct drm_i915_master_private {
 struct drm_i915_fence_reg {
 	struct list_head lru_list;
 	struct drm_i915_gem_object *obj;
-	uint32_t setup_seqno;
 	int pin_count;
 };
 
@@ -151,8 +172,11 @@ struct sdvo_device_mapping {
 struct intel_display_error_state;
 
 struct drm_i915_error_state {
+	struct kref ref;
 	u32 eir;
 	u32 pgtbl_er;
+	u32 ier;
+	bool waiting[I915_NUM_RINGS];
 	u32 pipestat[I915_MAX_PIPES];
 	u32 tail[I915_NUM_RINGS];
 	u32 head[I915_NUM_RINGS];
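Making the error state kref-counted lets a debugfs reader keep a capture alive while a new hang replaces first_error. A userspace sketch of the same last-put-frees pattern, with C11 atomics standing in for the kernel's kref:

#include <stdio.h>
#include <stdlib.h>
#include <stdatomic.h>

struct error_state {
	atomic_int ref;
	/* ... captured registers would live here ... */
};

static struct error_state *error_state_get(struct error_state *e)
{
	atomic_fetch_add(&e->ref, 1);
	return e;
}

static void error_state_put(struct error_state *e)
{
	/* fetch_sub returns the old value: 1 means we dropped the last ref */
	if (atomic_fetch_sub(&e->ref, 1) == 1) {
		printf("last reference dropped, freeing\n");
		free(e);
	}
}

int main(void)
{
	struct error_state *e = calloc(1, sizeof(*e));
	atomic_init(&e->ref, 1);		/* creator's reference */
	struct error_state *reader = error_state_get(e);
	error_state_put(e);			/* creator done */
	error_state_put(reader);		/* reader done: frees */
	return 0;
}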
@@ -218,11 +242,15 @@ struct drm_i915_display_funcs {
 	void (*update_wm)(struct drm_device *dev);
 	void (*update_sprite_wm)(struct drm_device *dev, int pipe,
 				 uint32_t sprite_width, int pixel_size);
+	void (*sanitize_pm)(struct drm_device *dev);
+	void (*update_linetime_wm)(struct drm_device *dev, int pipe,
+				   struct drm_display_mode *mode);
 	int (*crtc_mode_set)(struct drm_crtc *crtc,
 			     struct drm_display_mode *mode,
 			     struct drm_display_mode *adjusted_mode,
 			     int x, int y,
 			     struct drm_framebuffer *old_fb);
+	void (*off)(struct drm_crtc *crtc);
 	void (*write_eld)(struct drm_connector *connector,
 			  struct drm_crtc *crtc);
 	void (*fdi_link_train)(struct drm_crtc *crtc);
@@ -255,6 +283,9 @@ struct intel_device_info {
 	u8 is_broadwater:1;
 	u8 is_crestline:1;
 	u8 is_ivybridge:1;
+	u8 is_valleyview:1;
+	u8 has_pch_split:1;
+	u8 is_haswell:1;
 	u8 has_fbc:1;
 	u8 has_pipe_cxsr:1;
 	u8 has_hotplug:1;
@@ -291,10 +322,12 @@ enum no_fbc_reason {
 enum intel_pch {
 	PCH_IBX,	/* Ibexpeak PCH */
 	PCH_CPT,	/* Cougarpoint PCH */
+	PCH_LPT,	/* Lynxpoint PCH */
 };
 
 #define QUIRK_PIPEA_FORCE (1<<0)
 #define QUIRK_LVDS_SSC_DISABLE (1<<1)
+#define QUIRK_INVERT_BRIGHTNESS (1<<2)
 
 struct intel_fbdev;
 struct intel_fbc_work;
@@ -302,7 +335,6 @@ struct intel_fbc_work;
 struct intel_gmbus {
 	struct i2c_adapter adapter;
 	bool force_bit;
-	bool has_gpio;
 	u32 reg0;
 	u32 gpio_reg;
 	struct i2c_algo_bit_data bit_algo;
@@ -314,7 +346,6 @@ typedef struct drm_i915_private {
 
 	const struct intel_device_info *info;
 
-	int has_gem;
 	int relative_constants_mode;
 
 	void __iomem *regs;
@@ -326,19 +357,23 @@ typedef struct drm_i915_private {
 	/** gt_lock is also taken in irq contexts. */
 	struct spinlock gt_lock;
 
-	struct intel_gmbus *gmbus;
+	struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
 
 	/** gmbus_mutex protects against concurrent usage of the single hw gmbus
 	 * controller on different i2c buses. */
 	struct mutex gmbus_mutex;
 
+	/**
+	 * Base address of the gmbus and gpio block.
+	 */
+	uint32_t gpio_mmio_base;
+
 	struct pci_dev *bridge_dev;
 	struct intel_ring_buffer ring[I915_NUM_RINGS];
 	uint32_t next_seqno;
 
 	drm_dma_handle_t *status_page_dmah;
 	uint32_t counter;
-	drm_local_map_t hws_map;
 	struct drm_i915_gem_object *pwrctx;
 	struct drm_i915_gem_object *renderctx;
 
@@ -354,6 +389,10 @@ typedef struct drm_i915_private {
 
 	/* protects the irq masks */
 	spinlock_t irq_lock;
+
+	/* DPIO indirect register protection */
+	spinlock_t dpio_lock;
+
 	/** Cached value of IMR to avoid reads in updating the bitfield */
 	u32 pipestat[2];
 	u32 irq_mask;
@@ -363,22 +402,20 @@ typedef struct drm_i915_private {
 	u32 hotplug_supported_mask;
 	struct work_struct hotplug_work;
 
-	int tex_lru_log_granularity;
-	int allow_batchbuffer;
 	unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
-	int vblank_pipe;
 	int num_pipe;
+	int num_pch_pll;
 
 	/* For hangcheck timer */
 #define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
 	struct timer_list hangcheck_timer;
 	int hangcheck_count;
-	uint32_t last_acthd;
-	uint32_t last_acthd_bsd;
-	uint32_t last_acthd_blt;
+	uint32_t last_acthd[I915_NUM_RINGS];
 	uint32_t last_instdone;
 	uint32_t last_instdone1;
 
+	unsigned int stop_rings;
+
 	unsigned long cfb_size;
 	unsigned int cfb_fb;
 	enum plane cfb_plane;
@@ -405,6 +442,8 @@ typedef struct drm_i915_private {
 	unsigned int lvds_use_ssc:1;
 	unsigned int display_clock_mode:1;
 	int lvds_ssc_freq;
+	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
+	unsigned int lvds_val; /* used for checking LVDS channel mode */
 	struct {
 		int rate;
 		int lanes;
@@ -428,6 +467,7 @@ typedef struct drm_i915_private {
 	unsigned int fsb_freq, mem_freq, is_ddr3;
 
 	spinlock_t error_lock;
+	/* Protected by dev->error_lock. */
 	struct drm_i915_error_state *first_error;
 	struct work_struct error_work;
 	struct completion error_completion;
@@ -652,24 +692,10 @@ typedef struct drm_i915_private {
 	 */
 	struct list_head inactive_list;
 
-	/**
-	 * LRU list of objects which are not in the ringbuffer but
-	 * are still pinned in the GTT.
-	 */
-	struct list_head pinned_list;
-
 	/** LRU list of objects with fence regs on them. */
 	struct list_head fence_list;
 
 	/**
-	 * List of objects currently pending being freed.
-	 *
-	 * These objects are no longer in use, but due to a signal
-	 * we were prevented from freeing them at the appointed time.
-	 */
-	struct list_head deferred_free_list;
-
-	/**
 	 * We leave the user IRQ off as much as possible,
 	 * but this means that requests will finish and never
 	 * be retired once the system goes idle. Set a timer to
@@ -717,6 +743,16 @@ typedef struct drm_i915_private {
 		size_t object_memory;
 		u32 object_count;
 	} mm;
+
+	/* Old dri1 support infrastructure, beware the dragons ya fools entering
+	 * here! */
+	struct {
+		unsigned allow_batchbuffer : 1;
+		u32 __iomem *gfx_hws_cpu_addr;
+	} dri1;
+
+	/* Kernel Modesetting */
+
 	struct sdvo_device_mapping sdvo_mappings[2];
 	/* indicate whether the LVDS_BORDER should be enabled or not */
 	unsigned int lvds_border_bits;
@@ -726,7 +762,8 @@ typedef struct drm_i915_private {
 	struct drm_crtc *plane_to_crtc_mapping[3];
 	struct drm_crtc *pipe_to_crtc_mapping[3];
 	wait_queue_head_t pending_flip_queue;
-	bool flip_pending_is_done;
+
+	struct intel_pch_pll pch_plls[I915_NUM_PLLS];
 
 	/* Reclocking support */
 	bool render_reclock_avail;
@@ -781,6 +818,11 @@ typedef struct drm_i915_private {
 	struct drm_property *force_audio_property;
 } drm_i915_private_t;
 
+/* Iterate over initialised rings */
+#define for_each_ring(ring__, dev_priv__, i__) \
+	for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
+		if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__)))
+
 enum hdmi_force_audio {
 	HDMI_AUDIO_OFF_DVI = -2,	/* no aux data for HDMI-DVI converter */
 	HDMI_AUDIO_OFF,			/* force turn off HDMI audio */
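for_each_ring() above is the classic for-plus-if iterator macro: the if clause both binds the loop cursor and skips uninitialised slots, so callers write a plain loop body. A self-contained demo of the idiom, with names local to this sketch (note the usual caveat that an if-without-else macro can interact with a caller's dangling else):

#include <stdio.h>

#define NUM_RINGS 3

struct ring { const char *name; int initialized; };

#define for_each_ring(r, rings, i)				\
	for ((i) = 0; (i) < NUM_RINGS; (i)++)			\
		if (((r) = &(rings)[(i)]), (r)->initialized)

int main(void)
{
	struct ring rings[NUM_RINGS] = {
		{ "render", 1 }, { "bsd", 0 }, { "blt", 1 },
	};
	struct ring *ring;
	int i;

	for_each_ring(ring, rings, i)
		printf("init ring %d: %s\n", i, ring->name);
	return 0;
}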
@@ -844,7 +886,14 @@ struct drm_i915_gem_object {
 	 * Current tiling mode for the object.
 	 */
 	unsigned int tiling_mode:2;
-	unsigned int tiling_changed:1;
+	/**
+	 * Whether the tiling parameters for the currently associated fence
+	 * register have changed. Note that for the purposes of tracking
+	 * tiling changes we also treat the unfenced register, the register
+	 * slot that the object occupies whilst it executes a fenced
+	 * command (such as BLT on gen2/3), as a "fence".
+	 */
+	unsigned int fence_dirty:1;
 
 	/** How many users have pinned this object in GTT space. The following
 	 * users can each hold at most one reference: pwrite/pread, pin_ioctl
@@ -881,6 +930,7 @@ struct drm_i915_gem_object {
 	unsigned int cache_level:2;
 
 	unsigned int has_aliasing_ppgtt_mapping:1;
+	unsigned int has_global_gtt_mapping:1;
 
 	struct page **pages;
 
@@ -890,6 +940,8 @@ struct drm_i915_gem_object {
 	struct scatterlist *sg_list;
 	int num_sg;
 
+	/* prime dma-buf support */
+	struct sg_table *sg_table;
 	/**
 	 * Used for performing relocations during execbuffer insertion.
 	 */
@@ -904,13 +956,12 @@ struct drm_i915_gem_object {
 	 */
 	uint32_t gtt_offset;
 
-	/** Breadcrumb of last rendering to the buffer. */
-	uint32_t last_rendering_seqno;
 	struct intel_ring_buffer *ring;
 
+	/** Breadcrumb of last rendering to the buffer. */
+	uint32_t last_rendering_seqno;
 	/** Breadcrumb of last fenced GPU access to the buffer. */
 	uint32_t last_fenced_seqno;
-	struct intel_ring_buffer *last_fenced_ring;
 
 	/** Current tiling stride for the object, if it's tiled. */
 	uint32_t stride;
@@ -918,13 +969,6 @@ struct drm_i915_gem_object {
 	/** Record of address bit 17 of each page at last unbind. */
 	unsigned long *bit_17;
 
-
-	/**
-	 * If present, while GEM_DOMAIN_CPU is in the read domain this array
-	 * flags which individual pages are valid.
-	 */
-	uint8_t *page_cpu_valid;
-
 	/** User space pin count and filp owning the pin */
 	uint32_t user_pin_count;
 	struct drm_file *pin_filp;
@@ -1001,6 +1045,8 @@ struct drm_i915_file_private {
 #define IS_IRONLAKE_D(dev)	((dev)->pci_device == 0x0042)
 #define IS_IRONLAKE_M(dev)	((dev)->pci_device == 0x0046)
 #define IS_IVYBRIDGE(dev)	(INTEL_INFO(dev)->is_ivybridge)
+#define IS_VALLEYVIEW(dev)	(INTEL_INFO(dev)->is_valleyview)
+#define IS_HASWELL(dev)	(INTEL_INFO(dev)->is_haswell)
 #define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
 
 /*
@@ -1044,10 +1090,11 @@ struct drm_i915_file_private {
 #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
 #define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
 
-#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev))
+#define HAS_PCH_SPLIT(dev) (INTEL_INFO(dev)->has_pch_split)
 #define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
 
 #define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
+#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
 #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
 
@@ -1081,6 +1128,7 @@ extern int i915_panel_ignore_lid __read_mostly;
 extern unsigned int i915_powersave __read_mostly;
 extern int i915_semaphores __read_mostly;
 extern unsigned int i915_lvds_downclock __read_mostly;
+extern int i915_lvds_channel_mode __read_mostly;
 extern int i915_panel_use_ssc __read_mostly;
 extern int i915_vbt_sdvo_panel_type __read_mostly;
 extern int i915_enable_rc6 __read_mostly;
@@ -1094,6 +1142,7 @@ extern int i915_master_create(struct drm_device *dev, struct drm_master *master)
 extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
 
 /* i915_dma.c */
+void i915_update_dri1_breadcrumb(struct drm_device *dev);
 extern void i915_kernel_lost_context(struct drm_device * dev);
 extern int i915_driver_load(struct drm_device *, unsigned long flags);
 extern int i915_driver_unload(struct drm_device *);
@@ -1104,12 +1153,14 @@ extern void i915_driver_preclose(struct drm_device *dev,
 extern void i915_driver_postclose(struct drm_device *dev,
 				  struct drm_file *file_priv);
 extern int i915_driver_device_is_agp(struct drm_device * dev);
+#ifdef CONFIG_COMPAT
 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
 			      unsigned long arg);
+#endif
 extern int i915_emit_box(struct drm_device *dev,
 			 struct drm_clip_rect *box,
 			 int DR1, int DR4);
-extern int i915_reset(struct drm_device *dev, u8 flags);
+extern int i915_reset(struct drm_device *dev);
 extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
 extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
 extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
@@ -1119,19 +1170,10 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
 /* i915_irq.c */
 void i915_hangcheck_elapsed(unsigned long data);
 void i915_handle_error(struct drm_device *dev, bool wedged);
-extern int i915_irq_emit(struct drm_device *dev, void *data,
-			 struct drm_file *file_priv);
-extern int i915_irq_wait(struct drm_device *dev, void *data,
-			 struct drm_file *file_priv);
 
 extern void intel_irq_init(struct drm_device *dev);
 
-extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
-				struct drm_file *file_priv);
-extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
-				struct drm_file *file_priv);
-extern int i915_vblank_swap(struct drm_device *dev, void *data,
-			    struct drm_file *file_priv);
+void i915_error_state_free(struct kref *error_ref);
 
 void
 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
@@ -1205,8 +1247,12 @@ int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
 void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);
 
+int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
+				  gfp_t gfpmask);
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
 int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj);
+int i915_gem_object_sync(struct drm_i915_gem_object *obj,
+			 struct intel_ring_buffer *to);
 void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
 				    struct intel_ring_buffer *ring,
 				    u32 seqno);
@@ -1229,17 +1275,18 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
 
 u32 i915_gem_next_request_seqno(struct intel_ring_buffer *ring);
 
-int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
-					   struct intel_ring_buffer *pipelined);
+int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
 
-static inline void
+static inline bool
 i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
 {
 	if (obj->fence_reg != I915_FENCE_REG_NONE) {
 		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
 		dev_priv->fence_regs[obj->fence_reg].pin_count++;
-	}
+		return true;
+	} else
+		return false;
 }
 
 static inline void
@@ -1260,27 +1307,25 @@ int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
 					    uint32_t read_domains,
 					    uint32_t write_domain);
 int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_init(struct drm_device *dev);
 int __must_check i915_gem_init_hw(struct drm_device *dev);
 void i915_gem_init_swizzling(struct drm_device *dev);
 void i915_gem_init_ppgtt(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
-void i915_gem_do_init(struct drm_device *dev,
-		      unsigned long start,
-		      unsigned long mappable_end,
-		      unsigned long end);
-int __must_check i915_gpu_idle(struct drm_device *dev, bool do_retire);
+int __must_check i915_gpu_idle(struct drm_device *dev);
 int __must_check i915_gem_idle(struct drm_device *dev);
 int __must_check i915_add_request(struct intel_ring_buffer *ring,
 				  struct drm_file *file,
 				  struct drm_i915_gem_request *request);
 int __must_check i915_wait_request(struct intel_ring_buffer *ring,
-				   uint32_t seqno,
-				   bool do_retire);
+				   uint32_t seqno);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int __must_check
 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
 				  bool write);
 int __must_check
+i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
+int __must_check
 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 				     u32 alignment,
 				     struct intel_ring_buffer *pipelined);
@@ -1301,6 +1346,13 @@ i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 				    enum i915_cache_level cache_level);
 
+struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
+					     struct dma_buf *dma_buf);
+
+struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
+				      struct drm_gem_object *gem_obj, int flags);
+
+
 /* i915_gem_gtt.c */
 int __must_check i915_gem_init_aliasing_ppgtt(struct drm_device *dev);
 void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
@@ -1311,18 +1363,24 @@ void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
 			      struct drm_i915_gem_object *obj);
 
 void i915_gem_restore_gtt_mappings(struct drm_device *dev);
-int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
-void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
+int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
+void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
 				enum i915_cache_level cache_level);
 void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
+void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
+void i915_gem_init_global_gtt(struct drm_device *dev,
+			      unsigned long start,
+			      unsigned long mappable_end,
+			      unsigned long end);
 
 /* i915_gem_evict.c */
 int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
 					  unsigned alignment, bool mappable);
-int __must_check i915_gem_evict_everything(struct drm_device *dev,
-					   bool purgeable_only);
-int __must_check i915_gem_evict_inactive(struct drm_device *dev,
-					 bool purgeable_only);
+int i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only);
+
+/* i915_gem_stolen.c */
+int i915_gem_init_stolen(struct drm_device *dev);
+void i915_gem_cleanup_stolen(struct drm_device *dev);
 
 /* i915_gem_tiling.c */
 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
@@ -1354,9 +1412,20 @@ extern int i915_restore_state(struct drm_device *dev);
 extern int i915_save_state(struct drm_device *dev);
 extern int i915_restore_state(struct drm_device *dev);
 
+/* i915_sysfs.c */
+void i915_setup_sysfs(struct drm_device *dev_priv);
+void i915_teardown_sysfs(struct drm_device *dev_priv);
+
 /* intel_i2c.c */
 extern int intel_setup_gmbus(struct drm_device *dev);
 extern void intel_teardown_gmbus(struct drm_device *dev);
+extern inline bool intel_gmbus_is_port_valid(unsigned port)
+{
+	return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD);
+}
+
+extern struct i2c_adapter *intel_gmbus_get_adapter(
+		struct drm_i915_private *dev_priv, unsigned port);
 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
 extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
@@ -1391,6 +1460,7 @@ static inline void intel_unregister_dsm_handler(void) { return; }
 #endif /* CONFIG_ACPI */
 
 /* modesetting */
+extern void intel_modeset_init_hw(struct drm_device *dev);
 extern void intel_modeset_init(struct drm_device *dev);
 extern void intel_modeset_gem_init(struct drm_device *dev);
 extern void intel_modeset_cleanup(struct drm_device *dev);
@@ -1403,12 +1473,17 @@ extern void ironlake_enable_rc6(struct drm_device *dev);
 extern void gen6_set_rps(struct drm_device *dev, u8 val);
 extern void intel_detect_pch(struct drm_device *dev);
 extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
+extern int intel_enable_rc6(const struct drm_device *dev);
 
+extern bool i915_semaphore_is_enabled(struct drm_device *dev);
 extern void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
 extern void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv);
 extern void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
 extern void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv);
 
+extern void vlv_force_wake_get(struct drm_i915_private *dev_priv);
+extern void vlv_force_wake_put(struct drm_i915_private *dev_priv);
+
 /* overlay */
 #ifdef CONFIG_DEBUG_FS
 extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
@@ -1420,28 +1495,6 @@ extern void intel_display_print_error_state(struct seq_file *m,
 					    struct intel_display_error_state *error);
 #endif
 
-#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
-
-#define BEGIN_LP_RING(n) \
-	intel_ring_begin(LP_RING(dev_priv), (n))
-
-#define OUT_RING(x) \
-	intel_ring_emit(LP_RING(dev_priv), x)
-
-#define ADVANCE_LP_RING() \
-	intel_ring_advance(LP_RING(dev_priv))
-
-/**
- * Lock test for when it's just for synchronization of ring access.
- *
- * In that case, we don't need to do it when GEM is initialized as nobody else
- * has access to the ring.
- */
-#define RING_LOCK_TEST_WITH_RETURN(dev, file) do {			\
-	if (LP_RING(dev->dev_private)->obj == NULL)			\
-		LOCK_TEST_WITH_RETURN(dev, file);			\
-} while (0)
-
 /* On SNB platform, before reading ring registers forcewake bit
  * must be set to prevent GT core from power down and stale values being
  * returned.
@@ -1450,12 +1503,6 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
 void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
 int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
 
-/* We give fast paths for the really cool registers */
-#define NEEDS_FORCE_WAKE(dev_priv, reg) \
-	(((dev_priv)->info->gen >= 6) && \
-	 ((reg) < 0x40000) && \
-	 ((reg) != FORCEWAKE))
-
 #define __i915_read(x, y) \
 	u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0d1e4b7b4b99..c1e5c66553df 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -35,31 +35,41 @@
 #include <linux/slab.h>
 #include <linux/swap.h>
 #include <linux/pci.h>
+#include <linux/dma-buf.h>
 
 static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
-static __must_check int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
-							   bool write);
-static __must_check int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
-								   uint64_t offset,
-								   uint64_t size);
-static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj);
 static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 						    unsigned alignment,
 						    bool map_and_fenceable);
-static void i915_gem_clear_fence_reg(struct drm_device *dev,
-				     struct drm_i915_fence_reg *reg);
 static int i915_gem_phys_pwrite(struct drm_device *dev,
 				struct drm_i915_gem_object *obj,
 				struct drm_i915_gem_pwrite *args,
 				struct drm_file *file);
-static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);
+
+static void i915_gem_write_fence(struct drm_device *dev, int reg,
+				 struct drm_i915_gem_object *obj);
+static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
+					 struct drm_i915_fence_reg *fence,
+					 bool enable);
 
 static int i915_gem_inactive_shrink(struct shrinker *shrinker,
 				    struct shrink_control *sc);
 static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
 
+static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
+{
+	if (obj->tiling_mode)
+		i915_gem_release_mmap(obj);
+
+	/* As we do not have an associated fence register, we will force
+	 * a tiling change if we ever need to acquire one.
+	 */
+	obj->fence_dirty = false;
+	obj->fence_reg = I915_FENCE_REG_NONE;
+}
+
 /* some bookkeeping */
 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
 				  size_t size)
@@ -122,26 +132,7 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
 static inline bool
 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
 {
-	return obj->gtt_space && !obj->active && obj->pin_count == 0;
-}
-
-void i915_gem_do_init(struct drm_device *dev,
-		      unsigned long start,
-		      unsigned long mappable_end,
-		      unsigned long end)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-
-	drm_mm_init(&dev_priv->mm.gtt_space, start, end - start);
-
-	dev_priv->mm.gtt_start = start;
-	dev_priv->mm.gtt_mappable_end = mappable_end;
-	dev_priv->mm.gtt_end = end;
-	dev_priv->mm.gtt_total = end - start;
-	dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
-
-	/* Take over this portion of the GTT */
-	intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
+	return !obj->active;
 }
 
 int
@@ -150,12 +141,20 @@ i915_gem_init_ioctl(struct drm_device *dev, void *data,
 {
 	struct drm_i915_gem_init *args = data;
 
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return -ENODEV;
+
 	if (args->gtt_start >= args->gtt_end ||
 	    (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
 		return -EINVAL;
 
+	/* GEM with user mode setting was never supported on ilk and later. */
+	if (INTEL_INFO(dev)->gen >= 5)
+		return -ENODEV;
+
 	mutex_lock(&dev->struct_mutex);
-	i915_gem_do_init(dev, args->gtt_start, args->gtt_end, args->gtt_end);
+	i915_gem_init_global_gtt(dev, args->gtt_start,
+				 args->gtt_end, args->gtt_end);
 	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
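The reworked init ioctl now rejects the legacy DRI1 path outright under kernel modesetting, and on ilk+ hardware regardless. A stub sketch of that early-bail guard shape, using invented types:

#include <stdio.h>
#include <errno.h>

#define DRIVER_MODESET	(1 << 0)

struct dev { unsigned int features; int gen; };

static int legacy_init_ioctl(struct dev *d)
{
	if (d->features & DRIVER_MODESET)
		return -ENODEV;	/* KMS owns the GTT layout */
	if (d->gen >= 5)
		return -ENODEV;	/* UMS was never supported on ilk+ */
	return 0;		/* ... legacy setup would follow ... */
}

int main(void)
{
	struct dev kms = { .features = DRIVER_MODESET, .gen = 6 };
	struct dev ums = { .features = 0, .gen = 4 };
	printf("kms: %d, ums gen4: %d\n",
	       legacy_init_ioctl(&kms), legacy_init_ioctl(&ums));
	return 0;
}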
@@ -170,13 +169,11 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 	struct drm_i915_gem_object *obj;
 	size_t pinned;
 
-	if (!(dev->driver->driver_features & DRIVER_GEM))
-		return -ENODEV;
-
 	pinned = 0;
 	mutex_lock(&dev->struct_mutex);
-	list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
-		pinned += obj->gtt_space->size;
+	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list)
+		if (obj->pin_count)
+			pinned += obj->gtt_space->size;
 	mutex_unlock(&dev->struct_mutex);
 
 	args->aper_size = dev_priv->mm.gtt_total;
@@ -247,6 +244,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
 		      struct drm_file *file)
 {
 	struct drm_i915_gem_create *args = data;
+
 	return i915_gem_create(file, dev,
 			       args->size, &args->handle);
 }
@@ -259,66 +257,6 @@ static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
 		obj->tiling_mode != I915_TILING_NONE;
 }
 
-/**
- * This is the fast shmem pread path, which attempts to copy_from_user directly
- * from the backing pages of the object to the user's address space. On a
- * fault, it fails so we can fall back to i915_gem_shmem_pwrite_slow().
- */
-static int
-i915_gem_shmem_pread_fast(struct drm_device *dev,
-			  struct drm_i915_gem_object *obj,
-			  struct drm_i915_gem_pread *args,
-			  struct drm_file *file)
-{
-	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
-	ssize_t remain;
-	loff_t offset;
-	char __user *user_data;
-	int page_offset, page_length;
-
-	user_data = (char __user *) (uintptr_t) args->data_ptr;
-	remain = args->size;
-
-	offset = args->offset;
-
-	while (remain > 0) {
-		struct page *page;
-		char *vaddr;
-		int ret;
-
-		/* Operation in this page
-		 *
-		 * page_offset = offset within page
-		 * page_length = bytes to copy for this page
-		 */
-		page_offset = offset_in_page(offset);
-		page_length = remain;
-		if ((page_offset + remain) > PAGE_SIZE)
-			page_length = PAGE_SIZE - page_offset;
-
-		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
-		if (IS_ERR(page))
-			return PTR_ERR(page);
-
-		vaddr = kmap_atomic(page);
-		ret = __copy_to_user_inatomic(user_data,
-					      vaddr + page_offset,
-					      page_length);
-		kunmap_atomic(vaddr);
-
-		mark_page_accessed(page);
-		page_cache_release(page);
-		if (ret)
-			return -EFAULT;
-
-		remain -= page_length;
-		user_data += page_length;
-		offset += page_length;
-	}
-
-	return 0;
-}
-
 static inline int
 __copy_to_user_swizzled(char __user *cpu_vaddr,
 			const char *gpu_vaddr, int gpu_offset,
@@ -346,8 +284,8 @@ __copy_to_user_swizzled(char __user *cpu_vaddr,
 }
 
 static inline int
-__copy_from_user_swizzled(char __user *gpu_vaddr, int gpu_offset,
-			  const char *cpu_vaddr,
+__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
 			  int length)
 {
 	int ret, cpu_offset = 0;
@@ -371,37 +309,121 @@ __copy_from_user_swizzled(char __user *gpu_vaddr, int gpu_offset,
 	return 0;
 }
 
-/**
- * This is the fallback shmem pread path, which allocates temporary storage
- * in kernel space to copy_to_user into outside of the struct_mutex, so we
- * can copy out of the object's backing pages while holding the struct mutex
- * and not take page faults.
- */
+/* Per-page copy function for the shmem pread fastpath.
+ * Flushes invalid cachelines before reading the target if
+ * needs_clflush is set. */
 static int
-i915_gem_shmem_pread_slow(struct drm_device *dev,
-			  struct drm_i915_gem_object *obj,
-			  struct drm_i915_gem_pread *args,
-			  struct drm_file *file)
+shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
+		 char __user *user_data,
+		 bool page_do_bit17_swizzling, bool needs_clflush)
+{
+	char *vaddr;
+	int ret;
+
+	if (unlikely(page_do_bit17_swizzling))
+		return -EINVAL;
+
+	vaddr = kmap_atomic(page);
+	if (needs_clflush)
+		drm_clflush_virt_range(vaddr + shmem_page_offset,
+				       page_length);
+	ret = __copy_to_user_inatomic(user_data,
+				      vaddr + shmem_page_offset,
+				      page_length);
+	kunmap_atomic(vaddr);
+
+	return ret;
+}
+
+static void
+shmem_clflush_swizzled_range(char *addr, unsigned long length,
+			     bool swizzled)
+{
+	if (unlikely(swizzled)) {
+		unsigned long start = (unsigned long) addr;
+		unsigned long end = (unsigned long) addr + length;
+
+		/* For swizzling simply ensure that we always flush both
+		 * channels. Lame, but simple and it works. Swizzled
+		 * pwrite/pread is far from a hotpath - current userspace
+		 * doesn't use it at all. */
+		start = round_down(start, 128);
+		end = round_up(end, 128);
+
+		drm_clflush_virt_range((void *)start, end - start);
+	} else {
+		drm_clflush_virt_range(addr, length);
+	}
+
+}
+
361 * and uses non-atomic copy and kmap functions. */
362static int
363shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
364 char __user *user_data,
365 bool page_do_bit17_swizzling, bool needs_clflush)
366{
367 char *vaddr;
368 int ret;
369
370 vaddr = kmap(page);
371 if (needs_clflush)
372 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
373 page_length,
374 page_do_bit17_swizzling);
375
376 if (page_do_bit17_swizzling)
377 ret = __copy_to_user_swizzled(user_data,
378 vaddr, shmem_page_offset,
379 page_length);
380 else
381 ret = __copy_to_user(user_data,
382 vaddr + shmem_page_offset,
383 page_length);
384 kunmap(page);
385
386 return ret;
387}
388
389static int
390i915_gem_shmem_pread(struct drm_device *dev,
391 struct drm_i915_gem_object *obj,
392 struct drm_i915_gem_pread *args,
393 struct drm_file *file)
385{ 394{
386 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; 395 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
387 char __user *user_data; 396 char __user *user_data;
388 ssize_t remain; 397 ssize_t remain;
389 loff_t offset; 398 loff_t offset;
390 int shmem_page_offset, page_length, ret; 399 int shmem_page_offset, page_length, ret = 0;
391 int obj_do_bit17_swizzling, page_do_bit17_swizzling; 400 int obj_do_bit17_swizzling, page_do_bit17_swizzling;
401 int hit_slowpath = 0;
402 int prefaulted = 0;
403 int needs_clflush = 0;
404 int release_page;
392 405
393 user_data = (char __user *) (uintptr_t) args->data_ptr; 406 user_data = (char __user *) (uintptr_t) args->data_ptr;
394 remain = args->size; 407 remain = args->size;
395 408
396 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); 409 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
397 410
398 offset = args->offset; 411 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
412 /* If we're not in the cpu read domain, set ourself into the gtt
413 * read domain and manually flush cachelines (if required). This
414 * optimizes for the case when the gpu will dirty the data
415 * anyway again before the next pread happens. */
416 if (obj->cache_level == I915_CACHE_NONE)
417 needs_clflush = 1;
418 ret = i915_gem_object_set_to_gtt_domain(obj, false);
419 if (ret)
420 return ret;
421 }
399 422
400 mutex_unlock(&dev->struct_mutex); 423 offset = args->offset;
401 424
402 while (remain > 0) { 425 while (remain > 0) {
403 struct page *page; 426 struct page *page;
404 char *vaddr;
405 427
406 /* Operation in this page 428 /* Operation in this page
407 * 429 *
@@ -413,28 +435,51 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
413 if ((shmem_page_offset + page_length) > PAGE_SIZE) 435 if ((shmem_page_offset + page_length) > PAGE_SIZE)
414 page_length = PAGE_SIZE - shmem_page_offset; 436 page_length = PAGE_SIZE - shmem_page_offset;
415 437
416 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT); 438 if (obj->pages) {
417 if (IS_ERR(page)) { 439 page = obj->pages[offset >> PAGE_SHIFT];
418 ret = PTR_ERR(page); 440 release_page = 0;
419 goto out; 441 } else {
442 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
443 if (IS_ERR(page)) {
444 ret = PTR_ERR(page);
445 goto out;
446 }
447 release_page = 1;
420 } 448 }
421 449
422 page_do_bit17_swizzling = obj_do_bit17_swizzling && 450 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
423 (page_to_phys(page) & (1 << 17)) != 0; 451 (page_to_phys(page) & (1 << 17)) != 0;
424 452
425 vaddr = kmap(page); 453 ret = shmem_pread_fast(page, shmem_page_offset, page_length,
426 if (page_do_bit17_swizzling) 454 user_data, page_do_bit17_swizzling,
427 ret = __copy_to_user_swizzled(user_data, 455 needs_clflush);
428 vaddr, shmem_page_offset, 456 if (ret == 0)
429 page_length); 457 goto next_page;
430 else
431 ret = __copy_to_user(user_data,
432 vaddr + shmem_page_offset,
433 page_length);
434 kunmap(page);
435 458
436 mark_page_accessed(page); 459 hit_slowpath = 1;
460 page_cache_get(page);
461 mutex_unlock(&dev->struct_mutex);
462
463 if (!prefaulted) {
464 ret = fault_in_multipages_writeable(user_data, remain);
465 /* Userspace is tricking us, but we've already clobbered
466 * its pages with the prefault and promised to write the
467 * data up to the first fault. Hence ignore any errors
468 * and just continue. */
469 (void)ret;
470 prefaulted = 1;
471 }
472
473 ret = shmem_pread_slow(page, shmem_page_offset, page_length,
474 user_data, page_do_bit17_swizzling,
475 needs_clflush);
476
477 mutex_lock(&dev->struct_mutex);
437 page_cache_release(page); 478 page_cache_release(page);
479next_page:
480 mark_page_accessed(page);
481 if (release_page)
482 page_cache_release(page);
438 483
439 if (ret) { 484 if (ret) {
440 ret = -EFAULT; 485 ret = -EFAULT;
@@ -447,10 +492,11 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
447 } 492 }
448 493
449out: 494out:
450 mutex_lock(&dev->struct_mutex); 495 if (hit_slowpath) {
451 /* Fixup: Kill any reinstated backing storage pages */ 496 /* Fixup: Kill any reinstated backing storage pages */
452 if (obj->madv == __I915_MADV_PURGED) 497 if (obj->madv == __I915_MADV_PURGED)
453 i915_gem_object_truncate(obj); 498 i915_gem_object_truncate(obj);
499 }
454 500
455 return ret; 501 return ret;
456} 502}
@@ -476,11 +522,6 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 		       args->size))
 		return -EFAULT;
 
-	ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
-				       args->size);
-	if (ret)
-		return -EFAULT;
-
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
 		return ret;
@@ -498,19 +539,17 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 		goto out;
 	}
 
-	trace_i915_gem_object_pread(obj, args->offset, args->size);
-
-	ret = i915_gem_object_set_cpu_read_domain_range(obj,
-							args->offset,
-							args->size);
-	if (ret)
+	/* prime objects have no backing filp to GEM pread/pwrite
+	 * pages from.
+	 */
+	if (!obj->base.filp) {
+		ret = -EINVAL;
 		goto out;
+	}
 
-	ret = -EFAULT;
-	if (!i915_gem_object_needs_bit17_swizzle(obj))
-		ret = i915_gem_shmem_pread_fast(dev, obj, args, file);
-	if (ret == -EFAULT)
-		ret = i915_gem_shmem_pread_slow(dev, obj, args, file);
+	trace_i915_gem_object_pread(obj, args->offset, args->size);
+
+	ret = i915_gem_shmem_pread(dev, obj, args, file);
 
 out:
 	drm_gem_object_unreference(&obj->base);
@@ -529,40 +568,19 @@ fast_user_write(struct io_mapping *mapping,
 			 char __user *user_data,
 			 int length)
 {
-	char *vaddr_atomic;
+	void __iomem *vaddr_atomic;
+	void *vaddr;
 	unsigned long unwritten;
 
 	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
-	unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
+	/* We can use the cpu mem copy function because this is X86. */
+	vaddr = (void __force*)vaddr_atomic + page_offset;
+	unwritten = __copy_from_user_inatomic_nocache(vaddr,
 						      user_data, length);
 	io_mapping_unmap_atomic(vaddr_atomic);
 	return unwritten;
 }
 
542/* Here's the write path which can sleep for
543 * page faults
544 */
545
546static inline void
547slow_kernel_write(struct io_mapping *mapping,
548 loff_t gtt_base, int gtt_offset,
549 struct page *user_page, int user_offset,
550 int length)
551{
552 char __iomem *dst_vaddr;
553 char *src_vaddr;
554
555 dst_vaddr = io_mapping_map_wc(mapping, gtt_base);
556 src_vaddr = kmap(user_page);
557
558 memcpy_toio(dst_vaddr + gtt_offset,
559 src_vaddr + user_offset,
560 length);
561
562 kunmap(user_page);
563 io_mapping_unmap(dst_vaddr);
564}
565
566/** 584/**
567 * This is the fast pwrite path, where we copy the data directly from the 585 * This is the fast pwrite path, where we copy the data directly from the
568 * user into the GTT, uncached. 586 * user into the GTT, uncached.
@@ -577,7 +595,19 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
577 ssize_t remain; 595 ssize_t remain;
578 loff_t offset, page_base; 596 loff_t offset, page_base;
579 char __user *user_data; 597 char __user *user_data;
580 int page_offset, page_length; 598 int page_offset, page_length, ret;
599
600 ret = i915_gem_object_pin(obj, 0, true);
601 if (ret)
602 goto out;
603
604 ret = i915_gem_object_set_to_gtt_domain(obj, true);
605 if (ret)
606 goto out_unpin;
607
608 ret = i915_gem_object_put_fence(obj);
609 if (ret)
610 goto out_unpin;
581 611
582 user_data = (char __user *) (uintptr_t) args->data_ptr; 612 user_data = (char __user *) (uintptr_t) args->data_ptr;
583 remain = args->size; 613 remain = args->size;
@@ -602,214 +632,133 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
602 * retry in the slow path. 632 * retry in the slow path.
603 */ 633 */
604 if (fast_user_write(dev_priv->mm.gtt_mapping, page_base, 634 if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
605 page_offset, user_data, page_length)) 635 page_offset, user_data, page_length)) {
606 return -EFAULT; 636 ret = -EFAULT;
637 goto out_unpin;
638 }
607 639
608 remain -= page_length; 640 remain -= page_length;
609 user_data += page_length; 641 user_data += page_length;
610 offset += page_length; 642 offset += page_length;
611 } 643 }
612 644
613 return 0; 645out_unpin:
646 i915_gem_object_unpin(obj);
647out:
648 return ret;
614} 649}
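
i915_gem_gtt_pwrite_fast now owns its pin/set-domain/put-fence setup and unwinds with the usual kernel goto ladder; note that the success path also falls through out_unpin, so the unpin happens in every case. A compilable toy of the idiom (acquire_a/acquire_b/release_a are hypothetical stand-ins, with acquire_b forced to fail so the unwind is visible):

    #include <stdio.h>

    static int acquire_a(void) { return 0; }   /* 0 == success */
    static int acquire_b(void) { return -1; }  /* forced error to show unwind */
    static void release_a(void) { puts("release_a"); }

    static int do_write(void)
    {
        int ret;

        ret = acquire_a();          /* like i915_gem_object_pin() */
        if (ret)
            goto out;
        ret = acquire_b();          /* like set_to_gtt_domain()/put_fence() */
        if (ret)
            goto out_unpin;

        /* ... the fast write loop would run here ... */

    out_unpin:
        release_a();                /* like i915_gem_object_unpin() */
    out:
        return ret;
    }

    int main(void) { printf("ret=%d\n", do_write()); return 0; }
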
615 650
616/** 651/* Per-page copy function for the shmem pwrite fastpath.
617 * This is the fallback GTT pwrite path, which uses get_user_pages to pin 652 * Flushes invalid cachelines before writing to the target if
618 * the memory and maps it using kmap_atomic for copying. 653 * needs_clflush_before is set and flushes out any written cachelines after
619 * 654 * writing if needs_clflush_after is set. */
620 * This code resulted in x11perf -rgb10text consuming about 10% more CPU
621 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
622 */
623static int 655static int
624i915_gem_gtt_pwrite_slow(struct drm_device *dev, 656shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
625 struct drm_i915_gem_object *obj, 657 char __user *user_data,
626 struct drm_i915_gem_pwrite *args, 658 bool page_do_bit17_swizzling,
627 struct drm_file *file) 659 bool needs_clflush_before,
660 bool needs_clflush_after)
628{ 661{
629 drm_i915_private_t *dev_priv = dev->dev_private; 662 char *vaddr;
630 ssize_t remain;
631 loff_t gtt_page_base, offset;
632 loff_t first_data_page, last_data_page, num_pages;
633 loff_t pinned_pages, i;
634 struct page **user_pages;
635 struct mm_struct *mm = current->mm;
636 int gtt_page_offset, data_page_offset, data_page_index, page_length;
637 int ret; 663 int ret;
638 uint64_t data_ptr = args->data_ptr;
639
640 remain = args->size;
641
642 /* Pin the user pages containing the data. We can't fault while
643 * holding the struct mutex, and all of the pwrite implementations
644 * want to hold it while dereferencing the user data.
645 */
646 first_data_page = data_ptr / PAGE_SIZE;
647 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
648 num_pages = last_data_page - first_data_page + 1;
649
650 user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
651 if (user_pages == NULL)
652 return -ENOMEM;
653
654 mutex_unlock(&dev->struct_mutex);
655 down_read(&mm->mmap_sem);
656 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
657 num_pages, 0, 0, user_pages, NULL);
658 up_read(&mm->mmap_sem);
659 mutex_lock(&dev->struct_mutex);
660 if (pinned_pages < num_pages) {
661 ret = -EFAULT;
662 goto out_unpin_pages;
663 }
664
665 ret = i915_gem_object_set_to_gtt_domain(obj, true);
666 if (ret)
667 goto out_unpin_pages;
668
669 ret = i915_gem_object_put_fence(obj);
670 if (ret)
671 goto out_unpin_pages;
672
673 offset = obj->gtt_offset + args->offset;
674
675 while (remain > 0) {
676 /* Operation in this page
677 *
678 * gtt_page_base = page offset within aperture
679 * gtt_page_offset = offset within page in aperture
680 * data_page_index = page number in get_user_pages return
681 * data_page_offset = offset with data_page_index page.
682 * page_length = bytes to copy for this page
683 */
684 gtt_page_base = offset & PAGE_MASK;
685 gtt_page_offset = offset_in_page(offset);
686 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
687 data_page_offset = offset_in_page(data_ptr);
688
689 page_length = remain;
690 if ((gtt_page_offset + page_length) > PAGE_SIZE)
691 page_length = PAGE_SIZE - gtt_page_offset;
692 if ((data_page_offset + page_length) > PAGE_SIZE)
693 page_length = PAGE_SIZE - data_page_offset;
694 664
695 slow_kernel_write(dev_priv->mm.gtt_mapping, 665 if (unlikely(page_do_bit17_swizzling))
696 gtt_page_base, gtt_page_offset, 666 return -EINVAL;
697 user_pages[data_page_index],
698 data_page_offset,
699 page_length);
700
701 remain -= page_length;
702 offset += page_length;
703 data_ptr += page_length;
704 }
705 667
706out_unpin_pages: 668 vaddr = kmap_atomic(page);
707 for (i = 0; i < pinned_pages; i++) 669 if (needs_clflush_before)
708 page_cache_release(user_pages[i]); 670 drm_clflush_virt_range(vaddr + shmem_page_offset,
709 drm_free_large(user_pages); 671 page_length);
672 ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
673 user_data,
674 page_length);
675 if (needs_clflush_after)
676 drm_clflush_virt_range(vaddr + shmem_page_offset,
677 page_length);
678 kunmap_atomic(vaddr);
710 679
711 return ret; 680 return ret;
712} 681}
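
shmem_pwrite_fast brackets the non-temporal copy with cacheline flushes: before, when the write only partially covers a line (so stale data isn't pulled back in dirty), and after, when the GPU must see the result in memory. On x86 the same shape can be sketched in userspace with SSE2 intrinsics; the 64-byte line size is an assumption here (the driver reads it from boot_cpu_data), and memcpy stands in for the nocache copy:

    #include <emmintrin.h>   /* _mm_clflush, _mm_sfence */
    #include <stdint.h>
    #include <string.h>

    #define LINE 64          /* assumed cacheline size */

    /* Flush every cacheline overlapping [p, p+len). */
    static void clflush_range(void *p, size_t len)
    {
        uintptr_t a = (uintptr_t)p & ~(uintptr_t)(LINE - 1);
        uintptr_t end = (uintptr_t)p + len;

        for (; a < end; a += LINE)
            _mm_clflush((void *)a);
        _mm_sfence();
    }

    static void pwrite_page(char *page, size_t off, const char *src, size_t len,
                            int flush_before, int flush_after)
    {
        if (flush_before)             /* partial lines: make them coherent first */
            clflush_range(page + off, len);
        memcpy(page + off, src, len); /* the driver uses a nocache copy here */
        if (flush_after)              /* push the written data out for the GPU */
            clflush_range(page + off, len);
    }

    int main(void)
    {
        static char page[4096];
        static const char src[100] = "hello";
        pwrite_page(page, 10, src, sizeof(src), 1, 1);
        return 0;
    }
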
713 682
714/** 683/* Only difference from the fast-path function is that it can handle bit17
715 * This is the fast shmem pwrite path, which attempts to directly 684 * and uses non-atomic copy and kmap functions. */
716 * copy_from_user into the kmapped pages backing the object.
717 */
718static int 685static int
719i915_gem_shmem_pwrite_fast(struct drm_device *dev, 686shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
720 struct drm_i915_gem_object *obj, 687 char __user *user_data,
721 struct drm_i915_gem_pwrite *args, 688 bool page_do_bit17_swizzling,
722 struct drm_file *file) 689 bool needs_clflush_before,
690 bool needs_clflush_after)
723{ 691{
724 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; 692 char *vaddr;
725 ssize_t remain; 693 int ret;
726 loff_t offset;
727 char __user *user_data;
728 int page_offset, page_length;
729
730 user_data = (char __user *) (uintptr_t) args->data_ptr;
731 remain = args->size;
732
733 offset = args->offset;
734 obj->dirty = 1;
735
736 while (remain > 0) {
737 struct page *page;
738 char *vaddr;
739 int ret;
740
741 /* Operation in this page
742 *
743 * page_offset = offset within page
744 * page_length = bytes to copy for this page
745 */
746 page_offset = offset_in_page(offset);
747 page_length = remain;
748 if ((page_offset + remain) > PAGE_SIZE)
749 page_length = PAGE_SIZE - page_offset;
750
751 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
752 if (IS_ERR(page))
753 return PTR_ERR(page);
754 694
755 vaddr = kmap_atomic(page); 695 vaddr = kmap(page);
756 ret = __copy_from_user_inatomic(vaddr + page_offset, 696 if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
697 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
698 page_length,
699 page_do_bit17_swizzling);
700 if (page_do_bit17_swizzling)
701 ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
757 user_data, 702 user_data,
758 page_length); 703 page_length);
759 kunmap_atomic(vaddr); 704 else
760 705 ret = __copy_from_user(vaddr + shmem_page_offset,
761 set_page_dirty(page); 706 user_data,
762 mark_page_accessed(page); 707 page_length);
763 page_cache_release(page); 708 if (needs_clflush_after)
764 709 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
765 /* If we get a fault while copying data, then (presumably) our 710 page_length,
766 * source page isn't available. Return the error and we'll 711 page_do_bit17_swizzling);
767 * retry in the slow path. 712 kunmap(page);
768 */
769 if (ret)
770 return -EFAULT;
771
772 remain -= page_length;
773 user_data += page_length;
774 offset += page_length;
775 }
776 713
777 return 0; 714 return ret;
778} 715}
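
The swizzled copies used on bit-17 pages work cacheline-by-cacheline, XOR-ing bit 6 of the GPU-side offset — which is why the fast path above simply refuses (-EINVAL) when swizzling is needed. A userspace model of that copy; the bit-6 rule matches the driver's __copy_from_user_swizzled helper, while the page contents are synthetic:

    #include <stdio.h>
    #include <string.h>

    #define CACHELINE 64

    /* Copy into a bit-17-swizzled page: each 64-byte chunk lands at offset ^ 64. */
    static void copy_swizzled(char *gpu, unsigned gpu_off,
                              const char *cpu, unsigned len)
    {
        while (len) {
            unsigned line_end = (gpu_off / CACHELINE + 1) * CACHELINE;
            unsigned n = line_end - gpu_off;

            if (n > len)
                n = len;
            memcpy(gpu + (gpu_off ^ 64), cpu, n);  /* flip bit 6 of the offset */
            cpu += n;
            gpu_off += n;
            len -= n;
        }
    }

    int main(void)
    {
        char page[4096] = {0};
        copy_swizzled(page, 0, "0123456789", 10);
        printf("data landed at offset 64: %.10s\n", page + 64);
        return 0;
    }
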
779 716
780/**
781 * This is the fallback shmem pwrite path, which uses get_user_pages to pin
782 * the memory and maps it using kmap_atomic for copying.
783 *
784 * This avoids taking mmap_sem for faulting on the user's address while the
785 * struct_mutex is held.
786 */
787static int 717static int
788i915_gem_shmem_pwrite_slow(struct drm_device *dev, 718i915_gem_shmem_pwrite(struct drm_device *dev,
789 struct drm_i915_gem_object *obj, 719 struct drm_i915_gem_object *obj,
790 struct drm_i915_gem_pwrite *args, 720 struct drm_i915_gem_pwrite *args,
791 struct drm_file *file) 721 struct drm_file *file)
792{ 722{
793 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; 723 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
794 ssize_t remain; 724 ssize_t remain;
795 loff_t offset; 725 loff_t offset;
796 char __user *user_data; 726 char __user *user_data;
797 int shmem_page_offset, page_length, ret; 727 int shmem_page_offset, page_length, ret = 0;
798 int obj_do_bit17_swizzling, page_do_bit17_swizzling; 728 int obj_do_bit17_swizzling, page_do_bit17_swizzling;
729 int hit_slowpath = 0;
730 int needs_clflush_after = 0;
731 int needs_clflush_before = 0;
732 int release_page;
799 733
800 user_data = (char __user *) (uintptr_t) args->data_ptr; 734 user_data = (char __user *) (uintptr_t) args->data_ptr;
801 remain = args->size; 735 remain = args->size;
802 736
803 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); 737 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
804 738
739 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
740 /* If we're not in the cpu write domain, set ourself into the gtt
741 * write domain and manually flush cachelines (if required). This
742 * optimizes for the case when the gpu will use the data
743 * right away and we therefore have to clflush anyway. */
744 if (obj->cache_level == I915_CACHE_NONE)
745 needs_clflush_after = 1;
746 ret = i915_gem_object_set_to_gtt_domain(obj, true);
747 if (ret)
748 return ret;
749 }
750 /* Same trick applies for invalidate partially written cachelines before
751 * writing. */
752 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)
753 && obj->cache_level == I915_CACHE_NONE)
754 needs_clflush_before = 1;
755
805 offset = args->offset; 756 offset = args->offset;
806 obj->dirty = 1; 757 obj->dirty = 1;
807 758
808 mutex_unlock(&dev->struct_mutex);
809
810 while (remain > 0) { 759 while (remain > 0) {
811 struct page *page; 760 struct page *page;
812 char *vaddr; 761 int partial_cacheline_write;
813 762
814 /* Operation in this page 763 /* Operation in this page
815 * 764 *
@@ -822,29 +771,51 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev,
822 if ((shmem_page_offset + page_length) > PAGE_SIZE) 771 if ((shmem_page_offset + page_length) > PAGE_SIZE)
823 page_length = PAGE_SIZE - shmem_page_offset; 772 page_length = PAGE_SIZE - shmem_page_offset;
824 773
825 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT); 774 /* If we don't overwrite a cacheline completely we need to be
826 if (IS_ERR(page)) { 775 * careful to have up-to-date data by first clflushing. Don't
827 ret = PTR_ERR(page); 776 * overcomplicate things and flush the entire page. */
828 goto out; 777 partial_cacheline_write = needs_clflush_before &&
778 ((shmem_page_offset | page_length)
779 & (boot_cpu_data.x86_clflush_size - 1));
780
781 if (obj->pages) {
782 page = obj->pages[offset >> PAGE_SHIFT];
783 release_page = 0;
784 } else {
785 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
786 if (IS_ERR(page)) {
787 ret = PTR_ERR(page);
788 goto out;
789 }
790 release_page = 1;
829 } 791 }
830 792
831 page_do_bit17_swizzling = obj_do_bit17_swizzling && 793 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
832 (page_to_phys(page) & (1 << 17)) != 0; 794 (page_to_phys(page) & (1 << 17)) != 0;
833 795
834 vaddr = kmap(page); 796 ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
835 if (page_do_bit17_swizzling) 797 user_data, page_do_bit17_swizzling,
836 ret = __copy_from_user_swizzled(vaddr, shmem_page_offset, 798 partial_cacheline_write,
837 user_data, 799 needs_clflush_after);
838 page_length); 800 if (ret == 0)
839 else 801 goto next_page;
840 ret = __copy_from_user(vaddr + shmem_page_offset, 802
841 user_data, 803 hit_slowpath = 1;
842 page_length); 804 page_cache_get(page);
843 kunmap(page); 805 mutex_unlock(&dev->struct_mutex);
806
807 ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
808 user_data, page_do_bit17_swizzling,
809 partial_cacheline_write,
810 needs_clflush_after);
844 811
812 mutex_lock(&dev->struct_mutex);
813 page_cache_release(page);
814next_page:
845 set_page_dirty(page); 815 set_page_dirty(page);
846 mark_page_accessed(page); 816 mark_page_accessed(page);
847 page_cache_release(page); 817 if (release_page)
818 page_cache_release(page);
848 819
849 if (ret) { 820 if (ret) {
850 ret = -EFAULT; 821 ret = -EFAULT;
@@ -857,17 +828,21 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev,
857 } 828 }
858 829
859out: 830out:
860 mutex_lock(&dev->struct_mutex); 831 if (hit_slowpath) {
861 /* Fixup: Kill any reinstated backing storage pages */ 832 /* Fixup: Kill any reinstated backing storage pages */
862 if (obj->madv == __I915_MADV_PURGED) 833 if (obj->madv == __I915_MADV_PURGED)
863 i915_gem_object_truncate(obj); 834 i915_gem_object_truncate(obj);
864 /* and flush dirty cachelines in case the object isn't in the cpu write 835 /* and flush dirty cachelines in case the object isn't in the cpu write
865 * domain anymore. */ 836 * domain anymore. */
866 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) { 837 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
867 i915_gem_clflush_object(obj); 838 i915_gem_clflush_object(obj);
868 intel_gtt_chipset_flush(); 839 intel_gtt_chipset_flush();
840 }
869 } 841 }
870 842
843 if (needs_clflush_after)
844 intel_gtt_chipset_flush();
845
871 return ret; 846 return ret;
872} 847}
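
The partial_cacheline_write test above is pure bit arithmetic: a pre-flush is needed iff either the start offset or the length is not cacheline-aligned, i.e. ((off | len) & (clflush_size - 1)) != 0. A quick standalone check of that predicate, assuming the usual 64-byte line:

    #include <stdio.h>

    int main(void)
    {
        unsigned clflush = 64;           /* boot_cpu_data.x86_clflush_size */
        struct { unsigned off, len; } t[] = {
            {   0,  64 },                /* exactly one line: no pre-flush */
            {   0, 128 },                /* two whole lines: no pre-flush */
            {  16,  64 },                /* misaligned start: pre-flush */
            {   0,  40 },                /* short write: pre-flush */
        };
        for (unsigned i = 0; i < sizeof(t) / sizeof(t[0]); i++)
            printf("off=%3u len=%3u partial=%d\n", t[i].off, t[i].len,
                   ((t[i].off | t[i].len) & (clflush - 1)) != 0);
        return 0;
    }
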
873 848
@@ -892,8 +867,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
892 args->size)) 867 args->size))
893 return -EFAULT; 868 return -EFAULT;
894 869
895 ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr, 870 ret = fault_in_multipages_readable((char __user *)(uintptr_t)args->data_ptr,
896 args->size); 871 args->size);
897 if (ret) 872 if (ret)
898 return -EFAULT; 873 return -EFAULT;
899 874
@@ -914,8 +889,17 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
914 goto out; 889 goto out;
915 } 890 }
916 891
892 /* prime objects have no backing filp to GEM pread/pwrite
893 * pages from.
894 */
895 if (!obj->base.filp) {
896 ret = -EINVAL;
897 goto out;
898 }
899
917 trace_i915_gem_object_pwrite(obj, args->offset, args->size); 900 trace_i915_gem_object_pwrite(obj, args->offset, args->size);
918 901
902 ret = -EFAULT;
919 /* We can only do the GTT pwrite on untiled buffers, as otherwise 903 /* We can only do the GTT pwrite on untiled buffers, as otherwise
920 * it would end up going through the fenced access, and we'll get 904 * it would end up going through the fenced access, and we'll get
921 * different detiling behavior between reading and writing. 905 * different detiling behavior between reading and writing.
@@ -928,42 +912,18 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
928 } 912 }
929 913
930 if (obj->gtt_space && 914 if (obj->gtt_space &&
915 obj->cache_level == I915_CACHE_NONE &&
916 obj->tiling_mode == I915_TILING_NONE &&
917 obj->map_and_fenceable &&
931 obj->base.write_domain != I915_GEM_DOMAIN_CPU) { 918 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
932 ret = i915_gem_object_pin(obj, 0, true);
933 if (ret)
934 goto out;
935
936 ret = i915_gem_object_set_to_gtt_domain(obj, true);
937 if (ret)
938 goto out_unpin;
939
940 ret = i915_gem_object_put_fence(obj);
941 if (ret)
942 goto out_unpin;
943
944 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file); 919 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
945 if (ret == -EFAULT) 920 /* Note that the gtt paths might fail with non-page-backed user
946 ret = i915_gem_gtt_pwrite_slow(dev, obj, args, file); 921 * pointers (e.g. gtt mappings when moving data between
947 922 * textures). Fallback to the shmem path in that case. */
948out_unpin: 922 * textures). Fall back to the shmem path in that case. */
949 i915_gem_object_unpin(obj);
950
951 if (ret != -EFAULT)
952 goto out;
953 /* Fall through to the shmfs paths because the gtt paths might
954 * fail with non-page-backed user pointers (e.g. gtt mappings
955 * when moving data between textures). */
956 } 923 }
957 924
958 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
959 if (ret)
960 goto out;
961
962 ret = -EFAULT;
963 if (!i915_gem_object_needs_bit17_swizzle(obj))
964 ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
965 if (ret == -EFAULT) 925 if (ret == -EFAULT)
966 ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file); 926 ret = i915_gem_shmem_pwrite(dev, obj, args, file);
967 927
968out: 928out:
969 drm_gem_object_unreference(&obj->base); 929 drm_gem_object_unreference(&obj->base);
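
The ioctl now gates the GTT fast path on four conditions checked up front (bound, uncached, untiled, mappable-and-fenceable) instead of pinning first and unwinding on failure. As a predicate — the struct here is a reduced stand-in whose field names only mirror the diff:

    #include <stdbool.h>
    #include <stdio.h>

    struct obj {                   /* reduced stand-in for drm_i915_gem_object */
        bool bound;                /* obj->gtt_space != NULL */
        bool uncached;             /* cache_level == I915_CACHE_NONE */
        bool untiled;              /* tiling_mode == I915_TILING_NONE */
        bool map_and_fenceable;
        bool cpu_write_domain;     /* write_domain == I915_GEM_DOMAIN_CPU */
    };

    static bool use_gtt_pwrite_fast(const struct obj *o)
    {
        return o->bound && o->uncached && o->untiled &&
               o->map_and_fenceable && !o->cpu_write_domain;
    }

    int main(void)
    {
        struct obj o = { true, true, true, true, false };
        /* -EFAULT from the GTT path still falls back to the shmem path. */
        printf("gtt fast path: %d\n", use_gtt_pwrite_fast(&o));
        return 0;
    }
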
@@ -986,9 +946,6 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
986 uint32_t write_domain = args->write_domain; 946 uint32_t write_domain = args->write_domain;
987 int ret; 947 int ret;
988 948
989 if (!(dev->driver->driver_features & DRIVER_GEM))
990 return -ENODEV;
991
992 /* Only handle setting domains to types used by the CPU. */ 949 /* Only handle setting domains to types used by the CPU. */
993 if (write_domain & I915_GEM_GPU_DOMAINS) 950 if (write_domain & I915_GEM_GPU_DOMAINS)
994 return -EINVAL; 951 return -EINVAL;
@@ -1042,9 +999,6 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1042 struct drm_i915_gem_object *obj; 999 struct drm_i915_gem_object *obj;
1043 int ret = 0; 1000 int ret = 0;
1044 1001
1045 if (!(dev->driver->driver_features & DRIVER_GEM))
1046 return -ENODEV;
1047
1048 ret = i915_mutex_lock_interruptible(dev); 1002 ret = i915_mutex_lock_interruptible(dev);
1049 if (ret) 1003 if (ret)
1050 return ret; 1004 return ret;
@@ -1080,13 +1034,18 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1080 struct drm_gem_object *obj; 1034 struct drm_gem_object *obj;
1081 unsigned long addr; 1035 unsigned long addr;
1082 1036
1083 if (!(dev->driver->driver_features & DRIVER_GEM))
1084 return -ENODEV;
1085
1086 obj = drm_gem_object_lookup(dev, file, args->handle); 1037 obj = drm_gem_object_lookup(dev, file, args->handle);
1087 if (obj == NULL) 1038 if (obj == NULL)
1088 return -ENOENT; 1039 return -ENOENT;
1089 1040
1041 /* prime objects have no backing filp to GEM mmap
1042 * pages from.
1043 */
1044 if (!obj->filp) {
1045 drm_gem_object_unreference_unlocked(obj);
1046 return -EINVAL;
1047 }
1048
1090 addr = vm_mmap(obj->filp, 0, args->size, 1049 addr = vm_mmap(obj->filp, 0, args->size,
1091 PROT_READ | PROT_WRITE, MAP_SHARED, 1050 PROT_READ | PROT_WRITE, MAP_SHARED,
1092 args->offset); 1051 args->offset);
@@ -1151,10 +1110,10 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1151 goto unlock; 1110 goto unlock;
1152 } 1111 }
1153 1112
1154 if (obj->tiling_mode == I915_TILING_NONE) 1113 if (!obj->has_global_gtt_mapping)
1155 ret = i915_gem_object_put_fence(obj); 1114 i915_gem_gtt_bind_object(obj, obj->cache_level);
1156 else 1115
1157 ret = i915_gem_object_get_fence(obj, NULL); 1116 ret = i915_gem_object_get_fence(obj);
1158 if (ret) 1117 if (ret)
1159 goto unlock; 1118 goto unlock;
1160 1119
@@ -1308,9 +1267,6 @@ i915_gem_mmap_gtt(struct drm_file *file,
1308 struct drm_i915_gem_object *obj; 1267 struct drm_i915_gem_object *obj;
1309 int ret; 1268 int ret;
1310 1269
1311 if (!(dev->driver->driver_features & DRIVER_GEM))
1312 return -ENODEV;
1313
1314 ret = i915_mutex_lock_interruptible(dev); 1270 ret = i915_mutex_lock_interruptible(dev);
1315 if (ret) 1271 if (ret)
1316 return ret; 1272 return ret;
@@ -1368,14 +1324,10 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1368{ 1324{
1369 struct drm_i915_gem_mmap_gtt *args = data; 1325 struct drm_i915_gem_mmap_gtt *args = data;
1370 1326
1371 if (!(dev->driver->driver_features & DRIVER_GEM))
1372 return -ENODEV;
1373
1374 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset); 1327 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1375} 1328}
1376 1329
1377 1330int
1378static int
1379i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj, 1331i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
1380 gfp_t gfpmask) 1332 gfp_t gfpmask)
1381{ 1333{
@@ -1384,6 +1336,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
1384 struct inode *inode; 1336 struct inode *inode;
1385 struct page *page; 1337 struct page *page;
1386 1338
1339 if (obj->pages || obj->sg_table)
1340 return 0;
1341
1387 /* Get the list of pages out of our struct file. They'll be pinned 1342 /* Get the list of pages out of our struct file. They'll be pinned
1388 * at this point until we release them. 1343 * at this point until we release them.
1389 */ 1344 */
@@ -1425,6 +1380,9 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
1425 int page_count = obj->base.size / PAGE_SIZE; 1380 int page_count = obj->base.size / PAGE_SIZE;
1426 int i; 1381 int i;
1427 1382
1383 if (!obj->pages)
1384 return;
1385
1428 BUG_ON(obj->madv == __I915_MADV_PURGED); 1386 BUG_ON(obj->madv == __I915_MADV_PURGED);
1429 1387
1430 if (i915_gem_object_needs_bit17_swizzle(obj)) 1388 if (i915_gem_object_needs_bit17_swizzle(obj))
@@ -1473,7 +1431,6 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
1473 1431
1474 if (obj->fenced_gpu_access) { 1432 if (obj->fenced_gpu_access) {
1475 obj->last_fenced_seqno = seqno; 1433 obj->last_fenced_seqno = seqno;
1476 obj->last_fenced_ring = ring;
1477 1434
1478 /* Bump MRU to take account of the delayed flush */ 1435 /* Bump MRU to take account of the delayed flush */
1479 if (obj->fence_reg != I915_FENCE_REG_NONE) { 1436 if (obj->fence_reg != I915_FENCE_REG_NONE) {
@@ -1512,15 +1469,11 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
1512 struct drm_device *dev = obj->base.dev; 1469 struct drm_device *dev = obj->base.dev;
1513 struct drm_i915_private *dev_priv = dev->dev_private; 1470 struct drm_i915_private *dev_priv = dev->dev_private;
1514 1471
1515 if (obj->pin_count != 0) 1472 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
1516 list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list);
1517 else
1518 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
1519 1473
1520 BUG_ON(!list_empty(&obj->gpu_write_list)); 1474 BUG_ON(!list_empty(&obj->gpu_write_list));
1521 BUG_ON(!obj->active); 1475 BUG_ON(!obj->active);
1522 obj->ring = NULL; 1476 obj->ring = NULL;
1523 obj->last_fenced_ring = NULL;
1524 1477
1525 i915_gem_object_move_off_active(obj); 1478 i915_gem_object_move_off_active(obj);
1526 obj->fenced_gpu_access = false; 1479 obj->fenced_gpu_access = false;
@@ -1546,6 +1499,9 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
1546 inode = obj->base.filp->f_path.dentry->d_inode; 1499 inode = obj->base.filp->f_path.dentry->d_inode;
1547 shmem_truncate_range(inode, 0, (loff_t)-1); 1500 shmem_truncate_range(inode, 0, (loff_t)-1);
1548 1501
1502 if (obj->base.map_list.map)
1503 drm_gem_free_mmap_offset(&obj->base);
1504
1549 obj->madv = __I915_MADV_PURGED; 1505 obj->madv = __I915_MADV_PURGED;
1550} 1506}
1551 1507
@@ -1711,30 +1667,29 @@ static void i915_gem_reset_fences(struct drm_device *dev)
1711 1667
1712 for (i = 0; i < dev_priv->num_fence_regs; i++) { 1668 for (i = 0; i < dev_priv->num_fence_regs; i++) {
1713 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; 1669 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
1714 struct drm_i915_gem_object *obj = reg->obj;
1715 1670
1716 if (!obj) 1671 i915_gem_write_fence(dev, i, NULL);
1717 continue;
1718 1672
1719 if (obj->tiling_mode) 1673 if (reg->obj)
1720 i915_gem_release_mmap(obj); 1674 i915_gem_object_fence_lost(reg->obj);
1721 1675
1722 reg->obj->fence_reg = I915_FENCE_REG_NONE; 1676 reg->pin_count = 0;
1723 reg->obj->fenced_gpu_access = false; 1677 reg->obj = NULL;
1724 reg->obj->last_fenced_seqno = 0; 1678 INIT_LIST_HEAD(&reg->lru_list);
1725 reg->obj->last_fenced_ring = NULL;
1726 i915_gem_clear_fence_reg(dev, reg);
1727 } 1679 }
1680
1681 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
1728} 1682}
1729 1683
1730void i915_gem_reset(struct drm_device *dev) 1684void i915_gem_reset(struct drm_device *dev)
1731{ 1685{
1732 struct drm_i915_private *dev_priv = dev->dev_private; 1686 struct drm_i915_private *dev_priv = dev->dev_private;
1733 struct drm_i915_gem_object *obj; 1687 struct drm_i915_gem_object *obj;
1688 struct intel_ring_buffer *ring;
1734 int i; 1689 int i;
1735 1690
1736 for (i = 0; i < I915_NUM_RINGS; i++) 1691 for_each_ring(ring, dev_priv, i)
1737 i915_gem_reset_ring_lists(dev_priv, &dev_priv->ring[i]); 1692 i915_gem_reset_ring_lists(dev_priv, ring);
1738 1693
1739 /* Remove anything from the flushing lists. The GPU cache is likely 1694 /* Remove anything from the flushing lists. The GPU cache is likely
1740 * to be lost on reset along with the data, so simply move the 1695 * to be lost on reset along with the data, so simply move the
@@ -1839,24 +1794,11 @@ void
1839i915_gem_retire_requests(struct drm_device *dev) 1794i915_gem_retire_requests(struct drm_device *dev)
1840{ 1795{
1841 drm_i915_private_t *dev_priv = dev->dev_private; 1796 drm_i915_private_t *dev_priv = dev->dev_private;
1797 struct intel_ring_buffer *ring;
1842 int i; 1798 int i;
1843 1799
1844 if (!list_empty(&dev_priv->mm.deferred_free_list)) { 1800 for_each_ring(ring, dev_priv, i)
1845 struct drm_i915_gem_object *obj, *next; 1801 i915_gem_retire_requests_ring(ring);
1846
1847 /* We must be careful that during unbind() we do not
1848 * accidentally infinitely recurse into retire requests.
1849 * Currently:
1850 * retire -> free -> unbind -> wait -> retire_ring
1851 */
1852 list_for_each_entry_safe(obj, next,
1853 &dev_priv->mm.deferred_free_list,
1854 mm_list)
1855 i915_gem_free_object_tail(obj);
1856 }
1857
1858 for (i = 0; i < I915_NUM_RINGS; i++)
1859 i915_gem_retire_requests_ring(&dev_priv->ring[i]);
1860} 1802}
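
for_each_ring replaces the raw index loop and, as used here, visits only rings that were actually initialised. A userspace sketch of such a macro — the real one lives in the i915 headers; this version and its initialized flag are illustrative only:

    #include <stdio.h>

    #define NUM_RINGS 3

    struct ring { const char *name; int initialized; };
    struct dev_priv { struct ring ring[NUM_RINGS]; };

    /* Iterate initialised rings only; (r), (d), (i) mirror the kernel macro shape. */
    #define for_each_ring(r, d, i) \
        for ((i) = 0; (i) < NUM_RINGS; (i)++) \
            if (((r) = &(d)->ring[(i)])->initialized)

    int main(void)
    {
        struct dev_priv dp = {{ {"render", 1}, {"bsd", 0}, {"blt", 1} }};
        struct ring *ring;
        int i;

        for_each_ring(ring, &dp, i)
            printf("retire on %s\n", ring->name);  /* skips uninitialised bsd */
        return 0;
    }
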
1861 1803
1862static void 1804static void
@@ -1864,6 +1806,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
1864{ 1806{
1865 drm_i915_private_t *dev_priv; 1807 drm_i915_private_t *dev_priv;
1866 struct drm_device *dev; 1808 struct drm_device *dev;
1809 struct intel_ring_buffer *ring;
1867 bool idle; 1810 bool idle;
1868 int i; 1811 int i;
1869 1812
@@ -1883,9 +1826,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
1883 * objects indefinitely. 1826 * objects indefinitely.
1884 */ 1827 */
1885 idle = true; 1828 idle = true;
1886 for (i = 0; i < I915_NUM_RINGS; i++) { 1829 for_each_ring(ring, dev_priv, i) {
1887 struct intel_ring_buffer *ring = &dev_priv->ring[i];
1888
1889 if (!list_empty(&ring->gpu_write_list)) { 1830 if (!list_empty(&ring->gpu_write_list)) {
1890 struct drm_i915_gem_request *request; 1831 struct drm_i915_gem_request *request;
1891 int ret; 1832 int ret;
@@ -1907,20 +1848,10 @@ i915_gem_retire_work_handler(struct work_struct *work)
1907 mutex_unlock(&dev->struct_mutex); 1848 mutex_unlock(&dev->struct_mutex);
1908} 1849}
1909 1850
1910/** 1851static int
1911 * Waits for a sequence number to be signaled, and cleans up the 1852i915_gem_check_wedge(struct drm_i915_private *dev_priv)
1912 * request and object lists appropriately for that event.
1913 */
1914int
1915i915_wait_request(struct intel_ring_buffer *ring,
1916 uint32_t seqno,
1917 bool do_retire)
1918{ 1853{
1919 drm_i915_private_t *dev_priv = ring->dev->dev_private; 1854 BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
1920 u32 ier;
1921 int ret = 0;
1922
1923 BUG_ON(seqno == 0);
1924 1855
1925 if (atomic_read(&dev_priv->mm.wedged)) { 1856 if (atomic_read(&dev_priv->mm.wedged)) {
1926 struct completion *x = &dev_priv->error_completion; 1857 struct completion *x = &dev_priv->error_completion;
@@ -1935,6 +1866,20 @@ i915_wait_request(struct intel_ring_buffer *ring,
1935 return recovery_complete ? -EIO : -EAGAIN; 1866 return recovery_complete ? -EIO : -EAGAIN;
1936 } 1867 }
1937 1868
1869 return 0;
1870}
1871
1872/*
1873 * Compare seqno against outstanding lazy request. Emit a request if they are
1874 * equal.
1875 */
1876static int
1877i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
1878{
1879 int ret = 0;
1880
1881 BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
1882
1938 if (seqno == ring->outstanding_lazy_request) { 1883 if (seqno == ring->outstanding_lazy_request) {
1939 struct drm_i915_gem_request *request; 1884 struct drm_i915_gem_request *request;
1940 1885
@@ -1948,54 +1893,67 @@ i915_wait_request(struct intel_ring_buffer *ring,
1948 return ret; 1893 return ret;
1949 } 1894 }
1950 1895
1951 seqno = request->seqno; 1896 BUG_ON(seqno != request->seqno);
1952 } 1897 }
1953 1898
1954 if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) { 1899 return ret;
1955 if (HAS_PCH_SPLIT(ring->dev)) 1900}
1956 ier = I915_READ(DEIER) | I915_READ(GTIER);
1957 else
1958 ier = I915_READ(IER);
1959 if (!ier) {
1960 DRM_ERROR("something (likely vbetool) disabled "
1961 "interrupts, re-enabling\n");
1962 ring->dev->driver->irq_preinstall(ring->dev);
1963 ring->dev->driver->irq_postinstall(ring->dev);
1964 }
1965 1901
1966 trace_i915_gem_request_wait_begin(ring, seqno); 1902static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
1967 1903 bool interruptible)
1968 ring->waiting_seqno = seqno; 1904{
1969 if (ring->irq_get(ring)) { 1905 drm_i915_private_t *dev_priv = ring->dev->dev_private;
1970 if (dev_priv->mm.interruptible) 1906 int ret = 0;
1971 ret = wait_event_interruptible(ring->irq_queue, 1907
1972 i915_seqno_passed(ring->get_seqno(ring), seqno) 1908 if (i915_seqno_passed(ring->get_seqno(ring), seqno))
1973 || atomic_read(&dev_priv->mm.wedged)); 1909 return 0;
1974 else 1910
1975 wait_event(ring->irq_queue, 1911 trace_i915_gem_request_wait_begin(ring, seqno);
1976 i915_seqno_passed(ring->get_seqno(ring), seqno) 1912 if (WARN_ON(!ring->irq_get(ring)))
1977 || atomic_read(&dev_priv->mm.wedged)); 1913 return -ENODEV;
1978 1914
1979 ring->irq_put(ring); 1915#define EXIT_COND \
1980 } else if (wait_for_atomic(i915_seqno_passed(ring->get_seqno(ring), 1916 (i915_seqno_passed(ring->get_seqno(ring), seqno) || \
1981 seqno) || 1917 atomic_read(&dev_priv->mm.wedged))
1982 atomic_read(&dev_priv->mm.wedged), 3000)) 1918
1983 ret = -EBUSY; 1919 if (interruptible)
1984 ring->waiting_seqno = 0; 1920 ret = wait_event_interruptible(ring->irq_queue,
1985 1921 EXIT_COND);
1986 trace_i915_gem_request_wait_end(ring, seqno); 1922 else
1987 } 1923 wait_event(ring->irq_queue, EXIT_COND);
1924
1925 ring->irq_put(ring);
1926 trace_i915_gem_request_wait_end(ring, seqno);
1927#undef EXIT_COND
1928
1929 return ret;
1930}
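
__wait_seqno scopes its wake-up predicate in a local EXIT_COND macro so the interruptible and uninterruptible waits share one definition, then #undefs it immediately. A pthread analogue of that shape — the seqno source and condvar plumbing are invented for the demo, and the plain >= comparison ignores the wraparound handling that i915_seqno_passed provides:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static unsigned current_seqno;  /* advanced by the "ring" thread */
    static int wedged;              /* would be set on a simulated GPU hang */

    static void *ring_thread(void *arg)
    {
        (void)arg;
        for (int i = 0; i < 5; i++) {
            pthread_mutex_lock(&lock);
            current_seqno++;        /* like the ring retiring a request */
            pthread_cond_broadcast(&cond);
            pthread_mutex_unlock(&lock);
        }
        return NULL;
    }

    static int wait_seqno(unsigned seqno)
    {
    /* Same trick as __wait_seqno: one predicate, used by every wait flavour. */
    #define EXIT_COND (current_seqno >= seqno || wedged)
        pthread_mutex_lock(&lock);
        while (!EXIT_COND)
            pthread_cond_wait(&cond, &lock);
        pthread_mutex_unlock(&lock);
    #undef EXIT_COND
        return wedged ? -1 : 0;
    }

    int main(void)
    {
        pthread_t t;
        pthread_create(&t, NULL, ring_thread, NULL);
        printf("wait returned %d\n", wait_seqno(3));
        pthread_join(&t, NULL);
        return 0;
    }
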
1931
1932/**
1933 * Waits for a sequence number to be signaled, and cleans up the
1934 * request and object lists appropriately for that event.
1935 */
1936int
1937i915_wait_request(struct intel_ring_buffer *ring,
1938 uint32_t seqno)
1939{
1940 drm_i915_private_t *dev_priv = ring->dev->dev_private;
1941 int ret = 0;
1942
1943 BUG_ON(seqno == 0);
1944
1945 ret = i915_gem_check_wedge(dev_priv);
1946 if (ret)
1947 return ret;
1948
1949 ret = i915_gem_check_olr(ring, seqno);
1950 if (ret)
1951 return ret;
1952
1953 ret = __wait_seqno(ring, seqno, dev_priv->mm.interruptible);
1988 if (atomic_read(&dev_priv->mm.wedged)) 1954 if (atomic_read(&dev_priv->mm.wedged))
1989 ret = -EAGAIN; 1955 ret = -EAGAIN;
1990 1956
1991 /* Directly dispatch request retiring. While we have the work queue
1992 * to handle this, the waiter on a request often wants an associated
1993 * buffer to have made it to the inactive list, and we would need
1994 * a separate wait queue to handle that.
1995 */
1996 if (ret == 0 && do_retire)
1997 i915_gem_retire_requests_ring(ring);
1998
1999 return ret; 1957 return ret;
2000} 1958}
2001 1959
@@ -2017,15 +1975,58 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
2017 * it. 1975 * it.
2018 */ 1976 */
2019 if (obj->active) { 1977 if (obj->active) {
2020 ret = i915_wait_request(obj->ring, obj->last_rendering_seqno, 1978 ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
2021 true);
2022 if (ret) 1979 if (ret)
2023 return ret; 1980 return ret;
1981 i915_gem_retire_requests_ring(obj->ring);
2024 } 1982 }
2025 1983
2026 return 0; 1984 return 0;
2027} 1985}
2028 1986
1987/**
1988 * i915_gem_object_sync - sync an object to a ring.
1989 *
1990 * @obj: object which may be in use on another ring.
1991 * @to: ring we wish to use the object on. May be NULL.
1992 *
1993 * This code is meant to abstract object synchronization with the GPU.
1994 * Calling with NULL implies synchronizing the object with the CPU
1995 * rather than a particular GPU ring.
1996 *
1997 * Returns 0 if successful, else propagates up the lower layer error.
1998 */
1999int
2000i915_gem_object_sync(struct drm_i915_gem_object *obj,
2001 struct intel_ring_buffer *to)
2002{
2003 struct intel_ring_buffer *from = obj->ring;
2004 u32 seqno;
2005 int ret, idx;
2006
2007 if (from == NULL || to == from)
2008 return 0;
2009
2010 if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
2011 return i915_gem_object_wait_rendering(obj);
2012
2013 idx = intel_ring_sync_index(from, to);
2014
2015 seqno = obj->last_rendering_seqno;
2016 if (seqno <= from->sync_seqno[idx])
2017 return 0;
2018
2019 ret = i915_gem_check_olr(obj->ring, seqno);
2020 if (ret)
2021 return ret;
2022
2023 ret = to->sync_to(to, from, seqno);
2024 if (!ret)
2025 from->sync_seqno[idx] = seqno;
2026
2027 return ret;
2028}
2029
2029static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj) 2030static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2030{ 2031{
2031 u32 old_write_domain, old_read_domains; 2032 u32 old_write_domain, old_read_domains;
@@ -2068,7 +2069,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
2068 } 2069 }
2069 2070
2070 ret = i915_gem_object_finish_gpu(obj); 2071 ret = i915_gem_object_finish_gpu(obj);
2071 if (ret == -ERESTARTSYS) 2072 if (ret)
2072 return ret; 2073 return ret;
2073 /* Continue on if we fail due to EIO, the GPU is hung so we 2074 /* Continue on if we fail due to EIO, the GPU is hung so we
2074 * should be safe and we need to cleanup or else we might 2075 * should be safe and we need to cleanup or else we might
@@ -2095,16 +2096,18 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
2095 2096
2096 /* release the fence reg _after_ flushing */ 2097 /* release the fence reg _after_ flushing */
2097 ret = i915_gem_object_put_fence(obj); 2098 ret = i915_gem_object_put_fence(obj);
2098 if (ret == -ERESTARTSYS) 2099 if (ret)
2099 return ret; 2100 return ret;
2100 2101
2101 trace_i915_gem_object_unbind(obj); 2102 trace_i915_gem_object_unbind(obj);
2102 2103
2103 i915_gem_gtt_unbind_object(obj); 2104 if (obj->has_global_gtt_mapping)
2105 i915_gem_gtt_unbind_object(obj);
2104 if (obj->has_aliasing_ppgtt_mapping) { 2106 if (obj->has_aliasing_ppgtt_mapping) {
2105 i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj); 2107 i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
2106 obj->has_aliasing_ppgtt_mapping = 0; 2108 obj->has_aliasing_ppgtt_mapping = 0;
2107 } 2109 }
2110 i915_gem_gtt_finish_object(obj);
2108 2111
2109 i915_gem_object_put_pages_gtt(obj); 2112 i915_gem_object_put_pages_gtt(obj);
2110 2113
@@ -2145,7 +2148,7 @@ i915_gem_flush_ring(struct intel_ring_buffer *ring,
2145 return 0; 2148 return 0;
2146} 2149}
2147 2150
2148static int i915_ring_idle(struct intel_ring_buffer *ring, bool do_retire) 2151static int i915_ring_idle(struct intel_ring_buffer *ring)
2149{ 2152{
2150 int ret; 2153 int ret;
2151 2154
@@ -2159,208 +2162,201 @@ static int i915_ring_idle(struct intel_ring_buffer *ring, bool do_retire)
2159 return ret; 2162 return ret;
2160 } 2163 }
2161 2164
2162 return i915_wait_request(ring, i915_gem_next_request_seqno(ring), 2165 return i915_wait_request(ring, i915_gem_next_request_seqno(ring));
2163 do_retire);
2164} 2166}
2165 2167
2166int i915_gpu_idle(struct drm_device *dev, bool do_retire) 2168int i915_gpu_idle(struct drm_device *dev)
2167{ 2169{
2168 drm_i915_private_t *dev_priv = dev->dev_private; 2170 drm_i915_private_t *dev_priv = dev->dev_private;
2171 struct intel_ring_buffer *ring;
2169 int ret, i; 2172 int ret, i;
2170 2173
2171 /* Flush everything onto the inactive list. */ 2174 /* Flush everything onto the inactive list. */
2172 for (i = 0; i < I915_NUM_RINGS; i++) { 2175 for_each_ring(ring, dev_priv, i) {
2173 ret = i915_ring_idle(&dev_priv->ring[i], do_retire); 2176 ret = i915_ring_idle(ring);
2174 if (ret) 2177 if (ret)
2175 return ret; 2178 return ret;
2179
2180 /* Is the device fubar? */
2181 if (WARN_ON(!list_empty(&ring->gpu_write_list)))
2182 return -EBUSY;
2176 } 2183 }
2177 2184
2178 return 0; 2185 return 0;
2179} 2186}
2180 2187
2181static int sandybridge_write_fence_reg(struct drm_i915_gem_object *obj, 2188static void sandybridge_write_fence_reg(struct drm_device *dev, int reg,
2182 struct intel_ring_buffer *pipelined) 2189 struct drm_i915_gem_object *obj)
2183{ 2190{
2184 struct drm_device *dev = obj->base.dev;
2185 drm_i915_private_t *dev_priv = dev->dev_private; 2191 drm_i915_private_t *dev_priv = dev->dev_private;
2186 u32 size = obj->gtt_space->size;
2187 int regnum = obj->fence_reg;
2188 uint64_t val; 2192 uint64_t val;
2189 2193
2190 val = (uint64_t)((obj->gtt_offset + size - 4096) & 2194 if (obj) {
2191 0xfffff000) << 32; 2195 u32 size = obj->gtt_space->size;
2192 val |= obj->gtt_offset & 0xfffff000;
2193 val |= (uint64_t)((obj->stride / 128) - 1) <<
2194 SANDYBRIDGE_FENCE_PITCH_SHIFT;
2195 2196
2196 if (obj->tiling_mode == I915_TILING_Y) 2197 val = (uint64_t)((obj->gtt_offset + size - 4096) &
2197 val |= 1 << I965_FENCE_TILING_Y_SHIFT; 2198 0xfffff000) << 32;
2198 val |= I965_FENCE_REG_VALID; 2199 val |= obj->gtt_offset & 0xfffff000;
2200 val |= (uint64_t)((obj->stride / 128) - 1) <<
2201 SANDYBRIDGE_FENCE_PITCH_SHIFT;
2199 2202
2200 if (pipelined) { 2203 if (obj->tiling_mode == I915_TILING_Y)
2201 int ret = intel_ring_begin(pipelined, 6); 2204 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2202 if (ret) 2205 val |= I965_FENCE_REG_VALID;
2203 return ret;
2204
2205 intel_ring_emit(pipelined, MI_NOOP);
2206 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
2207 intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8);
2208 intel_ring_emit(pipelined, (u32)val);
2209 intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8 + 4);
2210 intel_ring_emit(pipelined, (u32)(val >> 32));
2211 intel_ring_advance(pipelined);
2212 } else 2206 } else
2213 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + regnum * 8, val); 2207 val = 0;
2214 2208
2215 return 0; 2209 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + reg * 8, val);
2210 POSTING_READ(FENCE_REG_SANDYBRIDGE_0 + reg * 8);
2216} 2211}
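
The Sandybridge helper now just computes the 64-bit fence value — or zero, when obj is NULL and the register is being cleared — and writes it with a posting read. The encoding itself is visible in the hunk; a standalone check of it, with the shift/bit constants below as illustrative placeholders for the real definitions in i915_reg.h:

    #include <stdint.h>
    #include <stdio.h>

    /* Placeholder values; the driver takes these from i915_reg.h. */
    #define FENCE_PITCH_SHIFT   32
    #define FENCE_TILING_Y_BIT  (1ull << 1)
    #define FENCE_REG_VALID     (1ull << 0)

    static uint64_t snb_fence_val(uint32_t gtt_offset, uint32_t size,
                                  uint32_t stride, int tiling_y)
    {
        uint64_t val;

        /* end address (last page) in the high dword, start in the low dword */
        val  = (uint64_t)((gtt_offset + size - 4096) & 0xfffff000) << 32;
        val |= gtt_offset & 0xfffff000;
        val |= (uint64_t)((stride / 128) - 1) << FENCE_PITCH_SHIFT;
        if (tiling_y)
            val |= FENCE_TILING_Y_BIT;
        val |= FENCE_REG_VALID;
        return val;
    }

    int main(void)
    {
        /* a 1 MiB Y-tiled object at GTT offset 16 MiB with a 512-byte stride */
        printf("fence    = 0x%016llx\n",
               (unsigned long long)snb_fence_val(16u << 20, 1u << 20, 512, 1));
        printf("disabled = 0x%016llx\n", 0ull);  /* obj == NULL writes zero */
        return 0;
    }
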
2217 2212
2218static int i965_write_fence_reg(struct drm_i915_gem_object *obj, 2213static void i965_write_fence_reg(struct drm_device *dev, int reg,
2219 struct intel_ring_buffer *pipelined) 2214 struct drm_i915_gem_object *obj)
2220{ 2215{
2221 struct drm_device *dev = obj->base.dev;
2222 drm_i915_private_t *dev_priv = dev->dev_private; 2216 drm_i915_private_t *dev_priv = dev->dev_private;
2223 u32 size = obj->gtt_space->size;
2224 int regnum = obj->fence_reg;
2225 uint64_t val; 2217 uint64_t val;
2226 2218
2227 val = (uint64_t)((obj->gtt_offset + size - 4096) & 2219 if (obj) {
2228 0xfffff000) << 32; 2220 u32 size = obj->gtt_space->size;
2229 val |= obj->gtt_offset & 0xfffff000;
2230 val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
2231 if (obj->tiling_mode == I915_TILING_Y)
2232 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2233 val |= I965_FENCE_REG_VALID;
2234 2221
2235 if (pipelined) { 2222 val = (uint64_t)((obj->gtt_offset + size - 4096) &
2236 int ret = intel_ring_begin(pipelined, 6); 2223 0xfffff000) << 32;
2237 if (ret) 2224 val |= obj->gtt_offset & 0xfffff000;
2238 return ret; 2225 val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
2239 2226 if (obj->tiling_mode == I915_TILING_Y)
2240 intel_ring_emit(pipelined, MI_NOOP); 2227 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2241 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2)); 2228 val |= I965_FENCE_REG_VALID;
2242 intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8);
2243 intel_ring_emit(pipelined, (u32)val);
2244 intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8 + 4);
2245 intel_ring_emit(pipelined, (u32)(val >> 32));
2246 intel_ring_advance(pipelined);
2247 } else 2229 } else
2248 I915_WRITE64(FENCE_REG_965_0 + regnum * 8, val); 2230 val = 0;
2249 2231
2250 return 0; 2232 I915_WRITE64(FENCE_REG_965_0 + reg * 8, val);
2233 POSTING_READ(FENCE_REG_965_0 + reg * 8);
2251} 2234}
2252 2235
2253static int i915_write_fence_reg(struct drm_i915_gem_object *obj, 2236static void i915_write_fence_reg(struct drm_device *dev, int reg,
2254 struct intel_ring_buffer *pipelined) 2237 struct drm_i915_gem_object *obj)
2255{ 2238{
2256 struct drm_device *dev = obj->base.dev;
2257 drm_i915_private_t *dev_priv = dev->dev_private; 2239 drm_i915_private_t *dev_priv = dev->dev_private;
2258 u32 size = obj->gtt_space->size; 2240 u32 val;
2259 u32 fence_reg, val, pitch_val;
2260 int tile_width;
2261
2262 if (WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
2263 (size & -size) != size ||
2264 (obj->gtt_offset & (size - 1)),
2265 "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2266 obj->gtt_offset, obj->map_and_fenceable, size))
2267 return -EINVAL;
2268 2241
2269 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)) 2242 if (obj) {
2270 tile_width = 128; 2243 u32 size = obj->gtt_space->size;
2271 else 2244 int pitch_val;
2272 tile_width = 512; 2245 int tile_width;
2273
2274 /* Note: pitch better be a power of two tile widths */
2275 pitch_val = obj->stride / tile_width;
2276 pitch_val = ffs(pitch_val) - 1;
2277
2278 val = obj->gtt_offset;
2279 if (obj->tiling_mode == I915_TILING_Y)
2280 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2281 val |= I915_FENCE_SIZE_BITS(size);
2282 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2283 val |= I830_FENCE_REG_VALID;
2284
2285 fence_reg = obj->fence_reg;
2286 if (fence_reg < 8)
2287 fence_reg = FENCE_REG_830_0 + fence_reg * 4;
2288 else
2289 fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
2290 2246
2291 if (pipelined) { 2247 WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
2292 int ret = intel_ring_begin(pipelined, 4); 2248 (size & -size) != size ||
2293 if (ret) 2249 (obj->gtt_offset & (size - 1)),
2294 return ret; 2250 "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2251 obj->gtt_offset, obj->map_and_fenceable, size);
2295 2252
2296 intel_ring_emit(pipelined, MI_NOOP); 2253 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2297 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1)); 2254 tile_width = 128;
2298 intel_ring_emit(pipelined, fence_reg); 2255 else
2299 intel_ring_emit(pipelined, val); 2256 tile_width = 512;
2300 intel_ring_advance(pipelined); 2257
2258 /* Note: pitch better be a power of two tile widths */
2259 pitch_val = obj->stride / tile_width;
2260 pitch_val = ffs(pitch_val) - 1;
2261
2262 val = obj->gtt_offset;
2263 if (obj->tiling_mode == I915_TILING_Y)
2264 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2265 val |= I915_FENCE_SIZE_BITS(size);
2266 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2267 val |= I830_FENCE_REG_VALID;
2301 } else 2268 } else
2302 I915_WRITE(fence_reg, val); 2269 val = 0;
2303 2270
2304 return 0; 2271 if (reg < 8)
2272 reg = FENCE_REG_830_0 + reg * 4;
2273 else
2274 reg = FENCE_REG_945_8 + (reg - 8) * 4;
2275
2276 I915_WRITE(reg, val);
2277 POSTING_READ(reg);
2305} 2278}
2306 2279
2307static int i830_write_fence_reg(struct drm_i915_gem_object *obj, 2280static void i830_write_fence_reg(struct drm_device *dev, int reg,
2308 struct intel_ring_buffer *pipelined) 2281 struct drm_i915_gem_object *obj)
2309{ 2282{
2310 struct drm_device *dev = obj->base.dev;
2311 drm_i915_private_t *dev_priv = dev->dev_private; 2283 drm_i915_private_t *dev_priv = dev->dev_private;
2312 u32 size = obj->gtt_space->size;
2313 int regnum = obj->fence_reg;
2314 uint32_t val; 2284 uint32_t val;
2315 uint32_t pitch_val;
2316 2285
2317 if (WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) || 2286 if (obj) {
2318 (size & -size) != size || 2287 u32 size = obj->gtt_space->size;
2319 (obj->gtt_offset & (size - 1)), 2288 uint32_t pitch_val;
2320 "object 0x%08x not 512K or pot-size 0x%08x aligned\n", 2289
2321 obj->gtt_offset, size)) 2290 WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
2322 return -EINVAL; 2291 (size & -size) != size ||
2323 2292 (obj->gtt_offset & (size - 1)),
2324 pitch_val = obj->stride / 128; 2293 "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
2325 pitch_val = ffs(pitch_val) - 1; 2294 obj->gtt_offset, size);
2326 2295
2327 val = obj->gtt_offset; 2296 pitch_val = obj->stride / 128;
2328 if (obj->tiling_mode == I915_TILING_Y) 2297 pitch_val = ffs(pitch_val) - 1;
2329 val |= 1 << I830_FENCE_TILING_Y_SHIFT; 2298
2330 val |= I830_FENCE_SIZE_BITS(size); 2299 val = obj->gtt_offset;
2331 val |= pitch_val << I830_FENCE_PITCH_SHIFT; 2300 if (obj->tiling_mode == I915_TILING_Y)
2332 val |= I830_FENCE_REG_VALID; 2301 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2302 val |= I830_FENCE_SIZE_BITS(size);
2303 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2304 val |= I830_FENCE_REG_VALID;
2305 } else
2306 val = 0;
2333 2307
2334 if (pipelined) { 2308 I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
2335 int ret = intel_ring_begin(pipelined, 4); 2309 POSTING_READ(FENCE_REG_830_0 + reg * 4);
2336 if (ret) 2310}
2337 return ret;
2338 2311
2339 intel_ring_emit(pipelined, MI_NOOP); 2312static void i915_gem_write_fence(struct drm_device *dev, int reg,
2340 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1)); 2313 struct drm_i915_gem_object *obj)
2341 intel_ring_emit(pipelined, FENCE_REG_830_0 + regnum*4); 2314{
2342 intel_ring_emit(pipelined, val); 2315 switch (INTEL_INFO(dev)->gen) {
2343 intel_ring_advance(pipelined); 2316 case 7:
2344 } else 2317 case 6: sandybridge_write_fence_reg(dev, reg, obj); break;
2345 I915_WRITE(FENCE_REG_830_0 + regnum * 4, val); 2318 case 5:
2319 case 4: i965_write_fence_reg(dev, reg, obj); break;
2320 case 3: i915_write_fence_reg(dev, reg, obj); break;
2321 case 2: i830_write_fence_reg(dev, reg, obj); break;
2322 default: break;
2323 }
2324}
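
i915_gem_write_fence collapses the old per-caller switches into one dispatch point keyed on GPU generation, with obj == NULL meaning "clear the register". The shape, in miniature (the write functions here are stubs):

    #include <stdio.h>

    struct obj;   /* opaque; NULL clears the fence */

    static void snb_write(int reg, const struct obj *o)  { printf("snb  %d %p\n", reg, (const void *)o); }
    static void i965_write(int reg, const struct obj *o) { printf("i965 %d %p\n", reg, (const void *)o); }
    static void i915_write(int reg, const struct obj *o) { printf("i915 %d %p\n", reg, (const void *)o); }
    static void i830_write(int reg, const struct obj *o) { printf("i830 %d %p\n", reg, (const void *)o); }

    static void write_fence(int gen, int reg, const struct obj *o)
    {
        switch (gen) {
        case 7:                   /* fall through: gen7 shares the gen6 layout */
        case 6: snb_write(reg, o); break;
        case 5:
        case 4: i965_write(reg, o); break;
        case 3: i915_write(reg, o); break;
        case 2: i830_write(reg, o); break;
        default: break;           /* unknown gen: no fences to program */
        }
    }

    int main(void)
    {
        write_fence(6, 3, NULL);  /* clear fence 3 on SNB */
        return 0;
    }
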
2346 2325
2347 return 0; 2326static inline int fence_number(struct drm_i915_private *dev_priv,
2327 struct drm_i915_fence_reg *fence)
2328{
2329 return fence - dev_priv->fence_regs;
2348} 2330}
2349 2331
2350static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno) 2332static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
2333 struct drm_i915_fence_reg *fence,
2334 bool enable)
2351{ 2335{
2352 return i915_seqno_passed(ring->get_seqno(ring), seqno); 2336 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2337 int reg = fence_number(dev_priv, fence);
2338
2339 i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
2340
2341 if (enable) {
2342 obj->fence_reg = reg;
2343 fence->obj = obj;
2344 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
2345 } else {
2346 obj->fence_reg = I915_FENCE_REG_NONE;
2347 fence->obj = NULL;
2348 list_del_init(&fence->lru_list);
2349 }
2353} 2350}
2354 2351
2355static int 2352static int
2356i915_gem_object_flush_fence(struct drm_i915_gem_object *obj, 2353i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
2357 struct intel_ring_buffer *pipelined)
2358{ 2354{
2359 int ret; 2355 int ret;
2360 2356
2361 if (obj->fenced_gpu_access) { 2357 if (obj->fenced_gpu_access) {
2362 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { 2358 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
2363 ret = i915_gem_flush_ring(obj->last_fenced_ring, 2359 ret = i915_gem_flush_ring(obj->ring,
2364 0, obj->base.write_domain); 2360 0, obj->base.write_domain);
2365 if (ret) 2361 if (ret)
2366 return ret; 2362 return ret;
@@ -2369,18 +2365,12 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
2369 obj->fenced_gpu_access = false; 2365 obj->fenced_gpu_access = false;
2370 } 2366 }
2371 2367
2372 if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) { 2368 if (obj->last_fenced_seqno) {
2373 if (!ring_passed_seqno(obj->last_fenced_ring, 2369 ret = i915_wait_request(obj->ring, obj->last_fenced_seqno);
2374 obj->last_fenced_seqno)) { 2370 if (ret)
2375 ret = i915_wait_request(obj->last_fenced_ring, 2371 return ret;
2376 obj->last_fenced_seqno,
2377 true);
2378 if (ret)
2379 return ret;
2380 }
2381 2372
2382 obj->last_fenced_seqno = 0; 2373 obj->last_fenced_seqno = 0;
2383 obj->last_fenced_ring = NULL;
2384 } 2374 }
2385 2375
2386 /* Ensure that all CPU reads are completed before installing a fence 2376 /* Ensure that all CPU reads are completed before installing a fence
@@ -2395,34 +2385,29 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
2395int 2385int
2396i915_gem_object_put_fence(struct drm_i915_gem_object *obj) 2386i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
2397{ 2387{
2388 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2398 int ret; 2389 int ret;
2399 2390
2400 if (obj->tiling_mode) 2391 ret = i915_gem_object_flush_fence(obj);
2401 i915_gem_release_mmap(obj);
2402
2403 ret = i915_gem_object_flush_fence(obj, NULL);
2404 if (ret) 2392 if (ret)
2405 return ret; 2393 return ret;
2406 2394
2407 if (obj->fence_reg != I915_FENCE_REG_NONE) { 2395 if (obj->fence_reg == I915_FENCE_REG_NONE)
2408 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 2396 return 0;
2409
2410 WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count);
2411 i915_gem_clear_fence_reg(obj->base.dev,
2412 &dev_priv->fence_regs[obj->fence_reg]);
2413 2397
2414 obj->fence_reg = I915_FENCE_REG_NONE; 2398 i915_gem_object_update_fence(obj,
2415 } 2399 &dev_priv->fence_regs[obj->fence_reg],
2400 false);
2401 i915_gem_object_fence_lost(obj);
2416 2402
2417 return 0; 2403 return 0;
2418} 2404}
2419 2405
2420static struct drm_i915_fence_reg * 2406static struct drm_i915_fence_reg *
2421i915_find_fence_reg(struct drm_device *dev, 2407i915_find_fence_reg(struct drm_device *dev)
2422 struct intel_ring_buffer *pipelined)
2423{ 2408{
2424 struct drm_i915_private *dev_priv = dev->dev_private; 2409 struct drm_i915_private *dev_priv = dev->dev_private;
2425 struct drm_i915_fence_reg *reg, *first, *avail; 2410 struct drm_i915_fence_reg *reg, *avail;
2426 int i; 2411 int i;
2427 2412
2428 /* First try to find a free reg */ 2413 /* First try to find a free reg */
@@ -2440,204 +2425,77 @@ i915_find_fence_reg(struct drm_device *dev,
2440 return NULL; 2425 return NULL;
2441 2426
2442 /* None available, try to steal one or wait for a user to finish */ 2427 /* None available, try to steal one or wait for a user to finish */
2443 avail = first = NULL;
2444 list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) { 2428 list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
2445 if (reg->pin_count) 2429 if (reg->pin_count)
2446 continue; 2430 continue;
2447 2431
2448 if (first == NULL) 2432 return reg;
2449 first = reg;
2450
2451 if (!pipelined ||
2452 !reg->obj->last_fenced_ring ||
2453 reg->obj->last_fenced_ring == pipelined) {
2454 avail = reg;
2455 break;
2456 }
2457 } 2433 }
2458 2434
2459 if (avail == NULL) 2435 return NULL;
2460 avail = first;
2461
2462 return avail;
2463} 2436}
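
With pipelining gone there is no "prefer a reg last fenced on this ring" heuristic left, so the search reduces to: first free register, else the first unpinned one in LRU order, else deadlock. A toy version of that policy — an array with explicit lru ranks stands in for the kernel's fence_list, and -1 mirrors -EDEADLK:

    #include <stdio.h>

    #define NUM_FENCES 4

    struct fence { int in_use; int pinned; int lru; /* lower lru == older */ };

    /* First free fence, else the oldest unpinned one; -1 if all are pinned. */
    static int find_fence(struct fence *f)
    {
        int best = -1;

        for (int i = 0; i < NUM_FENCES; i++)
            if (!f[i].in_use)
                return i;
        for (int i = 0; i < NUM_FENCES; i++) {
            if (f[i].pinned)
                continue;
            if (best < 0 || f[i].lru < f[best].lru)
                best = i;
        }
        return best;
    }

    int main(void)
    {
        struct fence f[NUM_FENCES] = {
            {1, 1, 0}, {1, 0, 2}, {1, 0, 1}, {1, 1, 3},
        };
        printf("steal fence %d\n", find_fence(f));  /* 2: oldest unpinned */
        return 0;
    }
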
2464 2437
2465/** 2438/**
2466 * i915_gem_object_get_fence - set up a fence reg for an object 2439 * i915_gem_object_get_fence - set up fencing for an object
2467 * @obj: object to map through a fence reg 2440 * @obj: object to map through a fence reg
2468 * @pipelined: ring on which to queue the change, or NULL for CPU access
2469 * @interruptible: must we wait uninterruptibly for the register to retire?
2470 * 2441 *
2471 * When mapping objects through the GTT, userspace wants to be able to write 2442 * When mapping objects through the GTT, userspace wants to be able to write
2472 * to them without having to worry about swizzling if the object is tiled. 2443 * to them without having to worry about swizzling if the object is tiled.
2473 *
2474 * This function walks the fence regs looking for a free one for @obj, 2444 * This function walks the fence regs looking for a free one for @obj,
2475 * stealing one if it can't find any. 2445 * stealing one if it can't find any.
2476 * 2446 *
2477 * It then sets up the reg based on the object's properties: address, pitch 2447 * It then sets up the reg based on the object's properties: address, pitch
2478 * and tiling format. 2448 * and tiling format.
2449 *
2450 * For an untiled surface, this removes any existing fence.
2479 */ 2451 */
2480int 2452int
2481i915_gem_object_get_fence(struct drm_i915_gem_object *obj, 2453i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
2482 struct intel_ring_buffer *pipelined)
2483{ 2454{
2484 struct drm_device *dev = obj->base.dev; 2455 struct drm_device *dev = obj->base.dev;
2485 struct drm_i915_private *dev_priv = dev->dev_private; 2456 struct drm_i915_private *dev_priv = dev->dev_private;
2457 bool enable = obj->tiling_mode != I915_TILING_NONE;
2486 struct drm_i915_fence_reg *reg; 2458 struct drm_i915_fence_reg *reg;
2487 int ret; 2459 int ret;
2488 2460
2489 /* XXX disable pipelining. There are bugs. Shocking. */ 2461 /* Have we updated the tiling parameters upon the object and so
2490 pipelined = NULL; 2462 * will need to serialise the write to the associated fence register?
2463 */
2464 if (obj->fence_dirty) {
2465 ret = i915_gem_object_flush_fence(obj);
2466 if (ret)
2467 return ret;
2468 }
2491 2469
2492 /* Just update our place in the LRU if our fence is getting reused. */ 2470 /* Just update our place in the LRU if our fence is getting reused. */
2493 if (obj->fence_reg != I915_FENCE_REG_NONE) { 2471 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2494 reg = &dev_priv->fence_regs[obj->fence_reg]; 2472 reg = &dev_priv->fence_regs[obj->fence_reg];
2495 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list); 2473 if (!obj->fence_dirty) {
2496 2474 list_move_tail(&reg->lru_list,
2497 if (obj->tiling_changed) { 2475 &dev_priv->mm.fence_list);
2498 ret = i915_gem_object_flush_fence(obj, pipelined); 2476 return 0;
2499 if (ret)
2500 return ret;
2501
2502 if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
2503 pipelined = NULL;
2504
2505 if (pipelined) {
2506 reg->setup_seqno =
2507 i915_gem_next_request_seqno(pipelined);
2508 obj->last_fenced_seqno = reg->setup_seqno;
2509 obj->last_fenced_ring = pipelined;
2510 }
2511
2512 goto update;
2513 } 2477 }
2478 } else if (enable) {
2479 reg = i915_find_fence_reg(dev);
2480 if (reg == NULL)
2481 return -EDEADLK;
2514 2482
2515 if (!pipelined) { 2483 if (reg->obj) {
2516 if (reg->setup_seqno) { 2484 struct drm_i915_gem_object *old = reg->obj;
2517 if (!ring_passed_seqno(obj->last_fenced_ring,
2518 reg->setup_seqno)) {
2519 ret = i915_wait_request(obj->last_fenced_ring,
2520 reg->setup_seqno,
2521 true);
2522 if (ret)
2523 return ret;
2524 }
2525 2485
2526 reg->setup_seqno = 0; 2486 ret = i915_gem_object_flush_fence(old);
2527 }
2528 } else if (obj->last_fenced_ring &&
2529 obj->last_fenced_ring != pipelined) {
2530 ret = i915_gem_object_flush_fence(obj, pipelined);
2531 if (ret) 2487 if (ret)
2532 return ret; 2488 return ret;
2533 }
2534
2535 return 0;
2536 }
2537
2538 reg = i915_find_fence_reg(dev, pipelined);
2539 if (reg == NULL)
2540 return -EDEADLK;
2541
2542 ret = i915_gem_object_flush_fence(obj, pipelined);
2543 if (ret)
2544 return ret;
2545
2546 if (reg->obj) {
2547 struct drm_i915_gem_object *old = reg->obj;
2548
2549 drm_gem_object_reference(&old->base);
2550
2551 if (old->tiling_mode)
2552 i915_gem_release_mmap(old);
2553 2489
2554 ret = i915_gem_object_flush_fence(old, pipelined); 2490 i915_gem_object_fence_lost(old);
2555 if (ret) {
2556 drm_gem_object_unreference(&old->base);
2557 return ret;
2558 } 2491 }
2492 } else
2493 return 0;
2559 2494
2560 if (old->last_fenced_seqno == 0 && obj->last_fenced_seqno == 0) 2495 i915_gem_object_update_fence(obj, reg, enable);
2561 pipelined = NULL; 2496 obj->fence_dirty = false;
2562
2563 old->fence_reg = I915_FENCE_REG_NONE;
2564 old->last_fenced_ring = pipelined;
2565 old->last_fenced_seqno =
2566 pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
2567
2568 drm_gem_object_unreference(&old->base);
2569 } else if (obj->last_fenced_seqno == 0)
2570 pipelined = NULL;
2571
2572 reg->obj = obj;
2573 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
2574 obj->fence_reg = reg - dev_priv->fence_regs;
2575 obj->last_fenced_ring = pipelined;
2576
2577 reg->setup_seqno =
2578 pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
2579 obj->last_fenced_seqno = reg->setup_seqno;
2580
2581update:
2582 obj->tiling_changed = false;
2583 switch (INTEL_INFO(dev)->gen) {
2584 case 7:
2585 case 6:
2586 ret = sandybridge_write_fence_reg(obj, pipelined);
2587 break;
2588 case 5:
2589 case 4:
2590 ret = i965_write_fence_reg(obj, pipelined);
2591 break;
2592 case 3:
2593 ret = i915_write_fence_reg(obj, pipelined);
2594 break;
2595 case 2:
2596 ret = i830_write_fence_reg(obj, pipelined);
2597 break;
2598 }
2599
2600 return ret;
2601}
2602
2603/**
2604 * i915_gem_clear_fence_reg - clear out fence register info
2605 * @obj: object to clear
2606 *
2607 * Zeroes out the fence register itself and clears out the associated
2608 * data structures in dev_priv and obj.
2609 */
2610static void
2611i915_gem_clear_fence_reg(struct drm_device *dev,
2612 struct drm_i915_fence_reg *reg)
2613{
2614 drm_i915_private_t *dev_priv = dev->dev_private;
2615 uint32_t fence_reg = reg - dev_priv->fence_regs;
2616
2617 switch (INTEL_INFO(dev)->gen) {
2618 case 7:
2619 case 6:
2620 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + fence_reg*8, 0);
2621 break;
2622 case 5:
2623 case 4:
2624 I915_WRITE64(FENCE_REG_965_0 + fence_reg*8, 0);
2625 break;
2626 case 3:
2627 if (fence_reg >= 8)
2628 fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
2629 else
2630 case 2:
2631 fence_reg = FENCE_REG_830_0 + fence_reg * 4;
2632
2633 I915_WRITE(fence_reg, 0);
2634 break;
2635 }
2636 2497
2637 list_del_init(&reg->lru_list); 2498 return 0;
2638 reg->obj = NULL;
2639 reg->setup_seqno = 0;
2640 reg->pin_count = 0;
2641} 2499}
2642 2500
2643/** 2501/**
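The rewrite above drops the pipelined/avail/first juggling: i915_find_fence_reg() now simply returns the first unpinned register in LRU order, and i915_gem_object_get_fence() decides everything from the object itself (tiling_mode, fence_dirty) instead of a caller-supplied ring, removing the fence entirely for untiled surfaces as the new kerneldoc notes. The stealing policy reduces to a plain LRU walk; a minimal standalone model of it, assuming sixteen registers:

    /* Toy model of the allocation policy: hand out a free register first,
     * otherwise steal the least recently used unpinned one; -1 stands in
     * for the driver's -EDEADLK when every register is pinned. */
    #define NUM_FENCES 16                   /* assumed, e.g. gen4+ */

    struct fence {
            int in_use;
            int pinned;
    };

    static struct fence fences[NUM_FENCES];
    static int lru[NUM_FENCES];             /* lru[0] = least recently used */

    static int find_fence(void)
    {
            int i;

            for (i = 0; i < NUM_FENCES; i++)        /* first try a free reg */
                    if (!fences[i].in_use)
                            return i;

            for (i = 0; i < NUM_FENCES; i++)        /* else steal, LRU first */
                    if (!fences[lru[i]].pinned)
                            return lru[i];

            return -1;                              /* all pinned: -EDEADLK */
    }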
@@ -2749,7 +2607,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
2749 return ret; 2607 return ret;
2750 } 2608 }
2751 2609
2752 ret = i915_gem_gtt_bind_object(obj); 2610 ret = i915_gem_gtt_prepare_object(obj);
2753 if (ret) { 2611 if (ret) {
2754 i915_gem_object_put_pages_gtt(obj); 2612 i915_gem_object_put_pages_gtt(obj);
2755 drm_mm_put_block(obj->gtt_space); 2613 drm_mm_put_block(obj->gtt_space);
@@ -2761,6 +2619,9 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
2761 goto search_free; 2619 goto search_free;
2762 } 2620 }
2763 2621
2622 if (!dev_priv->mm.aliasing_ppgtt)
2623 i915_gem_gtt_bind_object(obj, obj->cache_level);
2624
2764 list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list); 2625 list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
2765 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list); 2626 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
2766 2627
@@ -2878,6 +2739,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
2878int 2739int
2879i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) 2740i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
2880{ 2741{
2742 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
2881 uint32_t old_write_domain, old_read_domains; 2743 uint32_t old_write_domain, old_read_domains;
2882 int ret; 2744 int ret;
2883 2745
@@ -2918,6 +2780,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
2918 old_read_domains, 2780 old_read_domains,
2919 old_write_domain); 2781 old_write_domain);
2920 2782
2783 /* And bump the LRU for this access */
2784 if (i915_gem_object_is_inactive(obj))
2785 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
2786
2921 return 0; 2787 return 0;
2922} 2788}
2923 2789
@@ -2953,7 +2819,8 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
2953 return ret; 2819 return ret;
2954 } 2820 }
2955 2821
2956 i915_gem_gtt_rebind_object(obj, cache_level); 2822 if (obj->has_global_gtt_mapping)
2823 i915_gem_gtt_bind_object(obj, cache_level);
2957 if (obj->has_aliasing_ppgtt_mapping) 2824 if (obj->has_aliasing_ppgtt_mapping)
2958 i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt, 2825 i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
2959 obj, cache_level); 2826 obj, cache_level);
@@ -2990,11 +2857,6 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
2990 * Prepare buffer for display plane (scanout, cursors, etc). 2857 * Prepare buffer for display plane (scanout, cursors, etc).
2991 * Can be called from an uninterruptible phase (modesetting) and allows 2858 * Can be called from an uninterruptible phase (modesetting) and allows
2992 * any flushes to be pipelined (for pageflips). 2859 * any flushes to be pipelined (for pageflips).
2993 *
2994 * For the display plane, we want to be in the GTT but out of any write
2995 * domains. So in many ways this looks like set_to_gtt_domain() apart from the
2996 * ability to pipeline the waits, pinning and any additional subtleties
2997 * that may differentiate the display plane from ordinary buffers.
2998 */ 2860 */
2999int 2861int
3000i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, 2862i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
@@ -3009,8 +2871,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3009 return ret; 2871 return ret;
3010 2872
3011 if (pipelined != obj->ring) { 2873 if (pipelined != obj->ring) {
3012 ret = i915_gem_object_wait_rendering(obj); 2874 ret = i915_gem_object_sync(obj, pipelined);
3013 if (ret == -ERESTARTSYS) 2875 if (ret)
3014 return ret; 2876 return ret;
3015 } 2877 }
3016 2878
@@ -3082,7 +2944,7 @@ i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
3082 * This function returns when the move is complete, including waiting on 2944 * This function returns when the move is complete, including waiting on
3083 * flushes to occur. 2945 * flushes to occur.
3084 */ 2946 */
3085static int 2947int
3086i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) 2948i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3087{ 2949{
3088 uint32_t old_write_domain, old_read_domains; 2950 uint32_t old_write_domain, old_read_domains;
@@ -3095,17 +2957,14 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3095 if (ret) 2957 if (ret)
3096 return ret; 2958 return ret;
3097 2959
3098 ret = i915_gem_object_wait_rendering(obj); 2960 if (write || obj->pending_gpu_write) {
3099 if (ret) 2961 ret = i915_gem_object_wait_rendering(obj);
3100 return ret; 2962 if (ret)
2963 return ret;
2964 }
3101 2965
3102 i915_gem_object_flush_gtt_write_domain(obj); 2966 i915_gem_object_flush_gtt_write_domain(obj);
3103 2967
3104 /* If we have a partially-valid cache of the object in the CPU,
3105 * finish invalidating it and free the per-page flags.
3106 */
3107 i915_gem_object_set_to_full_cpu_read_domain(obj);
3108
3109 old_write_domain = obj->base.write_domain; 2968 old_write_domain = obj->base.write_domain;
3110 old_read_domains = obj->base.read_domains; 2969 old_read_domains = obj->base.read_domains;
3111 2970
@@ -3136,113 +2995,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3136 return 0; 2995 return 0;
3137} 2996}
3138 2997
3139/**
3140 * Moves the object from a partially CPU read to a full one.
3141 *
3142 * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
3143 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
3144 */
3145static void
3146i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj)
3147{
3148 if (!obj->page_cpu_valid)
3149 return;
3150
3151 /* If we're partially in the CPU read domain, finish moving it in.
3152 */
3153 if (obj->base.read_domains & I915_GEM_DOMAIN_CPU) {
3154 int i;
3155
3156 for (i = 0; i <= (obj->base.size - 1) / PAGE_SIZE; i++) {
3157 if (obj->page_cpu_valid[i])
3158 continue;
3159 drm_clflush_pages(obj->pages + i, 1);
3160 }
3161 }
3162
3163 /* Free the page_cpu_valid mappings which are now stale, whether
3164 * or not we've got I915_GEM_DOMAIN_CPU.
3165 */
3166 kfree(obj->page_cpu_valid);
3167 obj->page_cpu_valid = NULL;
3168}
3169
3170/**
3171 * Set the CPU read domain on a range of the object.
3172 *
3173 * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
3174 * not entirely valid. The page_cpu_valid member of the object flags which
3175 * pages have been flushed, and will be respected by
3176 * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
3177 * of the whole object.
3178 *
3179 * This function returns when the move is complete, including waiting on
3180 * flushes to occur.
3181 */
3182static int
3183i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
3184 uint64_t offset, uint64_t size)
3185{
3186 uint32_t old_read_domains;
3187 int i, ret;
3188
3189 if (offset == 0 && size == obj->base.size)
3190 return i915_gem_object_set_to_cpu_domain(obj, 0);
3191
3192 ret = i915_gem_object_flush_gpu_write_domain(obj);
3193 if (ret)
3194 return ret;
3195
3196 ret = i915_gem_object_wait_rendering(obj);
3197 if (ret)
3198 return ret;
3199
3200 i915_gem_object_flush_gtt_write_domain(obj);
3201
3202 /* If we're already fully in the CPU read domain, we're done. */
3203 if (obj->page_cpu_valid == NULL &&
3204 (obj->base.read_domains & I915_GEM_DOMAIN_CPU) != 0)
3205 return 0;
3206
3207 /* Otherwise, create/clear the per-page CPU read domain flag if we're
3208 * newly adding I915_GEM_DOMAIN_CPU
3209 */
3210 if (obj->page_cpu_valid == NULL) {
3211 obj->page_cpu_valid = kzalloc(obj->base.size / PAGE_SIZE,
3212 GFP_KERNEL);
3213 if (obj->page_cpu_valid == NULL)
3214 return -ENOMEM;
3215 } else if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
3216 memset(obj->page_cpu_valid, 0, obj->base.size / PAGE_SIZE);
3217
3218 /* Flush the cache on any pages that are still invalid from the CPU's
3219 * perspective.
3220 */
3221 for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
3222 i++) {
3223 if (obj->page_cpu_valid[i])
3224 continue;
3225
3226 drm_clflush_pages(obj->pages + i, 1);
3227
3228 obj->page_cpu_valid[i] = 1;
3229 }
3230
3231 /* It should now be out of any other write domains, and we can update
3232 * the domain values for our changes.
3233 */
3234 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3235
3236 old_read_domains = obj->base.read_domains;
3237 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3238
3239 trace_i915_gem_object_change_domain(obj,
3240 old_read_domains,
3241 obj->base.write_domain);
3242
3243 return 0;
3244}
3245
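The page_cpu_valid machinery deleted above let a partial read flush only the pages it actually touched; it goes away here, presumably superseded by the reworked pread/pwrite paths earlier in this series, so set_to_cpu_domain() now always operates on whole objects. The page-interval arithmetic its range variant used recurs throughout GEM and is worth spelling out; a self-contained check:

    #include <stdio.h>

    #define PAGE_SZ 4096ULL         /* assumed 4 KiB pages */

    /* A byte range [offset, offset + size) touches the inclusive page range
     * [offset / PAGE_SZ, (offset + size - 1) / PAGE_SZ], exactly the loop
     * bounds of the removed set_cpu_read_domain_range() code. */
    int main(void)
    {
            unsigned long long offset = 4000, size = 200;

            printf("pages %llu..%llu\n",
                   offset / PAGE_SZ, (offset + size - 1) / PAGE_SZ);
            /* prints "pages 0..1": the range straddles one page boundary */
            return 0;
    }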
3246/* Throttle our rendering by waiting until the ring has completed our requests 2998/* Throttle our rendering by waiting until the ring has completed our requests
3247 * emitted over 20 msec ago. 2999 * emitted over 20 msec ago.
3248 * 3000 *
@@ -3280,28 +3032,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3280 if (seqno == 0) 3032 if (seqno == 0)
3281 return 0; 3033 return 0;
3282 3034
3283 ret = 0; 3035 ret = __wait_seqno(ring, seqno, true);
3284 if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
3285 /* And wait for the seqno passing without holding any locks and
3286 * causing extra latency for others. This is safe as the irq
3287 * generation is designed to be run atomically and so is
3288 * lockless.
3289 */
3290 if (ring->irq_get(ring)) {
3291 ret = wait_event_interruptible(ring->irq_queue,
3292 i915_seqno_passed(ring->get_seqno(ring), seqno)
3293 || atomic_read(&dev_priv->mm.wedged));
3294 ring->irq_put(ring);
3295
3296 if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
3297 ret = -EIO;
3298 } else if (wait_for_atomic(i915_seqno_passed(ring->get_seqno(ring),
3299 seqno) ||
3300 atomic_read(&dev_priv->mm.wedged), 3000)) {
3301 ret = -EBUSY;
3302 }
3303 }
3304
3305 if (ret == 0) 3036 if (ret == 0)
3306 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0); 3037 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
3307 3038
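The irq_get/wait_event_interruptible/irq_put sequence deleted here was duplicated across several wait paths; __wait_seqno() now owns it, wedged-GPU handling included. For a feel of what the 20 msec throttle window means in scheduler ticks, under a few common (assumed) HZ configurations:

    #include <stdio.h>

    int main(void)
    {
            int hz[] = { 100, 250, 1000 };
            int i;

            /* msecs_to_jiffies(20) for common HZ settings: the throttle only
             * waits on requests emitted more than this many ticks ago. */
            for (i = 0; i < 3; i++)
                    printf("HZ=%d: 20 ms = %d jiffies\n",
                           hz[i], 20 * hz[i] / 1000);
            return 0;
    }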
@@ -3313,12 +3044,9 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
3313 uint32_t alignment, 3044 uint32_t alignment,
3314 bool map_and_fenceable) 3045 bool map_and_fenceable)
3315{ 3046{
3316 struct drm_device *dev = obj->base.dev;
3317 struct drm_i915_private *dev_priv = dev->dev_private;
3318 int ret; 3047 int ret;
3319 3048
3320 BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT); 3049 BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
3321 WARN_ON(i915_verify_lists(dev));
3322 3050
3323 if (obj->gtt_space != NULL) { 3051 if (obj->gtt_space != NULL) {
3324 if ((alignment && obj->gtt_offset & (alignment - 1)) || 3052 if ((alignment && obj->gtt_offset & (alignment - 1)) ||
@@ -3343,34 +3071,23 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
3343 return ret; 3071 return ret;
3344 } 3072 }
3345 3073
3346 if (obj->pin_count++ == 0) { 3074 if (!obj->has_global_gtt_mapping && map_and_fenceable)
3347 if (!obj->active) 3075 i915_gem_gtt_bind_object(obj, obj->cache_level);
3348 list_move_tail(&obj->mm_list, 3076
3349 &dev_priv->mm.pinned_list); 3077 obj->pin_count++;
3350 }
3351 obj->pin_mappable |= map_and_fenceable; 3078 obj->pin_mappable |= map_and_fenceable;
3352 3079
3353 WARN_ON(i915_verify_lists(dev));
3354 return 0; 3080 return 0;
3355} 3081}
3356 3082
3357void 3083void
3358i915_gem_object_unpin(struct drm_i915_gem_object *obj) 3084i915_gem_object_unpin(struct drm_i915_gem_object *obj)
3359{ 3085{
3360 struct drm_device *dev = obj->base.dev;
3361 drm_i915_private_t *dev_priv = dev->dev_private;
3362
3363 WARN_ON(i915_verify_lists(dev));
3364 BUG_ON(obj->pin_count == 0); 3086 BUG_ON(obj->pin_count == 0);
3365 BUG_ON(obj->gtt_space == NULL); 3087 BUG_ON(obj->gtt_space == NULL);
3366 3088
3367 if (--obj->pin_count == 0) { 3089 if (--obj->pin_count == 0)
3368 if (!obj->active)
3369 list_move_tail(&obj->mm_list,
3370 &dev_priv->mm.inactive_list);
3371 obj->pin_mappable = false; 3090 obj->pin_mappable = false;
3372 }
3373 WARN_ON(i915_verify_lists(dev));
3374} 3091}
3375 3092
3376int 3093int
@@ -3494,20 +3211,9 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3494 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { 3211 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
3495 ret = i915_gem_flush_ring(obj->ring, 3212 ret = i915_gem_flush_ring(obj->ring,
3496 0, obj->base.write_domain); 3213 0, obj->base.write_domain);
3497 } else if (obj->ring->outstanding_lazy_request == 3214 } else {
3498 obj->last_rendering_seqno) { 3215 ret = i915_gem_check_olr(obj->ring,
3499 struct drm_i915_gem_request *request; 3216 obj->last_rendering_seqno);
3500
3501 /* This ring is not being cleared by active usage,
3502 * so emit a request to do so.
3503 */
3504 request = kzalloc(sizeof(*request), GFP_KERNEL);
3505 if (request) {
3506 ret = i915_add_request(obj->ring, NULL, request);
3507 if (ret)
3508 kfree(request);
3509 } else
3510 ret = -ENOMEM;
3511 } 3217 }
3512 3218
3513 /* Update the active list for the hardware's current position. 3219 /* Update the active list for the hardware's current position.
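busy_ioctl no longer open-codes the lazy-request flush. Judging purely from the block it replaces, i915_gem_check_olr() ("check outstanding lazy request") bundles that pattern; a sketch reconstructed from the removed lines, so the real helper in i915_gem.c may differ in detail:

    static int check_olr_sketch(struct intel_ring_buffer *ring, u32 seqno)
    {
            int ret = 0;

            /* The ring is not being cleared by active usage, so the request
             * this seqno belongs to must be emitted by hand before it can
             * ever retire (same logic as the removed block above). */
            if (seqno == ring->outstanding_lazy_request) {
                    struct drm_i915_gem_request *request;

                    request = kzalloc(sizeof(*request), GFP_KERNEL);
                    if (request == NULL)
                            return -ENOMEM;

                    ret = i915_add_request(ring, NULL, request);
                    if (ret)
                            kfree(request);
            }

            return ret;
    }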
@@ -3643,46 +3349,42 @@ int i915_gem_init_object(struct drm_gem_object *obj)
3643 return 0; 3349 return 0;
3644} 3350}
3645 3351
3646static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj) 3352void i915_gem_free_object(struct drm_gem_object *gem_obj)
3647{ 3353{
3354 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
3648 struct drm_device *dev = obj->base.dev; 3355 struct drm_device *dev = obj->base.dev;
3649 drm_i915_private_t *dev_priv = dev->dev_private; 3356 drm_i915_private_t *dev_priv = dev->dev_private;
3650 int ret;
3651
3652 ret = i915_gem_object_unbind(obj);
3653 if (ret == -ERESTARTSYS) {
3654 list_move(&obj->mm_list,
3655 &dev_priv->mm.deferred_free_list);
3656 return;
3657 }
3658 3357
3659 trace_i915_gem_object_destroy(obj); 3358 trace_i915_gem_object_destroy(obj);
3660 3359
3360 if (gem_obj->import_attach)
3361 drm_prime_gem_destroy(gem_obj, obj->sg_table);
3362
3363 if (obj->phys_obj)
3364 i915_gem_detach_phys_object(dev, obj);
3365
3366 obj->pin_count = 0;
3367 if (WARN_ON(i915_gem_object_unbind(obj) == -ERESTARTSYS)) {
3368 bool was_interruptible;
3369
3370 was_interruptible = dev_priv->mm.interruptible;
3371 dev_priv->mm.interruptible = false;
3372
3373 WARN_ON(i915_gem_object_unbind(obj));
3374
3375 dev_priv->mm.interruptible = was_interruptible;
3376 }
3377
3661 if (obj->base.map_list.map) 3378 if (obj->base.map_list.map)
3662 drm_gem_free_mmap_offset(&obj->base); 3379 drm_gem_free_mmap_offset(&obj->base);
3663 3380
3664 drm_gem_object_release(&obj->base); 3381 drm_gem_object_release(&obj->base);
3665 i915_gem_info_remove_obj(dev_priv, obj->base.size); 3382 i915_gem_info_remove_obj(dev_priv, obj->base.size);
3666 3383
3667 kfree(obj->page_cpu_valid);
3668 kfree(obj->bit_17); 3384 kfree(obj->bit_17);
3669 kfree(obj); 3385 kfree(obj);
3670} 3386}
3671 3387
3672void i915_gem_free_object(struct drm_gem_object *gem_obj)
3673{
3674 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
3675 struct drm_device *dev = obj->base.dev;
3676
3677 while (obj->pin_count > 0)
3678 i915_gem_object_unpin(obj);
3679
3680 if (obj->phys_obj)
3681 i915_gem_detach_phys_object(dev, obj);
3682
3683 i915_gem_free_object_tail(obj);
3684}
3685
3686int 3388int
3687i915_gem_idle(struct drm_device *dev) 3389i915_gem_idle(struct drm_device *dev)
3688{ 3390{
@@ -3696,20 +3398,16 @@ i915_gem_idle(struct drm_device *dev)
3696 return 0; 3398 return 0;
3697 } 3399 }
3698 3400
3699 ret = i915_gpu_idle(dev, true); 3401 ret = i915_gpu_idle(dev);
3700 if (ret) { 3402 if (ret) {
3701 mutex_unlock(&dev->struct_mutex); 3403 mutex_unlock(&dev->struct_mutex);
3702 return ret; 3404 return ret;
3703 } 3405 }
3406 i915_gem_retire_requests(dev);
3704 3407
3705 /* Under UMS, be paranoid and evict. */ 3408 /* Under UMS, be paranoid and evict. */
3706 if (!drm_core_check_feature(dev, DRIVER_MODESET)) { 3409 if (!drm_core_check_feature(dev, DRIVER_MODESET))
3707 ret = i915_gem_evict_inactive(dev, false); 3410 i915_gem_evict_everything(dev, false);
3708 if (ret) {
3709 mutex_unlock(&dev->struct_mutex);
3710 return ret;
3711 }
3712 }
3713 3411
3714 i915_gem_reset_fences(dev); 3412 i915_gem_reset_fences(dev);
3715 3413
@@ -3747,9 +3445,9 @@ void i915_gem_init_swizzling(struct drm_device *dev)
3747 3445
3748 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL); 3446 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
3749 if (IS_GEN6(dev)) 3447 if (IS_GEN6(dev))
3750 I915_WRITE(ARB_MODE, ARB_MODE_ENABLE(ARB_MODE_SWIZZLE_SNB)); 3448 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
3751 else 3449 else
3752 I915_WRITE(ARB_MODE, ARB_MODE_ENABLE(ARB_MODE_SWIZZLE_IVB)); 3450 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
3753} 3451}
3754 3452
3755void i915_gem_init_ppgtt(struct drm_device *dev) 3453void i915_gem_init_ppgtt(struct drm_device *dev)
@@ -3787,21 +3485,27 @@ void i915_gem_init_ppgtt(struct drm_device *dev)
3787 pd_offset <<= 16; 3485 pd_offset <<= 16;
3788 3486
3789 if (INTEL_INFO(dev)->gen == 6) { 3487 if (INTEL_INFO(dev)->gen == 6) {
3790 uint32_t ecochk = I915_READ(GAM_ECOCHK); 3488 uint32_t ecochk, gab_ctl, ecobits;
3489
3490 ecobits = I915_READ(GAC_ECO_BITS);
3491 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
3492
3493 gab_ctl = I915_READ(GAB_CTL);
3494 I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
3495
3496 ecochk = I915_READ(GAM_ECOCHK);
3791 I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | 3497 I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
3792 ECOCHK_PPGTT_CACHE64B); 3498 ECOCHK_PPGTT_CACHE64B);
3793 I915_WRITE(GFX_MODE, GFX_MODE_ENABLE(GFX_PPGTT_ENABLE)); 3499 I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
3794 } else if (INTEL_INFO(dev)->gen >= 7) { 3500 } else if (INTEL_INFO(dev)->gen >= 7) {
3795 I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B); 3501 I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
3796 /* GFX_MODE is per-ring on gen7+ */ 3502 /* GFX_MODE is per-ring on gen7+ */
3797 } 3503 }
3798 3504
3799 for (i = 0; i < I915_NUM_RINGS; i++) { 3505 for_each_ring(ring, dev_priv, i) {
3800 ring = &dev_priv->ring[i];
3801
3802 if (INTEL_INFO(dev)->gen >= 7) 3506 if (INTEL_INFO(dev)->gen >= 7)
3803 I915_WRITE(RING_MODE_GEN7(ring), 3507 I915_WRITE(RING_MODE_GEN7(ring),
3804 GFX_MODE_ENABLE(GFX_PPGTT_ENABLE)); 3508 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
3805 3509
3806 I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G); 3510 I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
3807 I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset); 3511 I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
@@ -3845,14 +3549,80 @@ cleanup_render_ring:
3845 return ret; 3549 return ret;
3846} 3550}
3847 3551
3552static bool
3553intel_enable_ppgtt(struct drm_device *dev)
3554{
3555 if (i915_enable_ppgtt >= 0)
3556 return i915_enable_ppgtt;
3557
3558#ifdef CONFIG_INTEL_IOMMU
3559 /* Disable ppgtt on SNB if VT-d is on. */
3560 if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
3561 return false;
3562#endif
3563
3564 return true;
3565}
3566
3567int i915_gem_init(struct drm_device *dev)
3568{
3569 struct drm_i915_private *dev_priv = dev->dev_private;
3570 unsigned long gtt_size, mappable_size;
3571 int ret;
3572
3573 gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
3574 mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
3575
3576 mutex_lock(&dev->struct_mutex);
3577 if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
3578 /* PPGTT pdes are stolen from global gtt ptes, so shrink the
3579 * aperture accordingly when using aliasing ppgtt. */
3580 gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
3581
3582 i915_gem_init_global_gtt(dev, 0, mappable_size, gtt_size);
3583
3584 ret = i915_gem_init_aliasing_ppgtt(dev);
3585 if (ret) {
3586 mutex_unlock(&dev->struct_mutex);
3587 return ret;
3588 }
3589 } else {
3590 /* Let GEM manage all of the aperture.
3591 *
3592 * However, leave one page at the end still bound to the scratch
3593 * page. There are a number of places where the hardware
3594 * apparently prefetches past the end of the object, and we've
3595 * seen multiple hangs with the GPU head pointer stuck in a
3596 * batchbuffer bound at the last page of the aperture. One page
3597 * should be enough to keep any prefetching inside of the
3598 * aperture.
3599 */
3600 i915_gem_init_global_gtt(dev, 0, mappable_size,
3601 gtt_size);
3602 }
3603
3604 ret = i915_gem_init_hw(dev);
3605 mutex_unlock(&dev->struct_mutex);
3606 if (ret) {
3607 i915_gem_cleanup_aliasing_ppgtt(dev);
3608 return ret;
3609 }
3610
3611 /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
3612 if (!drm_core_check_feature(dev, DRIVER_MODESET))
3613 dev_priv->dri1.allow_batchbuffer = 1;
3614 return 0;
3615}
3616
3848void 3617void
3849i915_gem_cleanup_ringbuffer(struct drm_device *dev) 3618i915_gem_cleanup_ringbuffer(struct drm_device *dev)
3850{ 3619{
3851 drm_i915_private_t *dev_priv = dev->dev_private; 3620 drm_i915_private_t *dev_priv = dev->dev_private;
3621 struct intel_ring_buffer *ring;
3852 int i; 3622 int i;
3853 3623
3854 for (i = 0; i < I915_NUM_RINGS; i++) 3624 for_each_ring(ring, dev_priv, i)
3855 intel_cleanup_ring_buffer(&dev_priv->ring[i]); 3625 intel_cleanup_ring_buffer(ring);
3856} 3626}
3857 3627
3858int 3628int
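Two numbers in the new i915_gem_init() are worth unpacking. i915_enable_ppgtt is a tri-state module parameter (negative selects the auto-detect path, which is what the VT-d-on-SNB check serves), and the "pdes are stolen from global gtt ptes" reservation is small while the address space it buys is not. Back-of-envelope figures, assuming I915_PPGTT_PD_ENTRIES is 512 and 4 KiB pages (check i915_drv.h):

    #define PAGE_SZ          4096ULL
    #define PPGTT_PD_ENTRIES 512        /* assumed I915_PPGTT_PD_ENTRIES */
    #define PTES_PER_PT      1024       /* one 4 KiB page of 4-byte PTEs */

    /* GTT carved out for the page tables: 512 * 4 KiB = 2 MiB */
    static const unsigned long long pd_reservation =
            PPGTT_PD_ENTRIES * PAGE_SZ;

    /* Space addressable through the aliasing PPGTT:
     * 512 PTs * 1024 PTEs * 4 KiB = 2 GiB, matching PP_DIR_DCLV_2G above. */
    static const unsigned long long ppgtt_span =
            PPGTT_PD_ENTRIES * PTES_PER_PT * PAGE_SZ;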
@@ -3860,7 +3630,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
3860 struct drm_file *file_priv) 3630 struct drm_file *file_priv)
3861{ 3631{
3862 drm_i915_private_t *dev_priv = dev->dev_private; 3632 drm_i915_private_t *dev_priv = dev->dev_private;
3863 int ret, i; 3633 int ret;
3864 3634
3865 if (drm_core_check_feature(dev, DRIVER_MODESET)) 3635 if (drm_core_check_feature(dev, DRIVER_MODESET))
3866 return 0; 3636 return 0;
@@ -3882,10 +3652,6 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
3882 BUG_ON(!list_empty(&dev_priv->mm.active_list)); 3652 BUG_ON(!list_empty(&dev_priv->mm.active_list));
3883 BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); 3653 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
3884 BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); 3654 BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
3885 for (i = 0; i < I915_NUM_RINGS; i++) {
3886 BUG_ON(!list_empty(&dev_priv->ring[i].active_list));
3887 BUG_ON(!list_empty(&dev_priv->ring[i].request_list));
3888 }
3889 mutex_unlock(&dev->struct_mutex); 3655 mutex_unlock(&dev->struct_mutex);
3890 3656
3891 ret = drm_irq_install(dev); 3657 ret = drm_irq_install(dev);
@@ -3944,9 +3710,7 @@ i915_gem_load(struct drm_device *dev)
3944 INIT_LIST_HEAD(&dev_priv->mm.active_list); 3710 INIT_LIST_HEAD(&dev_priv->mm.active_list);
3945 INIT_LIST_HEAD(&dev_priv->mm.flushing_list); 3711 INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
3946 INIT_LIST_HEAD(&dev_priv->mm.inactive_list); 3712 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
3947 INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
3948 INIT_LIST_HEAD(&dev_priv->mm.fence_list); 3713 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
3949 INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
3950 INIT_LIST_HEAD(&dev_priv->mm.gtt_list); 3714 INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
3951 for (i = 0; i < I915_NUM_RINGS; i++) 3715 for (i = 0; i < I915_NUM_RINGS; i++)
3952 init_ring_lists(&dev_priv->ring[i]); 3716 init_ring_lists(&dev_priv->ring[i]);
@@ -3958,12 +3722,8 @@ i915_gem_load(struct drm_device *dev)
3958 3722
3959 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */ 3723 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
3960 if (IS_GEN3(dev)) { 3724 if (IS_GEN3(dev)) {
3961 u32 tmp = I915_READ(MI_ARB_STATE); 3725 I915_WRITE(MI_ARB_STATE,
3962 if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) { 3726 _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
3963 /* arb state is a masked write, so set bit + bit in mask */
3964 tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
3965 I915_WRITE(MI_ARB_STATE, tmp);
3966 }
3967 } 3727 }
3968 3728
3969 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL; 3729 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
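This hunk, like the ARB_MODE and GFX_MODE changes earlier in the diff, replaces open-coded "bit | bit << 16" updates with _MASKED_BIT_ENABLE(). These registers keep a write mask in their top 16 bits, so one blind write changes exactly the named bits: no read-modify-write cycle, and no clobbering bits another writer owns. Presumed helper definitions (an assumption; verify against i915_reg.h):

    #define _MASKED_BIT_ENABLE(a)  (((a) << 16) | (a))  /* mask bit + set bit */
    #define _MASKED_BIT_DISABLE(a) ((a) << 16)          /* mask bit only */

    /* The removed MI_ARB_STATE read-modify-write therefore collapses to:
     * I915_WRITE(MI_ARB_STATE,
     *            _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE)); */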
@@ -3978,9 +3738,7 @@ i915_gem_load(struct drm_device *dev)
3978 dev_priv->num_fence_regs = 8; 3738 dev_priv->num_fence_regs = 8;
3979 3739
3980 /* Initialize fence registers to zero */ 3740 /* Initialize fence registers to zero */
3981 for (i = 0; i < dev_priv->num_fence_regs; i++) { 3741 i915_gem_reset_fences(dev);
3982 i915_gem_clear_fence_reg(dev, &dev_priv->fence_regs[i]);
3983 }
3984 3742
3985 i915_gem_detect_bit_6_swizzle(dev); 3743 i915_gem_detect_bit_6_swizzle(dev);
3986 init_waitqueue_head(&dev_priv->pending_flip_queue); 3744 init_waitqueue_head(&dev_priv->pending_flip_queue);
@@ -4268,7 +4026,7 @@ rescan:
4268 * This has a dramatic impact on reducing the number of 4026 * This has a dramatic impact on reducing the number of
4269 * OOM-killer events whilst running the GPU aggressively. 4027 * OOM-killer events whilst running the GPU aggressively.
4270 */ 4028 */
4271 if (i915_gpu_idle(dev, true) == 0) 4029 if (i915_gpu_idle(dev) == 0)
4272 goto rescan; 4030 goto rescan;
4273 } 4031 }
4274 mutex_unlock(&dev->struct_mutex); 4032 mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index cc93cac242d6..a4f6aaabca99 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -114,22 +114,6 @@ i915_verify_lists(struct drm_device *dev)
114 } 114 }
115 } 115 }
116 116
117 list_for_each_entry(obj, &dev_priv->mm.pinned_list, list) {
118 if (obj->base.dev != dev ||
119 !atomic_read(&obj->base.refcount.refcount)) {
120 DRM_ERROR("freed pinned %p\n", obj);
121 err++;
122 break;
123 } else if (!obj->pin_count || obj->active ||
124 (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) {
125 DRM_ERROR("invalid pinned %p (p %d a %d w %x)\n",
126 obj,
127 obj->pin_count, obj->active,
128 obj->base.write_domain);
129 err++;
130 }
131 }
132
133 return warned = err; 117 return warned = err;
134} 118}
135#endif /* WATCH_INACTIVE */ 119#endif /* WATCH_INACTIVE */
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
new file mode 100644
index 000000000000..8e269178d6a5
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -0,0 +1,171 @@
1/*
2 * Copyright 2012 Red Hat Inc
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Dave Airlie <airlied@redhat.com>
25 */
26#include "drmP.h"
27#include "i915_drv.h"
28#include <linux/dma-buf.h>
29
30static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
31 enum dma_data_direction dir)
32{
33 struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
34 struct drm_device *dev = obj->base.dev;
35 int npages = obj->base.size / PAGE_SIZE;
36 struct sg_table *sg = NULL;
37 int ret;
38 int nents;
39
40 ret = i915_mutex_lock_interruptible(dev);
41 if (ret)
42 return ERR_PTR(ret);
43
44 if (!obj->pages) {
45 ret = i915_gem_object_get_pages_gtt(obj, __GFP_NORETRY | __GFP_NOWARN);
46 if (ret)
47 goto out;
48 }
49
50 /* link the pages into an SG then map the sg */
51 sg = drm_prime_pages_to_sg(obj->pages, npages);
52 nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
53out:
54 mutex_unlock(&dev->struct_mutex);
55 return sg;
56}
57
58static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
59 struct sg_table *sg, enum dma_data_direction dir)
60{
61 dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
62 sg_free_table(sg);
63 kfree(sg);
64}
65
66static void i915_gem_dmabuf_release(struct dma_buf *dma_buf)
67{
68 struct drm_i915_gem_object *obj = dma_buf->priv;
69
70 if (obj->base.export_dma_buf == dma_buf) {
71 /* drop the reference on the export fd holds */
72 obj->base.export_dma_buf = NULL;
73 drm_gem_object_unreference_unlocked(&obj->base);
74 }
75}
76
77static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
78{
79 return NULL;
80}
81
82static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
83{
84}
85
86static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
87{
88 return NULL;
89}
90
91static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
92{
93}
94
95
96static const struct dma_buf_ops i915_dmabuf_ops = {
97 .map_dma_buf = i915_gem_map_dma_buf,
98 .unmap_dma_buf = i915_gem_unmap_dma_buf,
99 .release = i915_gem_dmabuf_release,
100 .kmap = i915_gem_dmabuf_kmap,
101 .kmap_atomic = i915_gem_dmabuf_kmap_atomic,
102 .kunmap = i915_gem_dmabuf_kunmap,
103 .kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
104};
105
106struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
107 struct drm_gem_object *gem_obj, int flags)
108{
109 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
110
111 return dma_buf_export(obj, &i915_dmabuf_ops,
112 obj->base.size, 0600);
113}
114
115struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
116 struct dma_buf *dma_buf)
117{
118 struct dma_buf_attachment *attach;
119 struct sg_table *sg;
120 struct drm_i915_gem_object *obj;
121 int npages;
122 int size;
123 int ret;
124
125 /* is this one of our own objects? */
126 if (dma_buf->ops == &i915_dmabuf_ops) {
127 obj = dma_buf->priv;
128 /* is it from our device? */
129 if (obj->base.dev == dev) {
130 drm_gem_object_reference(&obj->base);
131 return &obj->base;
132 }
133 }
134
135 /* need to attach */
136 attach = dma_buf_attach(dma_buf, dev->dev);
137 if (IS_ERR(attach))
138 return ERR_CAST(attach);
139
140 sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
141 if (IS_ERR(sg)) {
142 ret = PTR_ERR(sg);
143 goto fail_detach;
144 }
145
146 size = dma_buf->size;
147 npages = size / PAGE_SIZE;
148
149 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
150 if (obj == NULL) {
151 ret = -ENOMEM;
152 goto fail_unmap;
153 }
154
155 ret = drm_gem_private_object_init(dev, &obj->base, size);
156 if (ret) {
157 kfree(obj);
158 goto fail_unmap;
159 }
160
161 obj->sg_table = sg;
162 obj->base.import_attach = attach;
163
164 return &obj->base;
165
166fail_unmap:
167 dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
168fail_detach:
169 dma_buf_detach(dma_buf, attach);
170 return ERR_PTR(ret);
171}
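The new file wires GEM objects into dma-buf in both directions. One reviewer note from the code as shown: the out: label in i915_gem_map_dma_buf() returns sg, which is still NULL when get_pages fails, rather than ERR_PTR(ret), so callers that only test IS_ERR() will mistake the failure for success. On the userspace side the export hook is reached through the PRIME ioctls; a hypothetical minimal round-trip, with ioctl and flag names taken from the PRIME work in this same merge (verify against your drm headers):

    #include <sys/ioctl.h>
    #include <drm/drm.h>

    /* Turn a GEM handle into a dma-buf fd another device can import. */
    static int gem_handle_to_prime_fd(int drm_fd, unsigned int handle)
    {
            struct drm_prime_handle args = {
                    .handle = handle,
                    .flags  = DRM_CLOEXEC,
            };

            if (ioctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args))
                    return -1;

            return args.fd;     /* i915_gem_prime_export() runs underneath */
    }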
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 21a82710f4b2..ae7c24e12e52 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -35,6 +35,9 @@
35static bool 35static bool
36mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind) 36mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
37{ 37{
38 if (obj->pin_count)
39 return false;
40
38 list_add(&obj->exec_list, unwind); 41 list_add(&obj->exec_list, unwind);
39 return drm_mm_scan_add_block(obj->gtt_space); 42 return drm_mm_scan_add_block(obj->gtt_space);
40} 43}
@@ -90,7 +93,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
90 /* Now merge in the soon-to-be-expired objects... */ 93 /* Now merge in the soon-to-be-expired objects... */
91 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { 94 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
92 /* Does the object require an outstanding flush? */ 95 /* Does the object require an outstanding flush? */
93 if (obj->base.write_domain || obj->pin_count) 96 if (obj->base.write_domain)
94 continue; 97 continue;
95 98
96 if (mark_free(obj, &unwind_list)) 99 if (mark_free(obj, &unwind_list))
@@ -99,14 +102,11 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
99 102
100 /* Finally add anything with a pending flush (in order of retirement) */ 103 /* Finally add anything with a pending flush (in order of retirement) */
101 list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) { 104 list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
102 if (obj->pin_count)
103 continue;
104
105 if (mark_free(obj, &unwind_list)) 105 if (mark_free(obj, &unwind_list))
106 goto found; 106 goto found;
107 } 107 }
108 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { 108 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
109 if (!obj->base.write_domain || obj->pin_count) 109 if (!obj->base.write_domain)
110 continue; 110 continue;
111 111
112 if (mark_free(obj, &unwind_list)) 112 if (mark_free(obj, &unwind_list))
@@ -166,8 +166,9 @@ int
166i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only) 166i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
167{ 167{
168 drm_i915_private_t *dev_priv = dev->dev_private; 168 drm_i915_private_t *dev_priv = dev->dev_private;
169 int ret; 169 struct drm_i915_gem_object *obj, *next;
170 bool lists_empty; 170 bool lists_empty;
171 int ret;
171 172
172 lists_empty = (list_empty(&dev_priv->mm.inactive_list) && 173 lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
173 list_empty(&dev_priv->mm.flushing_list) && 174 list_empty(&dev_priv->mm.flushing_list) &&
@@ -177,29 +178,24 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
177 178
178 trace_i915_gem_evict_everything(dev, purgeable_only); 179 trace_i915_gem_evict_everything(dev, purgeable_only);
179 180
180 /* Flush everything (on to the inactive lists) and evict */ 181 /* The gpu_idle will flush everything in the write domain to the
181 ret = i915_gpu_idle(dev, true); 182 * active list. Then we must move everything off the active list
183 * with retire requests.
184 */
185 ret = i915_gpu_idle(dev);
182 if (ret) 186 if (ret)
183 return ret; 187 return ret;
184 188
185 BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); 189 i915_gem_retire_requests(dev);
186 190
187 return i915_gem_evict_inactive(dev, purgeable_only); 191 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
188}
189
190/** Unbinds all inactive objects. */
191int
192i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only)
193{
194 drm_i915_private_t *dev_priv = dev->dev_private;
195 struct drm_i915_gem_object *obj, *next;
196 192
193 /* Having flushed everything, unbind() should never raise an error */
197 list_for_each_entry_safe(obj, next, 194 list_for_each_entry_safe(obj, next,
198 &dev_priv->mm.inactive_list, mm_list) { 195 &dev_priv->mm.inactive_list, mm_list) {
199 if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) { 196 if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) {
200 int ret = i915_gem_object_unbind(obj); 197 if (obj->pin_count == 0)
201 if (ret) 198 WARN_ON(i915_gem_object_unbind(obj));
202 return ret;
203 } 199 }
204 } 200 }
205 201
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index de431942ded4..974a9f1068a3 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -266,6 +266,12 @@ eb_destroy(struct eb_objects *eb)
266 kfree(eb); 266 kfree(eb);
267} 267}
268 268
269static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
270{
271 return (obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
272 obj->cache_level != I915_CACHE_NONE);
273}
274
269static int 275static int
270i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, 276i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
271 struct eb_objects *eb, 277 struct eb_objects *eb,
@@ -273,6 +279,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
273{ 279{
274 struct drm_device *dev = obj->base.dev; 280 struct drm_device *dev = obj->base.dev;
275 struct drm_gem_object *target_obj; 281 struct drm_gem_object *target_obj;
282 struct drm_i915_gem_object *target_i915_obj;
276 uint32_t target_offset; 283 uint32_t target_offset;
277 int ret = -EINVAL; 284 int ret = -EINVAL;
278 285
@@ -281,7 +288,8 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
281 if (unlikely(target_obj == NULL)) 288 if (unlikely(target_obj == NULL))
282 return -ENOENT; 289 return -ENOENT;
283 290
284 target_offset = to_intel_bo(target_obj)->gtt_offset; 291 target_i915_obj = to_intel_bo(target_obj);
292 target_offset = target_i915_obj->gtt_offset;
285 293
286 /* The target buffer should have appeared before us in the 294 /* The target buffer should have appeared before us in the
287 * exec_object list, so it should have a GTT space bound by now. 295 * exec_object list, so it should have a GTT space bound by now.
@@ -352,11 +360,19 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
352 return ret; 360 return ret;
353 } 361 }
354 362
363 /* We can't wait for rendering with pagefaults disabled */
364 if (obj->active && in_atomic())
365 return -EFAULT;
366
355 reloc->delta += target_offset; 367 reloc->delta += target_offset;
356 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) { 368 if (use_cpu_reloc(obj)) {
357 uint32_t page_offset = reloc->offset & ~PAGE_MASK; 369 uint32_t page_offset = reloc->offset & ~PAGE_MASK;
358 char *vaddr; 370 char *vaddr;
359 371
372 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
373 if (ret)
374 return ret;
375
360 vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]); 376 vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
361 *(uint32_t *)(vaddr + page_offset) = reloc->delta; 377 *(uint32_t *)(vaddr + page_offset) = reloc->delta;
362 kunmap_atomic(vaddr); 378 kunmap_atomic(vaddr);
@@ -365,11 +381,11 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
365 uint32_t __iomem *reloc_entry; 381 uint32_t __iomem *reloc_entry;
366 void __iomem *reloc_page; 382 void __iomem *reloc_page;
367 383
368 /* We can't wait for rendering with pagefaults disabled */ 384 ret = i915_gem_object_set_to_gtt_domain(obj, true);
369 if (obj->active && in_atomic()) 385 if (ret)
370 return -EFAULT; 386 return ret;
371 387
372 ret = i915_gem_object_set_to_gtt_domain(obj, 1); 388 ret = i915_gem_object_put_fence(obj);
373 if (ret) 389 if (ret)
374 return ret; 390 return ret;
375 391
@@ -383,6 +399,16 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
383 io_mapping_unmap_atomic(reloc_page); 399 io_mapping_unmap_atomic(reloc_page);
384 } 400 }
385 401
402 /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
403 * pipe_control writes because the gpu doesn't properly redirect them
404 * through the ppgtt for non_secure batchbuffers. */
405 if (unlikely(IS_GEN6(dev) &&
406 reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
407 !target_i915_obj->has_global_gtt_mapping)) {
408 i915_gem_gtt_bind_object(target_i915_obj,
409 target_i915_obj->cache_level);
410 }
411
386 /* and update the user's relocation entry */ 412 /* and update the user's relocation entry */
387 reloc->presumed_offset = target_offset; 413 reloc->presumed_offset = target_offset;
388 414
@@ -393,30 +419,46 @@ static int
393i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj, 419i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
394 struct eb_objects *eb) 420 struct eb_objects *eb)
395{ 421{
422#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
423 struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
396 struct drm_i915_gem_relocation_entry __user *user_relocs; 424 struct drm_i915_gem_relocation_entry __user *user_relocs;
397 struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; 425 struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
398 int i, ret; 426 int remain, ret;
399 427
400 user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr; 428 user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
401 for (i = 0; i < entry->relocation_count; i++) {
402 struct drm_i915_gem_relocation_entry reloc;
403 429
404 if (__copy_from_user_inatomic(&reloc, 430 remain = entry->relocation_count;
405 user_relocs+i, 431 while (remain) {
406 sizeof(reloc))) 432 struct drm_i915_gem_relocation_entry *r = stack_reloc;
433 int count = remain;
434 if (count > ARRAY_SIZE(stack_reloc))
435 count = ARRAY_SIZE(stack_reloc);
436 remain -= count;
437
438 if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
407 return -EFAULT; 439 return -EFAULT;
408 440
409 ret = i915_gem_execbuffer_relocate_entry(obj, eb, &reloc); 441 do {
410 if (ret) 442 u64 offset = r->presumed_offset;
411 return ret;
412 443
413 if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset, 444 ret = i915_gem_execbuffer_relocate_entry(obj, eb, r);
414 &reloc.presumed_offset, 445 if (ret)
415 sizeof(reloc.presumed_offset))) 446 return ret;
416 return -EFAULT; 447
448 if (r->presumed_offset != offset &&
449 __copy_to_user_inatomic(&user_relocs->presumed_offset,
450 &r->presumed_offset,
451 sizeof(r->presumed_offset))) {
452 return -EFAULT;
453 }
454
455 user_relocs++;
456 r++;
457 } while (--count);
417 } 458 }
418 459
419 return 0; 460 return 0;
461#undef N_RELOC
420} 462}
421 463
422static int 464static int
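The relocation loop now batches __copy_from_user_inatomic() through a 512-byte stack buffer instead of one uaccess round-trip per entry, and writes presumed_offset back only when it actually changed. The chunk size follows from the uapi struct layout; a compile-time check of the assumed sizes:

    #include <drm/i915_drm.h>

    /* 32 bytes per entry is the expected uapi layout (2 u64 + 4 u32). */
    _Static_assert(sizeof(struct drm_i915_gem_relocation_entry) == 32,
                   "relocation entry expected to be 32 bytes");

    /* Hence N_RELOC(512) == 16 relocations per copied chunk. */
    _Static_assert(512 / sizeof(struct drm_i915_gem_relocation_entry) == 16,
                   "N_RELOC(512) should be 16");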
@@ -465,6 +507,13 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
465#define __EXEC_OBJECT_HAS_FENCE (1<<31) 507#define __EXEC_OBJECT_HAS_FENCE (1<<31)
466 508
467static int 509static int
510need_reloc_mappable(struct drm_i915_gem_object *obj)
511{
512 struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
513 return entry->relocation_count && !use_cpu_reloc(obj);
514}
515
516static int
468pin_and_fence_object(struct drm_i915_gem_object *obj, 517pin_and_fence_object(struct drm_i915_gem_object *obj,
469 struct intel_ring_buffer *ring) 518 struct intel_ring_buffer *ring)
470{ 519{
@@ -477,8 +526,7 @@ pin_and_fence_object(struct drm_i915_gem_object *obj,
477 has_fenced_gpu_access && 526 has_fenced_gpu_access &&
478 entry->flags & EXEC_OBJECT_NEEDS_FENCE && 527 entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
479 obj->tiling_mode != I915_TILING_NONE; 528 obj->tiling_mode != I915_TILING_NONE;
480 need_mappable = 529 need_mappable = need_fence || need_reloc_mappable(obj);
481 entry->relocation_count ? true : need_fence;
482 530
483 ret = i915_gem_object_pin(obj, entry->alignment, need_mappable); 531 ret = i915_gem_object_pin(obj, entry->alignment, need_mappable);
484 if (ret) 532 if (ret)
@@ -486,18 +534,13 @@ pin_and_fence_object(struct drm_i915_gem_object *obj,
486 534
487 if (has_fenced_gpu_access) { 535 if (has_fenced_gpu_access) {
488 if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) { 536 if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
489 if (obj->tiling_mode) { 537 ret = i915_gem_object_get_fence(obj);
490 ret = i915_gem_object_get_fence(obj, ring); 538 if (ret)
491 if (ret) 539 goto err_unpin;
492 goto err_unpin;
493 540
541 if (i915_gem_object_pin_fence(obj))
494 entry->flags |= __EXEC_OBJECT_HAS_FENCE; 542 entry->flags |= __EXEC_OBJECT_HAS_FENCE;
495 i915_gem_object_pin_fence(obj); 543
496 } else {
497 ret = i915_gem_object_put_fence(obj);
498 if (ret)
499 goto err_unpin;
500 }
501 obj->pending_fenced_gpu_access = true; 544 obj->pending_fenced_gpu_access = true;
502 } 545 }
503 } 546 }
@@ -535,8 +578,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
535 has_fenced_gpu_access && 578 has_fenced_gpu_access &&
536 entry->flags & EXEC_OBJECT_NEEDS_FENCE && 579 entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
537 obj->tiling_mode != I915_TILING_NONE; 580 obj->tiling_mode != I915_TILING_NONE;
538 need_mappable = 581 need_mappable = need_fence || need_reloc_mappable(obj);
539 entry->relocation_count ? true : need_fence;
540 582
541 if (need_mappable) 583 if (need_mappable)
542 list_move(&obj->exec_list, &ordered_objects); 584 list_move(&obj->exec_list, &ordered_objects);
@@ -576,8 +618,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
576 has_fenced_gpu_access && 618 has_fenced_gpu_access &&
577 entry->flags & EXEC_OBJECT_NEEDS_FENCE && 619 entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
578 obj->tiling_mode != I915_TILING_NONE; 620 obj->tiling_mode != I915_TILING_NONE;
579 need_mappable = 621 need_mappable = need_fence || need_reloc_mappable(obj);
580 entry->relocation_count ? true : need_fence;
581 622
582 if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) || 623 if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
583 (need_mappable && !obj->map_and_fenceable)) 624 (need_mappable && !obj->map_and_fenceable))
@@ -798,64 +839,6 @@ i915_gem_execbuffer_flush(struct drm_device *dev,
798 return 0; 839 return 0;
799} 840}
800 841
801static bool
802intel_enable_semaphores(struct drm_device *dev)
803{
804 if (INTEL_INFO(dev)->gen < 6)
805 return 0;
806
807 if (i915_semaphores >= 0)
808 return i915_semaphores;
809
810 /* Disable semaphores on SNB */
811 if (INTEL_INFO(dev)->gen == 6)
812 return 0;
813
814 return 1;
815}
816
817static int
818i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
819 struct intel_ring_buffer *to)
820{
821 struct intel_ring_buffer *from = obj->ring;
822 u32 seqno;
823 int ret, idx;
824
825 if (from == NULL || to == from)
826 return 0;
827
828 /* XXX gpu semaphores are implicated in various hard hangs on SNB */
829 if (!intel_enable_semaphores(obj->base.dev))
830 return i915_gem_object_wait_rendering(obj);
831
832 idx = intel_ring_sync_index(from, to);
833
834 seqno = obj->last_rendering_seqno;
835 if (seqno <= from->sync_seqno[idx])
836 return 0;
837
838 if (seqno == from->outstanding_lazy_request) {
839 struct drm_i915_gem_request *request;
840
841 request = kzalloc(sizeof(*request), GFP_KERNEL);
842 if (request == NULL)
843 return -ENOMEM;
844
845 ret = i915_add_request(from, NULL, request);
846 if (ret) {
847 kfree(request);
848 return ret;
849 }
850
851 seqno = request->seqno;
852 }
853
854 from->sync_seqno[idx] = seqno;
855
856 return to->sync_to(to, from, seqno - 1);
857}
858
859static int 842static int
860i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips) 843i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
861{ 844{
@@ -917,7 +900,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
917 } 900 }
918 901
919 list_for_each_entry(obj, objects, exec_list) { 902 list_for_each_entry(obj, objects, exec_list) {
920 ret = i915_gem_execbuffer_sync_rings(obj, ring); 903 ret = i915_gem_object_sync(obj, ring);
921 if (ret) 904 if (ret)
922 return ret; 905 return ret;
923 } 906 }
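All of the inter-ring semaphore plumbing deleted here moves behind i915_gem_object_sync(), which execbuffer and pin_to_display_plane now share. One subtlety the removed code carries into the new helper: the semaphore is armed with seqno - 1 because the hardware mailbox comparison is strictly greater-than while GEM's "passed" test is greater-or-equal (my reading of intel_ringbuffer.c; verify there). The two conditions agree for unsigned values, as a brute-force check shows:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t mbox, seqno;

            /* (mbox > seqno - 1)  <=>  (mbox >= seqno), for seqno >= 1 */
            for (mbox = 0; mbox < 64; mbox++)
                    for (seqno = 1; seqno < 64; seqno++)
                            assert((mbox > seqno - 1) == (mbox >= seqno));
            return 0;
    }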
@@ -955,7 +938,7 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
955 if (!access_ok(VERIFY_WRITE, ptr, length)) 938 if (!access_ok(VERIFY_WRITE, ptr, length))
956 return -EFAULT; 939 return -EFAULT;
957 940
958 if (fault_in_pages_readable(ptr, length)) 941 if (fault_in_multipages_readable(ptr, length))
959 return -EFAULT; 942 return -EFAULT;
960 } 943 }
961 944
@@ -984,11 +967,14 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
984 obj->pending_gpu_write = true; 967 obj->pending_gpu_write = true;
985 list_move_tail(&obj->gpu_write_list, 968 list_move_tail(&obj->gpu_write_list,
986 &ring->gpu_write_list); 969 &ring->gpu_write_list);
987 intel_mark_busy(ring->dev, obj); 970 if (obj->pin_count) /* check for potential scanout */
971 intel_mark_busy(ring->dev, obj);
988 } 972 }
989 973
990 trace_i915_gem_object_change_domain(obj, old_read, old_write); 974 trace_i915_gem_object_change_domain(obj, old_read, old_write);
991 } 975 }
976
977 intel_mark_busy(ring->dev, NULL);
992} 978}
993 979
994static void 980static void
@@ -1078,17 +1064,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1078 ring = &dev_priv->ring[RCS]; 1064 ring = &dev_priv->ring[RCS];
1079 break; 1065 break;
1080 case I915_EXEC_BSD: 1066 case I915_EXEC_BSD:
1081 if (!HAS_BSD(dev)) {
1082 DRM_DEBUG("execbuf with invalid ring (BSD)\n");
1083 return -EINVAL;
1084 }
1085 ring = &dev_priv->ring[VCS]; 1067 ring = &dev_priv->ring[VCS];
1086 break; 1068 break;
1087 case I915_EXEC_BLT: 1069 case I915_EXEC_BLT:
1088 if (!HAS_BLT(dev)) {
1089 DRM_DEBUG("execbuf with invalid ring (BLT)\n");
1090 return -EINVAL;
1091 }
1092 ring = &dev_priv->ring[BCS]; 1070 ring = &dev_priv->ring[BCS];
1093 break; 1071 break;
1094 default: 1072 default:
@@ -1096,6 +1074,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1096 (int)(args->flags & I915_EXEC_RING_MASK)); 1074 (int)(args->flags & I915_EXEC_RING_MASK));
1097 return -EINVAL; 1075 return -EINVAL;
1098 } 1076 }
1077 if (!intel_ring_initialized(ring)) {
1078 DRM_DEBUG("execbuf with invalid ring: %d\n",
1079 (int)(args->flags & I915_EXEC_RING_MASK));
1080 return -EINVAL;
1081 }
1099 1082
1100 mode = args->flags & I915_EXEC_CONSTANTS_MASK; 1083 mode = args->flags & I915_EXEC_CONSTANTS_MASK;
1101 mask = I915_EXEC_CONSTANTS_MASK; 1084 mask = I915_EXEC_CONSTANTS_MASK;
@@ -1133,11 +1116,17 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1133 return -EINVAL; 1116 return -EINVAL;
1134 } 1117 }
1135 1118
1119 if (INTEL_INFO(dev)->gen >= 5) {
1120 DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
1121 return -EINVAL;
1122 }
1123
1136 if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) { 1124 if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
1137 DRM_DEBUG("execbuf with %u cliprects\n", 1125 DRM_DEBUG("execbuf with %u cliprects\n",
1138 args->num_cliprects); 1126 args->num_cliprects);
1139 return -EINVAL; 1127 return -EINVAL;
1140 } 1128 }
1129
1141 cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects), 1130 cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
1142 GFP_KERNEL); 1131 GFP_KERNEL);
1143 if (cliprects == NULL) { 1132 if (cliprects == NULL) {
@@ -1242,9 +1231,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1242 * so every billion or so execbuffers, we need to stall 1231 * so every billion or so execbuffers, we need to stall
1243 * the GPU in order to reset the counters. 1232 * the GPU in order to reset the counters.
1244 */ 1233 */
1245 ret = i915_gpu_idle(dev, true); 1234 ret = i915_gpu_idle(dev);
1246 if (ret) 1235 if (ret)
1247 goto err; 1236 goto err;
1237 i915_gem_retire_requests(dev);
1248 1238
1249 BUG_ON(ring->sync_seqno[i]); 1239 BUG_ON(ring->sync_seqno[i]);
1250 } 1240 }
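The "every billion or so execbuffers" stall exists because seqnos are u32: the per-ring sync_seqno bookkeeping must be re-zeroed before the counter wraps at 2^32 (about 4.3e9 requests). Ordinary waits survive the wrap thanks to the signed-subtraction comparison behind i915_seqno_passed(), which stays correct as long as two live seqnos are within 2^31 of each other; a standalone demonstration:

    #include <stdint.h>
    #include <stdio.h>

    /* The wrap-safe comparison GEM uses throughout (i915_seqno_passed). */
    static int seqno_passed(uint32_t seq1, uint32_t seq2)
    {
            return (int32_t)(seq1 - seq2) >= 0;
    }

    int main(void)
    {
            printf("%d\n", seqno_passed(5, 0xfffffffbu));   /* 1: 5 is later */
            printf("%d\n", seqno_passed(0xfffffffbu, 5));   /* 0 */
            return 0;
    }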
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index a135c61f4119..9fd25a435536 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -96,11 +96,10 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
96 GFP_KERNEL); 96 GFP_KERNEL);
97 if (!ppgtt->pt_dma_addr) 97 if (!ppgtt->pt_dma_addr)
98 goto err_pt_alloc; 98 goto err_pt_alloc;
99 }
100 99
101 for (i = 0; i < ppgtt->num_pd_entries; i++) { 100 for (i = 0; i < ppgtt->num_pd_entries; i++) {
102 dma_addr_t pt_addr; 101 dma_addr_t pt_addr;
103 if (dev_priv->mm.gtt->needs_dmar) { 102
104 pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], 103 pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i],
105 0, 4096, 104 0, 4096,
106 PCI_DMA_BIDIRECTIONAL); 105 PCI_DMA_BIDIRECTIONAL);
@@ -112,8 +111,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
112 111
113 } 112 }
114 ppgtt->pt_dma_addr[i] = pt_addr; 113 ppgtt->pt_dma_addr[i] = pt_addr;
115 } else 114 }
116 pt_addr = page_to_phys(ppgtt->pt_pages[i]);
117 } 115 }
118 116
119 ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt->scratch_page_dma; 117 ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt->scratch_page_dma;
@@ -269,7 +267,13 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
269 BUG(); 267 BUG();
270 } 268 }
271 269
272 if (dev_priv->mm.gtt->needs_dmar) { 270 if (obj->sg_table) {
271 i915_ppgtt_insert_sg_entries(ppgtt,
272 obj->sg_table->sgl,
273 obj->sg_table->nents,
274 obj->gtt_space->start >> PAGE_SHIFT,
275 pte_flags);
276 } else if (dev_priv->mm.gtt->needs_dmar) {
273 BUG_ON(!obj->sg_list); 277 BUG_ON(!obj->sg_list);
274 278
275 i915_ppgtt_insert_sg_entries(ppgtt, 279 i915_ppgtt_insert_sg_entries(ppgtt,
@@ -319,7 +323,7 @@ static bool do_idling(struct drm_i915_private *dev_priv)
319 323
320 if (unlikely(dev_priv->mm.gtt->do_idle_maps)) { 324 if (unlikely(dev_priv->mm.gtt->do_idle_maps)) {
321 dev_priv->mm.interruptible = false; 325 dev_priv->mm.interruptible = false;
322 if (i915_gpu_idle(dev_priv->dev, false)) { 326 if (i915_gpu_idle(dev_priv->dev)) {
323 DRM_ERROR("Couldn't idle GPU\n"); 327 DRM_ERROR("Couldn't idle GPU\n");
324 /* Wait a bit, in hopes it avoids the hang */ 328 /* Wait a bit, in hopes it avoids the hang */
325 udelay(10); 329 udelay(10);
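
do_idling()/undo_idling() exist for machines flagged with do_idle_maps, where GTT updates must not race GPU activity; the unbind path brackets its work with a forced idle. The expected pairing, condensed from this hunk (the helpers' exact contract is inferred from the calls shown):

        /* do_idling() force-idles the GPU when do_idle_maps is set and
         * returns the prior value of mm.interruptible; undo_idling()
         * restores it. */
        bool interruptible = do_idling(dev_priv);

        intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
                              obj->base.size >> PAGE_SHIFT);

        undo_idling(dev_priv, interruptible);
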
@@ -346,48 +350,39 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
346 350
347 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { 351 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
348 i915_gem_clflush_object(obj); 352 i915_gem_clflush_object(obj);
349 i915_gem_gtt_rebind_object(obj, obj->cache_level); 353 i915_gem_gtt_bind_object(obj, obj->cache_level);
350 } 354 }
351 355
352 intel_gtt_chipset_flush(); 356 intel_gtt_chipset_flush();
353} 357}
354 358
355int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj) 359int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
356{ 360{
357 struct drm_device *dev = obj->base.dev; 361 struct drm_device *dev = obj->base.dev;
358 struct drm_i915_private *dev_priv = dev->dev_private; 362 struct drm_i915_private *dev_priv = dev->dev_private;
359 unsigned int agp_type = cache_level_to_agp_type(dev, obj->cache_level);
360 int ret;
361
362 if (dev_priv->mm.gtt->needs_dmar) {
363 ret = intel_gtt_map_memory(obj->pages,
364 obj->base.size >> PAGE_SHIFT,
365 &obj->sg_list,
366 &obj->num_sg);
367 if (ret != 0)
368 return ret;
369
370 intel_gtt_insert_sg_entries(obj->sg_list,
371 obj->num_sg,
372 obj->gtt_space->start >> PAGE_SHIFT,
373 agp_type);
374 } else
375 intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
376 obj->base.size >> PAGE_SHIFT,
377 obj->pages,
378 agp_type);
379 363
380 return 0; 364 if (dev_priv->mm.gtt->needs_dmar)
365 return intel_gtt_map_memory(obj->pages,
366 obj->base.size >> PAGE_SHIFT,
367 &obj->sg_list,
368 &obj->num_sg);
369 else
370 return 0;
381} 371}
382 372
383void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj, 373void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
384 enum i915_cache_level cache_level) 374 enum i915_cache_level cache_level)
385{ 375{
386 struct drm_device *dev = obj->base.dev; 376 struct drm_device *dev = obj->base.dev;
387 struct drm_i915_private *dev_priv = dev->dev_private; 377 struct drm_i915_private *dev_priv = dev->dev_private;
388 unsigned int agp_type = cache_level_to_agp_type(dev, cache_level); 378 unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);
389 379
390 if (dev_priv->mm.gtt->needs_dmar) { 380 if (obj->sg_table) {
381 intel_gtt_insert_sg_entries(obj->sg_table->sgl,
382 obj->sg_table->nents,
383 obj->gtt_space->start >> PAGE_SHIFT,
384 agp_type);
385 } else if (dev_priv->mm.gtt->needs_dmar) {
391 BUG_ON(!obj->sg_list); 386 BUG_ON(!obj->sg_list);
392 387
393 intel_gtt_insert_sg_entries(obj->sg_list, 388 intel_gtt_insert_sg_entries(obj->sg_list,
@@ -399,19 +394,26 @@ void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
399 obj->base.size >> PAGE_SHIFT, 394 obj->base.size >> PAGE_SHIFT,
400 obj->pages, 395 obj->pages,
401 agp_type); 396 agp_type);
397
398 obj->has_global_gtt_mapping = 1;
402} 399}
403 400
404void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj) 401void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
405{ 402{
403 intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
404 obj->base.size >> PAGE_SHIFT);
405
406 obj->has_global_gtt_mapping = 0;
407}
408
409void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
410{
406 struct drm_device *dev = obj->base.dev; 411 struct drm_device *dev = obj->base.dev;
407 struct drm_i915_private *dev_priv = dev->dev_private; 412 struct drm_i915_private *dev_priv = dev->dev_private;
408 bool interruptible; 413 bool interruptible;
409 414
410 interruptible = do_idling(dev_priv); 415 interruptible = do_idling(dev_priv);
411 416
412 intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
413 obj->base.size >> PAGE_SHIFT);
414
415 if (obj->sg_list) { 417 if (obj->sg_list) {
416 intel_gtt_unmap_memory(obj->sg_list, obj->num_sg); 418 intel_gtt_unmap_memory(obj->sg_list, obj->num_sg);
417 obj->sg_list = NULL; 419 obj->sg_list = NULL;
@@ -419,3 +421,23 @@ void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
419 421
420 undo_idling(dev_priv, interruptible); 422 undo_idling(dev_priv, interruptible);
421} 423}
424
425void i915_gem_init_global_gtt(struct drm_device *dev,
426 unsigned long start,
427 unsigned long mappable_end,
428 unsigned long end)
429{
430 drm_i915_private_t *dev_priv = dev->dev_private;
431
432 /* Subtract the guard page ... */
433 drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
434
435 dev_priv->mm.gtt_start = start;
436 dev_priv->mm.gtt_mappable_end = mappable_end;
437 dev_priv->mm.gtt_end = end;
438 dev_priv->mm.gtt_total = end - start;
439 dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
440
441 /* ... but ensure that we clear the entire range. */
442 intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
443}
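
The new i915_gem_init_global_gtt() reserves a guard page at the top of the aperture: drm_mm is told the managed range is one page smaller than [start, end), so no object can ever be bound over the last page, while intel_gtt_clear_range() still clears PTEs for the full range including that page. A hedged usage sketch (the sizes are invented for illustration):

        /* Hypothetical caller: 256 MiB of GTT, the first 128 MiB mappable. */
        unsigned long start        = 0;
        unsigned long mappable_end = 128ul << 20;
        unsigned long end          = 256ul << 20;

        i915_gem_init_global_gtt(dev, start, mappable_end, end);
        /* drm_mm now manages [0, 256 MiB - 4096): nothing can be bound
         * over the guard page, yet clear_range() scrubbed all 256 MiB. */
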
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
new file mode 100644
index 000000000000..ada2e90a2a60
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -0,0 +1,202 @@
1/*
2 * Copyright © 2008-2012 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Chris Wilson <chris@chris-wilson.co.uk>
26 *
27 */
28
29#include "drmP.h"
30#include "drm.h"
31#include "i915_drm.h"
32#include "i915_drv.h"
33
34/*
35 * The BIOS typically reserves some of the system's memory for the exclusive
36 * use of the integrated graphics. This memory is no longer available for
37 * use by the OS and so the user finds that his system has less memory
38 * available than he put in. We refer to this memory as stolen.
39 *
40 * The BIOS will allocate its framebuffer from the stolen memory. Our
41 * goal is to try to reuse that object for our own fbcon, which must always
42 * be available for panics. Anything else we can reuse the stolen memory
43 * for is a boon.
44 */
45
46#define PTE_ADDRESS_MASK 0xfffff000
47#define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */
48#define PTE_MAPPING_TYPE_UNCACHED (0 << 1)
49#define PTE_MAPPING_TYPE_DCACHE (1 << 1) /* i830 only */
50#define PTE_MAPPING_TYPE_CACHED (3 << 1)
51#define PTE_MAPPING_TYPE_MASK (3 << 1)
52#define PTE_VALID (1 << 0)
53
54/**
55 * i915_stolen_to_phys - take an offset into stolen memory and turn it into
56 * a physical one
57 * @dev: drm device
58 * @offset: address to translate
59 *
60 * Some chip functions require allocations from stolen space and need the
61 * physical address of the memory in question.
62 */
63static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset)
64{
65 struct drm_i915_private *dev_priv = dev->dev_private;
66 struct pci_dev *pdev = dev_priv->bridge_dev;
67 u32 base;
68
69#if 0
70 /* On the machines I have tested the Graphics Base of Stolen Memory
71 * is unreliable, so compute the base by subtracting the stolen memory
72 * from the Top of Low Usable DRAM which is where the BIOS places
73 * the graphics stolen memory.
74 */
75 if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
76 /* top 32bits are reserved = 0 */
77 pci_read_config_dword(pdev, 0xA4, &base);
78 } else {
79 /* XXX presume 8xx is the same as i915 */
80 pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base);
81 }
82#else
83 if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
84 u16 val;
85 pci_read_config_word(pdev, 0xb0, &val);
86 base = val >> 4 << 20;
87 } else {
88 u8 val;
89 pci_read_config_byte(pdev, 0x9c, &val);
90 base = val >> 3 << 27;
91 }
92 base -= dev_priv->mm.gtt->stolen_size;
93#endif
94
95 return base + offset;
96}
97
98static void i915_warn_stolen(struct drm_device *dev)
99{
100 DRM_INFO("not enough stolen space for compressed buffer, disabling\n");
101 DRM_INFO("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
102}
103
104static void i915_setup_compression(struct drm_device *dev, int size)
105{
106 struct drm_i915_private *dev_priv = dev->dev_private;
107 struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
108 unsigned long cfb_base;
109 unsigned long ll_base = 0;
110
111 /* Just in case the BIOS is doing something questionable. */
112 intel_disable_fbc(dev);
113
114 compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
115 if (compressed_fb)
116 compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
117 if (!compressed_fb)
118 goto err;
119
120 cfb_base = i915_stolen_to_phys(dev, compressed_fb->start);
121 if (!cfb_base)
122 goto err_fb;
123
124 if (!(IS_GM45(dev) || HAS_PCH_SPLIT(dev))) {
125 compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
126 4096, 4096, 0);
127 if (compressed_llb)
128 compressed_llb = drm_mm_get_block(compressed_llb,
129 4096, 4096);
130 if (!compressed_llb)
131 goto err_fb;
132
133 ll_base = i915_stolen_to_phys(dev, compressed_llb->start);
134 if (!ll_base)
135 goto err_llb;
136 }
137
138 dev_priv->cfb_size = size;
139
140 dev_priv->compressed_fb = compressed_fb;
141 if (HAS_PCH_SPLIT(dev))
142 I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
143 else if (IS_GM45(dev)) {
144 I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
145 } else {
146 I915_WRITE(FBC_CFB_BASE, cfb_base);
147 I915_WRITE(FBC_LL_BASE, ll_base);
148 dev_priv->compressed_llb = compressed_llb;
149 }
150
151 DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n",
152 cfb_base, ll_base, size >> 20);
153 return;
154
155err_llb:
156 drm_mm_put_block(compressed_llb);
157err_fb:
158 drm_mm_put_block(compressed_fb);
159err:
160 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
161 i915_warn_stolen(dev);
162}
163
164static void i915_cleanup_compression(struct drm_device *dev)
165{
166 struct drm_i915_private *dev_priv = dev->dev_private;
167
168 drm_mm_put_block(dev_priv->compressed_fb);
169 if (dev_priv->compressed_llb)
170 drm_mm_put_block(dev_priv->compressed_llb);
171}
172
173void i915_gem_cleanup_stolen(struct drm_device *dev)
174{
175 if (I915_HAS_FBC(dev) && i915_powersave)
176 i915_cleanup_compression(dev);
177}
178
179int i915_gem_init_stolen(struct drm_device *dev)
180{
181 struct drm_i915_private *dev_priv = dev->dev_private;
182 unsigned long prealloc_size = dev_priv->mm.gtt->stolen_size;
183
184 /* Basic memrange allocator for stolen space */
185 drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
186
187 /* Try to set up FBC with a reasonable compressed buffer size */
188 if (I915_HAS_FBC(dev) && i915_powersave) {
189 int cfb_size;
190
191 /* Leave 1M for line length buffer & misc. */
192
193 /* Try to get a 32M buffer... */
194 if (prealloc_size > (36*1024*1024))
195 cfb_size = 32*1024*1024;
196 else /* fall back to 7/8 of the stolen space */
197 cfb_size = prealloc_size * 7 / 8;
198 i915_setup_compression(dev, cfb_size);
199 }
200
201 return 0;
202}
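
The register arithmetic in i915_stolen_to_phys() unpacks the base from a small config word on the bridge device: on gen4+/G33 the 16-bit value at 0xb0 holds the Top of Low Usable DRAM in 1 MiB units starting at bit 4; on older parts the byte at 0x9c holds it in 128 MiB units starting at bit 3. A worked example for the first path, with the register value invented for illustration:

        u16 val  = 0x0800;                       /* hypothetical read of 0xb0 */
        u32 base = (val >> 4) << 20;             /* 0x80 * 1 MiB = 128 MiB */
        base -= dev_priv->mm.gtt->stolen_size;   /* stolen sits just below TOLUD */
        /* i915_stolen_to_phys() then returns base + offset */
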
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 1a9306665987..b964df51cec7 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -354,9 +354,15 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
354 /* We need to rebind the object if its current allocation 354 /* We need to rebind the object if its current allocation
355 * no longer meets the alignment restrictions for its new 355 * no longer meets the alignment restrictions for its new
356 * tiling mode. Otherwise we can just leave it alone, but 356 * tiling mode. Otherwise we can just leave it alone, but
357 * need to ensure that any fence register is cleared. 357 * need to ensure that any fence register is updated before
358 * the next fenced access (either through the GTT or by the
359 * BLT unit on older GPUs).
360 *
361 * After updating the tiling parameters, we then flag whether
362 * we need to update an associated fence register. Note this
363 * has to also include the unfenced register the GPU uses
364 * whilst executing a fenced command for an untiled object.
358 */ 365 */
359 i915_gem_release_mmap(obj);
360 366
361 obj->map_and_fenceable = 367 obj->map_and_fenceable =
362 obj->gtt_space == NULL || 368 obj->gtt_space == NULL ||
@@ -374,9 +380,15 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
374 } 380 }
375 381
376 if (ret == 0) { 382 if (ret == 0) {
377 obj->tiling_changed = true; 383 obj->fence_dirty =
384 obj->fenced_gpu_access ||
385 obj->fence_reg != I915_FENCE_REG_NONE;
386
378 obj->tiling_mode = args->tiling_mode; 387 obj->tiling_mode = args->tiling_mode;
379 obj->stride = args->stride; 388 obj->stride = args->stride;
389
390 /* Force the fence to be reacquired for GTT access */
391 i915_gem_release_mmap(obj);
380 } 392 }
381 } 393 }
382 /* we have to maintain this existing ABI... */ 394 /* we have to maintain this existing ABI... */
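
tiling_changed was a blunt flag; fence_dirty is set only when fence state can actually be stale, i.e. the object either owns a fence register or has done fenced GPU access through the shared unfenced register. Restated as a predicate (the same two fields as in the hunk above, not new driver API):

        /* Does this object need its fence re-established after a
         * tiling change? Mirrors the condition set above. */
        static bool fence_needs_update(struct drm_i915_gem_object *obj)
        {
                return obj->fenced_gpu_access ||
                       obj->fence_reg != I915_FENCE_REG_NONE;
        }
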
diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
index 13b028994b2b..0e72abb9f701 100644
--- a/drivers/gpu/drm/i915/i915_ioc32.c
+++ b/drivers/gpu/drm/i915/i915_ioc32.c
@@ -34,6 +34,7 @@
34#include "drmP.h" 34#include "drmP.h"
35#include "drm.h" 35#include "drm.h"
36#include "i915_drm.h" 36#include "i915_drm.h"
37#include "i915_drv.h"
37 38
38typedef struct _drm_i915_batchbuffer32 { 39typedef struct _drm_i915_batchbuffer32 {
39 int start; /* agp offset */ 40 int start; /* agp offset */
@@ -181,7 +182,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
181 (unsigned long)request); 182 (unsigned long)request);
182} 183}
183 184
184drm_ioctl_compat_t *i915_compat_ioctls[] = { 185static drm_ioctl_compat_t *i915_compat_ioctls[] = {
185 [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer, 186 [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
186 [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer, 187 [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
187 [DRM_I915_GETPARAM] = compat_i915_getparam, 188 [DRM_I915_GETPARAM] = compat_i915_getparam,
@@ -189,6 +190,7 @@ drm_ioctl_compat_t *i915_compat_ioctls[] = {
189 [DRM_I915_ALLOC] = compat_i915_alloc 190 [DRM_I915_ALLOC] = compat_i915_alloc
190}; 191};
191 192
193#ifdef CONFIG_COMPAT
192/** 194/**
193 * Called whenever a 32-bit process running under a 64-bit kernel 195 * Called whenever a 32-bit process running under a 64-bit kernel
194 * performs an ioctl on /dev/dri/card<n>. 196 * performs an ioctl on /dev/dri/card<n>.
@@ -217,3 +219,4 @@ long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
217 219
218 return ret; 220 return ret;
219} 221}
222#endif
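
Making the table static works because its only consumer is i915_compat_ioctl() in the same file, now compiled only under CONFIG_COMPAT. The dispatch itself is the usual DRM pattern: index the table by the ioctl number relative to DRM_COMMAND_BASE and fall back to the generic handler. A sketch of that shape, hedged because only the function's first and last lines appear in this hunk:

        long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
        {
                unsigned int nr = DRM_IOCTL_NR(cmd);
                drm_ioctl_compat_t *fn = NULL;

                if (nr < DRM_COMMAND_BASE)
                        return drm_compat_ioctl(filp, cmd, arg);

                if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls))
                        fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];

                /* fall back to the native handler for entries with no shim */
                return fn ? (*fn)(filp, cmd, arg) : drm_ioctl(filp, cmd, arg);
        }
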
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index afd4e03e337e..cc4a63307611 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -26,6 +26,8 @@
26 * 26 *
27 */ 27 */
28 28
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
29#include <linux/sysrq.h> 31#include <linux/sysrq.h>
30#include <linux/slab.h> 32#include <linux/slab.h>
31#include "drmP.h" 33#include "drmP.h"
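
Defining pr_fmt before any include lets the printk-to-pr_err conversion later in this file pick up a module-name prefix for free; each pr_err() is a printk with pr_fmt applied to the format string. Roughly:

        /* With the pr_fmt above in scope, a call such as */
        pr_err("render error detected, EIR: 0x%08x\n", eir);
        /* expands (approximately) to */
        printk(KERN_ERR KBUILD_MODNAME ": " "render error detected, EIR: 0x%08x\n", eir);
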
@@ -35,35 +37,6 @@
35#include "i915_trace.h" 37#include "i915_trace.h"
36#include "intel_drv.h" 38#include "intel_drv.h"
37 39
38#define MAX_NOPID ((u32)~0)
39
40/**
41 * Interrupts that are always left unmasked.
42 *
43 * Since pipe events are edge-triggered from the PIPESTAT register to IIR,
44 * we leave them always unmasked in IMR and then control enabling them through
45 * PIPESTAT alone.
46 */
47#define I915_INTERRUPT_ENABLE_FIX \
48 (I915_ASLE_INTERRUPT | \
49 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
50 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | \
51 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | \
52 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | \
53 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
54
55/** Interrupts that we mask and unmask at runtime. */
56#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT | I915_BSD_USER_INTERRUPT)
57
58#define I915_PIPE_VBLANK_STATUS (PIPE_START_VBLANK_INTERRUPT_STATUS |\
59 PIPE_VBLANK_INTERRUPT_STATUS)
60
61#define I915_PIPE_VBLANK_ENABLE (PIPE_START_VBLANK_INTERRUPT_ENABLE |\
62 PIPE_VBLANK_INTERRUPT_ENABLE)
63
64#define DRM_I915_VBLANK_PIPE_ALL (DRM_I915_VBLANK_PIPE_A | \
65 DRM_I915_VBLANK_PIPE_B)
66
67/* For display hotplug interrupt */ 40/* For display hotplug interrupt */
68static void 41static void
69ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) 42ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
@@ -118,6 +91,10 @@ void intel_enable_asle(struct drm_device *dev)
118 drm_i915_private_t *dev_priv = dev->dev_private; 91 drm_i915_private_t *dev_priv = dev->dev_private;
119 unsigned long irqflags; 92 unsigned long irqflags;
120 93
94 /* FIXME: opregion/asle for VLV */
95 if (IS_VALLEYVIEW(dev))
96 return;
97
121 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 98 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
122 99
123 if (HAS_PCH_SPLIT(dev)) 100 if (HAS_PCH_SPLIT(dev))
@@ -354,15 +331,12 @@ static void notify_ring(struct drm_device *dev,
354 struct intel_ring_buffer *ring) 331 struct intel_ring_buffer *ring)
355{ 332{
356 struct drm_i915_private *dev_priv = dev->dev_private; 333 struct drm_i915_private *dev_priv = dev->dev_private;
357 u32 seqno;
358 334
359 if (ring->obj == NULL) 335 if (ring->obj == NULL)
360 return; 336 return;
361 337
362 seqno = ring->get_seqno(ring); 338 trace_i915_gem_request_complete(ring, ring->get_seqno(ring));
363 trace_i915_gem_request_complete(ring, seqno);
364 339
365 ring->irq_seqno = seqno;
366 wake_up_all(&ring->irq_queue); 340 wake_up_all(&ring->irq_queue);
367 if (i915_enable_hangcheck) { 341 if (i915_enable_hangcheck) {
368 dev_priv->hangcheck_count = 0; 342 dev_priv->hangcheck_count = 0;
@@ -424,13 +398,145 @@ static void gen6_pm_rps_work(struct work_struct *work)
424 mutex_unlock(&dev_priv->dev->struct_mutex); 398 mutex_unlock(&dev_priv->dev->struct_mutex);
425} 399}
426 400
427static void pch_irq_handler(struct drm_device *dev) 401static void snb_gt_irq_handler(struct drm_device *dev,
402 struct drm_i915_private *dev_priv,
403 u32 gt_iir)
404{
405
406 if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
407 GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
408 notify_ring(dev, &dev_priv->ring[RCS]);
409 if (gt_iir & GEN6_BSD_USER_INTERRUPT)
410 notify_ring(dev, &dev_priv->ring[VCS]);
411 if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
412 notify_ring(dev, &dev_priv->ring[BCS]);
413
414 if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
415 GT_GEN6_BSD_CS_ERROR_INTERRUPT |
416 GT_RENDER_CS_ERROR_INTERRUPT)) {
417 DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
418 i915_handle_error(dev, false);
419 }
420}
421
422static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
423 u32 pm_iir)
424{
425 unsigned long flags;
426
427 /*
428 * IIR bits should never already be set because IMR should
429 * prevent an interrupt from being shown in IIR. The warning
430 * catches a case where we've unsafely cleared
431 * dev_priv->pm_iir. Although missing an interrupt of the same
432 * type is not a problem, it indicates a problem in the logic.
433 *
434 * The mask bit in IMR is cleared by rps_work.
435 */
436
437 spin_lock_irqsave(&dev_priv->rps_lock, flags);
438 WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
439 dev_priv->pm_iir |= pm_iir;
440 I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
441 POSTING_READ(GEN6_PMIMR);
442 spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
443
444 queue_work(dev_priv->wq, &dev_priv->rps_work);
445}
446
447static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
428{ 448{
449 struct drm_device *dev = (struct drm_device *) arg;
429 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 450 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
430 u32 pch_iir; 451 u32 iir, gt_iir, pm_iir;
452 irqreturn_t ret = IRQ_NONE;
453 unsigned long irqflags;
431 int pipe; 454 int pipe;
455 u32 pipe_stats[I915_MAX_PIPES];
456 u32 vblank_status;
457 int vblank = 0;
458 bool blc_event = false;
432 459
433 pch_iir = I915_READ(SDEIIR); 460 atomic_inc(&dev_priv->irq_received);
461
462 vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS |
463 PIPE_VBLANK_INTERRUPT_STATUS;
464
465 while (true) {
466 iir = I915_READ(VLV_IIR);
467 gt_iir = I915_READ(GTIIR);
468 pm_iir = I915_READ(GEN6_PMIIR);
469
470 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
471 goto out;
472
473 ret = IRQ_HANDLED;
474
475 snb_gt_irq_handler(dev, dev_priv, gt_iir);
476
477 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
478 for_each_pipe(pipe) {
479 int reg = PIPESTAT(pipe);
480 pipe_stats[pipe] = I915_READ(reg);
481
482 /*
483 * Clear the PIPE*STAT regs before the IIR
484 */
485 if (pipe_stats[pipe] & 0x8000ffff) {
486 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
487 DRM_DEBUG_DRIVER("pipe %c underrun\n",
488 pipe_name(pipe));
489 I915_WRITE(reg, pipe_stats[pipe]);
490 }
491 }
492 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
493
494 /* Consume port. Then clear IIR or we'll miss events */
495 if (iir & I915_DISPLAY_PORT_INTERRUPT) {
496 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
497
498 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
499 hotplug_status);
500 if (hotplug_status & dev_priv->hotplug_supported_mask)
501 queue_work(dev_priv->wq,
502 &dev_priv->hotplug_work);
503
504 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
505 I915_READ(PORT_HOTPLUG_STAT);
506 }
507
508
509 if (iir & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT) {
510 drm_handle_vblank(dev, 0);
511 vblank++;
512 intel_finish_page_flip(dev, 0);
513 }
514
515 if (iir & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT) {
516 drm_handle_vblank(dev, 1);
517 vblank++;
518 intel_finish_page_flip(dev, 1);
519 }
520
521 for_each_pipe(pipe)
522 blc_event |= !!(pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS);
523
524 if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
525 gen6_queue_rps_work(dev_priv, pm_iir);
526
527 I915_WRITE(GTIIR, gt_iir);
528 I915_WRITE(GEN6_PMIIR, pm_iir);
529 I915_WRITE(VLV_IIR, iir);
530 }
531
532out:
533 return ret;
534}
535
536static void pch_irq_handler(struct drm_device *dev, u32 pch_iir)
537{
538 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
539 int pipe;
434 540
435 if (pch_iir & SDE_AUDIO_POWER_MASK) 541 if (pch_iir & SDE_AUDIO_POWER_MASK)
436 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 542 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
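
One detail worth noting in valleyview_irq_handler() above: the PIPE*STAT registers are acknowledged (under irq_lock) before GTIIR, GEN6_PMIIR and VLV_IIR are written back at the bottom of the loop. Clearing IIR first would let a still-asserted pipe status immediately re-latch. Condensed from the handler:

        /* Ack ordering one loop iteration depends on: level-triggered
         * PIPE*STAT sources first, latched IIR copies last. */
        if (pipe_stats[pipe] & 0x8000ffff)
                I915_WRITE(PIPESTAT(pipe), pipe_stats[pipe]);  /* ack source */

        I915_WRITE(GTIIR, gt_iir);          /* then the latched copies ... */
        I915_WRITE(GEN6_PMIIR, pm_iir);
        I915_WRITE(VLV_IIR, iir);           /* ... so they cannot re-latch */
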
@@ -471,91 +577,77 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
471{ 577{
472 struct drm_device *dev = (struct drm_device *) arg; 578 struct drm_device *dev = (struct drm_device *) arg;
473 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 579 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
474 int ret = IRQ_NONE; 580 u32 de_iir, gt_iir, de_ier, pm_iir;
475 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir; 581 irqreturn_t ret = IRQ_NONE;
476 struct drm_i915_master_private *master_priv; 582 int i;
477 583
478 atomic_inc(&dev_priv->irq_received); 584 atomic_inc(&dev_priv->irq_received);
479 585
480 /* disable master interrupt before clearing iir */ 586 /* disable master interrupt before clearing iir */
481 de_ier = I915_READ(DEIER); 587 de_ier = I915_READ(DEIER);
482 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 588 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
483 POSTING_READ(DEIER);
484 589
485 de_iir = I915_READ(DEIIR);
486 gt_iir = I915_READ(GTIIR); 590 gt_iir = I915_READ(GTIIR);
487 pch_iir = I915_READ(SDEIIR); 591 if (gt_iir) {
488 pm_iir = I915_READ(GEN6_PMIIR); 592 snb_gt_irq_handler(dev, dev_priv, gt_iir);
489 593 I915_WRITE(GTIIR, gt_iir);
490 if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 && pm_iir == 0) 594 ret = IRQ_HANDLED;
491 goto done;
492
493 ret = IRQ_HANDLED;
494
495 if (dev->primary->master) {
496 master_priv = dev->primary->master->driver_priv;
497 if (master_priv->sarea_priv)
498 master_priv->sarea_priv->last_dispatch =
499 READ_BREADCRUMB(dev_priv);
500 } 595 }
501 596
502 if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY)) 597 de_iir = I915_READ(DEIIR);
503 notify_ring(dev, &dev_priv->ring[RCS]); 598 if (de_iir) {
504 if (gt_iir & GT_GEN6_BSD_USER_INTERRUPT) 599 if (de_iir & DE_GSE_IVB)
505 notify_ring(dev, &dev_priv->ring[VCS]); 600 intel_opregion_gse_intr(dev);
506 if (gt_iir & GT_BLT_USER_INTERRUPT) 601
507 notify_ring(dev, &dev_priv->ring[BCS]); 602 for (i = 0; i < 3; i++) {
508 603 if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
509 if (de_iir & DE_GSE_IVB) 604 intel_prepare_page_flip(dev, i);
510 intel_opregion_gse_intr(dev); 605 intel_finish_page_flip_plane(dev, i);
511 606 }
512 if (de_iir & DE_PLANEA_FLIP_DONE_IVB) { 607 if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
513 intel_prepare_page_flip(dev, 0); 608 drm_handle_vblank(dev, i);
514 intel_finish_page_flip_plane(dev, 0); 609 }
515 }
516 610
517 if (de_iir & DE_PLANEB_FLIP_DONE_IVB) { 611 /* check event from PCH */
518 intel_prepare_page_flip(dev, 1); 612 if (de_iir & DE_PCH_EVENT_IVB) {
519 intel_finish_page_flip_plane(dev, 1); 613 u32 pch_iir = I915_READ(SDEIIR);
520 }
521 614
522 if (de_iir & DE_PIPEA_VBLANK_IVB) 615 if (pch_iir & SDE_HOTPLUG_MASK_CPT)
523 drm_handle_vblank(dev, 0); 616 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
617 pch_irq_handler(dev, pch_iir);
524 618
525 if (de_iir & DE_PIPEB_VBLANK_IVB) 619 /* clear PCH hotplug event before clear CPU irq */
526 drm_handle_vblank(dev, 1); 620 I915_WRITE(SDEIIR, pch_iir);
621 }
527 622
528 /* check event from PCH */ 623 I915_WRITE(DEIIR, de_iir);
529 if (de_iir & DE_PCH_EVENT_IVB) { 624 ret = IRQ_HANDLED;
530 if (pch_iir & SDE_HOTPLUG_MASK_CPT)
531 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
532 pch_irq_handler(dev);
533 } 625 }
534 626
535 if (pm_iir & GEN6_PM_DEFERRED_EVENTS) { 627 pm_iir = I915_READ(GEN6_PMIIR);
536 unsigned long flags; 628 if (pm_iir) {
537 spin_lock_irqsave(&dev_priv->rps_lock, flags); 629 if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
538 WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n"); 630 gen6_queue_rps_work(dev_priv, pm_iir);
539 dev_priv->pm_iir |= pm_iir; 631 I915_WRITE(GEN6_PMIIR, pm_iir);
540 I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir); 632 ret = IRQ_HANDLED;
541 POSTING_READ(GEN6_PMIMR);
542 spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
543 queue_work(dev_priv->wq, &dev_priv->rps_work);
544 } 633 }
545 634
546 /* should clear PCH hotplug event before clear CPU irq */
547 I915_WRITE(SDEIIR, pch_iir);
548 I915_WRITE(GTIIR, gt_iir);
549 I915_WRITE(DEIIR, de_iir);
550 I915_WRITE(GEN6_PMIIR, pm_iir);
551
552done:
553 I915_WRITE(DEIER, de_ier); 635 I915_WRITE(DEIER, de_ier);
554 POSTING_READ(DEIER); 636 POSTING_READ(DEIER);
555 637
556 return ret; 638 return ret;
557} 639}
558 640
641static void ilk_gt_irq_handler(struct drm_device *dev,
642 struct drm_i915_private *dev_priv,
643 u32 gt_iir)
644{
645 if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
646 notify_ring(dev, &dev_priv->ring[RCS]);
647 if (gt_iir & GT_BSD_USER_INTERRUPT)
648 notify_ring(dev, &dev_priv->ring[VCS]);
649}
650
559static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS) 651static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
560{ 652{
561 struct drm_device *dev = (struct drm_device *) arg; 653 struct drm_device *dev = (struct drm_device *) arg;
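
In the Ivy Bridge handler above, per-pipe display bits are derived from the pipe A definitions by a fixed shift: each pipe's block of DE bits sits 5 bits above the previous one, which is what lets one loop over three pipes replace the unrolled pipe A/pipe B code. Spelled out as helper macros (the names are illustrative; only the shift pattern comes from the hunk):

        #define DE_PIPE_VBLANK_IVB(pipe)     (DE_PIPEA_VBLANK_IVB << (5 * (pipe)))
        #define DE_PLANE_FLIP_DONE_IVB(pipe) (DE_PLANEA_FLIP_DONE_IVB << (5 * (pipe)))
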
@@ -563,14 +655,9 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
563 int ret = IRQ_NONE; 655 int ret = IRQ_NONE;
564 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir; 656 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
565 u32 hotplug_mask; 657 u32 hotplug_mask;
566 struct drm_i915_master_private *master_priv;
567 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
568 658
569 atomic_inc(&dev_priv->irq_received); 659 atomic_inc(&dev_priv->irq_received);
570 660
571 if (IS_GEN6(dev))
572 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
573
574 /* disable master interrupt before clearing iir */ 661 /* disable master interrupt before clearing iir */
575 de_ier = I915_READ(DEIER); 662 de_ier = I915_READ(DEIER);
576 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 663 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
@@ -592,19 +679,10 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
592 679
593 ret = IRQ_HANDLED; 680 ret = IRQ_HANDLED;
594 681
595 if (dev->primary->master) { 682 if (IS_GEN5(dev))
596 master_priv = dev->primary->master->driver_priv; 683 ilk_gt_irq_handler(dev, dev_priv, gt_iir);
597 if (master_priv->sarea_priv) 684 else
598 master_priv->sarea_priv->last_dispatch = 685 snb_gt_irq_handler(dev, dev_priv, gt_iir);
599 READ_BREADCRUMB(dev_priv);
600 }
601
602 if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
603 notify_ring(dev, &dev_priv->ring[RCS]);
604 if (gt_iir & bsd_usr_interrupt)
605 notify_ring(dev, &dev_priv->ring[VCS]);
606 if (gt_iir & GT_BLT_USER_INTERRUPT)
607 notify_ring(dev, &dev_priv->ring[BCS]);
608 686
609 if (de_iir & DE_GSE) 687 if (de_iir & DE_GSE)
610 intel_opregion_gse_intr(dev); 688 intel_opregion_gse_intr(dev);
@@ -629,7 +707,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
629 if (de_iir & DE_PCH_EVENT) { 707 if (de_iir & DE_PCH_EVENT) {
630 if (pch_iir & hotplug_mask) 708 if (pch_iir & hotplug_mask)
631 queue_work(dev_priv->wq, &dev_priv->hotplug_work); 709 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
632 pch_irq_handler(dev); 710 pch_irq_handler(dev, pch_iir);
633 } 711 }
634 712
635 if (de_iir & DE_PCU_EVENT) { 713 if (de_iir & DE_PCU_EVENT) {
@@ -637,25 +715,8 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
637 i915_handle_rps_change(dev); 715 i915_handle_rps_change(dev);
638 } 716 }
639 717
640 if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS) { 718 if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
641 /* 719 gen6_queue_rps_work(dev_priv, pm_iir);
642 * IIR bits should never already be set because IMR should
643 * prevent an interrupt from being shown in IIR. The warning
644 * displays a case where we've unsafely cleared
645 * dev_priv->pm_iir. Although missing an interrupt of the same
646 * type is not a problem, it displays a problem in the logic.
647 *
648 * The mask bit in IMR is cleared by rps_work.
649 */
650 unsigned long flags;
651 spin_lock_irqsave(&dev_priv->rps_lock, flags);
652 WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
653 dev_priv->pm_iir |= pm_iir;
654 I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
655 POSTING_READ(GEN6_PMIMR);
656 spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
657 queue_work(dev_priv->wq, &dev_priv->rps_work);
658 }
659 720
660 /* should clear PCH hotplug event before clear CPU irq */ 721 /* should clear PCH hotplug event before clear CPU irq */
661 I915_WRITE(SDEIIR, pch_iir); 722 I915_WRITE(SDEIIR, pch_iir);
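
gen6_queue_rps_work() now centralises the dance that used to be open-coded here: under rps_lock it accumulates the PM IIR bits and masks further PM interrupts via GEN6_PMIMR, then queues rps_work. The worker is expected to undo the mask once it has consumed the bits; a sketch of that consuming side, inferred from the IMR comment in gen6_queue_rps_work() (the worker itself is not in this diff):

        /* Assumed rps_work side, mirroring the lock/mask protocol set
         * up by gen6_queue_rps_work() above. */
        spin_lock_irq(&dev_priv->rps_lock);
        pm_iir = dev_priv->pm_iir;
        dev_priv->pm_iir = 0;
        I915_WRITE(GEN6_PMIMR, 0);          /* unmask PM interrupts again */
        spin_unlock_irq(&dev_priv->rps_lock);
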
@@ -691,7 +752,7 @@ static void i915_error_work_func(struct work_struct *work)
691 if (atomic_read(&dev_priv->mm.wedged)) { 752 if (atomic_read(&dev_priv->mm.wedged)) {
692 DRM_DEBUG_DRIVER("resetting chip\n"); 753 DRM_DEBUG_DRIVER("resetting chip\n");
693 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event); 754 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
694 if (!i915_reset(dev, GRDOM_RENDER)) { 755 if (!i915_reset(dev)) {
695 atomic_set(&dev_priv->mm.wedged, 0); 756 atomic_set(&dev_priv->mm.wedged, 0);
696 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event); 757 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
697 } 758 }
@@ -727,7 +788,8 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
727 goto unwind; 788 goto unwind;
728 789
729 local_irq_save(flags); 790 local_irq_save(flags);
730 if (reloc_offset < dev_priv->mm.gtt_mappable_end) { 791 if (reloc_offset < dev_priv->mm.gtt_mappable_end &&
792 src->has_global_gtt_mapping) {
731 void __iomem *s; 793 void __iomem *s;
732 794
733 /* Simply ignore tiling or any overlapping fence. 795 /* Simply ignore tiling or any overlapping fence.
@@ -782,10 +844,11 @@ i915_error_object_free(struct drm_i915_error_object *obj)
782 kfree(obj); 844 kfree(obj);
783} 845}
784 846
785static void 847void
786i915_error_state_free(struct drm_device *dev, 848i915_error_state_free(struct kref *error_ref)
787 struct drm_i915_error_state *error)
788{ 849{
850 struct drm_i915_error_state *error = container_of(error_ref,
851 typeof(*error), ref);
789 int i; 852 int i;
790 853
791 for (i = 0; i < ARRAY_SIZE(error->ring); i++) { 854 for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
@@ -798,37 +861,56 @@ i915_error_state_free(struct drm_device *dev,
798 kfree(error->overlay); 861 kfree(error->overlay);
799 kfree(error); 862 kfree(error);
800} 863}
864static void capture_bo(struct drm_i915_error_buffer *err,
865 struct drm_i915_gem_object *obj)
866{
867 err->size = obj->base.size;
868 err->name = obj->base.name;
869 err->seqno = obj->last_rendering_seqno;
870 err->gtt_offset = obj->gtt_offset;
871 err->read_domains = obj->base.read_domains;
872 err->write_domain = obj->base.write_domain;
873 err->fence_reg = obj->fence_reg;
874 err->pinned = 0;
875 if (obj->pin_count > 0)
876 err->pinned = 1;
877 if (obj->user_pin_count > 0)
878 err->pinned = -1;
879 err->tiling = obj->tiling_mode;
880 err->dirty = obj->dirty;
881 err->purgeable = obj->madv != I915_MADV_WILLNEED;
882 err->ring = obj->ring ? obj->ring->id : -1;
883 err->cache_level = obj->cache_level;
884}
801 885
802static u32 capture_bo_list(struct drm_i915_error_buffer *err, 886static u32 capture_active_bo(struct drm_i915_error_buffer *err,
803 int count, 887 int count, struct list_head *head)
804 struct list_head *head)
805{ 888{
806 struct drm_i915_gem_object *obj; 889 struct drm_i915_gem_object *obj;
807 int i = 0; 890 int i = 0;
808 891
809 list_for_each_entry(obj, head, mm_list) { 892 list_for_each_entry(obj, head, mm_list) {
810 err->size = obj->base.size; 893 capture_bo(err++, obj);
811 err->name = obj->base.name;
812 err->seqno = obj->last_rendering_seqno;
813 err->gtt_offset = obj->gtt_offset;
814 err->read_domains = obj->base.read_domains;
815 err->write_domain = obj->base.write_domain;
816 err->fence_reg = obj->fence_reg;
817 err->pinned = 0;
818 if (obj->pin_count > 0)
819 err->pinned = 1;
820 if (obj->user_pin_count > 0)
821 err->pinned = -1;
822 err->tiling = obj->tiling_mode;
823 err->dirty = obj->dirty;
824 err->purgeable = obj->madv != I915_MADV_WILLNEED;
825 err->ring = obj->ring ? obj->ring->id : -1;
826 err->cache_level = obj->cache_level;
827
828 if (++i == count) 894 if (++i == count)
829 break; 895 break;
896 }
830 897
831 err++; 898 return i;
899}
900
901static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
902 int count, struct list_head *head)
903{
904 struct drm_i915_gem_object *obj;
905 int i = 0;
906
907 list_for_each_entry(obj, head, gtt_list) {
908 if (obj->pin_count == 0)
909 continue;
910
911 capture_bo(err++, obj);
912 if (++i == count)
913 break;
832 } 914 }
833 915
834 return i; 916 return i;
@@ -901,7 +983,6 @@ static void i915_record_ring_state(struct drm_device *dev,
901 struct drm_i915_private *dev_priv = dev->dev_private; 983 struct drm_i915_private *dev_priv = dev->dev_private;
902 984
903 if (INTEL_INFO(dev)->gen >= 6) { 985 if (INTEL_INFO(dev)->gen >= 6) {
904 error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
905 error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring)); 986 error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
906 error->semaphore_mboxes[ring->id][0] 987 error->semaphore_mboxes[ring->id][0]
907 = I915_READ(RING_SYNC_0(ring->mmio_base)); 988 = I915_READ(RING_SYNC_0(ring->mmio_base));
@@ -910,6 +991,7 @@ static void i915_record_ring_state(struct drm_device *dev,
910 } 991 }
911 992
912 if (INTEL_INFO(dev)->gen >= 4) { 993 if (INTEL_INFO(dev)->gen >= 4) {
994 error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
913 error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base)); 995 error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
914 error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base)); 996 error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
915 error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base)); 997 error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
@@ -919,11 +1001,13 @@ static void i915_record_ring_state(struct drm_device *dev,
919 error->bbaddr = I915_READ64(BB_ADDR); 1001 error->bbaddr = I915_READ64(BB_ADDR);
920 } 1002 }
921 } else { 1003 } else {
1004 error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
922 error->ipeir[ring->id] = I915_READ(IPEIR); 1005 error->ipeir[ring->id] = I915_READ(IPEIR);
923 error->ipehr[ring->id] = I915_READ(IPEHR); 1006 error->ipehr[ring->id] = I915_READ(IPEHR);
924 error->instdone[ring->id] = I915_READ(INSTDONE); 1007 error->instdone[ring->id] = I915_READ(INSTDONE);
925 } 1008 }
926 1009
1010 error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
927 error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base)); 1011 error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
928 error->seqno[ring->id] = ring->get_seqno(ring); 1012 error->seqno[ring->id] = ring->get_seqno(ring);
929 error->acthd[ring->id] = intel_ring_get_active_head(ring); 1013 error->acthd[ring->id] = intel_ring_get_active_head(ring);
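
The record-rings hunk just below switches from a manual loop over I915_NUM_RINGS with a ring->obj NULL check to for_each_ring(); the same iterator shows up again in i915_handle_error() further down. The macro itself is not in this diff; a plausible shape, stated as an assumption (intel_ring_initialized() does appear in the execbuffer hunk earlier):

        /* Assumed shape of the iterator: visit only rings that were
         * actually initialised for the running GPU. */
        #define for_each_ring(ring__, dev_priv__, i__)                   \
                for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++)         \
                        if (((ring__) = &(dev_priv__)->ring[(i__)]),     \
                            intel_ring_initialized((ring__)))
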
@@ -938,15 +1022,11 @@ static void i915_gem_record_rings(struct drm_device *dev,
938 struct drm_i915_error_state *error) 1022 struct drm_i915_error_state *error)
939{ 1023{
940 struct drm_i915_private *dev_priv = dev->dev_private; 1024 struct drm_i915_private *dev_priv = dev->dev_private;
1025 struct intel_ring_buffer *ring;
941 struct drm_i915_gem_request *request; 1026 struct drm_i915_gem_request *request;
942 int i, count; 1027 int i, count;
943 1028
944 for (i = 0; i < I915_NUM_RINGS; i++) { 1029 for_each_ring(ring, dev_priv, i) {
945 struct intel_ring_buffer *ring = &dev_priv->ring[i];
946
947 if (ring->obj == NULL)
948 continue;
949
950 i915_record_ring_state(dev, error, ring); 1030 i915_record_ring_state(dev, error, ring);
951 1031
952 error->ring[i].batchbuffer = 1032 error->ring[i].batchbuffer =
@@ -1013,8 +1093,19 @@ static void i915_capture_error_state(struct drm_device *dev)
1013 DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n", 1093 DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n",
1014 dev->primary->index); 1094 dev->primary->index);
1015 1095
1096 kref_init(&error->ref);
1016 error->eir = I915_READ(EIR); 1097 error->eir = I915_READ(EIR);
1017 error->pgtbl_er = I915_READ(PGTBL_ER); 1098 error->pgtbl_er = I915_READ(PGTBL_ER);
1099
1100 if (HAS_PCH_SPLIT(dev))
1101 error->ier = I915_READ(DEIER) | I915_READ(GTIER);
1102 else if (IS_VALLEYVIEW(dev))
1103 error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
1104 else if (IS_GEN2(dev))
1105 error->ier = I915_READ16(IER);
1106 else
1107 error->ier = I915_READ(IER);
1108
1018 for_each_pipe(pipe) 1109 for_each_pipe(pipe)
1019 error->pipestat[pipe] = I915_READ(PIPESTAT(pipe)); 1110 error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
1020 1111
@@ -1034,8 +1125,9 @@ static void i915_capture_error_state(struct drm_device *dev)
1034 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) 1125 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
1035 i++; 1126 i++;
1036 error->active_bo_count = i; 1127 error->active_bo_count = i;
1037 list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list) 1128 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list)
1038 i++; 1129 if (obj->pin_count)
1130 i++;
1039 error->pinned_bo_count = i - error->active_bo_count; 1131 error->pinned_bo_count = i - error->active_bo_count;
1040 1132
1041 error->active_bo = NULL; 1133 error->active_bo = NULL;
@@ -1050,15 +1142,15 @@ static void i915_capture_error_state(struct drm_device *dev)
1050 1142
1051 if (error->active_bo) 1143 if (error->active_bo)
1052 error->active_bo_count = 1144 error->active_bo_count =
1053 capture_bo_list(error->active_bo, 1145 capture_active_bo(error->active_bo,
1054 error->active_bo_count, 1146 error->active_bo_count,
1055 &dev_priv->mm.active_list); 1147 &dev_priv->mm.active_list);
1056 1148
1057 if (error->pinned_bo) 1149 if (error->pinned_bo)
1058 error->pinned_bo_count = 1150 error->pinned_bo_count =
1059 capture_bo_list(error->pinned_bo, 1151 capture_pinned_bo(error->pinned_bo,
1060 error->pinned_bo_count, 1152 error->pinned_bo_count,
1061 &dev_priv->mm.pinned_list); 1153 &dev_priv->mm.gtt_list);
1062 1154
1063 do_gettimeofday(&error->time); 1155 do_gettimeofday(&error->time);
1064 1156
@@ -1073,7 +1165,7 @@ static void i915_capture_error_state(struct drm_device *dev)
1073 spin_unlock_irqrestore(&dev_priv->error_lock, flags); 1165 spin_unlock_irqrestore(&dev_priv->error_lock, flags);
1074 1166
1075 if (error) 1167 if (error)
1076 i915_error_state_free(dev, error); 1168 i915_error_state_free(&error->ref);
1077} 1169}
1078 1170
1079void i915_destroy_error_state(struct drm_device *dev) 1171void i915_destroy_error_state(struct drm_device *dev)
@@ -1088,7 +1180,7 @@ void i915_destroy_error_state(struct drm_device *dev)
1088 spin_unlock_irqrestore(&dev_priv->error_lock, flags); 1180 spin_unlock_irqrestore(&dev_priv->error_lock, flags);
1089 1181
1090 if (error) 1182 if (error)
1091 i915_error_state_free(dev, error); 1183 kref_put(&error->ref, i915_error_state_free);
1092} 1184}
1093#else 1185#else
1094#define i915_capture_error_state(x) 1186#define i915_capture_error_state(x)
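
The kref conversion above splits ownership of the error state between producers and consumers: capture takes the initial reference with kref_init(), i915_destroy_error_state() drops it with kref_put(), and i915_error_state_free() runs only when the last holder lets go. A reader-side sketch (an assumption about how a debugfs consumer would pin the state; that code is not in this hunk):

        struct drm_i915_error_state *error;
        unsigned long flags;

        spin_lock_irqsave(&dev_priv->error_lock, flags);
        error = dev_priv->first_error;
        if (error)
                kref_get(&error->ref);          /* pin while we dump it */
        spin_unlock_irqrestore(&dev_priv->error_lock, flags);

        if (error)
                kref_put(&error->ref, i915_error_state_free);  /* done */
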
@@ -1103,33 +1195,26 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
1103 if (!eir) 1195 if (!eir)
1104 return; 1196 return;
1105 1197
1106 printk(KERN_ERR "render error detected, EIR: 0x%08x\n", 1198 pr_err("render error detected, EIR: 0x%08x\n", eir);
1107 eir);
1108 1199
1109 if (IS_G4X(dev)) { 1200 if (IS_G4X(dev)) {
1110 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { 1201 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
1111 u32 ipeir = I915_READ(IPEIR_I965); 1202 u32 ipeir = I915_READ(IPEIR_I965);
1112 1203
1113 printk(KERN_ERR " IPEIR: 0x%08x\n", 1204 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
1114 I915_READ(IPEIR_I965)); 1205 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
1115 printk(KERN_ERR " IPEHR: 0x%08x\n", 1206 pr_err(" INSTDONE: 0x%08x\n",
1116 I915_READ(IPEHR_I965));
1117 printk(KERN_ERR " INSTDONE: 0x%08x\n",
1118 I915_READ(INSTDONE_I965)); 1207 I915_READ(INSTDONE_I965));
1119 printk(KERN_ERR " INSTPS: 0x%08x\n", 1208 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
1120 I915_READ(INSTPS)); 1209 pr_err(" INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));
1121 printk(KERN_ERR " INSTDONE1: 0x%08x\n", 1210 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
1122 I915_READ(INSTDONE1));
1123 printk(KERN_ERR " ACTHD: 0x%08x\n",
1124 I915_READ(ACTHD_I965));
1125 I915_WRITE(IPEIR_I965, ipeir); 1211 I915_WRITE(IPEIR_I965, ipeir);
1126 POSTING_READ(IPEIR_I965); 1212 POSTING_READ(IPEIR_I965);
1127 } 1213 }
1128 if (eir & GM45_ERROR_PAGE_TABLE) { 1214 if (eir & GM45_ERROR_PAGE_TABLE) {
1129 u32 pgtbl_err = I915_READ(PGTBL_ER); 1215 u32 pgtbl_err = I915_READ(PGTBL_ER);
1130 printk(KERN_ERR "page table error\n"); 1216 pr_err("page table error\n");
1131 printk(KERN_ERR " PGTBL_ER: 0x%08x\n", 1217 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
1132 pgtbl_err);
1133 I915_WRITE(PGTBL_ER, pgtbl_err); 1218 I915_WRITE(PGTBL_ER, pgtbl_err);
1134 POSTING_READ(PGTBL_ER); 1219 POSTING_READ(PGTBL_ER);
1135 } 1220 }
@@ -1138,53 +1223,42 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
1138 if (!IS_GEN2(dev)) { 1223 if (!IS_GEN2(dev)) {
1139 if (eir & I915_ERROR_PAGE_TABLE) { 1224 if (eir & I915_ERROR_PAGE_TABLE) {
1140 u32 pgtbl_err = I915_READ(PGTBL_ER); 1225 u32 pgtbl_err = I915_READ(PGTBL_ER);
1141 printk(KERN_ERR "page table error\n"); 1226 pr_err("page table error\n");
1142 printk(KERN_ERR " PGTBL_ER: 0x%08x\n", 1227 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
1143 pgtbl_err);
1144 I915_WRITE(PGTBL_ER, pgtbl_err); 1228 I915_WRITE(PGTBL_ER, pgtbl_err);
1145 POSTING_READ(PGTBL_ER); 1229 POSTING_READ(PGTBL_ER);
1146 } 1230 }
1147 } 1231 }
1148 1232
1149 if (eir & I915_ERROR_MEMORY_REFRESH) { 1233 if (eir & I915_ERROR_MEMORY_REFRESH) {
1150 printk(KERN_ERR "memory refresh error:\n"); 1234 pr_err("memory refresh error:\n");
1151 for_each_pipe(pipe) 1235 for_each_pipe(pipe)
1152 printk(KERN_ERR "pipe %c stat: 0x%08x\n", 1236 pr_err("pipe %c stat: 0x%08x\n",
1153 pipe_name(pipe), I915_READ(PIPESTAT(pipe))); 1237 pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
1154 /* pipestat has already been acked */ 1238 /* pipestat has already been acked */
1155 } 1239 }
1156 if (eir & I915_ERROR_INSTRUCTION) { 1240 if (eir & I915_ERROR_INSTRUCTION) {
1157 printk(KERN_ERR "instruction error\n"); 1241 pr_err("instruction error\n");
1158 printk(KERN_ERR " INSTPM: 0x%08x\n", 1242 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
1159 I915_READ(INSTPM));
1160 if (INTEL_INFO(dev)->gen < 4) { 1243 if (INTEL_INFO(dev)->gen < 4) {
1161 u32 ipeir = I915_READ(IPEIR); 1244 u32 ipeir = I915_READ(IPEIR);
1162 1245
1163 printk(KERN_ERR " IPEIR: 0x%08x\n", 1246 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
1164 I915_READ(IPEIR)); 1247 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
1165 printk(KERN_ERR " IPEHR: 0x%08x\n", 1248 pr_err(" INSTDONE: 0x%08x\n", I915_READ(INSTDONE));
1166 I915_READ(IPEHR)); 1249 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
1167 printk(KERN_ERR " INSTDONE: 0x%08x\n",
1168 I915_READ(INSTDONE));
1169 printk(KERN_ERR " ACTHD: 0x%08x\n",
1170 I915_READ(ACTHD));
1171 I915_WRITE(IPEIR, ipeir); 1250 I915_WRITE(IPEIR, ipeir);
1172 POSTING_READ(IPEIR); 1251 POSTING_READ(IPEIR);
1173 } else { 1252 } else {
1174 u32 ipeir = I915_READ(IPEIR_I965); 1253 u32 ipeir = I915_READ(IPEIR_I965);
1175 1254
1176 printk(KERN_ERR " IPEIR: 0x%08x\n", 1255 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
1177 I915_READ(IPEIR_I965)); 1256 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
1178 printk(KERN_ERR " IPEHR: 0x%08x\n", 1257 pr_err(" INSTDONE: 0x%08x\n",
1179 I915_READ(IPEHR_I965));
1180 printk(KERN_ERR " INSTDONE: 0x%08x\n",
1181 I915_READ(INSTDONE_I965)); 1258 I915_READ(INSTDONE_I965));
1182 printk(KERN_ERR " INSTPS: 0x%08x\n", 1259 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
1183 I915_READ(INSTPS)); 1260 pr_err(" INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));
1184 printk(KERN_ERR " INSTDONE1: 0x%08x\n", 1261 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
1185 I915_READ(INSTDONE1));
1186 printk(KERN_ERR " ACTHD: 0x%08x\n",
1187 I915_READ(ACTHD_I965));
1188 I915_WRITE(IPEIR_I965, ipeir); 1262 I915_WRITE(IPEIR_I965, ipeir);
1189 POSTING_READ(IPEIR_I965); 1263 POSTING_READ(IPEIR_I965);
1190 } 1264 }
@@ -1217,6 +1291,8 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
1217void i915_handle_error(struct drm_device *dev, bool wedged) 1291void i915_handle_error(struct drm_device *dev, bool wedged)
1218{ 1292{
1219 struct drm_i915_private *dev_priv = dev->dev_private; 1293 struct drm_i915_private *dev_priv = dev->dev_private;
1294 struct intel_ring_buffer *ring;
1295 int i;
1220 1296
1221 i915_capture_error_state(dev); 1297 i915_capture_error_state(dev);
1222 i915_report_and_clear_eir(dev); 1298 i915_report_and_clear_eir(dev);
@@ -1228,11 +1304,8 @@ void i915_handle_error(struct drm_device *dev, bool wedged)
1228 /* 1304 /*
1229 * Wakeup waiting processes so they don't hang 1305 * Wakeup waiting processes so they don't hang
1230 */ 1306 */
1231 wake_up_all(&dev_priv->ring[RCS].irq_queue); 1307 for_each_ring(ring, dev_priv, i)
1232 if (HAS_BSD(dev)) 1308 wake_up_all(&ring->irq_queue);
1233 wake_up_all(&dev_priv->ring[VCS].irq_queue);
1234 if (HAS_BLT(dev))
1235 wake_up_all(&dev_priv->ring[BCS].irq_queue);
1236 } 1309 }
1237 1310
1238 queue_work(dev_priv->wq, &dev_priv->error_work); 1311 queue_work(dev_priv->wq, &dev_priv->error_work);
@@ -1265,7 +1338,8 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
1265 obj = work->pending_flip_obj; 1338 obj = work->pending_flip_obj;
1266 if (INTEL_INFO(dev)->gen >= 4) { 1339 if (INTEL_INFO(dev)->gen >= 4) {
1267 int dspsurf = DSPSURF(intel_crtc->plane); 1340 int dspsurf = DSPSURF(intel_crtc->plane);
1268 stall_detected = I915_READ(dspsurf) == obj->gtt_offset; 1341 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
1342 obj->gtt_offset;
1269 } else { 1343 } else {
1270 int dspaddr = DSPADDR(intel_crtc->plane); 1344 int dspaddr = DSPADDR(intel_crtc->plane);
1271 stall_detected = I915_READ(dspaddr) == (obj->gtt_offset + 1345 stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
@@ -1281,248 +1355,6 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
1281 } 1355 }
1282} 1356}
1283 1357
1284static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
1285{
1286 struct drm_device *dev = (struct drm_device *) arg;
1287 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1288 struct drm_i915_master_private *master_priv;
1289 u32 iir, new_iir;
1290 u32 pipe_stats[I915_MAX_PIPES];
1291 u32 vblank_status;
1292 int vblank = 0;
1293 unsigned long irqflags;
1294 int irq_received;
1295 int ret = IRQ_NONE, pipe;
1296 bool blc_event = false;
1297
1298 atomic_inc(&dev_priv->irq_received);
1299
1300 iir = I915_READ(IIR);
1301
1302 if (INTEL_INFO(dev)->gen >= 4)
1303 vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS;
1304 else
1305 vblank_status = PIPE_VBLANK_INTERRUPT_STATUS;
1306
1307 for (;;) {
1308 irq_received = iir != 0;
1309
1310 /* Can't rely on pipestat interrupt bit in iir as it might
1311 * have been cleared after the pipestat interrupt was received.
1312 * It doesn't set the bit in iir again, but it still produces
1313 * interrupts (for non-MSI).
1314 */
1315 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1316 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
1317 i915_handle_error(dev, false);
1318
1319 for_each_pipe(pipe) {
1320 int reg = PIPESTAT(pipe);
1321 pipe_stats[pipe] = I915_READ(reg);
1322
1323 /*
1324 * Clear the PIPE*STAT regs before the IIR
1325 */
1326 if (pipe_stats[pipe] & 0x8000ffff) {
1327 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1328 DRM_DEBUG_DRIVER("pipe %c underrun\n",
1329 pipe_name(pipe));
1330 I915_WRITE(reg, pipe_stats[pipe]);
1331 irq_received = 1;
1332 }
1333 }
1334 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1335
1336 if (!irq_received)
1337 break;
1338
1339 ret = IRQ_HANDLED;
1340
1341 /* Consume port. Then clear IIR or we'll miss events */
1342 if ((I915_HAS_HOTPLUG(dev)) &&
1343 (iir & I915_DISPLAY_PORT_INTERRUPT)) {
1344 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
1345
1346 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
1347 hotplug_status);
1348 if (hotplug_status & dev_priv->hotplug_supported_mask)
1349 queue_work(dev_priv->wq,
1350 &dev_priv->hotplug_work);
1351
1352 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
1353 I915_READ(PORT_HOTPLUG_STAT);
1354 }
1355
1356 I915_WRITE(IIR, iir);
1357 new_iir = I915_READ(IIR); /* Flush posted writes */
1358
1359 if (dev->primary->master) {
1360 master_priv = dev->primary->master->driver_priv;
1361 if (master_priv->sarea_priv)
1362 master_priv->sarea_priv->last_dispatch =
1363 READ_BREADCRUMB(dev_priv);
1364 }
1365
1366 if (iir & I915_USER_INTERRUPT)
1367 notify_ring(dev, &dev_priv->ring[RCS]);
1368 if (iir & I915_BSD_USER_INTERRUPT)
1369 notify_ring(dev, &dev_priv->ring[VCS]);
1370
1371 if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
1372 intel_prepare_page_flip(dev, 0);
1373 if (dev_priv->flip_pending_is_done)
1374 intel_finish_page_flip_plane(dev, 0);
1375 }
1376
1377 if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
1378 intel_prepare_page_flip(dev, 1);
1379 if (dev_priv->flip_pending_is_done)
1380 intel_finish_page_flip_plane(dev, 1);
1381 }
1382
1383 for_each_pipe(pipe) {
1384 if (pipe_stats[pipe] & vblank_status &&
1385 drm_handle_vblank(dev, pipe)) {
1386 vblank++;
1387 if (!dev_priv->flip_pending_is_done) {
1388 i915_pageflip_stall_check(dev, pipe);
1389 intel_finish_page_flip(dev, pipe);
1390 }
1391 }
1392
1393 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1394 blc_event = true;
1395 }
1396
1397
1398 if (blc_event || (iir & I915_ASLE_INTERRUPT))
1399 intel_opregion_asle_intr(dev);
1400
1401 /* With MSI, interrupts are only generated when iir
1402 * transitions from zero to nonzero. If another bit got
1403 * set while we were handling the existing iir bits, then
1404 * we would never get another interrupt.
1405 *
1406 * This is fine on non-MSI as well, as if we hit this path
1407 * we avoid exiting the interrupt handler only to generate
1408 * another one.
1409 *
1410 * Note that for MSI this could cause a stray interrupt report
1411 * if an interrupt landed in the time between writing IIR and
1412 * the posting read. This should be rare enough to never
1413 * trigger the 99% of 100,000 interrupts test for disabling
1414 * stray interrupts.
1415 */
1416 iir = new_iir;
1417 }
1418
1419 return ret;
1420}
1421
1422static int i915_emit_irq(struct drm_device * dev)
1423{
1424 drm_i915_private_t *dev_priv = dev->dev_private;
1425 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
1426
1427 i915_kernel_lost_context(dev);
1428
1429 DRM_DEBUG_DRIVER("\n");
1430
1431 dev_priv->counter++;
1432 if (dev_priv->counter > 0x7FFFFFFFUL)
1433 dev_priv->counter = 1;
1434 if (master_priv->sarea_priv)
1435 master_priv->sarea_priv->last_enqueue = dev_priv->counter;
1436
1437 if (BEGIN_LP_RING(4) == 0) {
1438 OUT_RING(MI_STORE_DWORD_INDEX);
1439 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
1440 OUT_RING(dev_priv->counter);
1441 OUT_RING(MI_USER_INTERRUPT);
1442 ADVANCE_LP_RING();
1443 }
1444
1445 return dev_priv->counter;
1446}
1447
1448static int i915_wait_irq(struct drm_device * dev, int irq_nr)
1449{
1450 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1451 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
1452 int ret = 0;
1453 struct intel_ring_buffer *ring = LP_RING(dev_priv);
1454
1455 DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
1456 READ_BREADCRUMB(dev_priv));
1457
1458 if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
1459 if (master_priv->sarea_priv)
1460 master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
1461 return 0;
1462 }
1463
1464 if (master_priv->sarea_priv)
1465 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
1466
1467 if (ring->irq_get(ring)) {
1468 DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
1469 READ_BREADCRUMB(dev_priv) >= irq_nr);
1470 ring->irq_put(ring);
1471 } else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
1472 ret = -EBUSY;
1473
1474 if (ret == -EBUSY) {
1475 DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
1476 READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
1477 }
1478
1479 return ret;
1480}
1481
1482/* Needs the lock as it touches the ring.
1483 */
1484int i915_irq_emit(struct drm_device *dev, void *data,
1485 struct drm_file *file_priv)
1486{
1487 drm_i915_private_t *dev_priv = dev->dev_private;
1488 drm_i915_irq_emit_t *emit = data;
1489 int result;
1490
1491 if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
1492 DRM_ERROR("called with no initialization\n");
1493 return -EINVAL;
1494 }
1495
1496 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
1497
1498 mutex_lock(&dev->struct_mutex);
1499 result = i915_emit_irq(dev);
1500 mutex_unlock(&dev->struct_mutex);
1501
1502 if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
1503 DRM_ERROR("copy_to_user\n");
1504 return -EFAULT;
1505 }
1506
1507 return 0;
1508}
1509
1510/* Doesn't need the hardware lock.
1511 */
1512int i915_irq_wait(struct drm_device *dev, void *data,
1513 struct drm_file *file_priv)
1514{
1515 drm_i915_private_t *dev_priv = dev->dev_private;
1516 drm_i915_irq_wait_t *irqwait = data;
1517
1518 if (!dev_priv) {
1519 DRM_ERROR("called with no initialization\n");
1520 return -EINVAL;
1521 }
1522
1523 return i915_wait_irq(dev, irqwait->irq_seq);
1524}
1525
1526/* Called from drm generic code, passed 'crtc' which 1358/* Called from drm generic code, passed 'crtc' which
1527 * we use as a pipe index 1359 * we use as a pipe index
1528 */ 1360 */
@@ -1544,7 +1376,7 @@ static int i915_enable_vblank(struct drm_device *dev, int pipe)
1544 1376
1545 /* maintain vblank delivery even in deep C-states */ 1377 /* maintain vblank delivery even in deep C-states */
1546 if (dev_priv->info->gen == 3) 1378 if (dev_priv->info->gen == 3)
1547		I915_WRITE(INSTPM, INSTPM_AGPBUSY_DIS << 16);
1379		I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
1548 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1380 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1549 1381
1550 return 0; 1382 return 0;
@@ -1575,8 +1407,34 @@ static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
1575 return -EINVAL; 1407 return -EINVAL;
1576 1408
1577 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1409 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1578	ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
1579				    DE_PIPEA_VBLANK_IVB : DE_PIPEB_VBLANK_IVB);
1410	ironlake_enable_display_irq(dev_priv,
1411				    DE_PIPEA_VBLANK_IVB << (5 * pipe));
1412 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1413
1414 return 0;
1415}
1416
1417static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
1418{
1419 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1420 unsigned long irqflags;
1421 u32 dpfl, imr;
1422
1423 if (!i915_pipe_enabled(dev, pipe))
1424 return -EINVAL;
1425
1426 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1427 dpfl = I915_READ(VLV_DPFLIPSTAT);
1428 imr = I915_READ(VLV_IMR);
1429 if (pipe == 0) {
1430 dpfl |= PIPEA_VBLANK_INT_EN;
1431 imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
1432 } else {
1433		dpfl |= PIPEB_VBLANK_INT_EN;
1434 imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1435 }
1436 I915_WRITE(VLV_DPFLIPSTAT, dpfl);
1437 I915_WRITE(VLV_IMR, imr);
1580 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1438 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1581 1439
1582 return 0; 1440 return 0;
@@ -1592,8 +1450,7 @@ static void i915_disable_vblank(struct drm_device *dev, int pipe)
1592 1450
1593 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1451 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1594 if (dev_priv->info->gen == 3) 1452 if (dev_priv->info->gen == 3)
1595		I915_WRITE(INSTPM,
1596			   INSTPM_AGPBUSY_DIS << 16 | INSTPM_AGPBUSY_DIS);
1453		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
1597 1454
1598 i915_disable_pipestat(dev_priv, pipe, 1455 i915_disable_pipestat(dev_priv, pipe,
1599 PIPE_VBLANK_INTERRUPT_ENABLE | 1456 PIPE_VBLANK_INTERRUPT_ENABLE |
@@ -1618,63 +1475,30 @@ static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
1618 unsigned long irqflags; 1475 unsigned long irqflags;
1619 1476
1620 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1477 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1621	ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
1622				     DE_PIPEA_VBLANK_IVB : DE_PIPEB_VBLANK_IVB);
1478	ironlake_disable_display_irq(dev_priv,
1479				     DE_PIPEA_VBLANK_IVB << (pipe * 5));
1623 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1480 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1624} 1481}
1625 1482
1626/* Set the vblank monitor pipe
1627 */
1628int i915_vblank_pipe_set(struct drm_device *dev, void *data,
1629			 struct drm_file *file_priv)
1630{
1631	drm_i915_private_t *dev_priv = dev->dev_private;
1632
1633	if (!dev_priv) {
1634		DRM_ERROR("called with no initialization\n");
1635		return -EINVAL;
1636	}
1637
1638	return 0;
1639}
1640
1641int i915_vblank_pipe_get(struct drm_device *dev, void *data,
1642			 struct drm_file *file_priv)
1643{
1644	drm_i915_private_t *dev_priv = dev->dev_private;
1645	drm_i915_vblank_pipe_t *pipe = data;
1646
1647	if (!dev_priv) {
1648		DRM_ERROR("called with no initialization\n");
1649		return -EINVAL;
1650	}
1651
1652	pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
1653
1654	return 0;
1655}
1656
1657/**
1658 * Schedule buffer swap at given vertical blank.
1659 */
1660int i915_vblank_swap(struct drm_device *dev, void *data,
1661		     struct drm_file *file_priv)
1662{
1663	/* The delayed swap mechanism was fundamentally racy, and has been
1664	 * removed. The model was that the client requested a delayed flip/swap
1665	 * from the kernel, then waited for vblank before continuing to perform
1666	 * rendering. The problem was that the kernel might wake the client
1667	 * up before it dispatched the vblank swap (since the lock has to be
1668	 * held while touching the ringbuffer), in which case the client would
1669	 * clear and start the next frame before the swap occurred, and
1670	 * flicker would occur in addition to likely missing the vblank.
1671	 *
1672	 * In the absence of this ioctl, userland falls back to a correct path
1673	 * of waiting for a vblank, then dispatching the swap on its own.
1674	 * Context switching to userland and back is plenty fast enough for
1675	 * meeting the requirements of vblank swapping.
1676	 */
1677	return -EINVAL;
1678}
1483static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
1484{
1485	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1486	unsigned long irqflags;
1487	u32 dpfl, imr;
1488
1489	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1490	dpfl = I915_READ(VLV_DPFLIPSTAT);
1491	imr = I915_READ(VLV_IMR);
1492	if (pipe == 0) {
1493		dpfl &= ~PIPEA_VBLANK_INT_EN;
1494		imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
1495	} else {
1496		dpfl &= ~PIPEB_VBLANK_INT_EN;
1497		imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1498	}
1499	I915_WRITE(VLV_IMR, imr);
1500	I915_WRITE(VLV_DPFLIPSTAT, dpfl);
1501	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1502}
1679 1503
1680static u32 1504static u32
@@ -1689,11 +1513,9 @@ static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
1689 if (list_empty(&ring->request_list) || 1513 if (list_empty(&ring->request_list) ||
1690 i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) { 1514 i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) {
1691 /* Issue a wake-up to catch stuck h/w. */ 1515 /* Issue a wake-up to catch stuck h/w. */
1692		if (ring->waiting_seqno && waitqueue_active(&ring->irq_queue)) {
1693			DRM_ERROR("Hangcheck timer elapsed... %s idle [waiting on %d, at %d], missed IRQ?\n",
1694				  ring->name,
1695				  ring->waiting_seqno,
1696				  ring->get_seqno(ring));
1516		if (waitqueue_active(&ring->irq_queue)) {
1517			DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
1518				  ring->name);
1697 wake_up_all(&ring->irq_queue); 1519 wake_up_all(&ring->irq_queue);
1698 *err = true; 1520 *err = true;
1699 } 1521 }
@@ -1716,6 +1538,35 @@ static bool kick_ring(struct intel_ring_buffer *ring)
1716 return false; 1538 return false;
1717} 1539}
1718 1540
1541static bool i915_hangcheck_hung(struct drm_device *dev)
1542{
1543 drm_i915_private_t *dev_priv = dev->dev_private;
1544
1545 if (dev_priv->hangcheck_count++ > 1) {
1546 bool hung = true;
1547
1548 DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
1549 i915_handle_error(dev, true);
1550
1551 if (!IS_GEN2(dev)) {
1552 struct intel_ring_buffer *ring;
1553 int i;
1554
1555 /* Is the chip hanging on a WAIT_FOR_EVENT?
1556 * If so we can simply poke the RB_WAIT bit
1557 * and break the hang. This should work on
1558 * all but the second generation chipsets.
1559 */
1560 for_each_ring(ring, dev_priv, i)
1561 hung &= !kick_ring(ring);
1562 }
1563
1564 return hung;
1565 }
1566
1567 return false;
1568}
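
For reference, the kick_ring() helper used above (named in the hunk header
earlier) breaks a stuck MI_WAIT_FOR_EVENT by rewriting the ring's control
register; a condensed paraphrase of the driver's version:

	u32 tmp = I915_READ_CTL(ring);
	if (tmp & RB_WAIT) {
		DRM_ERROR("Kicking stuck wait on %s\n", ring->name);
		I915_WRITE_CTL(ring, tmp);	/* write-back pokes RB_WAIT */
		return true;			/* ring was kicked */
	}
	return false;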
1569
1719/** 1570/**
1720 * This is called when the chip hasn't reported back with completed 1571 * This is called when the chip hasn't reported back with completed
1721 * batchbuffers in a long time. The first time this is called we simply record 1572 * batchbuffers in a long time. The first time this is called we simply record
@@ -1726,19 +1577,31 @@ void i915_hangcheck_elapsed(unsigned long data)
1726{ 1577{
1727 struct drm_device *dev = (struct drm_device *)data; 1578 struct drm_device *dev = (struct drm_device *)data;
1728 drm_i915_private_t *dev_priv = dev->dev_private; 1579 drm_i915_private_t *dev_priv = dev->dev_private;
1729	uint32_t acthd, instdone, instdone1, acthd_bsd, acthd_blt;
1730	bool err = false;
1580	uint32_t acthd[I915_NUM_RINGS], instdone, instdone1;
1581	struct intel_ring_buffer *ring;
1582	bool err = false, idle;
1583	int i;
1731 1584
1732 if (!i915_enable_hangcheck) 1585 if (!i915_enable_hangcheck)
1733 return; 1586 return;
1734 1587
1588 memset(acthd, 0, sizeof(acthd));
1589 idle = true;
1590 for_each_ring(ring, dev_priv, i) {
1591 idle &= i915_hangcheck_ring_idle(ring, &err);
1592 acthd[i] = intel_ring_get_active_head(ring);
1593 }
1594
1735 /* If all work is done then ACTHD clearly hasn't advanced. */ 1595 /* If all work is done then ACTHD clearly hasn't advanced. */
1736	if (i915_hangcheck_ring_idle(&dev_priv->ring[RCS], &err) &&
1737	    i915_hangcheck_ring_idle(&dev_priv->ring[VCS], &err) &&
1738	    i915_hangcheck_ring_idle(&dev_priv->ring[BCS], &err)) {
1739		dev_priv->hangcheck_count = 0;
1740		if (err)
1741			goto repeat;
1596	if (idle) {
1597		if (err) {
1598			if (i915_hangcheck_hung(dev))
1599				return;
1600
1601			goto repeat;
1602		}
1603
1604		dev_priv->hangcheck_count = 0;
1742 return; 1605 return;
1743 } 1606 }
1744 1607
@@ -1749,47 +1612,16 @@ void i915_hangcheck_elapsed(unsigned long data)
1749 instdone = I915_READ(INSTDONE_I965); 1612 instdone = I915_READ(INSTDONE_I965);
1750 instdone1 = I915_READ(INSTDONE1); 1613 instdone1 = I915_READ(INSTDONE1);
1751 } 1614 }
1752 acthd = intel_ring_get_active_head(&dev_priv->ring[RCS]);
1753 acthd_bsd = HAS_BSD(dev) ?
1754 intel_ring_get_active_head(&dev_priv->ring[VCS]) : 0;
1755 acthd_blt = HAS_BLT(dev) ?
1756 intel_ring_get_active_head(&dev_priv->ring[BCS]) : 0;
1757 1615
1758	if (dev_priv->last_acthd == acthd &&
1759	    dev_priv->last_acthd_bsd == acthd_bsd &&
1760	    dev_priv->last_acthd_blt == acthd_blt &&
1616	if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 &&
1761 dev_priv->last_instdone == instdone && 1617 dev_priv->last_instdone == instdone &&
1762 dev_priv->last_instdone1 == instdone1) { 1618 dev_priv->last_instdone1 == instdone1) {
1763		if (dev_priv->hangcheck_count++ > 1) {
1619		if (i915_hangcheck_hung(dev))
1764 DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
1765 i915_handle_error(dev, true);
1766
1767 if (!IS_GEN2(dev)) {
1768 /* Is the chip hanging on a WAIT_FOR_EVENT?
1769 * If so we can simply poke the RB_WAIT bit
1770 * and break the hang. This should work on
1771 * all but the second generation chipsets.
1772 */
1773 if (kick_ring(&dev_priv->ring[RCS]))
1774 goto repeat;
1775
1776 if (HAS_BSD(dev) &&
1777 kick_ring(&dev_priv->ring[VCS]))
1778 goto repeat;
1779
1780 if (HAS_BLT(dev) &&
1781 kick_ring(&dev_priv->ring[BCS]))
1782 goto repeat;
1783 }
1784
1785 return; 1620 return;
1786 }
1787 } else { 1621 } else {
1788 dev_priv->hangcheck_count = 0; 1622 dev_priv->hangcheck_count = 0;
1789 1623
1790		dev_priv->last_acthd = acthd;
1624		memcpy(dev_priv->last_acthd, acthd, sizeof(acthd));
1791 dev_priv->last_acthd_bsd = acthd_bsd;
1792 dev_priv->last_acthd_blt = acthd_blt;
1793 dev_priv->last_instdone = instdone; 1625 dev_priv->last_instdone = instdone;
1794 dev_priv->last_instdone1 = instdone1; 1626 dev_priv->last_instdone1 = instdone1;
1795 } 1627 }
@@ -1808,10 +1640,6 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
1808 1640
1809 atomic_set(&dev_priv->irq_received, 0); 1641 atomic_set(&dev_priv->irq_received, 0);
1810 1642
1811 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
1812 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
1813 if (IS_GEN6(dev) || IS_IVYBRIDGE(dev))
1814 INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
1815 1643
1816 I915_WRITE(HWSTAM, 0xeffe); 1644 I915_WRITE(HWSTAM, 0xeffe);
1817 1645
@@ -1832,6 +1660,38 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
1832 POSTING_READ(SDEIER); 1660 POSTING_READ(SDEIER);
1833} 1661}
1834 1662
1663static void valleyview_irq_preinstall(struct drm_device *dev)
1664{
1665 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1666 int pipe;
1667
1668 atomic_set(&dev_priv->irq_received, 0);
1669
1670 /* VLV magic */
1671 I915_WRITE(VLV_IMR, 0);
1672 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
1673 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
1674 I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
1675
1676 /* and GT */
1677 I915_WRITE(GTIIR, I915_READ(GTIIR));
1678 I915_WRITE(GTIIR, I915_READ(GTIIR));
1679 I915_WRITE(GTIMR, 0xffffffff);
1680 I915_WRITE(GTIER, 0x0);
1681 POSTING_READ(GTIER);
1682
1683 I915_WRITE(DPINVGTT, 0xff);
1684
1685 I915_WRITE(PORT_HOTPLUG_EN, 0);
1686 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
1687 for_each_pipe(pipe)
1688 I915_WRITE(PIPESTAT(pipe), 0xffff);
1689 I915_WRITE(VLV_IIR, 0xffffffff);
1690 I915_WRITE(VLV_IMR, 0xffffffff);
1691 I915_WRITE(VLV_IER, 0x0);
1692 POSTING_READ(VLV_IER);
1693}
1694
1835/* 1695/*
1836 * Enable digital hotplug on the PCH, and configure the DP short pulse 1696 * Enable digital hotplug on the PCH, and configure the DP short pulse
1837 * duration to 2ms (which is the minimum in the Display Port spec) 1697 * duration to 2ms (which is the minimum in the Display Port spec)
@@ -1861,13 +1721,6 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
1861 u32 render_irqs; 1721 u32 render_irqs;
1862 u32 hotplug_mask; 1722 u32 hotplug_mask;
1863 1723
1864 DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue);
1865 if (HAS_BSD(dev))
1866 DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue);
1867 if (HAS_BLT(dev))
1868 DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue);
1869
1870 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
1871 dev_priv->irq_mask = ~display_mask; 1724 dev_priv->irq_mask = ~display_mask;
1872 1725
1873	/* should always be able to generate an irq */	1726	/* should always be able to generate an irq */
@@ -1884,8 +1737,8 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
1884 if (IS_GEN6(dev)) 1737 if (IS_GEN6(dev))
1885 render_irqs = 1738 render_irqs =
1886 GT_USER_INTERRUPT | 1739 GT_USER_INTERRUPT |
1887			GT_GEN6_BSD_USER_INTERRUPT |
1888			GT_BLT_USER_INTERRUPT;
1740			GEN6_BSD_USER_INTERRUPT |
1741			GEN6_BLITTER_USER_INTERRUPT;
1889 else 1742 else
1890 render_irqs = 1743 render_irqs =
1891 GT_USER_INTERRUPT | 1744 GT_USER_INTERRUPT |
@@ -1930,26 +1783,24 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
1930{ 1783{
1931 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1784 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1932 /* enable kind of interrupts always enabled */ 1785 /* enable kind of interrupts always enabled */
1933 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 1786 u32 display_mask =
1934 DE_PCH_EVENT_IVB | DE_PLANEA_FLIP_DONE_IVB | 1787 DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
1935 DE_PLANEB_FLIP_DONE_IVB; 1788 DE_PLANEC_FLIP_DONE_IVB |
1789 DE_PLANEB_FLIP_DONE_IVB |
1790 DE_PLANEA_FLIP_DONE_IVB;
1936 u32 render_irqs; 1791 u32 render_irqs;
1937 u32 hotplug_mask; 1792 u32 hotplug_mask;
1938 1793
1939 DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue);
1940 if (HAS_BSD(dev))
1941 DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue);
1942 if (HAS_BLT(dev))
1943 DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue);
1944
1945 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
1946 dev_priv->irq_mask = ~display_mask; 1794 dev_priv->irq_mask = ~display_mask;
1947 1795
1948 /* should always can generate irq */ 1796 /* should always can generate irq */
1949	/* should always be able to generate an irq */	1796	/* should always be able to generate an irq */
1950 I915_WRITE(DEIMR, dev_priv->irq_mask); 1798 I915_WRITE(DEIMR, dev_priv->irq_mask);
1951	I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK_IVB |
1952		   DE_PIPEB_VBLANK_IVB);
1799	I915_WRITE(DEIER,
1800		   display_mask |
1801 DE_PIPEC_VBLANK_IVB |
1802 DE_PIPEB_VBLANK_IVB |
1803 DE_PIPEA_VBLANK_IVB);
1953 POSTING_READ(DEIER); 1804 POSTING_READ(DEIER);
1954 1805
1955 dev_priv->gt_irq_mask = ~0; 1806 dev_priv->gt_irq_mask = ~0;
@@ -1957,8 +1808,8 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
1957 I915_WRITE(GTIIR, I915_READ(GTIIR)); 1808 I915_WRITE(GTIIR, I915_READ(GTIIR));
1958 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 1809 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
1959 1810
1960	render_irqs = GT_USER_INTERRUPT | GT_GEN6_BSD_USER_INTERRUPT |
1961		      GT_BLT_USER_INTERRUPT;
1811	render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
1812		      GEN6_BLITTER_USER_INTERRUPT;
1962 I915_WRITE(GTIER, render_irqs); 1813 I915_WRITE(GTIER, render_irqs);
1963 POSTING_READ(GTIER); 1814 POSTING_READ(GTIER);
1964 1815
@@ -1978,15 +1829,496 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
1978 return 0; 1829 return 0;
1979} 1830}
1980 1831
1981static void i915_driver_irq_preinstall(struct drm_device * dev)
1832static int valleyview_irq_postinstall(struct drm_device *dev)
1833{
1834 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1835 u32 render_irqs;
1836 u32 enable_mask;
1837 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
1838 u16 msid;
1839
1840 enable_mask = I915_DISPLAY_PORT_INTERRUPT;
1841 enable_mask |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
1842 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1843
1844 dev_priv->irq_mask = ~enable_mask;
1845
1846 dev_priv->pipestat[0] = 0;
1847 dev_priv->pipestat[1] = 0;
1848
1849 /* Hack for broken MSIs on VLV */
1850 pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
1851 pci_read_config_word(dev->pdev, 0x98, &msid);
1852 msid &= 0xff; /* mask out delivery bits */
1853 msid |= (1<<14);
1854 pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);
1855
1856 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
1857 I915_WRITE(VLV_IER, enable_mask);
1858 I915_WRITE(VLV_IIR, 0xffffffff);
1859 I915_WRITE(PIPESTAT(0), 0xffff);
1860 I915_WRITE(PIPESTAT(1), 0xffff);
1861 POSTING_READ(VLV_IER);
1862
1863 I915_WRITE(VLV_IIR, 0xffffffff);
1864 I915_WRITE(VLV_IIR, 0xffffffff);
1865
1866 render_irqs = GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT |
1867 GT_GEN6_BLT_CS_ERROR_INTERRUPT |
1868 GT_GEN6_BLT_USER_INTERRUPT |
1869 GT_GEN6_BSD_USER_INTERRUPT |
1870 GT_GEN6_BSD_CS_ERROR_INTERRUPT |
1871 GT_GEN7_L3_PARITY_ERROR_INTERRUPT |
1872 GT_PIPE_NOTIFY |
1873 GT_RENDER_CS_ERROR_INTERRUPT |
1874 GT_SYNC_STATUS |
1875 GT_USER_INTERRUPT;
1876
1877 dev_priv->gt_irq_mask = ~render_irqs;
1878
1879 I915_WRITE(GTIIR, I915_READ(GTIIR));
1880 I915_WRITE(GTIIR, I915_READ(GTIIR));
1881 I915_WRITE(GTIMR, 0);
1882 I915_WRITE(GTIER, render_irqs);
1883 POSTING_READ(GTIER);
1884
1885 /* ack & enable invalid PTE error interrupts */
1886#if 0 /* FIXME: add support to irq handler for checking these bits */
1887 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
1888 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
1889#endif
1890
1891 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
1892#if 0 /* FIXME: check register definitions; some have moved */
1893 /* Note HDMI and DP share bits */
1894 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
1895 hotplug_en |= HDMIB_HOTPLUG_INT_EN;
1896 if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
1897 hotplug_en |= HDMIC_HOTPLUG_INT_EN;
1898 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
1899 hotplug_en |= HDMID_HOTPLUG_INT_EN;
1900 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
1901 hotplug_en |= SDVOC_HOTPLUG_INT_EN;
1902 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
1903 hotplug_en |= SDVOB_HOTPLUG_INT_EN;
1904 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
1905 hotplug_en |= CRT_HOTPLUG_INT_EN;
1906 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
1907 }
1908#endif
1909
1910 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
1911
1912 return 0;
1913}
1914
1915static void valleyview_irq_uninstall(struct drm_device *dev)
1916{
1917 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1918 int pipe;
1919
1920 if (!dev_priv)
1921 return;
1922
1923 for_each_pipe(pipe)
1924 I915_WRITE(PIPESTAT(pipe), 0xffff);
1925
1926 I915_WRITE(HWSTAM, 0xffffffff);
1927 I915_WRITE(PORT_HOTPLUG_EN, 0);
1928 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
1929 for_each_pipe(pipe)
1930 I915_WRITE(PIPESTAT(pipe), 0xffff);
1931 I915_WRITE(VLV_IIR, 0xffffffff);
1932 I915_WRITE(VLV_IMR, 0xffffffff);
1933 I915_WRITE(VLV_IER, 0x0);
1934 POSTING_READ(VLV_IER);
1935}
1936
1937static void ironlake_irq_uninstall(struct drm_device *dev)
1938{
1939 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1940
1941 if (!dev_priv)
1942 return;
1943
1944 I915_WRITE(HWSTAM, 0xffffffff);
1945
1946 I915_WRITE(DEIMR, 0xffffffff);
1947 I915_WRITE(DEIER, 0x0);
1948 I915_WRITE(DEIIR, I915_READ(DEIIR));
1949
1950 I915_WRITE(GTIMR, 0xffffffff);
1951 I915_WRITE(GTIER, 0x0);
1952 I915_WRITE(GTIIR, I915_READ(GTIIR));
1953
1954 I915_WRITE(SDEIMR, 0xffffffff);
1955 I915_WRITE(SDEIER, 0x0);
1956 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
1957}
1958
1959static void i8xx_irq_preinstall(struct drm_device * dev)
1982{ 1960{
1983 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1961 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1984 int pipe; 1962 int pipe;
1985 1963
1986 atomic_set(&dev_priv->irq_received, 0); 1964 atomic_set(&dev_priv->irq_received, 0);
1987 1965
1988	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
1989	INIT_WORK(&dev_priv->error_work, i915_error_work_func);
1966	for_each_pipe(pipe)
1967		I915_WRITE(PIPESTAT(pipe), 0);
1968 I915_WRITE16(IMR, 0xffff);
1969 I915_WRITE16(IER, 0x0);
1970 POSTING_READ16(IER);
1971}
1972
1973static int i8xx_irq_postinstall(struct drm_device *dev)
1974{
1975 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1976
1977 dev_priv->pipestat[0] = 0;
1978 dev_priv->pipestat[1] = 0;
1979
1980 I915_WRITE16(EMR,
1981 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
1982
1983 /* Unmask the interrupts that we always want on. */
1984 dev_priv->irq_mask =
1985 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
1986 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
1987 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
1988 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
1989 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
1990 I915_WRITE16(IMR, dev_priv->irq_mask);
1991
1992 I915_WRITE16(IER,
1993 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
1994 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
1995 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
1996 I915_USER_INTERRUPT);
1997 POSTING_READ16(IER);
1998
1999 return 0;
2000}
2001
2002static irqreturn_t i8xx_irq_handler(DRM_IRQ_ARGS)
2003{
2004 struct drm_device *dev = (struct drm_device *) arg;
2005 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2006 u16 iir, new_iir;
2007 u32 pipe_stats[2];
2008 unsigned long irqflags;
2009 int irq_received;
2010 int pipe;
2011 u16 flip_mask =
2012 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2013 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2014
2015 atomic_inc(&dev_priv->irq_received);
2016
2017 iir = I915_READ16(IIR);
2018 if (iir == 0)
2019 return IRQ_NONE;
2020
2021 while (iir & ~flip_mask) {
2022 /* Can't rely on pipestat interrupt bit in iir as it might
2023 * have been cleared after the pipestat interrupt was received.
2024 * It doesn't set the bit in iir again, but it still produces
2025 * interrupts (for non-MSI).
2026 */
2027 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2028 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2029 i915_handle_error(dev, false);
2030
2031 for_each_pipe(pipe) {
2032 int reg = PIPESTAT(pipe);
2033 pipe_stats[pipe] = I915_READ(reg);
2034
2035 /*
2036 * Clear the PIPE*STAT regs before the IIR
2037 */
2038 if (pipe_stats[pipe] & 0x8000ffff) {
2039 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2040 DRM_DEBUG_DRIVER("pipe %c underrun\n",
2041 pipe_name(pipe));
2042 I915_WRITE(reg, pipe_stats[pipe]);
2043 irq_received = 1;
2044 }
2045 }
2046 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2047
2048 I915_WRITE16(IIR, iir & ~flip_mask);
2049 new_iir = I915_READ16(IIR); /* Flush posted writes */
2050
2051 i915_update_dri1_breadcrumb(dev);
2052
2053 if (iir & I915_USER_INTERRUPT)
2054 notify_ring(dev, &dev_priv->ring[RCS]);
2055
2056 if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
2057 drm_handle_vblank(dev, 0)) {
2058 if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
2059 intel_prepare_page_flip(dev, 0);
2060 intel_finish_page_flip(dev, 0);
2061 flip_mask &= ~I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
2062 }
2063 }
2064
2065 if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
2066 drm_handle_vblank(dev, 1)) {
2067 if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
2068 intel_prepare_page_flip(dev, 1);
2069 intel_finish_page_flip(dev, 1);
2070 flip_mask &= ~I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2071 }
2072 }
2073
2074 iir = new_iir;
2075 }
2076
2077 return IRQ_HANDLED;
2078}
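
A note on the gen2 handler above: IIR, IER, IMR and EMR are 16-bit registers
on these chipsets, hence the I915_READ16()/I915_WRITE16() accessors, while
PIPESTAT is still a full 32-bit register and keeps the 32-bit accessors:

	I915_WRITE16(IMR, 0xffff);		/* 16-bit interrupt mask */
	I915_WRITE(PIPESTAT(pipe), 0xffff);	/* 32-bit pipe status */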
2079
2080static void i8xx_irq_uninstall(struct drm_device * dev)
2081{
2082 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2083 int pipe;
2084
2085 for_each_pipe(pipe) {
2086 /* Clear enable bits; then clear status bits */
2087 I915_WRITE(PIPESTAT(pipe), 0);
2088 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
2089 }
2090 I915_WRITE16(IMR, 0xffff);
2091 I915_WRITE16(IER, 0x0);
2092 I915_WRITE16(IIR, I915_READ16(IIR));
2093}
2094
2095static void i915_irq_preinstall(struct drm_device * dev)
2096{
2097 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2098 int pipe;
2099
2100 atomic_set(&dev_priv->irq_received, 0);
2101
2102 if (I915_HAS_HOTPLUG(dev)) {
2103 I915_WRITE(PORT_HOTPLUG_EN, 0);
2104 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2105 }
2106
2107 I915_WRITE16(HWSTAM, 0xeffe);
2108 for_each_pipe(pipe)
2109 I915_WRITE(PIPESTAT(pipe), 0);
2110 I915_WRITE(IMR, 0xffffffff);
2111 I915_WRITE(IER, 0x0);
2112 POSTING_READ(IER);
2113}
2114
2115static int i915_irq_postinstall(struct drm_device *dev)
2116{
2117 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2118 u32 enable_mask;
2119
2120 dev_priv->pipestat[0] = 0;
2121 dev_priv->pipestat[1] = 0;
2122
2123 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2124
2125 /* Unmask the interrupts that we always want on. */
2126 dev_priv->irq_mask =
2127 ~(I915_ASLE_INTERRUPT |
2128 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2129 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2130 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2131 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2132 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2133
2134 enable_mask =
2135 I915_ASLE_INTERRUPT |
2136 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2137 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2138 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
2139 I915_USER_INTERRUPT;
2140
2141 if (I915_HAS_HOTPLUG(dev)) {
2142 /* Enable in IER... */
2143 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
2144 /* and unmask in IMR */
2145 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
2146 }
2147
2148 I915_WRITE(IMR, dev_priv->irq_mask);
2149 I915_WRITE(IER, enable_mask);
2150 POSTING_READ(IER);
2151
2152 if (I915_HAS_HOTPLUG(dev)) {
2153 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
2154
2155 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
2156 hotplug_en |= HDMIB_HOTPLUG_INT_EN;
2157 if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
2158 hotplug_en |= HDMIC_HOTPLUG_INT_EN;
2159 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
2160 hotplug_en |= HDMID_HOTPLUG_INT_EN;
2161 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
2162 hotplug_en |= SDVOC_HOTPLUG_INT_EN;
2163 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
2164 hotplug_en |= SDVOB_HOTPLUG_INT_EN;
2165 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
2166 hotplug_en |= CRT_HOTPLUG_INT_EN;
2167 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
2168 }
2169
2170 /* Ignore TV since it's buggy */
2171
2172 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
2173 }
2174
2175 intel_opregion_enable_asle(dev);
2176
2177 return 0;
2178}
2179
2180static irqreturn_t i915_irq_handler(DRM_IRQ_ARGS)
2181{
2182 struct drm_device *dev = (struct drm_device *) arg;
2183 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2184 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
2185 unsigned long irqflags;
2186 u32 flip_mask =
2187 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2188 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2189 u32 flip[2] = {
2190 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT,
2191 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT
2192 };
2193 int pipe, ret = IRQ_NONE;
2194
2195 atomic_inc(&dev_priv->irq_received);
2196
2197 iir = I915_READ(IIR);
2198 do {
2199 bool irq_received = (iir & ~flip_mask) != 0;
2200 bool blc_event = false;
2201
2202 /* Can't rely on pipestat interrupt bit in iir as it might
2203 * have been cleared after the pipestat interrupt was received.
2204 * It doesn't set the bit in iir again, but it still produces
2205 * interrupts (for non-MSI).
2206 */
2207 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2208 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2209 i915_handle_error(dev, false);
2210
2211 for_each_pipe(pipe) {
2212 int reg = PIPESTAT(pipe);
2213 pipe_stats[pipe] = I915_READ(reg);
2214
2215 /* Clear the PIPE*STAT regs before the IIR */
2216 if (pipe_stats[pipe] & 0x8000ffff) {
2217 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2218 DRM_DEBUG_DRIVER("pipe %c underrun\n",
2219 pipe_name(pipe));
2220 I915_WRITE(reg, pipe_stats[pipe]);
2221 irq_received = true;
2222 }
2223 }
2224 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2225
2226 if (!irq_received)
2227 break;
2228
2229 /* Consume port. Then clear IIR or we'll miss events */
2230 if ((I915_HAS_HOTPLUG(dev)) &&
2231 (iir & I915_DISPLAY_PORT_INTERRUPT)) {
2232 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
2233
2234 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
2235 hotplug_status);
2236 if (hotplug_status & dev_priv->hotplug_supported_mask)
2237 queue_work(dev_priv->wq,
2238 &dev_priv->hotplug_work);
2239
2240 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2241 POSTING_READ(PORT_HOTPLUG_STAT);
2242 }
2243
2244 I915_WRITE(IIR, iir & ~flip_mask);
2245 new_iir = I915_READ(IIR); /* Flush posted writes */
2246
2247 if (iir & I915_USER_INTERRUPT)
2248 notify_ring(dev, &dev_priv->ring[RCS]);
2249
2250 for_each_pipe(pipe) {
2251 int plane = pipe;
2252 if (IS_MOBILE(dev))
2253 plane = !plane;
2254 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
2255 drm_handle_vblank(dev, pipe)) {
2256 if (iir & flip[plane]) {
2257 intel_prepare_page_flip(dev, plane);
2258 intel_finish_page_flip(dev, pipe);
2259 flip_mask &= ~flip[plane];
2260 }
2261 }
2262
2263 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
2264 blc_event = true;
2265 }
2266
2267 if (blc_event || (iir & I915_ASLE_INTERRUPT))
2268 intel_opregion_asle_intr(dev);
2269
2270 /* With MSI, interrupts are only generated when iir
2271 * transitions from zero to nonzero. If another bit got
2272 * set while we were handling the existing iir bits, then
2273 * we would never get another interrupt.
2274 *
2275 * This is fine on non-MSI as well, as if we hit this path
2276 * we avoid exiting the interrupt handler only to generate
2277 * another one.
2278 *
2279 * Note that for MSI this could cause a stray interrupt report
2280 * if an interrupt landed in the time between writing IIR and
2281 * the posting read. This should be rare enough to never
2282 * trigger the 99% of 100,000 interrupts test for disabling
2283 * stray interrupts.
2284 */
2285 ret = IRQ_HANDLED;
2286 iir = new_iir;
2287 } while (iir & ~flip_mask);
2288
2289 i915_update_dri1_breadcrumb(dev);
2290
2291 return ret;
2292}
2293
2294static void i915_irq_uninstall(struct drm_device * dev)
2295{
2296 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2297 int pipe;
2298
2299 if (I915_HAS_HOTPLUG(dev)) {
2300 I915_WRITE(PORT_HOTPLUG_EN, 0);
2301 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2302 }
2303
2304 I915_WRITE16(HWSTAM, 0xffff);
2305 for_each_pipe(pipe) {
2306 /* Clear enable bits; then clear status bits */
2307 I915_WRITE(PIPESTAT(pipe), 0);
2308 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
2309 }
2310 I915_WRITE(IMR, 0xffffffff);
2311 I915_WRITE(IER, 0x0);
2312
2313 I915_WRITE(IIR, I915_READ(IIR));
2314}
2315
2316static void i965_irq_preinstall(struct drm_device * dev)
2317{
2318 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2319 int pipe;
2320
2321 atomic_set(&dev_priv->irq_received, 0);
1990 2322
1991 if (I915_HAS_HOTPLUG(dev)) { 2323 if (I915_HAS_HOTPLUG(dev)) {
1992 I915_WRITE(PORT_HOTPLUG_EN, 0); 2324 I915_WRITE(PORT_HOTPLUG_EN, 0);
@@ -2001,20 +2333,25 @@ static void i915_driver_irq_preinstall(struct drm_device * dev)
2001 POSTING_READ(IER); 2333 POSTING_READ(IER);
2002} 2334}
2003 2335
2004/*
2005 * Must be called after intel_modeset_init or hotplug interrupts won't be
2006 * enabled correctly.
2007 */
2008static int i915_driver_irq_postinstall(struct drm_device *dev)
2336static int i965_irq_postinstall(struct drm_device *dev)
2009{ 2337{
2010 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2338 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2011	u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
2339	u32 enable_mask;
2012 u32 error_mask; 2340 u32 error_mask;
2013 2341
2014 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
2015
2016 /* Unmask the interrupts that we always want on. */ 2342 /* Unmask the interrupts that we always want on. */
2017	dev_priv->irq_mask = ~I915_INTERRUPT_ENABLE_FIX;
2343	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
2344 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2345 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2346 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2347 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2348 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2349
2350 enable_mask = ~dev_priv->irq_mask;
2351 enable_mask |= I915_USER_INTERRUPT;
2352
2353 if (IS_G4X(dev))
2354 enable_mask |= I915_BSD_USER_INTERRUPT;
2018 2355
2019 dev_priv->pipestat[0] = 0; 2356 dev_priv->pipestat[0] = 0;
2020 dev_priv->pipestat[1] = 0; 2357 dev_priv->pipestat[1] = 0;
@@ -2081,31 +2418,124 @@ static int i915_driver_irq_postinstall(struct drm_device *dev)
2081 return 0; 2418 return 0;
2082} 2419}
2083 2420
2084static void ironlake_irq_uninstall(struct drm_device *dev)
2085{
2086	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2087
2088	if (!dev_priv)
2089		return;
2090
2091	dev_priv->vblank_pipe = 0;
2092
2093	I915_WRITE(HWSTAM, 0xffffffff);
2094
2095	I915_WRITE(DEIMR, 0xffffffff);
2096	I915_WRITE(DEIER, 0x0);
2097	I915_WRITE(DEIIR, I915_READ(DEIIR));
2098
2099	I915_WRITE(GTIMR, 0xffffffff);
2100	I915_WRITE(GTIER, 0x0);
2101	I915_WRITE(GTIIR, I915_READ(GTIIR));
2102
2103	I915_WRITE(SDEIMR, 0xffffffff);
2104	I915_WRITE(SDEIER, 0x0);
2105	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
2106}
2421static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS)
2422{
2423	struct drm_device *dev = (struct drm_device *) arg;
2424	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2425	u32 iir, new_iir;
2426	u32 pipe_stats[I915_MAX_PIPES];
2427	unsigned long irqflags;
2428	int irq_received;
2429	int ret = IRQ_NONE, pipe;
2430
2431	atomic_inc(&dev_priv->irq_received);
2432
2433	iir = I915_READ(IIR);
2434
2435	for (;;) {
2436		bool blc_event = false;
2437
2438		irq_received = iir != 0;
2439
2440		/* Can't rely on pipestat interrupt bit in iir as it might
2441		 * have been cleared after the pipestat interrupt was received.
2442		 * It doesn't set the bit in iir again, but it still produces
2443		 * interrupts (for non-MSI).
2444		 */
2445		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2446		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2447			i915_handle_error(dev, false);
2448
2449		for_each_pipe(pipe) {
2450			int reg = PIPESTAT(pipe);
2451			pipe_stats[pipe] = I915_READ(reg);
2452
2453 /*
2454 * Clear the PIPE*STAT regs before the IIR
2455 */
2456 if (pipe_stats[pipe] & 0x8000ffff) {
2457 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2458 DRM_DEBUG_DRIVER("pipe %c underrun\n",
2459 pipe_name(pipe));
2460 I915_WRITE(reg, pipe_stats[pipe]);
2461 irq_received = 1;
2462 }
2463 }
2464 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2465
2466 if (!irq_received)
2467 break;
2468
2469 ret = IRQ_HANDLED;
2470
2471 /* Consume port. Then clear IIR or we'll miss events */
2472 if ((I915_HAS_HOTPLUG(dev)) &&
2473 (iir & I915_DISPLAY_PORT_INTERRUPT)) {
2474 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
2475
2476 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
2477 hotplug_status);
2478 if (hotplug_status & dev_priv->hotplug_supported_mask)
2479 queue_work(dev_priv->wq,
2480 &dev_priv->hotplug_work);
2481
2482 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2483 I915_READ(PORT_HOTPLUG_STAT);
2484 }
2485
2486 I915_WRITE(IIR, iir);
2487 new_iir = I915_READ(IIR); /* Flush posted writes */
2488
2489 if (iir & I915_USER_INTERRUPT)
2490 notify_ring(dev, &dev_priv->ring[RCS]);
2491 if (iir & I915_BSD_USER_INTERRUPT)
2492 notify_ring(dev, &dev_priv->ring[VCS]);
2493
2494 if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
2495 intel_prepare_page_flip(dev, 0);
2496
2497 if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
2498 intel_prepare_page_flip(dev, 1);
2499
2500 for_each_pipe(pipe) {
2501 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
2502 drm_handle_vblank(dev, pipe)) {
2503 i915_pageflip_stall_check(dev, pipe);
2504 intel_finish_page_flip(dev, pipe);
2505 }
2506
2507 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
2508 blc_event = true;
2509 }
2510
2511
2512 if (blc_event || (iir & I915_ASLE_INTERRUPT))
2513 intel_opregion_asle_intr(dev);
2514
2515 /* With MSI, interrupts are only generated when iir
2516 * transitions from zero to nonzero. If another bit got
2517 * set while we were handling the existing iir bits, then
2518 * we would never get another interrupt.
2519 *
2520 * This is fine on non-MSI as well, as if we hit this path
2521 * we avoid exiting the interrupt handler only to generate
2522 * another one.
2523 *
2524 * Note that for MSI this could cause a stray interrupt report
2525 * if an interrupt landed in the time between writing IIR and
2526 * the posting read. This should be rare enough to never
2527 * trigger the 99% of 100,000 interrupts test for disabling
2528 * stray interrupts.
2529 */
2530 iir = new_iir;
2531 }
2532
2533 i915_update_dri1_breadcrumb(dev);
2534
2535 return ret;
2536}
2107 2537
2108static void i915_driver_irq_uninstall(struct drm_device * dev)
2538static void i965_irq_uninstall(struct drm_device * dev)
2109{ 2539{
2110 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2540 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2111 int pipe; 2541 int pipe;
@@ -2113,8 +2543,6 @@ static void i915_driver_irq_uninstall(struct drm_device * dev)
2113 if (!dev_priv) 2543 if (!dev_priv)
2114 return; 2544 return;
2115 2545
2116 dev_priv->vblank_pipe = 0;
2117
2118 if (I915_HAS_HOTPLUG(dev)) { 2546 if (I915_HAS_HOTPLUG(dev)) {
2119 I915_WRITE(PORT_HOTPLUG_EN, 0); 2547 I915_WRITE(PORT_HOTPLUG_EN, 0);
2120 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2548 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
@@ -2134,9 +2562,15 @@ static void i915_driver_irq_uninstall(struct drm_device * dev)
2134 2562
2135void intel_irq_init(struct drm_device *dev) 2563void intel_irq_init(struct drm_device *dev)
2136{ 2564{
2565 struct drm_i915_private *dev_priv = dev->dev_private;
2566
2567 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
2568 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
2569 INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
2570
2137 dev->driver->get_vblank_counter = i915_get_vblank_counter; 2571 dev->driver->get_vblank_counter = i915_get_vblank_counter;
2138 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 2572 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
2139	if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
2573	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
2140 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ 2574 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
2141 dev->driver->get_vblank_counter = gm45_get_vblank_counter; 2575 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
2142 } 2576 }
@@ -2147,7 +2581,14 @@ void intel_irq_init(struct drm_device *dev)
2147 dev->driver->get_vblank_timestamp = NULL; 2581 dev->driver->get_vblank_timestamp = NULL;
2148 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; 2582 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
2149 2583
2150	if (IS_IVYBRIDGE(dev)) {
2584	if (IS_VALLEYVIEW(dev)) {
2585 dev->driver->irq_handler = valleyview_irq_handler;
2586 dev->driver->irq_preinstall = valleyview_irq_preinstall;
2587 dev->driver->irq_postinstall = valleyview_irq_postinstall;
2588 dev->driver->irq_uninstall = valleyview_irq_uninstall;
2589 dev->driver->enable_vblank = valleyview_enable_vblank;
2590 dev->driver->disable_vblank = valleyview_disable_vblank;
2591 } else if (IS_IVYBRIDGE(dev)) {
2151 /* Share pre & uninstall handlers with ILK/SNB */ 2592 /* Share pre & uninstall handlers with ILK/SNB */
2152 dev->driver->irq_handler = ivybridge_irq_handler; 2593 dev->driver->irq_handler = ivybridge_irq_handler;
2153 dev->driver->irq_preinstall = ironlake_irq_preinstall; 2594 dev->driver->irq_preinstall = ironlake_irq_preinstall;
@@ -2155,6 +2596,14 @@ void intel_irq_init(struct drm_device *dev)
2155 dev->driver->irq_uninstall = ironlake_irq_uninstall; 2596 dev->driver->irq_uninstall = ironlake_irq_uninstall;
2156 dev->driver->enable_vblank = ivybridge_enable_vblank; 2597 dev->driver->enable_vblank = ivybridge_enable_vblank;
2157 dev->driver->disable_vblank = ivybridge_disable_vblank; 2598 dev->driver->disable_vblank = ivybridge_disable_vblank;
2599 } else if (IS_HASWELL(dev)) {
2600 /* Share interrupts handling with IVB */
2601 dev->driver->irq_handler = ivybridge_irq_handler;
2602 dev->driver->irq_preinstall = ironlake_irq_preinstall;
2603 dev->driver->irq_postinstall = ivybridge_irq_postinstall;
2604 dev->driver->irq_uninstall = ironlake_irq_uninstall;
2605 dev->driver->enable_vblank = ivybridge_enable_vblank;
2606 dev->driver->disable_vblank = ivybridge_disable_vblank;
2158 } else if (HAS_PCH_SPLIT(dev)) { 2607 } else if (HAS_PCH_SPLIT(dev)) {
2159 dev->driver->irq_handler = ironlake_irq_handler; 2608 dev->driver->irq_handler = ironlake_irq_handler;
2160 dev->driver->irq_preinstall = ironlake_irq_preinstall; 2609 dev->driver->irq_preinstall = ironlake_irq_preinstall;
@@ -2163,10 +2612,25 @@ void intel_irq_init(struct drm_device *dev)
2163 dev->driver->enable_vblank = ironlake_enable_vblank; 2612 dev->driver->enable_vblank = ironlake_enable_vblank;
2164 dev->driver->disable_vblank = ironlake_disable_vblank; 2613 dev->driver->disable_vblank = ironlake_disable_vblank;
2165 } else { 2614 } else {
2166		dev->driver->irq_preinstall = i915_driver_irq_preinstall;
2167		dev->driver->irq_postinstall = i915_driver_irq_postinstall;
2168		dev->driver->irq_uninstall = i915_driver_irq_uninstall;
2169		dev->driver->irq_handler = i915_driver_irq_handler;
2615		if (INTEL_INFO(dev)->gen == 2) {
2616			dev->driver->irq_preinstall = i8xx_irq_preinstall;
2617			dev->driver->irq_postinstall = i8xx_irq_postinstall;
2618			dev->driver->irq_handler = i8xx_irq_handler;
2619 dev->driver->irq_uninstall = i8xx_irq_uninstall;
2620 } else if (INTEL_INFO(dev)->gen == 3) {
2621 /* IIR "flip pending" means done if this bit is set */
2622 I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
2623
2624 dev->driver->irq_preinstall = i915_irq_preinstall;
2625 dev->driver->irq_postinstall = i915_irq_postinstall;
2626 dev->driver->irq_uninstall = i915_irq_uninstall;
2627 dev->driver->irq_handler = i915_irq_handler;
2628 } else {
2629 dev->driver->irq_preinstall = i965_irq_preinstall;
2630 dev->driver->irq_postinstall = i965_irq_postinstall;
2631 dev->driver->irq_uninstall = i965_irq_uninstall;
2632 dev->driver->irq_handler = i965_irq_handler;
2633 }
2170 dev->driver->enable_vblank = i915_enable_vblank; 2634 dev->driver->enable_vblank = i915_enable_vblank;
2171 dev->driver->disable_vblank = i915_disable_vblank; 2635 dev->driver->disable_vblank = i915_disable_vblank;
2172 } 2636 }
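
The per-generation hooks selected above are driven by the drm core rather
than called directly; a simplified view of the install sequence (the
drm_irq_install() internals are condensed here):

	intel_irq_init(dev);			/* pick the gen-specific vtable */
	dev->driver->irq_preinstall(dev);	/* mask and clear everything */
	/* drm core requests the interrupt line, then: */
	dev->driver->irq_postinstall(dev);	/* unmask the wanted sources */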
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 9d24d65f0c3e..2d49b9507ed0 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -27,6 +27,11 @@
27 27
28#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a))) 28#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
29 29
30#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
31
32#define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a))
33#define _MASKED_BIT_DISABLE(a) ((a) << 16)
34
30/* 35/*
31 * The Bridge device's PCI config space has information about the 36 * The Bridge device's PCI config space has information about the
32 * fb aperture size and the amount of pre-reserved memory. 37 * fb aperture size and the amount of pre-reserved memory.
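
The new _MASKED_BIT_* helpers above encode the write-mask convention used by
registers such as INSTPM and MI_MODE: the high 16 bits select which of the
low 16 bits the write actually changes. Worked values for a hypothetical
bit-6 flag:

	#define EXAMPLE_FLAG (1 << 6)		/* illustrative only */

	_MASKED_BIT_ENABLE(EXAMPLE_FLAG)	/* 0x00400040: select bit 6, set it */
	_MASKED_BIT_DISABLE(EXAMPLE_FLAG)	/* 0x00400000: select bit 6, clear it */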
@@ -77,6 +82,7 @@
77#define GRDOM_FULL (0<<2) 82#define GRDOM_FULL (0<<2)
78#define GRDOM_RENDER (1<<2) 83#define GRDOM_RENDER (1<<2)
79#define GRDOM_MEDIA (3<<2) 84#define GRDOM_MEDIA (3<<2)
85#define GRDOM_RESET_ENABLE (1<<0)
80 86
81#define GEN6_MBCUNIT_SNPCR 0x900c /* for LLC config */ 87#define GEN6_MBCUNIT_SNPCR 0x900c /* for LLC config */
82#define GEN6_MBC_SNPCR_SHIFT 21 88#define GEN6_MBC_SNPCR_SHIFT 21
@@ -125,6 +131,13 @@
125#define ECOCHK_PPGTT_CACHE64B (0x3<<3) 131#define ECOCHK_PPGTT_CACHE64B (0x3<<3)
126#define ECOCHK_PPGTT_CACHE4B (0x0<<3) 132#define ECOCHK_PPGTT_CACHE4B (0x0<<3)
127 133
134#define GAC_ECO_BITS 0x14090
135#define ECOBITS_PPGTT_CACHE64B (3<<8)
136#define ECOBITS_PPGTT_CACHE4B (0<<8)
137
138#define GAB_CTL 0x24000
139#define GAB_CTL_CONT_AFTER_PAGEFAULT (1<<8)
140
128/* VGA stuff */ 141/* VGA stuff */
129 142
130#define VGA_ST01_MDA 0x3ba 143#define VGA_ST01_MDA 0x3ba
@@ -222,6 +235,7 @@
222#define MI_BATCH_NON_SECURE (1) 235#define MI_BATCH_NON_SECURE (1)
223#define MI_BATCH_NON_SECURE_I965 (1<<8) 236#define MI_BATCH_NON_SECURE_I965 (1<<8)
224#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0) 237#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
238#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */
225#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */ 239#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */
226#define MI_SEMAPHORE_GLOBAL_GTT (1<<22) 240#define MI_SEMAPHORE_GLOBAL_GTT (1<<22)
227#define MI_SEMAPHORE_UPDATE (1<<21) 241#define MI_SEMAPHORE_UPDATE (1<<21)
@@ -301,6 +315,61 @@
301#define DEBUG_RESET_RENDER (1<<8) 315#define DEBUG_RESET_RENDER (1<<8)
302#define DEBUG_RESET_DISPLAY (1<<9) 316#define DEBUG_RESET_DISPLAY (1<<9)
303 317
318/*
319 * DPIO - a special bus for various display related registers to hide behind:
320 * 0x800c: m1, m2, n, p1, p2, k dividers
321 * 0x8014: REF and SFR select
322 * 0x8014: N divider, VCO select
323 * 0x801c/3c: core clock bits
324 * 0x8048/68: low pass filter coefficients
325 * 0x8100: fast clock controls
326 */
327#define DPIO_PKT 0x2100
328#define DPIO_RID (0<<24)
329#define DPIO_OP_WRITE (1<<16)
330#define DPIO_OP_READ (0<<16)
331#define DPIO_PORTID (0x12<<8)
332#define DPIO_BYTE (0xf<<4)
333#define DPIO_BUSY (1<<0) /* status only */
334#define DPIO_DATA 0x2104
335#define DPIO_REG 0x2108
336#define DPIO_CTL 0x2110
337#define DPIO_MODSEL1 (1<<3) /* if ref clk b == 27 */
338#define DPIO_MODSEL0 (1<<2) /* if ref clk a == 27 */
339#define DPIO_SFR_BYPASS (1<<1)
340#define DPIO_RESET (1<<0)
341
342#define _DPIO_DIV_A 0x800c
343#define DPIO_POST_DIV_SHIFT (28) /* 3 bits */
344#define DPIO_K_SHIFT (24) /* 4 bits */
345#define DPIO_P1_SHIFT (21) /* 3 bits */
346#define DPIO_P2_SHIFT (16) /* 5 bits */
347#define DPIO_N_SHIFT (12) /* 4 bits */
348#define DPIO_ENABLE_CALIBRATION (1<<11)
349#define DPIO_M1DIV_SHIFT (8) /* 3 bits */
350#define DPIO_M2DIV_MASK 0xff
351#define _DPIO_DIV_B 0x802c
352#define DPIO_DIV(pipe) _PIPE(pipe, _DPIO_DIV_A, _DPIO_DIV_B)
353
354#define _DPIO_REFSFR_A 0x8014
355#define DPIO_REFSEL_OVERRIDE 27
356#define DPIO_PLL_MODESEL_SHIFT 24 /* 3 bits */
357#define DPIO_BIAS_CURRENT_CTL_SHIFT 21 /* 3 bits, always 0x7 */
358#define DPIO_PLL_REFCLK_SEL_SHIFT 16 /* 2 bits */
359#define DPIO_DRIVER_CTL_SHIFT 12 /* always set to 0x8 */
360#define DPIO_CLK_BIAS_CTL_SHIFT 8 /* always set to 0x5 */
361#define _DPIO_REFSFR_B 0x8034
362#define DPIO_REFSFR(pipe) _PIPE(pipe, _DPIO_REFSFR_A, _DPIO_REFSFR_B)
363
364#define _DPIO_CORE_CLK_A 0x801c
365#define _DPIO_CORE_CLK_B 0x803c
366#define DPIO_CORE_CLK(pipe) _PIPE(pipe, _DPIO_CORE_CLK_A, _DPIO_CORE_CLK_B)
367
368#define _DPIO_LFP_COEFF_A 0x8048
369#define _DPIO_LFP_COEFF_B 0x8068
370#define DPIO_LFP_COEFF(pipe) _PIPE(pipe, _DPIO_LFP_COEFF_A, _DPIO_LFP_COEFF_B)
371
372#define DPIO_FASTCLK_DISABLE 0x8100
304 373
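
The DPIO_DIV()/DPIO_REFSFR() lookups above lean on the _PIPE() macro from
earlier in this header, which interpolates between the pipe A and pipe B
addresses; for the divider register the arithmetic works out to:

	DPIO_DIV(0)	/* 0x800c + 0 * (0x802c - 0x800c) = 0x800c (pipe A) */
	DPIO_DIV(1)	/* 0x800c + 1 * (0x802c - 0x800c) = 0x802c (pipe B) */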
305/* 374/*
306 * Fence registers 375 * Fence registers
@@ -360,8 +429,6 @@
360#define ARB_MODE 0x04030 429#define ARB_MODE 0x04030
361#define ARB_MODE_SWIZZLE_SNB (1<<4) 430#define ARB_MODE_SWIZZLE_SNB (1<<4)
362#define ARB_MODE_SWIZZLE_IVB (1<<5) 431#define ARB_MODE_SWIZZLE_IVB (1<<5)
363#define ARB_MODE_ENABLE(x) GFX_MODE_ENABLE(x)
364#define ARB_MODE_DISABLE(x) GFX_MODE_DISABLE(x)
365#define RENDER_HWS_PGA_GEN7 (0x04080) 432#define RENDER_HWS_PGA_GEN7 (0x04080)
366#define RING_FAULT_REG(ring) (0x4094 + 0x100*(ring)->id) 433#define RING_FAULT_REG(ring) (0x4094 + 0x100*(ring)->id)
367#define DONE_REG 0x40b0 434#define DONE_REG 0x40b0
@@ -417,6 +484,7 @@
417#define INSTDONE 0x02090 484#define INSTDONE 0x02090
418#define NOPID 0x02094 485#define NOPID 0x02094
419#define HWSTAM 0x02098 486#define HWSTAM 0x02098
487#define DMA_FADD_I8XX 0x020d0
420 488
421#define ERROR_GEN6 0x040a0 489#define ERROR_GEN6 0x040a0
422 490
@@ -432,6 +500,7 @@
432 */ 500 */
433# define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14) 501# define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14)
434#define _3D_CHICKEN3 0x02090 502#define _3D_CHICKEN3 0x02090
503#define _3D_CHICKEN_SF_DISABLE_FASTCLIP_CULL (1 << 5)
435 504
436#define MI_MODE 0x0209c 505#define MI_MODE 0x0209c
437# define VS_TIMER_DISPATCH (1 << 6) 506# define VS_TIMER_DISPATCH (1 << 6)
@@ -447,14 +516,16 @@
447#define GFX_PSMI_GRANULARITY (1<<10) 516#define GFX_PSMI_GRANULARITY (1<<10)
448#define GFX_PPGTT_ENABLE (1<<9) 517#define GFX_PPGTT_ENABLE (1<<9)
449 518
450#define GFX_MODE_ENABLE(bit) (((bit) << 16) | (bit))
451#define GFX_MODE_DISABLE(bit) (((bit) << 16) | (0))
452
453#define SCPD0 0x0209c /* 915+ only */ 519#define SCPD0 0x0209c /* 915+ only */
454#define IER 0x020a0 520#define IER 0x020a0
455#define IIR 0x020a4 521#define IIR 0x020a4
456#define IMR 0x020a8 522#define IMR 0x020a8
457#define ISR 0x020ac 523#define ISR 0x020ac
524#define VLV_IIR_RW 0x182084
525#define VLV_IER 0x1820a0
526#define VLV_IIR 0x1820a4
527#define VLV_IMR 0x1820a8
528#define VLV_ISR 0x1820ac
458#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18) 529#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
459#define I915_DISPLAY_PORT_INTERRUPT (1<<17) 530#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
460#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15) 531#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15)
@@ -500,7 +571,6 @@
500#define LM_BURST_LENGTH 0x00000700 571#define LM_BURST_LENGTH 0x00000700
501#define LM_FIFO_WATERMARK 0x0000001F 572#define LM_FIFO_WATERMARK 0x0000001F
502#define MI_ARB_STATE 0x020e4 /* 915+ only */ 573#define MI_ARB_STATE 0x020e4 /* 915+ only */
503#define MI_ARB_MASK_SHIFT 16 /* shift for enable bits */
504 574
505/* Make render/texture TLB fetches lower priority than associated data	575/* Make render/texture TLB fetches lower priority than associated data
506 * fetches. This is not turned on by default 576 * fetches. This is not turned on by default
@@ -565,7 +635,6 @@
565#define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */ 635#define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */
566 636
567#define CACHE_MODE_0 0x02120 /* 915+ only */ 637#define CACHE_MODE_0 0x02120 /* 915+ only */
568#define CM0_MASK_SHIFT 16
569#define CM0_IZ_OPT_DISABLE (1<<6) 638#define CM0_IZ_OPT_DISABLE (1<<6)
570#define CM0_ZR_OPT_DISABLE (1<<5) 639#define CM0_ZR_OPT_DISABLE (1<<5)
571#define CM0_STC_EVICT_DISABLE_LRA_SNB (1<<5) 640#define CM0_STC_EVICT_DISABLE_LRA_SNB (1<<5)
@@ -579,7 +648,12 @@
579#define ECO_GATING_CX_ONLY (1<<3) 648#define ECO_GATING_CX_ONLY (1<<3)
580#define ECO_FLIP_DONE (1<<0) 649#define ECO_FLIP_DONE (1<<0)
581 650
582/* GEN6 interrupt control */
651#define CACHE_MODE_1		0x7004 /* IVB+ */
652#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6)
653
654/* GEN6 interrupt control
655 * Note that the per-ring interrupt bits do alias with the global interrupt bits
656 * in GTIMR. */
583#define GEN6_RENDER_HWSTAM 0x2098 657#define GEN6_RENDER_HWSTAM 0x2098
584#define GEN6_RENDER_IMR 0x20a8 658#define GEN6_RENDER_IMR 0x20a8
585#define GEN6_RENDER_CONTEXT_SWITCH_INTERRUPT (1 << 8) 659#define GEN6_RENDER_CONTEXT_SWITCH_INTERRUPT (1 << 8)
@@ -615,6 +689,21 @@
615 689
616#define GEN6_BSD_RNCID 0x12198 690#define GEN6_BSD_RNCID 0x12198
617 691
692#define GEN7_FF_THREAD_MODE 0x20a0
693#define GEN7_FF_SCHED_MASK 0x0077070
694#define GEN7_FF_TS_SCHED_HS1 (0x5<<16)
695#define GEN7_FF_TS_SCHED_HS0 (0x3<<16)
696#define GEN7_FF_TS_SCHED_LOAD_BALANCE (0x1<<16)
697#define GEN7_FF_TS_SCHED_HW (0x0<<16) /* Default */
698#define GEN7_FF_VS_SCHED_HS1 (0x5<<12)
699#define GEN7_FF_VS_SCHED_HS0 (0x3<<12)
700#define GEN7_FF_VS_SCHED_LOAD_BALANCE (0x1<<12) /* Default */
701#define GEN7_FF_VS_SCHED_HW (0x0<<12)
702#define GEN7_FF_DS_SCHED_HS1 (0x5<<4)
703#define GEN7_FF_DS_SCHED_HS0 (0x3<<4)
704#define GEN7_FF_DS_SCHED_LOAD_BALANCE (0x1<<4) /* Default */
705#define GEN7_FF_DS_SCHED_HW (0x0<<4)
706
618/* 707/*
619 * Framebuffer compression (915+ only) 708 * Framebuffer compression (915+ only)
620 */ 709 */
@@ -743,9 +832,9 @@
743#define GMBUS_PORT_PANEL 3 832#define GMBUS_PORT_PANEL 3
744#define GMBUS_PORT_DPC 4 /* HDMIC */ 833#define GMBUS_PORT_DPC 4 /* HDMIC */
745#define GMBUS_PORT_DPB 5 /* SDVO, HDMIB */ 834#define GMBUS_PORT_DPB 5 /* SDVO, HDMIB */
746			/* 6 reserved */
747#define GMBUS_PORT_DPD	7 /* HDMID */
748#define GMBUS_NUM_PORTS	8
835#define GMBUS_PORT_DPD	6 /* HDMID */
836#define GMBUS_PORT_RESERVED	7 /* 7 reserved */
837#define GMBUS_NUM_PORTS	(GMBUS_PORT_DPD - GMBUS_PORT_SSC + 1)
749#define GMBUS1 0x5104 /* command/status */ 838#define GMBUS1 0x5104 /* command/status */
750#define GMBUS_SW_CLR_INT (1<<31) 839#define GMBUS_SW_CLR_INT (1<<31)
751#define GMBUS_SW_RDY (1<<30) 840#define GMBUS_SW_RDY (1<<30)
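
With GMBUS_PORT_DPD renumbered from 7 to 6 above, GMBUS_NUM_PORTS is now
derived from the port range instead of being hard-coded to 8; assuming
GMBUS_PORT_SSC is 1 as defined earlier in this header (outside this excerpt),
it evaluates to:

	GMBUS_NUM_PORTS		/* (6 - 1 + 1) = 6: ports SSC through DPD */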
@@ -797,7 +886,9 @@
797#define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B) 886#define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B)
798#define DPLL_VCO_ENABLE (1 << 31) 887#define DPLL_VCO_ENABLE (1 << 31)
799#define DPLL_DVO_HIGH_SPEED (1 << 30) 888#define DPLL_DVO_HIGH_SPEED (1 << 30)
889#define DPLL_EXT_BUFFER_ENABLE_VLV (1 << 30)
800#define DPLL_SYNCLOCK_ENABLE (1 << 29) 890#define DPLL_SYNCLOCK_ENABLE (1 << 29)
891#define DPLL_REFA_CLK_ENABLE_VLV (1 << 29)
801#define DPLL_VGA_MODE_DIS (1 << 28) 892#define DPLL_VGA_MODE_DIS (1 << 28)
802#define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */ 893#define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */
803#define DPLLB_MODE_LVDS (2 << 26) /* i915 */ 894#define DPLLB_MODE_LVDS (2 << 26) /* i915 */
@@ -809,6 +900,7 @@
809#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */ 900#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
810#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */ 901#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
811#define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */ 902#define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */
903#define DPLL_INTEGRATED_CLOCK_VLV (1<<13)
812 904
813#define SRX_INDEX 0x3c4 905#define SRX_INDEX 0x3c4
814#define SRX_DATA 0x3c5 906#define SRX_DATA 0x3c5
@@ -904,6 +996,7 @@
904#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0 996#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
905#define _DPLL_B_MD 0x06020 /* 965+ only */ 997#define _DPLL_B_MD 0x06020 /* 965+ only */
906#define DPLL_MD(pipe) _PIPE(pipe, _DPLL_A_MD, _DPLL_B_MD) 998#define DPLL_MD(pipe) _PIPE(pipe, _DPLL_A_MD, _DPLL_B_MD)
999
907#define _FPA0 0x06040 1000#define _FPA0 0x06040
908#define _FPA1 0x06044 1001#define _FPA1 0x06044
909#define _FPB0 0x06048 1002#define _FPB0 0x06048
@@ -1044,6 +1137,9 @@
1044#define RAMCLK_GATE_D 0x6210 /* CRL only */ 1137#define RAMCLK_GATE_D 0x6210 /* CRL only */
1045#define DEUC 0x6214 /* CRL only */ 1138#define DEUC 0x6214 /* CRL only */
1046 1139
1140#define FW_BLC_SELF_VLV 0x6500
1141#define FW_CSPWRDWNEN (1<<15)
1142
1047/* 1143/*
1048 * Palette regs 1144 * Palette regs
1049 */ 1145 */
@@ -1601,9 +1697,12 @@
1601/* Video Data Island Packet control */ 1697/* Video Data Island Packet control */
1602#define VIDEO_DIP_DATA 0x61178 1698#define VIDEO_DIP_DATA 0x61178
1603#define VIDEO_DIP_CTL 0x61170 1699#define VIDEO_DIP_CTL 0x61170
1700/* Pre HSW: */
1604#define VIDEO_DIP_ENABLE (1 << 31) 1701#define VIDEO_DIP_ENABLE (1 << 31)
1605#define VIDEO_DIP_PORT_B (1 << 29) 1702#define VIDEO_DIP_PORT_B (1 << 29)
1606#define VIDEO_DIP_PORT_C (2 << 29) 1703#define VIDEO_DIP_PORT_C (2 << 29)
1704#define VIDEO_DIP_PORT_D (3 << 29)
1705#define VIDEO_DIP_PORT_MASK (3 << 29)
1607#define VIDEO_DIP_ENABLE_AVI (1 << 21) 1706#define VIDEO_DIP_ENABLE_AVI (1 << 21)
1608#define VIDEO_DIP_ENABLE_VENDOR (2 << 21) 1707#define VIDEO_DIP_ENABLE_VENDOR (2 << 21)
1609#define VIDEO_DIP_ENABLE_SPD (8 << 21) 1708#define VIDEO_DIP_ENABLE_SPD (8 << 21)
@@ -1614,6 +1713,10 @@
1614#define VIDEO_DIP_FREQ_ONCE (0 << 16) 1713#define VIDEO_DIP_FREQ_ONCE (0 << 16)
1615#define VIDEO_DIP_FREQ_VSYNC (1 << 16) 1714#define VIDEO_DIP_FREQ_VSYNC (1 << 16)
1616#define VIDEO_DIP_FREQ_2VSYNC (2 << 16) 1715#define VIDEO_DIP_FREQ_2VSYNC (2 << 16)
1716#define VIDEO_DIP_FREQ_MASK (3 << 16)
1717/* HSW and later: */
1718#define VIDEO_DIP_ENABLE_AVI_HSW (1 << 12)
1719#define VIDEO_DIP_ENABLE_SPD_HSW (1 << 0)
1617 1720
1618/* Panel power sequencing */ 1721/* Panel power sequencing */
1619#define PP_STATUS 0x61200 1722#define PP_STATUS 0x61200
@@ -2380,7 +2483,8 @@
2380 2483
2381/* Pipe A */ 2484/* Pipe A */
2382#define _PIPEADSL 0x70000 2485#define _PIPEADSL 0x70000
2383#define DSL_LINEMASK 0x00000fff 2486#define DSL_LINEMASK_GEN2 0x00000fff
2487#define DSL_LINEMASK_GEN3 0x00001fff
2384#define _PIPEACONF 0x70008 2488#define _PIPEACONF 0x70008
2385#define PIPECONF_ENABLE (1<<31) 2489#define PIPECONF_ENABLE (1<<31)
2386#define PIPECONF_DISABLE 0 2490#define PIPECONF_DISABLE 0
@@ -2422,23 +2526,30 @@
2422#define PIPECONF_DITHER_TYPE_TEMP (3<<2) 2526#define PIPECONF_DITHER_TYPE_TEMP (3<<2)
2423#define _PIPEASTAT 0x70024 2527#define _PIPEASTAT 0x70024
2424#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31) 2528#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31)
2529#define SPRITE1_FLIPDONE_INT_EN_VLV (1UL<<30)
2425#define PIPE_CRC_ERROR_ENABLE (1UL<<29) 2530#define PIPE_CRC_ERROR_ENABLE (1UL<<29)
2426#define PIPE_CRC_DONE_ENABLE (1UL<<28) 2531#define PIPE_CRC_DONE_ENABLE (1UL<<28)
2427#define PIPE_GMBUS_EVENT_ENABLE (1UL<<27) 2532#define PIPE_GMBUS_EVENT_ENABLE (1UL<<27)
2533#define PLANE_FLIP_DONE_INT_EN_VLV (1UL<<26)
2428#define PIPE_HOTPLUG_INTERRUPT_ENABLE (1UL<<26) 2534#define PIPE_HOTPLUG_INTERRUPT_ENABLE (1UL<<26)
2429#define PIPE_VSYNC_INTERRUPT_ENABLE (1UL<<25) 2535#define PIPE_VSYNC_INTERRUPT_ENABLE (1UL<<25)
2430#define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24) 2536#define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24)
2431#define PIPE_DPST_EVENT_ENABLE (1UL<<23) 2537#define PIPE_DPST_EVENT_ENABLE (1UL<<23)
2538#define SPRITE0_FLIP_DONE_INT_EN_VLV (1UL<<26)
2432#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL<<22) 2539#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL<<22)
2433#define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21) 2540#define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21)
2434#define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20) 2541#define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20)
2435#define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL<<18) /* pre-965 */ 2542#define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL<<18) /* pre-965 */
2436#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */ 2543#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */
2437#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17) 2544#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17)
2545#define PIPEA_HBLANK_INT_EN_VLV (1UL<<16)
2438#define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16) 2546#define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16)
2547#define SPRITE1_FLIPDONE_INT_STATUS_VLV (1UL<<15)
2548#define SPRITE0_FLIPDONE_INT_STATUS_VLV (1UL<<14)
2439#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13) 2549#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13)
2440#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12) 2550#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12)
2441#define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11) 2551#define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11)
2552#define PLANE_FLIPDONE_INT_STATUS_VLV (1UL<<10)
2442#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL<<10) 2553#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL<<10)
2443#define PIPE_VSYNC_INTERRUPT_STATUS (1UL<<9) 2554#define PIPE_VSYNC_INTERRUPT_STATUS (1UL<<9)
2444#define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL<<8) 2555#define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL<<8)
@@ -2463,6 +2574,40 @@
2463#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL) 2574#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL)
2464#define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT) 2575#define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT)
2465 2576
2577#define VLV_DPFLIPSTAT 0x70028
2578#define PIPEB_LINE_COMPARE_STATUS (1<<29)
2579#define PIPEB_HLINE_INT_EN (1<<28)
2580#define PIPEB_VBLANK_INT_EN (1<<27)
2581#define SPRITED_FLIPDONE_INT_EN (1<<26)
2582#define SPRITEC_FLIPDONE_INT_EN (1<<25)
2583#define PLANEB_FLIPDONE_INT_EN (1<<24)
2584#define PIPEA_LINE_COMPARE_STATUS (1<<21)
2585#define PIPEA_HLINE_INT_EN (1<<20)
2586#define PIPEA_VBLANK_INT_EN (1<<19)
2587#define SPRITEB_FLIPDONE_INT_EN (1<<18)
2588#define SPRITEA_FLIPDONE_INT_EN (1<<17)
2589#define PLANEA_FLIPDONE_INT_EN (1<<16)
2590
2591#define DPINVGTT 0x7002c /* VLV only */
2592#define CURSORB_INVALID_GTT_INT_EN (1<<23)
2593#define CURSORA_INVALID_GTT_INT_EN (1<<22)
2594#define SPRITED_INVALID_GTT_INT_EN (1<<21)
2595#define SPRITEC_INVALID_GTT_INT_EN (1<<20)
2596#define PLANEB_INVALID_GTT_INT_EN (1<<19)
2597#define SPRITEB_INVALID_GTT_INT_EN (1<<18)
2598#define SPRITEA_INVALID_GTT_INT_EN (1<<17)
2599#define PLANEA_INVALID_GTT_INT_EN (1<<16)
2600#define DPINVGTT_EN_MASK 0xff0000
2601#define CURSORB_INVALID_GTT_STATUS (1<<7)
2602#define CURSORA_INVALID_GTT_STATUS (1<<6)
2603#define SPRITED_INVALID_GTT_STATUS (1<<5)
2604#define SPRITEC_INVALID_GTT_STATUS (1<<4)
2605#define PLANEB_INVALID_GTT_STATUS (1<<3)
2606#define SPRITEB_INVALID_GTT_STATUS (1<<2)
2607#define SPRITEA_INVALID_GTT_STATUS (1<<1)
2608#define PLANEA_INVALID_GTT_STATUS (1<<0)
2609#define DPINVGTT_STATUS_MASK 0xff
2610
2466#define DSPARB 0x70030 2611#define DSPARB 0x70030
2467#define DSPARB_CSTART_MASK (0x7f << 7) 2612#define DSPARB_CSTART_MASK (0x7f << 7)
2468#define DSPARB_CSTART_SHIFT 7 2613#define DSPARB_CSTART_SHIFT 7
@@ -2492,11 +2637,28 @@
2492#define DSPFW_HPLL_CURSOR_MASK (0x3f<<16) 2637#define DSPFW_HPLL_CURSOR_MASK (0x3f<<16)
2493#define DSPFW_HPLL_SR_MASK (0x1ff) 2638#define DSPFW_HPLL_SR_MASK (0x1ff)
2494 2639
2640/* drain latency register values */
2641#define DRAIN_LATENCY_PRECISION_32 32
2642#define DRAIN_LATENCY_PRECISION_16 16
2643#define VLV_DDL1 0x70050
2644#define DDL_CURSORA_PRECISION_32 (1<<31)
2645#define DDL_CURSORA_PRECISION_16 (0<<31)
2646#define DDL_CURSORA_SHIFT 24
2647#define DDL_PLANEA_PRECISION_32 (1<<7)
2648#define DDL_PLANEA_PRECISION_16 (0<<7)
2649#define VLV_DDL2 0x70054
2650#define DDL_CURSORB_PRECISION_32 (1<<31)
2651#define DDL_CURSORB_PRECISION_16 (0<<31)
2652#define DDL_CURSORB_SHIFT 24
2653#define DDL_PLANEB_PRECISION_32 (1<<7)
2654#define DDL_PLANEB_PRECISION_16 (0<<7)
2655
2495/* FIFO watermark sizes etc */ 2656/* FIFO watermark sizes etc */
2496#define G4X_FIFO_LINE_SIZE 64 2657#define G4X_FIFO_LINE_SIZE 64
2497#define I915_FIFO_LINE_SIZE 64 2658#define I915_FIFO_LINE_SIZE 64
2498#define I830_FIFO_LINE_SIZE 32 2659#define I830_FIFO_LINE_SIZE 32
2499 2660
2661#define VALLEYVIEW_FIFO_SIZE 255
2500#define G4X_FIFO_SIZE 127 2662#define G4X_FIFO_SIZE 127
2501#define I965_FIFO_SIZE 512 2663#define I965_FIFO_SIZE 512
2502#define I945_FIFO_SIZE 127 2664#define I945_FIFO_SIZE 127
@@ -2504,6 +2666,7 @@
2504#define I855GM_FIFO_SIZE 127 /* In cachelines */ 2666#define I855GM_FIFO_SIZE 127 /* In cachelines */
2505#define I830_FIFO_SIZE 95 2667#define I830_FIFO_SIZE 95
2506 2668
2669#define VALLEYVIEW_MAX_WM 0xff
2507#define G4X_MAX_WM 0x3f 2670#define G4X_MAX_WM 0x3f
2508#define I915_MAX_WM 0x3f 2671#define I915_MAX_WM 0x3f
2509 2672
@@ -2518,6 +2681,7 @@
2518#define PINEVIEW_CURSOR_DFT_WM 0 2681#define PINEVIEW_CURSOR_DFT_WM 0
2519#define PINEVIEW_CURSOR_GUARD_WM 5 2682#define PINEVIEW_CURSOR_GUARD_WM 5
2520 2683
2684#define VALLEYVIEW_CURSOR_MAX_WM 64
2521#define I965_CURSOR_FIFO 64 2685#define I965_CURSOR_FIFO 64
2522#define I965_CURSOR_MAX_WM 32 2686#define I965_CURSOR_MAX_WM 32
2523#define I965_CURSOR_DFT_WM 8 2687#define I965_CURSOR_DFT_WM 8
@@ -2726,6 +2890,13 @@
2726#define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF) 2890#define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF)
2727#define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF) 2891#define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF)
2728 2892
2893/* Display/Sprite base address macros */
2894#define DISP_BASEADDR_MASK (0xfffff000)
2895#define I915_LO_DISPBASE(val) ((val) & ~DISP_BASEADDR_MASK)
2896#define I915_HI_DISPBASE(val) ((val) & DISP_BASEADDR_MASK)
2897#define I915_MODIFY_DISPBASE(reg, gfx_addr) \
2898 (I915_WRITE(reg, gfx_addr | I915_LO_DISPBASE(I915_READ(reg))))
2899
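I915_MODIFY_DISPBASE() is a read-modify-write: the low control bits of the current register value are kept and only the 4K-aligned surface address is replaced. A standalone, runnable illustration of the masking, with hypothetical register contents standing in for the mmio access:

    #include <stdint.h>
    #include <stdio.h>

    #define DISP_BASEADDR_MASK (0xfffff000)
    #define I915_LO_DISPBASE(val) ((val) & ~DISP_BASEADDR_MASK)
    #define I915_HI_DISPBASE(val) ((val) & DISP_BASEADDR_MASK)

    int main(void)
    {
        uint32_t cur = 0x12345abcu;      /* hypothetical current register value */
        uint32_t gfx_addr = 0x00fed000u; /* hypothetical new 4K-aligned surface */

        /* what I915_MODIFY_DISPBASE(reg, gfx_addr) would write back: */
        uint32_t out = gfx_addr | I915_LO_DISPBASE(cur);

        /* control bits 0xabc survive, address bits become 0x00fed000 */
        printf("cur=%08x addr=%08x written=%08x\n", cur, gfx_addr, out);
        return 0;
    }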
2729/* VBIOS flags */ 2900/* VBIOS flags */
2730#define SWF00 0x71410 2901#define SWF00 0x71410
2731#define SWF01 0x71414 2902#define SWF01 0x71414
@@ -3058,25 +3229,38 @@
3058#define DE_PCH_EVENT_IVB (1<<28) 3229#define DE_PCH_EVENT_IVB (1<<28)
3059#define DE_DP_A_HOTPLUG_IVB (1<<27) 3230#define DE_DP_A_HOTPLUG_IVB (1<<27)
3060#define DE_AUX_CHANNEL_A_IVB (1<<26) 3231#define DE_AUX_CHANNEL_A_IVB (1<<26)
3232#define DE_SPRITEC_FLIP_DONE_IVB (1<<14)
3233#define DE_PLANEC_FLIP_DONE_IVB (1<<13)
3234#define DE_PIPEC_VBLANK_IVB (1<<10)
3061#define DE_SPRITEB_FLIP_DONE_IVB (1<<9) 3235#define DE_SPRITEB_FLIP_DONE_IVB (1<<9)
3062#define DE_SPRITEA_FLIP_DONE_IVB (1<<4)
3063#define DE_PLANEB_FLIP_DONE_IVB (1<<8) 3236#define DE_PLANEB_FLIP_DONE_IVB (1<<8)
3064#define DE_PLANEA_FLIP_DONE_IVB (1<<3)
3065#define DE_PIPEB_VBLANK_IVB (1<<5) 3237#define DE_PIPEB_VBLANK_IVB (1<<5)
3238#define DE_SPRITEA_FLIP_DONE_IVB (1<<4)
3239#define DE_PLANEA_FLIP_DONE_IVB (1<<3)
3066#define DE_PIPEA_VBLANK_IVB (1<<0) 3240#define DE_PIPEA_VBLANK_IVB (1<<0)
3067 3241
3242#define VLV_MASTER_IER 0x4400c /* Gunit master IER */
3243#define MASTER_INTERRUPT_ENABLE (1<<31)
3244
3068#define DEISR 0x44000 3245#define DEISR 0x44000
3069#define DEIMR 0x44004 3246#define DEIMR 0x44004
3070#define DEIIR 0x44008 3247#define DEIIR 0x44008
3071#define DEIER 0x4400c 3248#define DEIER 0x4400c
3072 3249
3073/* GT interrupt */ 3250/* GT interrupt.
3074#define GT_PIPE_NOTIFY (1 << 4) 3251 * Note that for gen6+ the ring-specific interrupt bits do alias with the
3075#define GT_SYNC_STATUS (1 << 2) 3252 * corresponding bits in the per-ring interrupt control registers. */
3076#define GT_USER_INTERRUPT (1 << 0) 3253#define GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT (1 << 26)
3077#define GT_BSD_USER_INTERRUPT (1 << 5) 3254#define GT_GEN6_BLT_CS_ERROR_INTERRUPT (1 << 25)
3078#define GT_GEN6_BSD_USER_INTERRUPT (1 << 12) 3255#define GT_GEN6_BLT_USER_INTERRUPT (1 << 22)
3079#define GT_BLT_USER_INTERRUPT (1 << 22) 3256#define GT_GEN6_BSD_CS_ERROR_INTERRUPT (1 << 15)
3257#define GT_GEN6_BSD_USER_INTERRUPT (1 << 12)
3258#define GT_BSD_USER_INTERRUPT (1 << 5) /* ilk only */
3259#define GT_GEN7_L3_PARITY_ERROR_INTERRUPT (1 << 5)
3260#define GT_PIPE_NOTIFY (1 << 4)
3261#define GT_RENDER_CS_ERROR_INTERRUPT (1 << 3)
3262#define GT_SYNC_STATUS (1 << 2)
3263#define GT_USER_INTERRUPT (1 << 0)
3080 3264
3081#define GTISR 0x44010 3265#define GTISR 0x44010
3082#define GTIMR 0x44014 3266#define GTIMR 0x44014
@@ -3226,15 +3410,15 @@
3226 3410
3227#define _PCH_DPLL_A 0xc6014 3411#define _PCH_DPLL_A 0xc6014
3228#define _PCH_DPLL_B 0xc6018 3412#define _PCH_DPLL_B 0xc6018
3229#define PCH_DPLL(pipe) (pipe == 0 ? _PCH_DPLL_A : _PCH_DPLL_B) 3413#define _PCH_DPLL(pll) (pll == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
3230 3414
3231#define _PCH_FPA0 0xc6040 3415#define _PCH_FPA0 0xc6040
3232#define FP_CB_TUNE (0x3<<22) 3416#define FP_CB_TUNE (0x3<<22)
3233#define _PCH_FPA1 0xc6044 3417#define _PCH_FPA1 0xc6044
3234#define _PCH_FPB0 0xc6048 3418#define _PCH_FPB0 0xc6048
3235#define _PCH_FPB1 0xc604c 3419#define _PCH_FPB1 0xc604c
3236#define PCH_FP0(pipe) (pipe == 0 ? _PCH_FPA0 : _PCH_FPB0) 3420#define _PCH_FP0(pll) (pll == 0 ? _PCH_FPA0 : _PCH_FPB0)
3237#define PCH_FP1(pipe) (pipe == 0 ? _PCH_FPA1 : _PCH_FPB1) 3421#define _PCH_FP1(pll) (pll == 0 ? _PCH_FPA1 : _PCH_FPB1)
3238 3422
3239#define PCH_DPLL_TEST 0xc606c 3423#define PCH_DPLL_TEST 0xc606c
3240 3424
@@ -3329,6 +3513,57 @@
3329#define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B) 3513#define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
3330#define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B) 3514#define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
3331 3515
3516#define VLV_VIDEO_DIP_CTL_A 0x60220
3517#define VLV_VIDEO_DIP_DATA_A 0x60208
3518#define VLV_VIDEO_DIP_GDCP_PAYLOAD_A 0x60210
3519
3520#define VLV_VIDEO_DIP_CTL_B 0x61170
3521#define VLV_VIDEO_DIP_DATA_B 0x61174
3522#define VLV_VIDEO_DIP_GDCP_PAYLOAD_B 0x61178
3523
3524#define VLV_TVIDEO_DIP_CTL(pipe) \
3525 _PIPE(pipe, VLV_VIDEO_DIP_CTL_A, VLV_VIDEO_DIP_CTL_B)
3526#define VLV_TVIDEO_DIP_DATA(pipe) \
3527 _PIPE(pipe, VLV_VIDEO_DIP_DATA_A, VLV_VIDEO_DIP_DATA_B)
3528#define VLV_TVIDEO_DIP_GCP(pipe) \
3529 _PIPE(pipe, VLV_VIDEO_DIP_GDCP_PAYLOAD_A, VLV_VIDEO_DIP_GDCP_PAYLOAD_B)
3530
3531/* Haswell DIP controls */
3532#define HSW_VIDEO_DIP_CTL_A 0x60200
3533#define HSW_VIDEO_DIP_AVI_DATA_A 0x60220
3534#define HSW_VIDEO_DIP_VS_DATA_A 0x60260
3535#define HSW_VIDEO_DIP_SPD_DATA_A 0x602A0
3536#define HSW_VIDEO_DIP_GMP_DATA_A 0x602E0
3537#define HSW_VIDEO_DIP_VSC_DATA_A 0x60320
3538#define HSW_VIDEO_DIP_AVI_ECC_A 0x60240
3539#define HSW_VIDEO_DIP_VS_ECC_A 0x60280
3540#define HSW_VIDEO_DIP_SPD_ECC_A 0x602C0
3541#define HSW_VIDEO_DIP_GMP_ECC_A 0x60300
3542#define HSW_VIDEO_DIP_VSC_ECC_A 0x60344
3543#define HSW_VIDEO_DIP_GCP_A 0x60210
3544
3545#define HSW_VIDEO_DIP_CTL_B 0x61200
3546#define HSW_VIDEO_DIP_AVI_DATA_B 0x61220
3547#define HSW_VIDEO_DIP_VS_DATA_B 0x61260
3548#define HSW_VIDEO_DIP_SPD_DATA_B 0x612A0
3549#define HSW_VIDEO_DIP_GMP_DATA_B 0x612E0
3550#define HSW_VIDEO_DIP_VSC_DATA_B 0x61320
3551#define HSW_VIDEO_DIP_AVI_ECC_B 0x61240
3552#define HSW_VIDEO_DIP_VS_ECC_B 0x61280
3553#define HSW_VIDEO_DIP_SPD_ECC_B 0x612C0
3554#define HSW_VIDEO_DIP_GMP_ECC_B 0x61300
3555#define HSW_VIDEO_DIP_VSC_ECC_B 0x61344
3556#define HSW_VIDEO_DIP_GCP_B 0x61210
3557
3558#define HSW_TVIDEO_DIP_CTL(pipe) \
3559 _PIPE(pipe, HSW_VIDEO_DIP_CTL_A, HSW_VIDEO_DIP_CTL_B)
3560#define HSW_TVIDEO_DIP_AVI_DATA(pipe) \
3561 _PIPE(pipe, HSW_VIDEO_DIP_AVI_DATA_A, HSW_VIDEO_DIP_AVI_DATA_B)
3562#define HSW_TVIDEO_DIP_SPD_DATA(pipe) \
3563 _PIPE(pipe, HSW_VIDEO_DIP_SPD_DATA_A, HSW_VIDEO_DIP_SPD_DATA_B)
3564#define HSW_TVIDEO_DIP_GCP(pipe) \
3565 _PIPE(pipe, HSW_VIDEO_DIP_GCP_A, HSW_VIDEO_DIP_GCP_B)
3566
3332#define _TRANS_HTOTAL_B 0xe1000 3567#define _TRANS_HTOTAL_B 0xe1000
3333#define _TRANS_HBLANK_B 0xe1004 3568#define _TRANS_HBLANK_B 0xe1004
3334#define _TRANS_HSYNC_B 0xe1008 3569#define _TRANS_HSYNC_B 0xe1008
@@ -3489,6 +3724,9 @@
3489#define FDI_LINK_TRAIN_PATTERN_IDLE_CPT (2<<8) 3724#define FDI_LINK_TRAIN_PATTERN_IDLE_CPT (2<<8)
3490#define FDI_LINK_TRAIN_NORMAL_CPT (3<<8) 3725#define FDI_LINK_TRAIN_NORMAL_CPT (3<<8)
3491#define FDI_LINK_TRAIN_PATTERN_MASK_CPT (3<<8) 3726#define FDI_LINK_TRAIN_PATTERN_MASK_CPT (3<<8)
3727/* LPT */
3728#define FDI_PORT_WIDTH_2X_LPT (1<<19)
3729#define FDI_PORT_WIDTH_1X_LPT (0<<19)
3492 3730
3493#define _FDI_RXA_MISC 0xf0010 3731#define _FDI_RXA_MISC 0xf0010
3494#define _FDI_RXB_MISC 0xf1010 3732#define _FDI_RXB_MISC 0xf1010
@@ -3549,6 +3787,7 @@
3549#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16) 3787#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
3550 3788
3551/* or SDVOB */ 3789/* or SDVOB */
3790#define VLV_HDMIB 0x61140
3552#define HDMIB 0xe1140 3791#define HDMIB 0xe1140
3553#define PORT_ENABLE (1 << 31) 3792#define PORT_ENABLE (1 << 31)
3554#define TRANSCODER(pipe) ((pipe) << 30) 3793#define TRANSCODER(pipe) ((pipe) << 30)
@@ -3714,6 +3953,8 @@
3714#define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f<<22) 3953#define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f<<22)
3715 3954
3716#define FORCEWAKE 0xA18C 3955#define FORCEWAKE 0xA18C
3956#define FORCEWAKE_VLV 0x1300b0
3957#define FORCEWAKE_ACK_VLV 0x1300b4
3717#define FORCEWAKE_ACK 0x130090 3958#define FORCEWAKE_ACK 0x130090
3718#define FORCEWAKE_MT 0xa188 /* multi-threaded */ 3959#define FORCEWAKE_MT 0xa188 /* multi-threaded */
3719#define FORCEWAKE_MT_ACK 0x130040 3960#define FORCEWAKE_MT_ACK 0x130040
@@ -3731,6 +3972,7 @@
3731 3972
3732#define GEN6_UCGCTL1 0x9400 3973#define GEN6_UCGCTL1 0x9400
3733# define GEN6_BLBUNIT_CLOCK_GATE_DISABLE (1 << 5) 3974# define GEN6_BLBUNIT_CLOCK_GATE_DISABLE (1 << 5)
3975# define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7)
3734 3976
3735#define GEN6_UCGCTL2 0x9404 3977#define GEN6_UCGCTL2 0x9404
3736# define GEN6_RCZUNIT_CLOCK_GATE_DISABLE (1 << 13) 3978# define GEN6_RCZUNIT_CLOCK_GATE_DISABLE (1 << 13)
@@ -3811,6 +4053,11 @@
3811 GEN6_PM_RP_DOWN_THRESHOLD | \ 4053 GEN6_PM_RP_DOWN_THRESHOLD | \
3812 GEN6_PM_RP_DOWN_TIMEOUT) 4054 GEN6_PM_RP_DOWN_TIMEOUT)
3813 4055
4056#define GEN6_GT_GFX_RC6_LOCKED 0x138104
4057#define GEN6_GT_GFX_RC6 0x138108
4058#define GEN6_GT_GFX_RC6p 0x13810C
4059#define GEN6_GT_GFX_RC6pp 0x138110
4060
3814#define GEN6_PCODE_MAILBOX 0x138124 4061#define GEN6_PCODE_MAILBOX 0x138124
3815#define GEN6_PCODE_READY (1<<31) 4062#define GEN6_PCODE_READY (1<<31)
3816#define GEN6_READ_OC_PARAMS 0xc 4063#define GEN6_READ_OC_PARAMS 0xc
@@ -3870,4 +4117,197 @@
3870#define AUD_CONFIG_PIXEL_CLOCK_HDMI (0xf << 16) 4117#define AUD_CONFIG_PIXEL_CLOCK_HDMI (0xf << 16)
3871#define AUD_CONFIG_DISABLE_NCTS (1 << 3) 4118#define AUD_CONFIG_DISABLE_NCTS (1 << 3)
3872 4119
4120/* HSW Power Wells */
4121#define HSW_PWR_WELL_CTL1 0x45400 /* BIOS */
4122#define HSW_PWR_WELL_CTL2 0x45404 /* Driver */
4123#define HSW_PWR_WELL_CTL3 0x45408 /* KVMR */
4124#define HSW_PWR_WELL_CTL4 0x4540C /* Debug */
4125#define HSW_PWR_WELL_ENABLE (1<<31)
4126#define HSW_PWR_WELL_STATE (1<<30)
4127#define HSW_PWR_WELL_CTL5 0x45410
4128#define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1<<31)
4129#define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1<<20)
4130#define HSW_PWR_WELL_FORCE_ON (1<<19)
4131#define HSW_PWR_WELL_CTL6 0x45414
4132
4133/* Per-pipe DDI Function Control */
4134#define PIPE_DDI_FUNC_CTL_A 0x60400
4135#define PIPE_DDI_FUNC_CTL_B 0x61400
4136#define PIPE_DDI_FUNC_CTL_C 0x62400
4137#define PIPE_DDI_FUNC_CTL_EDP 0x6F400
4138#define DDI_FUNC_CTL(pipe) _PIPE(pipe, \
4139 PIPE_DDI_FUNC_CTL_A, \
4140 PIPE_DDI_FUNC_CTL_B)
4141#define PIPE_DDI_FUNC_ENABLE (1<<31)
4142/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
4143#define PIPE_DDI_PORT_MASK (0xf<<28)
4144#define PIPE_DDI_SELECT_PORT(x) ((x)<<28)
4145#define PIPE_DDI_MODE_SELECT_HDMI (0<<24)
4146#define PIPE_DDI_MODE_SELECT_DVI (1<<24)
4147#define PIPE_DDI_MODE_SELECT_DP_SST (2<<24)
4148#define PIPE_DDI_MODE_SELECT_DP_MST (3<<24)
4149#define PIPE_DDI_MODE_SELECT_FDI (4<<24)
4150#define PIPE_DDI_BPC_8 (0<<20)
4151#define PIPE_DDI_BPC_10 (1<<20)
4152#define PIPE_DDI_BPC_6 (2<<20)
4153#define PIPE_DDI_BPC_12 (3<<20)
4154#define PIPE_DDI_BFI_ENABLE (1<<4)
4155#define PIPE_DDI_PORT_WIDTH_X1 (0<<1)
4156#define PIPE_DDI_PORT_WIDTH_X2 (1<<1)
4157#define PIPE_DDI_PORT_WIDTH_X4 (3<<1)
4158
4159/* DisplayPort Transport Control */
4160#define DP_TP_CTL_A 0x64040
4161#define DP_TP_CTL_B 0x64140
4162#define DP_TP_CTL(port) _PORT(port, \
4163 DP_TP_CTL_A, \
4164 DP_TP_CTL_B)
4165#define DP_TP_CTL_ENABLE (1<<31)
4166#define DP_TP_CTL_MODE_SST (0<<27)
4167#define DP_TP_CTL_MODE_MST (1<<27)
4168#define DP_TP_CTL_ENHANCED_FRAME_ENABLE (1<<18)
4169#define DP_TP_CTL_FDI_AUTOTRAIN (1<<15)
4170#define DP_TP_CTL_LINK_TRAIN_MASK (7<<8)
4171#define DP_TP_CTL_LINK_TRAIN_PAT1 (0<<8)
4172#define DP_TP_CTL_LINK_TRAIN_PAT2 (1<<8)
4173#define DP_TP_CTL_LINK_TRAIN_NORMAL (3<<8)
4174
4175/* DisplayPort Transport Status */
4176#define DP_TP_STATUS_A 0x64044
4177#define DP_TP_STATUS_B 0x64144
4178#define DP_TP_STATUS(port) _PORT(port, \
4179 DP_TP_STATUS_A, \
4180 DP_TP_STATUS_B)
4181#define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12)
4182
4183/* DDI Buffer Control */
4184#define DDI_BUF_CTL_A 0x64000
4185#define DDI_BUF_CTL_B 0x64100
4186#define DDI_BUF_CTL(port) _PORT(port, \
4187 DDI_BUF_CTL_A, \
4188 DDI_BUF_CTL_B)
4189#define DDI_BUF_CTL_ENABLE (1<<31)
4190#define DDI_BUF_EMP_400MV_0DB_HSW (0<<24) /* Sel0 */
4191#define DDI_BUF_EMP_400MV_3_5DB_HSW (1<<24) /* Sel1 */
4192#define DDI_BUF_EMP_400MV_6DB_HSW (2<<24) /* Sel2 */
4193#define DDI_BUF_EMP_400MV_9_5DB_HSW (3<<24) /* Sel3 */
4194#define DDI_BUF_EMP_600MV_0DB_HSW (4<<24) /* Sel4 */
4195#define DDI_BUF_EMP_600MV_3_5DB_HSW (5<<24) /* Sel5 */
4196#define DDI_BUF_EMP_600MV_6DB_HSW (6<<24) /* Sel6 */
4197#define DDI_BUF_EMP_800MV_0DB_HSW (7<<24) /* Sel7 */
4198#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */
4199#define DDI_BUF_EMP_MASK (0xf<<24)
4200#define DDI_BUF_IS_IDLE (1<<7)
4201#define DDI_PORT_WIDTH_X1 (0<<1)
4202#define DDI_PORT_WIDTH_X2 (1<<1)
4203#define DDI_PORT_WIDTH_X4 (3<<1)
4204#define DDI_INIT_DISPLAY_DETECTED (1<<0)
4205
4206/* DDI Buffer Translations */
4207#define DDI_BUF_TRANS_A 0x64E00
4208#define DDI_BUF_TRANS_B 0x64E60
4209#define DDI_BUF_TRANS(port) _PORT(port, \
4210 DDI_BUF_TRANS_A, \
4211 DDI_BUF_TRANS_B)
4212
4213/* Sideband Interface (SBI) is programmed indirectly, via
4214 * SBI_ADDR, which contains the register offset; and SBI_DATA,
4215 * which contains the payload */
4216#define SBI_ADDR 0xC6000
4217#define SBI_DATA 0xC6004
4218#define SBI_CTL_STAT 0xC6008
4219#define SBI_CTL_OP_CRRD (0x6<<8)
4220#define SBI_CTL_OP_CRWR (0x7<<8)
4221#define SBI_RESPONSE_FAIL (0x1<<1)
4222#define SBI_RESPONSE_SUCCESS (0x0<<1)
4223#define SBI_BUSY (0x1<<0)
4224#define SBI_READY (0x0<<0)
4225
4226/* SBI offsets */
4227#define SBI_SSCDIVINTPHASE6 0x0600
4228#define SBI_SSCDIVINTPHASE_DIVSEL_MASK ((0x7f)<<1)
4229#define SBI_SSCDIVINTPHASE_DIVSEL(x) ((x)<<1)
4230#define SBI_SSCDIVINTPHASE_INCVAL_MASK ((0x7f)<<8)
4231#define SBI_SSCDIVINTPHASE_INCVAL(x) ((x)<<8)
4232#define SBI_SSCDIVINTPHASE_DIR(x) ((x)<<15)
4233#define SBI_SSCDIVINTPHASE_PROPAGATE (1<<0)
4234#define SBI_SSCCTL 0x020c
4235#define SBI_SSCCTL6 0x060C
4236#define SBI_SSCCTL_DISABLE (1<<0)
4237#define SBI_SSCAUXDIV6 0x0610
4238#define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4)
4239#define SBI_DBUFF0 0x2a00
4240
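Putting the SBI defines together: an access writes the target offset to SBI_ADDR and the payload to SBI_DATA, kicks off a CRRD or CRWR op in SBI_CTL_STAT, then polls SBI_BUSY. A hedged sketch of a write helper along those lines, assuming the offset belongs in the upper 16 bits of SBI_ADDR and the usual jiffies-based timeout idiom:

    /* Sketch of an SBI register write; the offset-in-upper-16-bits layout
     * and the I915_READ/I915_WRITE mmio helpers are assumptions. */
    static int intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value)
    {
        unsigned long timeout = jiffies + msecs_to_jiffies(100);

        while (I915_READ(SBI_CTL_STAT) & SBI_BUSY)  /* drain any previous op */
            if (time_after(jiffies, timeout))
                return -ETIMEDOUT;

        I915_WRITE(SBI_ADDR, (u32)reg << 16);
        I915_WRITE(SBI_DATA, value);
        I915_WRITE(SBI_CTL_STAT, SBI_CTL_OP_CRWR | SBI_BUSY);

        while (I915_READ(SBI_CTL_STAT) & SBI_BUSY)  /* wait for completion */
            if (time_after(jiffies, timeout))
                return -ETIMEDOUT;

        return (I915_READ(SBI_CTL_STAT) & SBI_RESPONSE_FAIL) ? -EIO : 0;
    }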
4241/* LPT PIXCLK_GATE */
4242#define PIXCLK_GATE 0xC6020
4243#define PIXCLK_GATE_UNGATE (1<<0)
4244#define PIXCLK_GATE_GATE (0<<0)
4245
4246/* SPLL */
4247#define SPLL_CTL 0x46020
4248#define SPLL_PLL_ENABLE (1<<31)
4249#define SPLL_PLL_SSC (1<<28)
4250#define SPLL_PLL_NON_SSC (2<<28)
4251#define SPLL_PLL_FREQ_810MHz (0<<26)
4252#define SPLL_PLL_FREQ_1350MHz (1<<26)
4253
4254/* WRPLL */
4255#define WRPLL_CTL1 0x46040
4256#define WRPLL_CTL2 0x46060
4257#define WRPLL_PLL_ENABLE (1<<31)
4258#define WRPLL_PLL_SELECT_SSC (0x01<<28)
4259#define WRPLL_PLL_SELECT_NON_SSC (0x02<<28)
4260#define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28)
4261/* WRPLL divider programming */
4262#define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0)
4263#define WRPLL_DIVIDER_POST(x) ((x)<<8)
4264#define WRPLL_DIVIDER_FEEDBACK(x) ((x)<<16)
4265
4266/* Port clock selection */
4267#define PORT_CLK_SEL_A 0x46100
4268#define PORT_CLK_SEL_B 0x46104
4269#define PORT_CLK_SEL(port) _PORT(port, \
4270 PORT_CLK_SEL_A, \
4271 PORT_CLK_SEL_B)
4272#define PORT_CLK_SEL_LCPLL_2700 (0<<29)
4273#define PORT_CLK_SEL_LCPLL_1350 (1<<29)
4274#define PORT_CLK_SEL_LCPLL_810 (2<<29)
4275#define PORT_CLK_SEL_SPLL (3<<29)
4276#define PORT_CLK_SEL_WRPLL1 (4<<29)
4277#define PORT_CLK_SEL_WRPLL2 (5<<29)
4278
4279/* Pipe clock selection */
4280#define PIPE_CLK_SEL_A 0x46140
4281#define PIPE_CLK_SEL_B 0x46144
4282#define PIPE_CLK_SEL(pipe) _PIPE(pipe, \
4283 PIPE_CLK_SEL_A, \
4284 PIPE_CLK_SEL_B)
4285/* For each pipe, we need to select the corresponding port clock */
4286#define PIPE_CLK_SEL_DISABLED (0x0<<29)
4287#define PIPE_CLK_SEL_PORT(x) (((x)+1)<<29)
4288
4289/* LCPLL Control */
4290#define LCPLL_CTL 0x130040
4291#define LCPLL_PLL_DISABLE (1<<31)
4292#define LCPLL_PLL_LOCK (1<<30)
4293#define LCPLL_CD_CLOCK_DISABLE (1<<25)
4294#define LCPLL_CD2X_CLOCK_DISABLE (1<<23)
4295
4296/* Pipe WM_LINETIME - watermark line time */
4297#define PIPE_WM_LINETIME_A 0x45270
4298#define PIPE_WM_LINETIME_B 0x45274
4299#define PIPE_WM_LINETIME(pipe) _PIPE(pipe, \
4300 PIPE_WM_LINETIME_A, \
4301 PIPE_WM_LINETIME_B)
4302#define PIPE_WM_LINETIME_MASK (0x1ff)
4303#define PIPE_WM_LINETIME_TIME(x) ((x))
4304#define PIPE_WM_LINETIME_IPS_LINETIME_MASK (0x1ff<<16)
4305#define PIPE_WM_LINETIME_IPS_LINETIME(x) ((x)<<16)
4306
4307/* SFUSE_STRAP */
4308#define SFUSE_STRAP 0xc2014
4309#define SFUSE_STRAP_DDIB_DETECTED (1<<2)
4310#define SFUSE_STRAP_DDIC_DETECTED (1<<1)
4311#define SFUSE_STRAP_DDID_DETECTED (1<<0)
4312
3873#endif /* _I915_REG_H_ */ 4313#endif /* _I915_REG_H_ */
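Most of the paired registers added above (DP_TP_CTL, DP_TP_STATUS, DDI_BUF_CTL, PORT_CLK_SEL, ...) are indexed through the _PIPE()/_PORT() helpers defined near the top of this header, which interpolate linearly between the A and B instances; that interpolation form is assumed here rather than shown in this hunk. A small runnable check of the arithmetic for DP_TP_CTL:

    #include <stdio.h>

    /* interpolation form assumed from i915_reg.h */
    #define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
    #define _PORT(port, a, b) ((a) + (port)*((b)-(a)))

    #define DP_TP_CTL_A 0x64040
    #define DP_TP_CTL_B 0x64140
    #define DP_TP_CTL(port) _PORT(port, DP_TP_CTL_A, DP_TP_CTL_B)

    int main(void)
    {
        int port;

        /* A and B fix the 0x100 stride; C..E then fall out of the formula */
        for (port = 0; port < 5; port++)
            printf("DP_TP_CTL(%d) = 0x%05x\n", port, DP_TP_CTL(port));
        return 0;
    }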
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 2b5eb229ff2c..0ede02a99d91 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -40,7 +40,7 @@ static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
40 return false; 40 return false;
41 41
42 if (HAS_PCH_SPLIT(dev)) 42 if (HAS_PCH_SPLIT(dev))
43 dpll_reg = PCH_DPLL(pipe); 43 dpll_reg = _PCH_DPLL(pipe);
44 else 44 else
45 dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B; 45 dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B;
46 46
@@ -876,22 +876,6 @@ int i915_restore_state(struct drm_device *dev)
876 I915_WRITE(IER, dev_priv->saveIER); 876 I915_WRITE(IER, dev_priv->saveIER);
877 I915_WRITE(IMR, dev_priv->saveIMR); 877 I915_WRITE(IMR, dev_priv->saveIMR);
878 } 878 }
879 mutex_unlock(&dev->struct_mutex);
880
881 if (drm_core_check_feature(dev, DRIVER_MODESET))
882 intel_init_clock_gating(dev);
883
884 if (IS_IRONLAKE_M(dev)) {
885 ironlake_enable_drps(dev);
886 intel_init_emon(dev);
887 }
888
889 if (INTEL_INFO(dev)->gen >= 6) {
890 gen6_enable_rps(dev_priv);
891 gen6_update_ring_freq(dev_priv);
892 }
893
894 mutex_lock(&dev->struct_mutex);
895 879
896 /* Cache mode state */ 880 /* Cache mode state */
897 I915_WRITE(CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000); 881 I915_WRITE(CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
new file mode 100644
index 000000000000..79f83445afa0
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -0,0 +1,111 @@
1/*
2 * Copyright © 2012 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Ben Widawsky <ben@bwidawsk.net>
25 *
26 */
27
28#include <linux/device.h>
29#include <linux/module.h>
30#include <linux/stat.h>
31#include <linux/sysfs.h>
32#include "i915_drv.h"
33
34static u32 calc_residency(struct drm_device *dev, const u32 reg)
35{
36 struct drm_i915_private *dev_priv = dev->dev_private;
37 u64 raw_time; /* 32b value may overflow during fixed point math */
38
39 if (!intel_enable_rc6(dev))
40 return 0;
41
42 raw_time = I915_READ(reg) * 128ULL;
43 return DIV_ROUND_UP_ULL(raw_time, 100000);
44}
45
46static ssize_t
47show_rc6_mask(struct device *dev, struct device_attribute *attr, char *buf)
48{
49 struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
50 return snprintf(buf, PAGE_SIZE, "%x", intel_enable_rc6(dminor->dev));
51}
52
53static ssize_t
54show_rc6_ms(struct device *dev, struct device_attribute *attr, char *buf)
55{
56 struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
57 u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
58 return snprintf(buf, PAGE_SIZE, "%u", rc6_residency);
59}
60
61static ssize_t
62show_rc6p_ms(struct device *dev, struct device_attribute *attr, char *buf)
63{
64 struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
65 u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
66 return snprintf(buf, PAGE_SIZE, "%u", rc6p_residency);
67}
68
69static ssize_t
70show_rc6pp_ms(struct device *dev, struct device_attribute *attr, char *buf)
71{
72 struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
73 u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
74 return snprintf(buf, PAGE_SIZE, "%u", rc6pp_residency);
75}
76
77static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
78static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL);
79static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL);
80static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);
81
82static struct attribute *rc6_attrs[] = {
83 &dev_attr_rc6_enable.attr,
84 &dev_attr_rc6_residency_ms.attr,
85 &dev_attr_rc6p_residency_ms.attr,
86 &dev_attr_rc6pp_residency_ms.attr,
87 NULL
88};
89
90static struct attribute_group rc6_attr_group = {
91 .name = power_group_name,
92 .attrs = rc6_attrs
93};
94
95void i915_setup_sysfs(struct drm_device *dev)
96{
97 int ret;
98
99 /* ILK doesn't have any residency information */
100 if (INTEL_INFO(dev)->gen < 6)
101 return;
102
103 ret = sysfs_merge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
104 if (ret)
105 DRM_ERROR("sysfs setup failed\n");
106}
107
108void i915_teardown_sysfs(struct drm_device *dev)
109{
110 sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
111}
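The attribute group is merged into the device's existing power group, so the files surface as e.g. /sys/class/drm/card0/power/rc6_residency_ms. calc_residency() above converts the raw counter on the assumption that one RC6 counter tick is 1.28 us, i.e. ms = round-up(raw * 128 / 100000). A standalone check of that fixed-point conversion:

    #include <stdint.h>
    #include <stdio.h>

    /* mirrors calc_residency(): ms = DIV_ROUND_UP(raw * 128, 100000) */
    static uint32_t rc6_ms(uint32_t raw)
    {
        uint64_t t = (uint64_t)raw * 128;  /* u64: the 32b value may overflow */
        return (uint32_t)((t + 100000 - 1) / 100000);
    }

    int main(void)
    {
        printf("%u\n", rc6_ms(781250));      /* 781250 ticks * 1.28us = 1000 ms */
        printf("%u\n", rc6_ms(0xffffffffu)); /* the overflow case the u64 guards */
        return 0;
    }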
diff --git a/drivers/gpu/drm/i915/i915_trace_points.c b/drivers/gpu/drm/i915/i915_trace_points.c
index ead876eb6ea0..f1df2bd4ecf4 100644
--- a/drivers/gpu/drm/i915/i915_trace_points.c
+++ b/drivers/gpu/drm/i915/i915_trace_points.c
@@ -7,5 +7,7 @@
7 7
8#include "i915_drv.h" 8#include "i915_drv.h"
9 9
10#ifndef __CHECKER__
10#define CREATE_TRACE_POINTS 11#define CREATE_TRACE_POINTS
11#include "i915_trace.h" 12#include "i915_trace.h"
13#endif
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
index bae3edf956a4..f413899475e9 100644
--- a/drivers/gpu/drm/i915/intel_acpi.c
+++ b/drivers/gpu/drm/i915/intel_acpi.c
@@ -9,6 +9,7 @@
9#include <acpi/acpi_drivers.h> 9#include <acpi/acpi_drivers.h>
10 10
11#include "drmP.h" 11#include "drmP.h"
12#include "i915_drv.h"
12 13
13#define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */ 14#define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */
14 15
@@ -182,8 +183,6 @@ static void intel_dsm_platform_mux_info(void)
182 DRM_DEBUG_DRIVER(" hpd mux info: %s\n", 183 DRM_DEBUG_DRIVER(" hpd mux info: %s\n",
183 intel_dsm_mux_type(info->buffer.pointer[3])); 184 intel_dsm_mux_type(info->buffer.pointer[3]));
184 } 185 }
185 } else {
186 DRM_ERROR("MUX INFO call failed\n");
187 } 186 }
188 187
189out: 188out:
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index b48fc2a8410c..353459362f6f 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -174,6 +174,28 @@ get_lvds_dvo_timing(const struct bdb_lvds_lfp_data *lvds_lfp_data,
174 return (struct lvds_dvo_timing *)(entry + dvo_timing_offset); 174 return (struct lvds_dvo_timing *)(entry + dvo_timing_offset);
175} 175}
176 176
177/* get lvds_fp_timing entry
178 * this function may return NULL if the corresponding entry is invalid
179 */
180static const struct lvds_fp_timing *
181get_lvds_fp_timing(const struct bdb_header *bdb,
182 const struct bdb_lvds_lfp_data *data,
183 const struct bdb_lvds_lfp_data_ptrs *ptrs,
184 int index)
185{
186 size_t data_ofs = (const u8 *)data - (const u8 *)bdb;
187 u16 data_size = ((const u16 *)data)[-1]; /* stored in header */
188 size_t ofs;
189
190 if (index >= ARRAY_SIZE(ptrs->ptr))
191 return NULL;
192 ofs = ptrs->ptr[index].fp_timing_offset;
193 if (ofs < data_ofs ||
194 ofs + sizeof(struct lvds_fp_timing) > data_ofs + data_size)
195 return NULL;
196 return (const struct lvds_fp_timing *)((const u8 *)bdb + ofs);
197}
198
177/* Try to find integrated panel data */ 199/* Try to find integrated panel data */
178static void 200static void
179parse_lfp_panel_data(struct drm_i915_private *dev_priv, 201parse_lfp_panel_data(struct drm_i915_private *dev_priv,
@@ -183,6 +205,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
183 const struct bdb_lvds_lfp_data *lvds_lfp_data; 205 const struct bdb_lvds_lfp_data *lvds_lfp_data;
184 const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs; 206 const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs;
185 const struct lvds_dvo_timing *panel_dvo_timing; 207 const struct lvds_dvo_timing *panel_dvo_timing;
208 const struct lvds_fp_timing *fp_timing;
186 struct drm_display_mode *panel_fixed_mode; 209 struct drm_display_mode *panel_fixed_mode;
187 int i, downclock; 210 int i, downclock;
188 211
@@ -244,6 +267,19 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
244 "Normal Clock %dKHz, downclock %dKHz\n", 267 "Normal Clock %dKHz, downclock %dKHz\n",
245 panel_fixed_mode->clock, 10*downclock); 268 panel_fixed_mode->clock, 10*downclock);
246 } 269 }
270
271 fp_timing = get_lvds_fp_timing(bdb, lvds_lfp_data,
272 lvds_lfp_data_ptrs,
273 lvds_options->panel_type);
274 if (fp_timing) {
275 /* check the resolution, just to be sure */
276 if (fp_timing->x_res == panel_fixed_mode->hdisplay &&
277 fp_timing->y_res == panel_fixed_mode->vdisplay) {
278 dev_priv->bios_lvds_val = fp_timing->lvds_reg_val;
279 DRM_DEBUG_KMS("VBT initial LVDS value %x\n",
280 dev_priv->bios_lvds_val);
281 }
282 }
247} 283}
248 284
249/* Try to find sdvo panel data */ 285/* Try to find sdvo panel data */
@@ -256,6 +292,11 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
256 int index; 292 int index;
257 293
258 index = i915_vbt_sdvo_panel_type; 294 index = i915_vbt_sdvo_panel_type;
295 if (index == -2) {
296 DRM_DEBUG_KMS("Ignore SDVO panel mode from BIOS VBT tables.\n");
297 return;
298 }
299
259 if (index == -1) { 300 if (index == -1) {
260 struct bdb_sdvo_lvds_options *sdvo_lvds_options; 301 struct bdb_sdvo_lvds_options *sdvo_lvds_options;
261 302
@@ -332,11 +373,11 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
332 if (block_size >= sizeof(*general)) { 373 if (block_size >= sizeof(*general)) {
333 int bus_pin = general->crt_ddc_gmbus_pin; 374 int bus_pin = general->crt_ddc_gmbus_pin;
334 DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin); 375 DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
335 if (bus_pin >= 1 && bus_pin <= 6) 376 if (intel_gmbus_is_port_valid(bus_pin))
336 dev_priv->crt_ddc_pin = bus_pin; 377 dev_priv->crt_ddc_pin = bus_pin;
337 } else { 378 } else {
338 DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n", 379 DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n",
339 block_size); 380 block_size);
340 } 381 }
341 } 382 }
342} 383}
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 90b9793fd5da..75a70c46ef1b 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -55,18 +55,36 @@ static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
55 struct intel_crt, base); 55 struct intel_crt, base);
56} 56}
57 57
58static void intel_crt_dpms(struct drm_encoder *encoder, int mode) 58static void pch_crt_dpms(struct drm_encoder *encoder, int mode)
59{ 59{
60 struct drm_device *dev = encoder->dev; 60 struct drm_device *dev = encoder->dev;
61 struct drm_i915_private *dev_priv = dev->dev_private; 61 struct drm_i915_private *dev_priv = dev->dev_private;
62 u32 temp, reg; 62 u32 temp;
63 63
64 if (HAS_PCH_SPLIT(dev)) 64 temp = I915_READ(PCH_ADPA);
65 reg = PCH_ADPA; 65 temp &= ~ADPA_DAC_ENABLE;
66 else 66
67 reg = ADPA; 67 switch (mode) {
68 case DRM_MODE_DPMS_ON:
69 temp |= ADPA_DAC_ENABLE;
70 break;
71 case DRM_MODE_DPMS_STANDBY:
72 case DRM_MODE_DPMS_SUSPEND:
73 case DRM_MODE_DPMS_OFF:
74 /* Just leave port enable cleared */
75 break;
76 }
77
78 I915_WRITE(PCH_ADPA, temp);
79}
68 80
69 temp = I915_READ(reg); 81static void gmch_crt_dpms(struct drm_encoder *encoder, int mode)
82{
83 struct drm_device *dev = encoder->dev;
84 struct drm_i915_private *dev_priv = dev->dev_private;
85 u32 temp;
86
87 temp = I915_READ(ADPA);
70 temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE); 88 temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
71 temp &= ~ADPA_DAC_ENABLE; 89 temp &= ~ADPA_DAC_ENABLE;
72 90
@@ -85,7 +103,7 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
85 break; 103 break;
86 } 104 }
87 105
88 I915_WRITE(reg, temp); 106 I915_WRITE(ADPA, temp);
89} 107}
90 108
91static int intel_crt_mode_valid(struct drm_connector *connector, 109static int intel_crt_mode_valid(struct drm_connector *connector,
@@ -278,9 +296,10 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
278 if (intel_ddc_probe(&crt->base, dev_priv->crt_ddc_pin)) { 296 if (intel_ddc_probe(&crt->base, dev_priv->crt_ddc_pin)) {
279 struct edid *edid; 297 struct edid *edid;
280 bool is_digital = false; 298 bool is_digital = false;
299 struct i2c_adapter *i2c;
281 300
282 edid = drm_get_edid(connector, 301 i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin);
283 &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter); 302 edid = drm_get_edid(connector, i2c);
284 /* 303 /*
285 * This may be a DVI-I connector with a shared DDC 304 * This may be a DVI-I connector with a shared DDC
286 * link between analog and digital outputs, so we 305 * link between analog and digital outputs, so we
@@ -476,15 +495,16 @@ static int intel_crt_get_modes(struct drm_connector *connector)
476 struct drm_device *dev = connector->dev; 495 struct drm_device *dev = connector->dev;
477 struct drm_i915_private *dev_priv = dev->dev_private; 496 struct drm_i915_private *dev_priv = dev->dev_private;
478 int ret; 497 int ret;
498 struct i2c_adapter *i2c;
479 499
480 ret = intel_ddc_get_modes(connector, 500 i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin);
481 &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter); 501 ret = intel_ddc_get_modes(connector, i2c);
482 if (ret || !IS_G4X(dev)) 502 if (ret || !IS_G4X(dev))
483 return ret; 503 return ret;
484 504
485 /* Try to probe digital port for output in DVI-I -> VGA mode. */ 505 /* Try to probe digital port for output in DVI-I -> VGA mode. */
486 return intel_ddc_get_modes(connector, 506 i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB);
487 &dev_priv->gmbus[GMBUS_PORT_DPB].adapter); 507 return intel_ddc_get_modes(connector, i2c);
488} 508}
489 509
490static int intel_crt_set_property(struct drm_connector *connector, 510static int intel_crt_set_property(struct drm_connector *connector,
@@ -507,12 +527,20 @@ static void intel_crt_reset(struct drm_connector *connector)
507 * Routines for controlling stuff on the analog port 527 * Routines for controlling stuff on the analog port
508 */ 528 */
509 529
510static const struct drm_encoder_helper_funcs intel_crt_helper_funcs = { 530static const struct drm_encoder_helper_funcs pch_encoder_funcs = {
511 .dpms = intel_crt_dpms,
512 .mode_fixup = intel_crt_mode_fixup, 531 .mode_fixup = intel_crt_mode_fixup,
513 .prepare = intel_encoder_prepare, 532 .prepare = intel_encoder_prepare,
514 .commit = intel_encoder_commit, 533 .commit = intel_encoder_commit,
515 .mode_set = intel_crt_mode_set, 534 .mode_set = intel_crt_mode_set,
535 .dpms = pch_crt_dpms,
536};
537
538static const struct drm_encoder_helper_funcs gmch_encoder_funcs = {
539 .mode_fixup = intel_crt_mode_fixup,
540 .prepare = intel_encoder_prepare,
541 .commit = intel_encoder_commit,
542 .mode_set = intel_crt_mode_set,
543 .dpms = gmch_crt_dpms,
516}; 544};
517 545
518static const struct drm_connector_funcs intel_crt_connector_funcs = { 546static const struct drm_connector_funcs intel_crt_connector_funcs = {
@@ -536,7 +564,7 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = {
536 564
537static int __init intel_no_crt_dmi_callback(const struct dmi_system_id *id) 565static int __init intel_no_crt_dmi_callback(const struct dmi_system_id *id)
538{ 566{
539 DRM_DEBUG_KMS("Skipping CRT initialization for %s\n", id->ident); 567 DRM_INFO("Skipping CRT initialization for %s\n", id->ident);
540 return 1; 568 return 1;
541} 569}
542 570
@@ -558,6 +586,7 @@ void intel_crt_init(struct drm_device *dev)
558 struct intel_crt *crt; 586 struct intel_crt *crt;
559 struct intel_connector *intel_connector; 587 struct intel_connector *intel_connector;
560 struct drm_i915_private *dev_priv = dev->dev_private; 588 struct drm_i915_private *dev_priv = dev->dev_private;
589 const struct drm_encoder_helper_funcs *encoder_helper_funcs;
561 590
562 /* Skip machines without VGA that falsely report hotplug events */ 591 /* Skip machines without VGA that falsely report hotplug events */
563 if (dmi_check_system(intel_no_crt)) 592 if (dmi_check_system(intel_no_crt))
@@ -586,14 +615,23 @@ void intel_crt_init(struct drm_device *dev)
586 crt->base.clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT | 615 crt->base.clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT |
587 1 << INTEL_ANALOG_CLONE_BIT | 616 1 << INTEL_ANALOG_CLONE_BIT |
588 1 << INTEL_SDVO_LVDS_CLONE_BIT); 617 1 << INTEL_SDVO_LVDS_CLONE_BIT);
589 crt->base.crtc_mask = (1 << 0) | (1 << 1); 618 if (IS_HASWELL(dev))
619 crt->base.crtc_mask = (1 << 0);
620 else
621 crt->base.crtc_mask = (1 << 0) | (1 << 1);
622
590 if (IS_GEN2(dev)) 623 if (IS_GEN2(dev))
591 connector->interlace_allowed = 0; 624 connector->interlace_allowed = 0;
592 else 625 else
593 connector->interlace_allowed = 1; 626 connector->interlace_allowed = 1;
594 connector->doublescan_allowed = 0; 627 connector->doublescan_allowed = 0;
595 628
596 drm_encoder_helper_add(&crt->base.base, &intel_crt_helper_funcs); 629 if (HAS_PCH_SPLIT(dev))
630 encoder_helper_funcs = &pch_encoder_funcs;
631 else
632 encoder_helper_funcs = &gmch_encoder_funcs;
633
634 drm_encoder_helper_add(&crt->base.base, encoder_helper_funcs);
597 drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); 635 drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
598 636
599 drm_sysfs_connector_add(connector); 637 drm_sysfs_connector_add(connector);
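The CRT hunks above stop indexing dev_priv->gmbus[] directly and fetch the adapter through intel_gmbus_get_adapter(), pairing with the intel_gmbus_is_port_valid() check used in the intel_bios.c hunk. The helper itself is outside this excerpt; a plausible sketch, with the array layout (indexed by port number minus one) an assumption:

    /* Hypothetical sketch of the accessor used above; the gmbus[] layout
     * is an assumption, not confirmed by this diff. */
    struct i2c_adapter *intel_gmbus_get_adapter(struct drm_i915_private *dev_priv,
                                                unsigned port)
    {
        WARN_ON(!intel_gmbus_is_port_valid(port));
        return &dev_priv->gmbus[port - 1].adapter;
    }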
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
new file mode 100644
index 000000000000..46d1e886c692
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -0,0 +1,755 @@
1/*
2 * Copyright © 2012 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eugeni Dodonov <eugeni.dodonov@intel.com>
25 *
26 */
27
28#include "i915_drv.h"
29#include "intel_drv.h"
30
31/* HDMI/DVI modes ignore everything but the last 2 items. So we share
32 * them for both DP and FDI transports, allowing those ports to
33 * automatically adapt to HDMI connections as well
34 */
35static const u32 hsw_ddi_translations_dp[] = {
36 0x00FFFFFF, 0x0006000E, /* DP parameters */
37 0x00D75FFF, 0x0005000A,
38 0x00C30FFF, 0x00040006,
39 0x80AAAFFF, 0x000B0000,
40 0x00FFFFFF, 0x0005000A,
41 0x00D75FFF, 0x000C0004,
42 0x80C30FFF, 0x000B0000,
43 0x00FFFFFF, 0x00040006,
44 0x80D75FFF, 0x000B0000,
45 0x00FFFFFF, 0x00040006 /* HDMI parameters */
46};
47
48static const u32 hsw_ddi_translations_fdi[] = {
49 0x00FFFFFF, 0x0007000E, /* FDI parameters */
50 0x00D75FFF, 0x000F000A,
51 0x00C30FFF, 0x00060006,
52 0x00AAAFFF, 0x001E0000,
53 0x00FFFFFF, 0x000F000A,
54 0x00D75FFF, 0x00160004,
55 0x00C30FFF, 0x001E0000,
56 0x00FFFFFF, 0x00060006,
57 0x00D75FFF, 0x001E0000,
58 0x00FFFFFF, 0x00040006 /* HDMI parameters */
59};
60
61/* On Haswell, DDI port buffers must be programmed with correct values
62 * in advance. The buffer values are different for FDI and DP modes,
63 * but the HDMI/DVI fields are shared among those. So we program the DDI
64 * in either FDI or DP modes only, as HDMI connections will work with both
65 * of those
66 */
67void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port, bool use_fdi_mode)
68{
69 struct drm_i915_private *dev_priv = dev->dev_private;
70 u32 reg;
71 int i;
72 const u32 *ddi_translations = ((use_fdi_mode) ?
73 hsw_ddi_translations_fdi :
74 hsw_ddi_translations_dp);
75
76 DRM_DEBUG_DRIVER("Initializing DDI buffers for port %c in %s mode\n",
77 port_name(port),
78 use_fdi_mode ? "FDI" : "DP");
79
80 WARN((use_fdi_mode && (port != PORT_E)),
81 "Programming port %c in FDI mode, this probably will not work.\n",
82 port_name(port));
83
84 for (i = 0, reg = DDI_BUF_TRANS(port); i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
85 I915_WRITE(reg, ddi_translations[i]);
86 reg += 4;
87 }
88}
89
90/* Program DDI buffers translations for DP. By default, program ports A-D in DP
91 * mode and port E for FDI.
92 */
93void intel_prepare_ddi(struct drm_device *dev)
94{
95 int port;
96
97 if (IS_HASWELL(dev)) {
98 for (port = PORT_A; port < PORT_E; port++)
99 intel_prepare_ddi_buffers(dev, port, false);
100
101 /* DDI E is the suggested port for FDI mode, so program it as such by
102 * default. It will have to be re-programmed if a DP output is
103 * detected on it
104 */
105 intel_prepare_ddi_buffers(dev, PORT_E, true);
106 }
107}
108
109static const long hsw_ddi_buf_ctl_values[] = {
110 DDI_BUF_EMP_400MV_0DB_HSW,
111 DDI_BUF_EMP_400MV_3_5DB_HSW,
112 DDI_BUF_EMP_400MV_6DB_HSW,
113 DDI_BUF_EMP_400MV_9_5DB_HSW,
114 DDI_BUF_EMP_600MV_0DB_HSW,
115 DDI_BUF_EMP_600MV_3_5DB_HSW,
116 DDI_BUF_EMP_600MV_6DB_HSW,
117 DDI_BUF_EMP_800MV_0DB_HSW,
118 DDI_BUF_EMP_800MV_3_5DB_HSW
119};
120
121
122/* Starting with Haswell, different DDI ports can work in FDI mode for
123 * connection to the PCH-located connectors. For this, it is necessary to train
124 * both the DDI port and PCH receiver for the desired DDI buffer settings.
125 *
126 * The recommended port to work in FDI mode is DDI E, which we use here. Also,
127 * please note that when FDI mode is active on DDI E, it shares 2 lines with
128 * DDI A (which is used for eDP)
129 */
130
131void hsw_fdi_link_train(struct drm_crtc *crtc)
132{
133 struct drm_device *dev = crtc->dev;
134 struct drm_i915_private *dev_priv = dev->dev_private;
135 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
136 int pipe = intel_crtc->pipe;
137 u32 reg, temp, i;
138
139 /* Configure CPU PLL, wait for warmup */
140 I915_WRITE(SPLL_CTL,
141 SPLL_PLL_ENABLE |
142 SPLL_PLL_FREQ_1350MHz |
143 SPLL_PLL_SCC);
144
145 /* Use SPLL to drive the output when in FDI mode */
146 I915_WRITE(PORT_CLK_SEL(PORT_E),
147 PORT_CLK_SEL_SPLL);
148 I915_WRITE(PIPE_CLK_SEL(pipe),
149 PIPE_CLK_SEL_PORT(PORT_E));
150
151 udelay(20);
152
153 /* Start the training iterating through available voltages and emphasis */
154 for (i = 0; i < ARRAY_SIZE(hsw_ddi_buf_ctl_values); i++) {
155 /* Configure DP_TP_CTL with auto-training */
156 I915_WRITE(DP_TP_CTL(PORT_E),
157 DP_TP_CTL_FDI_AUTOTRAIN |
158 DP_TP_CTL_ENHANCED_FRAME_ENABLE |
159 DP_TP_CTL_LINK_TRAIN_PAT1 |
160 DP_TP_CTL_ENABLE);
161
162 /* Configure and enable DDI_BUF_CTL for DDI E with next voltage */
163 temp = I915_READ(DDI_BUF_CTL(PORT_E));
164 temp = (temp & ~DDI_BUF_EMP_MASK);
165 I915_WRITE(DDI_BUF_CTL(PORT_E),
166 temp |
167 DDI_BUF_CTL_ENABLE |
168 DDI_PORT_WIDTH_X2 |
169 hsw_ddi_buf_ctl_values[i]);
170
171 udelay(600);
172
173 /* Enable CPU FDI Receiver with auto-training */
174 reg = FDI_RX_CTL(pipe);
175 I915_WRITE(reg,
176 I915_READ(reg) |
177 FDI_LINK_TRAIN_AUTO |
178 FDI_RX_ENABLE |
179 FDI_LINK_TRAIN_PATTERN_1_CPT |
180 FDI_RX_ENHANCE_FRAME_ENABLE |
181 FDI_PORT_WIDTH_2X_LPT |
182 FDI_RX_PLL_ENABLE);
183 POSTING_READ(reg);
184 udelay(100);
185
186 temp = I915_READ(DP_TP_STATUS(PORT_E));
187 if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
188 DRM_DEBUG_DRIVER("BUF_CTL training done on %d step\n", i);
189
190 /* Enable normal pixel sending for FDI */
191 I915_WRITE(DP_TP_CTL(PORT_E),
192 DP_TP_CTL_FDI_AUTOTRAIN |
193 DP_TP_CTL_LINK_TRAIN_NORMAL |
194 DP_TP_CTL_ENHANCED_FRAME_ENABLE |
195 DP_TP_CTL_ENABLE);
196
197 /* Enable PIPE_DDI_FUNC_CTL for the pipe to work in FDI mode */
198 temp = I915_READ(DDI_FUNC_CTL(pipe));
199 temp &= ~PIPE_DDI_PORT_MASK;
200 temp |= PIPE_DDI_SELECT_PORT(PORT_E) |
201 PIPE_DDI_MODE_SELECT_FDI |
202 PIPE_DDI_FUNC_ENABLE |
203 PIPE_DDI_PORT_WIDTH_X2;
204 I915_WRITE(DDI_FUNC_CTL(pipe),
205 temp);
206 break;
207 } else {
208 DRM_ERROR("Error training BUF_CTL %d\n", i);
209
210 /* Disable DP_TP_CTL and FDI_RX_CTL and retry */
211 I915_WRITE(DP_TP_CTL(PORT_E),
212 I915_READ(DP_TP_CTL(PORT_E)) &
213 ~DP_TP_CTL_ENABLE);
214 I915_WRITE(FDI_RX_CTL(pipe),
215 I915_READ(FDI_RX_CTL(pipe)) &
216 ~FDI_RX_PLL_ENABLE);
217 continue;
218 }
219 }
220
221 DRM_DEBUG_KMS("FDI train done.\n");
222}
223
224/* For DDI connections, it is possible to support different outputs over the
225 * same DDI port, such as HDMI or DP or even VGA via FDI. So we don't know by
226 * the time the output is detected what exactly is on the other end of it. This
227 * function aims at providing support for this detection and proper output
228 * configuration.
229 */
230void intel_ddi_init(struct drm_device *dev, enum port port)
231{
232 /* For now, we don't do any proper output detection and assume that we
233 * handle HDMI only */
234
235 switch (port) {
236 case PORT_A:
237 /* We don't handle eDP and DP yet */
238 DRM_DEBUG_DRIVER("Found digital output on DDI port A\n");
239 break;
240 /* Assume that the ports B, C and D are working in HDMI mode for now */
241 case PORT_B:
242 case PORT_C:
243 case PORT_D:
244 intel_hdmi_init(dev, DDI_BUF_CTL(port));
245 break;
246 default:
247 DRM_DEBUG_DRIVER("No handlers defined for port %d, skipping DDI initialization\n",
248 port);
249 break;
250 }
251}
252
253/* WRPLL clock dividers */
254struct wrpll_tmds_clock {
255 u32 clock;
256 u16 p; /* Post divider */
257 u16 n2; /* Feedback divider */
258 u16 r2; /* Reference divider */
259};
260
261/* Table of matching values for WRPLL clocks programming for each frequency */
262static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
263 {19750, 38, 25, 18},
264 {20000, 48, 32, 18},
265 {21000, 36, 21, 15},
266 {21912, 42, 29, 17},
267 {22000, 36, 22, 15},
268 {23000, 36, 23, 15},
269 {23500, 40, 40, 23},
270 {23750, 26, 16, 14},
271 {23750, 26, 16, 14},
272 {24000, 36, 24, 15},
273 {25000, 36, 25, 15},
274 {25175, 26, 40, 33},
275 {25200, 30, 21, 15},
276 {26000, 36, 26, 15},
277 {27000, 30, 21, 14},
278 {27027, 18, 100, 111},
279 {27500, 30, 29, 19},
280 {28000, 34, 30, 17},
281 {28320, 26, 30, 22},
282 {28322, 32, 42, 25},
283 {28750, 24, 23, 18},
284 {29000, 30, 29, 18},
285 {29750, 32, 30, 17},
286 {30000, 30, 25, 15},
287 {30750, 30, 41, 24},
288 {31000, 30, 31, 18},
289 {31500, 30, 28, 16},
290 {32000, 30, 32, 18},
291 {32500, 28, 32, 19},
292 {33000, 24, 22, 15},
293 {34000, 28, 30, 17},
294 {35000, 26, 32, 19},
295 {35500, 24, 30, 19},
296 {36000, 26, 26, 15},
297 {36750, 26, 46, 26},
298 {37000, 24, 23, 14},
299 {37762, 22, 40, 26},
300 {37800, 20, 21, 15},
301 {38000, 24, 27, 16},
302 {38250, 24, 34, 20},
303 {39000, 24, 26, 15},
304 {40000, 24, 32, 18},
305 {40500, 20, 21, 14},
306 {40541, 22, 147, 89},
307 {40750, 18, 19, 14},
308 {41000, 16, 17, 14},
309 {41500, 22, 44, 26},
310 {41540, 22, 44, 26},
311 {42000, 18, 21, 15},
312 {42500, 22, 45, 26},
313 {43000, 20, 43, 27},
314 {43163, 20, 24, 15},
315 {44000, 18, 22, 15},
316 {44900, 20, 108, 65},
317 {45000, 20, 25, 15},
318 {45250, 20, 52, 31},
319 {46000, 18, 23, 15},
320 {46750, 20, 45, 26},
321 {47000, 20, 40, 23},
322 {48000, 18, 24, 15},
323 {49000, 18, 49, 30},
324 {49500, 16, 22, 15},
325 {50000, 18, 25, 15},
326 {50500, 18, 32, 19},
327 {51000, 18, 34, 20},
328 {52000, 18, 26, 15},
329 {52406, 14, 34, 25},
330 {53000, 16, 22, 14},
331 {54000, 16, 24, 15},
332 {54054, 16, 173, 108},
333 {54500, 14, 24, 17},
334 {55000, 12, 22, 18},
335 {56000, 14, 45, 31},
336 {56250, 16, 25, 15},
337 {56750, 14, 25, 17},
338 {57000, 16, 27, 16},
339 {58000, 16, 43, 25},
340 {58250, 16, 38, 22},
341 {58750, 16, 40, 23},
342 {59000, 14, 26, 17},
343 {59341, 14, 40, 26},
344 {59400, 16, 44, 25},
345 {60000, 16, 32, 18},
346 {60500, 12, 39, 29},
347 {61000, 14, 49, 31},
348 {62000, 14, 37, 23},
349 {62250, 14, 42, 26},
350 {63000, 12, 21, 15},
351 {63500, 14, 28, 17},
352 {64000, 12, 27, 19},
353 {65000, 14, 32, 19},
354 {65250, 12, 29, 20},
355 {65500, 12, 32, 22},
356 {66000, 12, 22, 15},
357 {66667, 14, 38, 22},
358 {66750, 10, 21, 17},
359 {67000, 14, 33, 19},
360 {67750, 14, 58, 33},
361 {68000, 14, 30, 17},
362 {68179, 14, 46, 26},
363 {68250, 14, 46, 26},
364 {69000, 12, 23, 15},
365 {70000, 12, 28, 18},
366 {71000, 12, 30, 19},
367 {72000, 12, 24, 15},
368 {73000, 10, 23, 17},
369 {74000, 12, 23, 14},
370 {74176, 8, 100, 91},
371 {74250, 10, 22, 16},
372 {74481, 12, 43, 26},
373 {74500, 10, 29, 21},
374 {75000, 12, 25, 15},
375 {75250, 10, 39, 28},
376 {76000, 12, 27, 16},
377 {77000, 12, 53, 31},
378 {78000, 12, 26, 15},
379 {78750, 12, 28, 16},
380 {79000, 10, 38, 26},
381 {79500, 10, 28, 19},
382 {80000, 12, 32, 18},
383 {81000, 10, 21, 14},
384 {81081, 6, 100, 111},
385 {81624, 8, 29, 24},
386 {82000, 8, 17, 14},
387 {83000, 10, 40, 26},
388 {83950, 10, 28, 18},
389 {84000, 10, 28, 18},
390 {84750, 6, 16, 17},
391 {85000, 6, 17, 18},
392 {85250, 10, 30, 19},
393 {85750, 10, 27, 17},
394 {86000, 10, 43, 27},
395 {87000, 10, 29, 18},
396 {88000, 10, 44, 27},
397 {88500, 10, 41, 25},
398 {89000, 10, 28, 17},
399 {89012, 6, 90, 91},
400 {89100, 10, 33, 20},
401 {90000, 10, 25, 15},
402 {91000, 10, 32, 19},
403 {92000, 10, 46, 27},
404 {93000, 10, 31, 18},
405 {94000, 10, 40, 23},
406 {94500, 10, 28, 16},
407 {95000, 10, 44, 25},
408 {95654, 10, 39, 22},
409 {95750, 10, 39, 22},
410 {96000, 10, 32, 18},
411 {97000, 8, 23, 16},
412 {97750, 8, 42, 29},
413 {98000, 8, 45, 31},
414 {99000, 8, 22, 15},
415 {99750, 8, 34, 23},
416 {100000, 6, 20, 18},
417 {100500, 6, 19, 17},
418 {101000, 6, 37, 33},
419 {101250, 8, 21, 14},
420 {102000, 6, 17, 15},
421 {102250, 6, 25, 22},
422 {103000, 8, 29, 19},
423 {104000, 8, 37, 24},
424 {105000, 8, 28, 18},
425 {106000, 8, 22, 14},
426 {107000, 8, 46, 29},
427 {107214, 8, 27, 17},
428 {108000, 8, 24, 15},
429 {108108, 8, 173, 108},
430 {109000, 6, 23, 19},
431 {109000, 6, 23, 19},
432 {110000, 6, 22, 18},
433 {110013, 6, 22, 18},
434 {110250, 8, 49, 30},
435 {110500, 8, 36, 22},
436 {111000, 8, 23, 14},
437 {111264, 8, 150, 91},
438 {111375, 8, 33, 20},
439 {112000, 8, 63, 38},
440 {112500, 8, 25, 15},
441 {113100, 8, 57, 34},
442 {113309, 8, 42, 25},
443 {114000, 8, 27, 16},
444 {115000, 6, 23, 18},
445 {116000, 8, 43, 25},
446 {117000, 8, 26, 15},
447 {117500, 8, 40, 23},
448 {118000, 6, 38, 29},
449 {119000, 8, 30, 17},
450 {119500, 8, 46, 26},
451 {119651, 8, 39, 22},
452 {120000, 8, 32, 18},
453 {121000, 6, 39, 29},
454 {121250, 6, 31, 23},
455 {121750, 6, 23, 17},
456 {122000, 6, 42, 31},
457 {122614, 6, 30, 22},
458 {123000, 6, 41, 30},
459 {123379, 6, 37, 27},
460 {124000, 6, 51, 37},
461 {125000, 6, 25, 18},
462 {125250, 4, 13, 14},
463 {125750, 4, 27, 29},
464 {126000, 6, 21, 15},
465 {127000, 6, 24, 17},
466 {127250, 6, 41, 29},
467 {128000, 6, 27, 19},
468 {129000, 6, 43, 30},
469 {129859, 4, 25, 26},
470 {130000, 6, 26, 18},
471 {130250, 6, 42, 29},
472 {131000, 6, 32, 22},
473 {131500, 6, 38, 26},
474 {131850, 6, 41, 28},
475 {132000, 6, 22, 15},
476 {132750, 6, 28, 19},
477 {133000, 6, 34, 23},
478 {133330, 6, 37, 25},
479 {134000, 6, 61, 41},
480 {135000, 6, 21, 14},
481 {135250, 6, 167, 111},
482 {136000, 6, 62, 41},
483 {137000, 6, 35, 23},
484 {138000, 6, 23, 15},
485 {138500, 6, 40, 26},
486 {138750, 6, 37, 24},
487 {139000, 6, 34, 22},
488 {139050, 6, 34, 22},
489 {139054, 6, 34, 22},
490 {140000, 6, 28, 18},
491 {141000, 6, 36, 23},
492 {141500, 6, 22, 14},
493 {142000, 6, 30, 19},
494 {143000, 6, 27, 17},
495 {143472, 4, 17, 16},
496 {144000, 6, 24, 15},
497 {145000, 6, 29, 18},
498 {146000, 6, 47, 29},
499 {146250, 6, 26, 16},
500 {147000, 6, 49, 30},
501 {147891, 6, 23, 14},
502 {148000, 6, 23, 14},
503 {148250, 6, 28, 17},
504 {148352, 4, 100, 91},
505 {148500, 6, 33, 20},
506 {149000, 6, 48, 29},
507 {150000, 6, 25, 15},
508 {151000, 4, 19, 17},
509 {152000, 6, 27, 16},
510 {152280, 6, 44, 26},
511 {153000, 6, 34, 20},
512 {154000, 6, 53, 31},
513 {155000, 6, 31, 18},
514 {155250, 6, 50, 29},
515 {155750, 6, 45, 26},
516 {156000, 6, 26, 15},
517 {157000, 6, 61, 35},
518 {157500, 6, 28, 16},
519 {158000, 6, 65, 37},
520 {158250, 6, 44, 25},
521 {159000, 6, 53, 30},
522 {159500, 6, 39, 22},
523 {160000, 6, 32, 18},
524 {161000, 4, 31, 26},
525 {162000, 4, 18, 15},
526 {162162, 4, 131, 109},
527 {162500, 4, 53, 44},
528 {163000, 4, 29, 24},
529 {164000, 4, 17, 14},
530 {165000, 4, 22, 18},
531 {166000, 4, 32, 26},
532 {167000, 4, 26, 21},
533 {168000, 4, 46, 37},
534 {169000, 4, 104, 83},
535 {169128, 4, 64, 51},
536 {169500, 4, 39, 31},
537 {170000, 4, 34, 27},
538 {171000, 4, 19, 15},
539 {172000, 4, 51, 40},
540 {172750, 4, 32, 25},
541 {172800, 4, 32, 25},
542 {173000, 4, 41, 32},
543 {174000, 4, 49, 38},
544 {174787, 4, 22, 17},
545 {175000, 4, 35, 27},
546 {176000, 4, 30, 23},
547 {177000, 4, 38, 29},
548 {178000, 4, 29, 22},
549 {178500, 4, 37, 28},
550 {179000, 4, 53, 40},
551 {179500, 4, 73, 55},
552 {180000, 4, 20, 15},
553 {181000, 4, 55, 41},
554 {182000, 4, 31, 23},
555 {183000, 4, 42, 31},
556 {184000, 4, 30, 22},
557 {184750, 4, 26, 19},
558 {185000, 4, 37, 27},
559 {186000, 4, 51, 37},
560 {187000, 4, 36, 26},
561 {188000, 4, 32, 23},
562 {189000, 4, 21, 15},
563 {190000, 4, 38, 27},
564 {190960, 4, 41, 29},
565 {191000, 4, 41, 29},
566 {192000, 4, 27, 19},
567 {192250, 4, 37, 26},
568 {193000, 4, 20, 14},
569 {193250, 4, 53, 37},
570 {194000, 4, 23, 16},
571 {194208, 4, 23, 16},
572 {195000, 4, 26, 18},
573 {196000, 4, 45, 31},
574 {197000, 4, 35, 24},
575 {197750, 4, 41, 28},
576 {198000, 4, 22, 15},
577 {198500, 4, 25, 17},
578 {199000, 4, 28, 19},
579 {200000, 4, 37, 25},
580 {201000, 4, 61, 41},
581 {202000, 4, 112, 75},
582 {202500, 4, 21, 14},
583 {203000, 4, 146, 97},
584 {204000, 4, 62, 41},
585 {204750, 4, 44, 29},
586 {205000, 4, 38, 25},
587 {206000, 4, 29, 19},
588 {207000, 4, 23, 15},
589 {207500, 4, 40, 26},
590 {208000, 4, 37, 24},
591 {208900, 4, 48, 31},
592 {209000, 4, 48, 31},
593 {209250, 4, 31, 20},
594 {210000, 4, 28, 18},
595 {211000, 4, 25, 16},
596 {212000, 4, 22, 14},
597 {213000, 4, 30, 19},
598 {213750, 4, 38, 24},
599 {214000, 4, 46, 29},
600 {214750, 4, 35, 22},
601 {215000, 4, 43, 27},
602 {216000, 4, 24, 15},
603 {217000, 4, 37, 23},
604 {218000, 4, 42, 26},
605 {218250, 4, 42, 26},
606 {218750, 4, 34, 21},
607 {219000, 4, 47, 29},
608 {219000, 4, 47, 29},
609 {220000, 4, 44, 27},
610 {220640, 4, 49, 30},
611 {220750, 4, 36, 22},
612 {221000, 4, 36, 22},
613 {222000, 4, 23, 14},
614 {222525, 4, 28, 17},
615 {222750, 4, 33, 20},
616 {227000, 4, 37, 22},
617 {230250, 4, 29, 17},
618 {233500, 4, 38, 22},
619 {235000, 4, 40, 23},
620 {238000, 4, 30, 17},
621 {241500, 2, 17, 19},
622 {245250, 2, 20, 22},
623 {247750, 2, 22, 24},
624 {253250, 2, 15, 16},
625 {256250, 2, 18, 19},
626 {262500, 2, 31, 32},
627 {267250, 2, 66, 67},
628 {268500, 2, 94, 95},
629 {270000, 2, 14, 14},
630 {272500, 2, 77, 76},
631 {273750, 2, 57, 56},
632 {280750, 2, 24, 23},
633 {281250, 2, 23, 22},
634 {286000, 2, 17, 16},
635 {291750, 2, 26, 24},
636 {296703, 2, 56, 51},
637 {297000, 2, 22, 20},
638 {298000, 2, 21, 19},
639};
640
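For reference: the table above pairs each TMDS clock (in kHz) with WRPLL divider settings, and the mode-set path below runs the WRPLL off the 2700 MHz LCPLL (WRPLL_PLL_SELECT_LCPLL_2700). The entries appear chosen so that 2700000 * n2 / (r2 * p) lands on roughly 5x the TMDS clock, i.e. the PLL runs at half the 10x TMDS bit rate. A minimal userspace sketch to spot-check an entry (struct and helper names are made up for illustration):

#include <stdio.h>

struct wrpll_tmds_entry { unsigned int clock, p, n2, r2; };

static unsigned int wrpll_output_khz(const struct wrpll_tmds_entry *e)
{
	/* 2700 MHz LCPLL reference, expressed in kHz */
	return 2700000u * e->n2 / (e->r2 * e->p);
}

int main(void)
{
	/* the 148500 kHz (1080p) entry from the table above */
	const struct wrpll_tmds_entry e = { 148500, 6, 33, 20 };

	printf("pll=%u kHz, 5*clock=%u kHz\n",
	       wrpll_output_khz(&e), 5 * e.clock);
	return 0;
}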
641void intel_ddi_mode_set(struct drm_encoder *encoder,
642 struct drm_display_mode *mode,
643 struct drm_display_mode *adjusted_mode)
644{
645 struct drm_device *dev = encoder->dev;
646 struct drm_i915_private *dev_priv = dev->dev_private;
647 struct drm_crtc *crtc = encoder->crtc;
648 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
649 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
650 int port = intel_hdmi->ddi_port;
651 int pipe = intel_crtc->pipe;
 652	int p, n2, r2, valid = 0;
653 u32 temp, i;
654
655 /* On Haswell, we need to enable the clocks and prepare DDI function to
656 * work in HDMI mode for this pipe.
657 */
658 DRM_DEBUG_KMS("Preparing HDMI DDI mode for Haswell on port %c, pipe %c\n", port_name(port), pipe_name(pipe));
659
 660	for (i = 0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++) {
661 if (crtc->mode.clock == wrpll_tmds_clock_table[i].clock) {
662 p = wrpll_tmds_clock_table[i].p;
663 n2 = wrpll_tmds_clock_table[i].n2;
664 r2 = wrpll_tmds_clock_table[i].r2;
665
666 DRM_DEBUG_KMS("WR PLL clock: found settings for %dKHz refresh rate: p=%d, n2=%d, r2=%d\n",
667 crtc->mode.clock,
668 p, n2, r2);
669
670 valid = 1;
671 break;
672 }
673 }
674
675 if (!valid) {
676 DRM_ERROR("Unable to find WR PLL clock settings for %dKHz refresh rate\n",
677 crtc->mode.clock);
678 return;
679 }
680
681 /* Enable LCPLL if disabled */
682 temp = I915_READ(LCPLL_CTL);
683 if (temp & LCPLL_PLL_DISABLE)
684 I915_WRITE(LCPLL_CTL,
685 temp & ~LCPLL_PLL_DISABLE);
686
687 /* Configure WR PLL 1, program the correct divider values for
688 * the desired frequency and wait for warmup */
689 I915_WRITE(WRPLL_CTL1,
690 WRPLL_PLL_ENABLE |
691 WRPLL_PLL_SELECT_LCPLL_2700 |
692 WRPLL_DIVIDER_REFERENCE(r2) |
693 WRPLL_DIVIDER_FEEDBACK(n2) |
694 WRPLL_DIVIDER_POST(p));
695
696 udelay(20);
697
698 /* Use WRPLL1 clock to drive the output to the port, and tell the pipe to use
699 * this port for connection.
700 */
701 I915_WRITE(PORT_CLK_SEL(port),
702 PORT_CLK_SEL_WRPLL1);
703 I915_WRITE(PIPE_CLK_SEL(pipe),
704 PIPE_CLK_SEL_PORT(port));
705
706 udelay(20);
707
708 if (intel_hdmi->has_audio) {
 709		/* Proper support for digital audio needs new logic and a new set
710 * of registers, so we leave it for future patch bombing.
711 */
712 DRM_DEBUG_DRIVER("HDMI audio on pipe %c not yet supported on DDI\n",
713 pipe_name(intel_crtc->pipe));
714 }
715
716 /* Enable PIPE_DDI_FUNC_CTL for the pipe to work in HDMI mode */
717 temp = I915_READ(DDI_FUNC_CTL(pipe));
718 temp &= ~PIPE_DDI_PORT_MASK;
719 temp &= ~PIPE_DDI_BPC_12;
720 temp |= PIPE_DDI_SELECT_PORT(port) |
721 PIPE_DDI_MODE_SELECT_HDMI |
722 ((intel_crtc->bpp > 24) ?
723 PIPE_DDI_BPC_12 :
724 PIPE_DDI_BPC_8) |
725 PIPE_DDI_FUNC_ENABLE;
726
727 I915_WRITE(DDI_FUNC_CTL(pipe), temp);
728
729 intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
730 intel_hdmi_set_spd_infoframe(encoder);
731}
732
733void intel_ddi_dpms(struct drm_encoder *encoder, int mode)
734{
735 struct drm_device *dev = encoder->dev;
736 struct drm_i915_private *dev_priv = dev->dev_private;
737 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
738 int port = intel_hdmi->ddi_port;
739 u32 temp;
740
741 temp = I915_READ(DDI_BUF_CTL(port));
742
743 if (mode != DRM_MODE_DPMS_ON) {
744 temp &= ~DDI_BUF_CTL_ENABLE;
745 } else {
746 temp |= DDI_BUF_CTL_ENABLE;
747 }
748
 749	/* Enable DDI_BUF_CTL. In HDMI/DVI mode, the port width
 750	 * and swing/emphasis values are ignored, so nothing special needs
751 * to be done besides enabling the port.
752 */
753 I915_WRITE(DDI_BUF_CTL(port),
754 temp);
755}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 1b1cf3b3ff51..ee61ad1e642b 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -24,7 +24,7 @@
24 * Eric Anholt <eric@anholt.net> 24 * Eric Anholt <eric@anholt.net>
25 */ 25 */
26 26
27#include <linux/cpufreq.h> 27#include <linux/dmi.h>
28#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/input.h> 29#include <linux/input.h>
30#include <linux/i2c.h> 30#include <linux/i2c.h>
@@ -44,7 +44,6 @@
44#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) 44#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
45 45
46bool intel_pipe_has_type(struct drm_crtc *crtc, int type); 46bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
47static void intel_update_watermarks(struct drm_device *dev);
48static void intel_increase_pllclock(struct drm_crtc *crtc); 47static void intel_increase_pllclock(struct drm_crtc *crtc);
49static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on); 48static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
50 49
@@ -360,6 +359,88 @@ static const intel_limit_t intel_limits_ironlake_display_port = {
360 .find_pll = intel_find_pll_ironlake_dp, 359 .find_pll = intel_find_pll_ironlake_dp,
361}; 360};
362 361
362u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg)
363{
364 unsigned long flags;
365 u32 val = 0;
366
367 spin_lock_irqsave(&dev_priv->dpio_lock, flags);
368 if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
369 DRM_ERROR("DPIO idle wait timed out\n");
370 goto out_unlock;
371 }
372
373 I915_WRITE(DPIO_REG, reg);
374 I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_READ | DPIO_PORTID |
375 DPIO_BYTE);
376 if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
377 DRM_ERROR("DPIO read wait timed out\n");
378 goto out_unlock;
379 }
380 val = I915_READ(DPIO_DATA);
381
382out_unlock:
383 spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
384 return val;
385}
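intel_dpio_read above is a small request/response mailbox: wait for DPIO_PKT to report idle, latch the target register in DPIO_REG, issue the read packet, wait for completion, then fetch DPIO_DATA; a timeout on either wait logs an error and falls through, returning 0. A caller only sees something like the following (the register name here is hypothetical):

	u32 val = intel_dpio_read(dev_priv, DPIO_DIV_A);	/* DPIO_DIV_A: hypothetical sideband register */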
386
387static void vlv_init_dpio(struct drm_device *dev)
388{
389 struct drm_i915_private *dev_priv = dev->dev_private;
390
391 /* Reset the DPIO config */
392 I915_WRITE(DPIO_CTL, 0);
393 POSTING_READ(DPIO_CTL);
394 I915_WRITE(DPIO_CTL, 1);
395 POSTING_READ(DPIO_CTL);
396}
397
398static int intel_dual_link_lvds_callback(const struct dmi_system_id *id)
399{
400 DRM_INFO("Forcing lvds to dual link mode on %s\n", id->ident);
401 return 1;
402}
403
404static const struct dmi_system_id intel_dual_link_lvds[] = {
405 {
406 .callback = intel_dual_link_lvds_callback,
407 .ident = "Apple MacBook Pro (Core i5/i7 Series)",
408 .matches = {
409 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
410 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"),
411 },
412 },
413 { } /* terminating entry */
414};
415
416static bool is_dual_link_lvds(struct drm_i915_private *dev_priv,
417 unsigned int reg)
418{
419 unsigned int val;
420
421 /* use the module option value if specified */
422 if (i915_lvds_channel_mode > 0)
423 return i915_lvds_channel_mode == 2;
424
425 if (dmi_check_system(intel_dual_link_lvds))
426 return true;
427
428 if (dev_priv->lvds_val)
429 val = dev_priv->lvds_val;
430 else {
431 /* BIOS should set the proper LVDS register value at boot, but
432 * in reality, it doesn't set the value when the lid is closed;
433 * we need to check "the value to be set" in VBT when LVDS
434 * register is uninitialized.
435 */
436 val = I915_READ(reg);
437 if (!(val & ~LVDS_DETECTED))
438 val = dev_priv->bios_lvds_val;
439 dev_priv->lvds_val = val;
440 }
441 return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
442}
443
363static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc, 444static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
364 int refclk) 445 int refclk)
365{ 446{
@@ -368,8 +449,7 @@ static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
368 const intel_limit_t *limit; 449 const intel_limit_t *limit;
369 450
370 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 451 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
371 if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == 452 if (is_dual_link_lvds(dev_priv, PCH_LVDS)) {
372 LVDS_CLKB_POWER_UP) {
373 /* LVDS dual channel */ 453 /* LVDS dual channel */
374 if (refclk == 100000) 454 if (refclk == 100000)
375 limit = &intel_limits_ironlake_dual_lvds_100m; 455 limit = &intel_limits_ironlake_dual_lvds_100m;
@@ -397,8 +477,7 @@ static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
397 const intel_limit_t *limit; 477 const intel_limit_t *limit;
398 478
399 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 479 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
400 if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == 480 if (is_dual_link_lvds(dev_priv, LVDS))
401 LVDS_CLKB_POWER_UP)
402 /* LVDS with dual channel */ 481 /* LVDS with dual channel */
403 limit = &intel_limits_g4x_dual_channel_lvds; 482 limit = &intel_limits_g4x_dual_channel_lvds;
404 else 483 else
@@ -536,8 +615,7 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
536 * reliably set up different single/dual channel state, if we 615 * reliably set up different single/dual channel state, if we
537 * even can. 616 * even can.
538 */ 617 */
539 if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == 618 if (is_dual_link_lvds(dev_priv, LVDS))
540 LVDS_CLKB_POWER_UP)
541 clock.p2 = limit->p2.p2_fast; 619 clock.p2 = limit->p2.p2_fast;
542 else 620 else
543 clock.p2 = limit->p2.p2_slow; 621 clock.p2 = limit->p2.p2_slow;
@@ -706,6 +784,17 @@ intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
706 return true; 784 return true;
707} 785}
708 786
787static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe)
788{
789 struct drm_i915_private *dev_priv = dev->dev_private;
790 u32 frame, frame_reg = PIPEFRAME(pipe);
791
792 frame = I915_READ(frame_reg);
793
794 if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
795 DRM_DEBUG_KMS("vblank wait timed out\n");
796}
797
709/** 798/**
710 * intel_wait_for_vblank - wait for vblank on a given pipe 799 * intel_wait_for_vblank - wait for vblank on a given pipe
711 * @dev: drm device 800 * @dev: drm device
@@ -719,6 +808,11 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
719 struct drm_i915_private *dev_priv = dev->dev_private; 808 struct drm_i915_private *dev_priv = dev->dev_private;
720 int pipestat_reg = PIPESTAT(pipe); 809 int pipestat_reg = PIPESTAT(pipe);
721 810
811 if (INTEL_INFO(dev)->gen >= 5) {
812 ironlake_wait_for_vblank(dev, pipe);
813 return;
814 }
815
722 /* Clear existing vblank status. Note this will clear any other 816 /* Clear existing vblank status. Note this will clear any other
723 * sticky status fields as well. 817 * sticky status fields as well.
724 * 818 *
@@ -771,15 +865,20 @@ void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
771 100)) 865 100))
772 DRM_DEBUG_KMS("pipe_off wait timed out\n"); 866 DRM_DEBUG_KMS("pipe_off wait timed out\n");
773 } else { 867 } else {
774 u32 last_line; 868 u32 last_line, line_mask;
775 int reg = PIPEDSL(pipe); 869 int reg = PIPEDSL(pipe);
776 unsigned long timeout = jiffies + msecs_to_jiffies(100); 870 unsigned long timeout = jiffies + msecs_to_jiffies(100);
777 871
872 if (IS_GEN2(dev))
873 line_mask = DSL_LINEMASK_GEN2;
874 else
875 line_mask = DSL_LINEMASK_GEN3;
876
778 /* Wait for the display line to settle */ 877 /* Wait for the display line to settle */
779 do { 878 do {
780 last_line = I915_READ(reg) & DSL_LINEMASK; 879 last_line = I915_READ(reg) & line_mask;
781 mdelay(5); 880 mdelay(5);
782 } while (((I915_READ(reg) & DSL_LINEMASK) != last_line) && 881 } while (((I915_READ(reg) & line_mask) != last_line) &&
783 time_after(timeout, jiffies)); 882 time_after(timeout, jiffies));
784 if (time_after(jiffies, timeout)) 883 if (time_after(jiffies, timeout))
785 DRM_DEBUG_KMS("pipe_off wait timed out\n"); 884 DRM_DEBUG_KMS("pipe_off wait timed out\n");
@@ -811,26 +910,33 @@ static void assert_pll(struct drm_i915_private *dev_priv,
811 910
812/* For ILK+ */ 911/* For ILK+ */
813static void assert_pch_pll(struct drm_i915_private *dev_priv, 912static void assert_pch_pll(struct drm_i915_private *dev_priv,
814 enum pipe pipe, bool state) 913 struct intel_crtc *intel_crtc, bool state)
815{ 914{
816 int reg; 915 int reg;
817 u32 val; 916 u32 val;
818 bool cur_state; 917 bool cur_state;
819 918
919 if (HAS_PCH_LPT(dev_priv->dev)) {
920 DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n");
921 return;
922 }
923
924 if (!intel_crtc->pch_pll) {
925 WARN(1, "asserting PCH PLL enabled with no PLL\n");
926 return;
927 }
928
820 if (HAS_PCH_CPT(dev_priv->dev)) { 929 if (HAS_PCH_CPT(dev_priv->dev)) {
821 u32 pch_dpll; 930 u32 pch_dpll;
822 931
823 pch_dpll = I915_READ(PCH_DPLL_SEL); 932 pch_dpll = I915_READ(PCH_DPLL_SEL);
824 933
825 /* Make sure the selected PLL is enabled to the transcoder */ 934 /* Make sure the selected PLL is enabled to the transcoder */
826 WARN(!((pch_dpll >> (4 * pipe)) & 8), 935 WARN(!((pch_dpll >> (4 * intel_crtc->pipe)) & 8),
827 "transcoder %d PLL not enabled\n", pipe); 936 "transcoder %d PLL not enabled\n", intel_crtc->pipe);
828
829 /* Convert the transcoder pipe number to a pll pipe number */
830 pipe = (pch_dpll >> (4 * pipe)) & 1;
831 } 937 }
832 938
833 reg = PCH_DPLL(pipe); 939 reg = intel_crtc->pch_pll->pll_reg;
834 val = I915_READ(reg); 940 val = I915_READ(reg);
835 cur_state = !!(val & DPLL_VCO_ENABLE); 941 cur_state = !!(val & DPLL_VCO_ENABLE);
836 WARN(cur_state != state, 942 WARN(cur_state != state,
@@ -847,9 +953,16 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv,
847 u32 val; 953 u32 val;
848 bool cur_state; 954 bool cur_state;
849 955
850 reg = FDI_TX_CTL(pipe); 956 if (IS_HASWELL(dev_priv->dev)) {
851 val = I915_READ(reg); 957 /* On Haswell, DDI is used instead of FDI_TX_CTL */
852 cur_state = !!(val & FDI_TX_ENABLE); 958 reg = DDI_FUNC_CTL(pipe);
959 val = I915_READ(reg);
960 cur_state = !!(val & PIPE_DDI_FUNC_ENABLE);
961 } else {
962 reg = FDI_TX_CTL(pipe);
963 val = I915_READ(reg);
964 cur_state = !!(val & FDI_TX_ENABLE);
965 }
853 WARN(cur_state != state, 966 WARN(cur_state != state,
854 "FDI TX state assertion failure (expected %s, current %s)\n", 967 "FDI TX state assertion failure (expected %s, current %s)\n",
855 state_string(state), state_string(cur_state)); 968 state_string(state), state_string(cur_state));
@@ -864,9 +977,14 @@ static void assert_fdi_rx(struct drm_i915_private *dev_priv,
864 u32 val; 977 u32 val;
865 bool cur_state; 978 bool cur_state;
866 979
867 reg = FDI_RX_CTL(pipe); 980 if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
868 val = I915_READ(reg); 981 DRM_ERROR("Attempting to enable FDI_RX on Haswell pipe > 0\n");
869 cur_state = !!(val & FDI_RX_ENABLE); 982 return;
983 } else {
984 reg = FDI_RX_CTL(pipe);
985 val = I915_READ(reg);
986 cur_state = !!(val & FDI_RX_ENABLE);
987 }
870 WARN(cur_state != state, 988 WARN(cur_state != state,
871 "FDI RX state assertion failure (expected %s, current %s)\n", 989 "FDI RX state assertion failure (expected %s, current %s)\n",
872 state_string(state), state_string(cur_state)); 990 state_string(state), state_string(cur_state));
@@ -884,6 +1002,10 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
884 if (dev_priv->info->gen == 5) 1002 if (dev_priv->info->gen == 5)
885 return; 1003 return;
886 1004
1005 /* On Haswell, DDI ports are responsible for the FDI PLL setup */
1006 if (IS_HASWELL(dev_priv->dev))
1007 return;
1008
887 reg = FDI_TX_CTL(pipe); 1009 reg = FDI_TX_CTL(pipe);
888 val = I915_READ(reg); 1010 val = I915_READ(reg);
889 WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n"); 1011 WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
@@ -895,6 +1017,10 @@ static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
895 int reg; 1017 int reg;
896 u32 val; 1018 u32 val;
897 1019
1020 if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
1021 DRM_ERROR("Attempting to enable FDI on Haswell with pipe > 0\n");
1022 return;
1023 }
898 reg = FDI_RX_CTL(pipe); 1024 reg = FDI_RX_CTL(pipe);
899 val = I915_READ(reg); 1025 val = I915_READ(reg);
900 WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n"); 1026 WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
@@ -1000,6 +1126,11 @@ static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
1000 u32 val; 1126 u32 val;
1001 bool enabled; 1127 bool enabled;
1002 1128
1129 if (HAS_PCH_LPT(dev_priv->dev)) {
 1130		DRM_DEBUG_DRIVER("LPT does not have PCH refclk, skipping check\n");
1131 return;
1132 }
1133
1003 val = I915_READ(PCH_DREF_CONTROL); 1134 val = I915_READ(PCH_DREF_CONTROL);
1004 enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK | 1135 enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
1005 DREF_SUPERSPREAD_SOURCE_MASK)); 1136 DREF_SUPERSPREAD_SOURCE_MASK));
@@ -1198,6 +1329,69 @@ static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1198 POSTING_READ(reg); 1329 POSTING_READ(reg);
1199} 1330}
1200 1331
1332/* SBI access */
1333static void
1334intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value)
1335{
1336 unsigned long flags;
1337
1338 spin_lock_irqsave(&dev_priv->dpio_lock, flags);
1339 if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_READY) == 0,
1340 100)) {
1341 DRM_ERROR("timeout waiting for SBI to become ready\n");
1342 goto out_unlock;
1343 }
1344
1345 I915_WRITE(SBI_ADDR,
1346 (reg << 16));
1347 I915_WRITE(SBI_DATA,
1348 value);
1349 I915_WRITE(SBI_CTL_STAT,
1350 SBI_BUSY |
1351 SBI_CTL_OP_CRWR);
1352
1353 if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_READY | SBI_RESPONSE_SUCCESS)) == 0,
1354 100)) {
1355 DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
1356 goto out_unlock;
1357 }
1358
1359out_unlock:
1360 spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
1361}
1362
1363static u32
1364intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg)
1365{
1366 unsigned long flags;
1367 u32 value;
1368
1369 spin_lock_irqsave(&dev_priv->dpio_lock, flags);
1370 if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_READY) == 0,
1371 100)) {
1372 DRM_ERROR("timeout waiting for SBI to become ready\n");
1373 goto out_unlock;
1374 }
1375
1376 I915_WRITE(SBI_ADDR,
1377 (reg << 16));
1378 I915_WRITE(SBI_CTL_STAT,
1379 SBI_BUSY |
1380 SBI_CTL_OP_CRRD);
1381
1382 if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_READY | SBI_RESPONSE_SUCCESS)) == 0,
1383 100)) {
1384 DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
1385 goto out_unlock;
1386 }
1387
1388 value = I915_READ(SBI_DATA);
1389
1390out_unlock:
1391 spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
1392 return value;
1393}
1394
1201/** 1395/**
1202 * intel_enable_pch_pll - enable PCH PLL 1396 * intel_enable_pch_pll - enable PCH PLL
1203 * @dev_priv: i915 private structure 1397 * @dev_priv: i915 private structure
@@ -1206,60 +1400,88 @@ static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1206 * The PCH PLL needs to be enabled before the PCH transcoder, since it 1400 * The PCH PLL needs to be enabled before the PCH transcoder, since it
1207 * drives the transcoder clock. 1401 * drives the transcoder clock.
1208 */ 1402 */
1209static void intel_enable_pch_pll(struct drm_i915_private *dev_priv, 1403static void intel_enable_pch_pll(struct intel_crtc *intel_crtc)
1210 enum pipe pipe)
1211{ 1404{
1405 struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
1406 struct intel_pch_pll *pll;
1212 int reg; 1407 int reg;
1213 u32 val; 1408 u32 val;
1214 1409
1215 if (pipe > 1) 1410 /* PCH PLLs only available on ILK, SNB and IVB */
1411 BUG_ON(dev_priv->info->gen < 5);
1412 pll = intel_crtc->pch_pll;
1413 if (pll == NULL)
1216 return; 1414 return;
1217 1415
1218 /* PCH only available on ILK+ */ 1416 if (WARN_ON(pll->refcount == 0))
1219 BUG_ON(dev_priv->info->gen < 5); 1417 return;
1418
1419 DRM_DEBUG_KMS("enable PCH PLL %x (active %d, on? %d)for crtc %d\n",
1420 pll->pll_reg, pll->active, pll->on,
1421 intel_crtc->base.base.id);
1220 1422
1221 /* PCH refclock must be enabled first */ 1423 /* PCH refclock must be enabled first */
1222 assert_pch_refclk_enabled(dev_priv); 1424 assert_pch_refclk_enabled(dev_priv);
1223 1425
1224 reg = PCH_DPLL(pipe); 1426 if (pll->active++ && pll->on) {
1427 assert_pch_pll_enabled(dev_priv, intel_crtc);
1428 return;
1429 }
1430
1431 DRM_DEBUG_KMS("enabling PCH PLL %x\n", pll->pll_reg);
1432
1433 reg = pll->pll_reg;
1225 val = I915_READ(reg); 1434 val = I915_READ(reg);
1226 val |= DPLL_VCO_ENABLE; 1435 val |= DPLL_VCO_ENABLE;
1227 I915_WRITE(reg, val); 1436 I915_WRITE(reg, val);
1228 POSTING_READ(reg); 1437 POSTING_READ(reg);
1229 udelay(200); 1438 udelay(200);
1439
1440 pll->on = true;
1230} 1441}
1231 1442
1232static void intel_disable_pch_pll(struct drm_i915_private *dev_priv, 1443static void intel_disable_pch_pll(struct intel_crtc *intel_crtc)
1233 enum pipe pipe)
1234{ 1444{
1445 struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
1446 struct intel_pch_pll *pll = intel_crtc->pch_pll;
1235 int reg; 1447 int reg;
1236 u32 val, pll_mask = TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL, 1448 u32 val;
1237 pll_sel = TRANSC_DPLL_ENABLE;
1238
1239 if (pipe > 1)
1240 return;
1241 1449
1242 /* PCH only available on ILK+ */ 1450 /* PCH only available on ILK+ */
1243 BUG_ON(dev_priv->info->gen < 5); 1451 BUG_ON(dev_priv->info->gen < 5);
1452 if (pll == NULL)
1453 return;
1244 1454
1245 /* Make sure transcoder isn't still depending on us */ 1455 if (WARN_ON(pll->refcount == 0))
1246 assert_transcoder_disabled(dev_priv, pipe); 1456 return;
1247 1457
1248 if (pipe == 0) 1458 DRM_DEBUG_KMS("disable PCH PLL %x (active %d, on? %d) for crtc %d\n",
1249 pll_sel |= TRANSC_DPLLA_SEL; 1459 pll->pll_reg, pll->active, pll->on,
1250 else if (pipe == 1) 1460 intel_crtc->base.base.id);
1251 pll_sel |= TRANSC_DPLLB_SEL;
1252 1461
1462 if (WARN_ON(pll->active == 0)) {
1463 assert_pch_pll_disabled(dev_priv, intel_crtc);
1464 return;
1465 }
1253 1466
1254 if ((I915_READ(PCH_DPLL_SEL) & pll_mask) == pll_sel) 1467 if (--pll->active) {
1468 assert_pch_pll_enabled(dev_priv, intel_crtc);
1255 return; 1469 return;
1470 }
1471
1472 DRM_DEBUG_KMS("disabling PCH PLL %x\n", pll->pll_reg);
1256 1473
1257 reg = PCH_DPLL(pipe); 1474 /* Make sure transcoder isn't still depending on us */
1475 assert_transcoder_disabled(dev_priv, intel_crtc->pipe);
1476
1477 reg = pll->pll_reg;
1258 val = I915_READ(reg); 1478 val = I915_READ(reg);
1259 val &= ~DPLL_VCO_ENABLE; 1479 val &= ~DPLL_VCO_ENABLE;
1260 I915_WRITE(reg, val); 1480 I915_WRITE(reg, val);
1261 POSTING_READ(reg); 1481 POSTING_READ(reg);
1262 udelay(200); 1482 udelay(200);
1483
1484 pll->on = false;
1263} 1485}
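Taken together, intel_enable_pch_pll and intel_disable_pch_pll now treat the PCH PLL as a refcounted shared resource: hardware is only touched on the 0->1 transition in enable and the 1->0 transition in disable, with pll->on tracking the actual hardware state for the assert_* cross-checks. A minimal sketch of the pattern, with hypothetical types, and locking and the actual register programming elided:

#include <stdbool.h>

struct shared_pll {
	int active;	/* number of CRTCs currently using this PLL */
	bool on;	/* whether the hardware enable bit is set */
};

static void shared_pll_get(struct shared_pll *pll)
{
	if (pll->active++ && pll->on)
		return;		/* already running for another user */
	/* ... program dividers and set the enable bit here ... */
	pll->on = true;
}

static void shared_pll_put(struct shared_pll *pll)
{
	if (--pll->active)
		return;		/* still referenced elsewhere */
	/* ... clear the enable bit here ... */
	pll->on = false;
}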
1264 1486
1265static void intel_enable_transcoder(struct drm_i915_private *dev_priv, 1487static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
@@ -1273,12 +1495,16 @@ static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
1273 BUG_ON(dev_priv->info->gen < 5); 1495 BUG_ON(dev_priv->info->gen < 5);
1274 1496
1275 /* Make sure PCH DPLL is enabled */ 1497 /* Make sure PCH DPLL is enabled */
1276 assert_pch_pll_enabled(dev_priv, pipe); 1498 assert_pch_pll_enabled(dev_priv, to_intel_crtc(crtc));
1277 1499
1278 /* FDI must be feeding us bits for PCH ports */ 1500 /* FDI must be feeding us bits for PCH ports */
1279 assert_fdi_tx_enabled(dev_priv, pipe); 1501 assert_fdi_tx_enabled(dev_priv, pipe);
1280 assert_fdi_rx_enabled(dev_priv, pipe); 1502 assert_fdi_rx_enabled(dev_priv, pipe);
1281 1503
1504 if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
1505 DRM_ERROR("Attempting to enable transcoder on Haswell with pipe > 0\n");
1506 return;
1507 }
1282 reg = TRANSCONF(pipe); 1508 reg = TRANSCONF(pipe);
1283 val = I915_READ(reg); 1509 val = I915_READ(reg);
1284 pipeconf_val = I915_READ(PIPECONF(pipe)); 1510 pipeconf_val = I915_READ(PIPECONF(pipe));
@@ -1415,7 +1641,7 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv,
1415 * Plane regs are double buffered, going from enabled->disabled needs a 1641 * Plane regs are double buffered, going from enabled->disabled needs a
1416 * trigger in order to latch. The display address reg provides this. 1642 * trigger in order to latch. The display address reg provides this.
1417 */ 1643 */
1418static void intel_flush_display_plane(struct drm_i915_private *dev_priv, 1644void intel_flush_display_plane(struct drm_i915_private *dev_priv,
1419 enum plane plane) 1645 enum plane plane)
1420{ 1646{
1421 I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane))); 1647 I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
@@ -1526,490 +1752,6 @@ static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
1526 disable_pch_hdmi(dev_priv, pipe, HDMID); 1752 disable_pch_hdmi(dev_priv, pipe, HDMID);
1527} 1753}
1528 1754
1529static void i8xx_disable_fbc(struct drm_device *dev)
1530{
1531 struct drm_i915_private *dev_priv = dev->dev_private;
1532 u32 fbc_ctl;
1533
1534 /* Disable compression */
1535 fbc_ctl = I915_READ(FBC_CONTROL);
1536 if ((fbc_ctl & FBC_CTL_EN) == 0)
1537 return;
1538
1539 fbc_ctl &= ~FBC_CTL_EN;
1540 I915_WRITE(FBC_CONTROL, fbc_ctl);
1541
1542 /* Wait for compressing bit to clear */
1543 if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
1544 DRM_DEBUG_KMS("FBC idle timed out\n");
1545 return;
1546 }
1547
1548 DRM_DEBUG_KMS("disabled FBC\n");
1549}
1550
1551static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1552{
1553 struct drm_device *dev = crtc->dev;
1554 struct drm_i915_private *dev_priv = dev->dev_private;
1555 struct drm_framebuffer *fb = crtc->fb;
1556 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1557 struct drm_i915_gem_object *obj = intel_fb->obj;
1558 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1559 int cfb_pitch;
1560 int plane, i;
1561 u32 fbc_ctl, fbc_ctl2;
1562
1563 cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
1564 if (fb->pitches[0] < cfb_pitch)
1565 cfb_pitch = fb->pitches[0];
1566
1567 /* FBC_CTL wants 64B units */
1568 cfb_pitch = (cfb_pitch / 64) - 1;
1569 plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
1570
1571 /* Clear old tags */
1572 for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
1573 I915_WRITE(FBC_TAG + (i * 4), 0);
1574
1575 /* Set it up... */
1576 fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
1577 fbc_ctl2 |= plane;
1578 I915_WRITE(FBC_CONTROL2, fbc_ctl2);
1579 I915_WRITE(FBC_FENCE_OFF, crtc->y);
1580
1581 /* enable it... */
1582 fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
1583 if (IS_I945GM(dev))
1584 fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
1585 fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
1586 fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
1587 fbc_ctl |= obj->fence_reg;
1588 I915_WRITE(FBC_CONTROL, fbc_ctl);
1589
1590 DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
1591 cfb_pitch, crtc->y, intel_crtc->plane);
1592}
1593
1594static bool i8xx_fbc_enabled(struct drm_device *dev)
1595{
1596 struct drm_i915_private *dev_priv = dev->dev_private;
1597
1598 return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
1599}
1600
1601static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1602{
1603 struct drm_device *dev = crtc->dev;
1604 struct drm_i915_private *dev_priv = dev->dev_private;
1605 struct drm_framebuffer *fb = crtc->fb;
1606 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1607 struct drm_i915_gem_object *obj = intel_fb->obj;
1608 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1609 int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
1610 unsigned long stall_watermark = 200;
1611 u32 dpfc_ctl;
1612
1613 dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
1614 dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
1615 I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
1616
1617 I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
1618 (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
1619 (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
1620 I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
1621
1622 /* enable it... */
1623 I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
1624
1625 DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
1626}
1627
1628static void g4x_disable_fbc(struct drm_device *dev)
1629{
1630 struct drm_i915_private *dev_priv = dev->dev_private;
1631 u32 dpfc_ctl;
1632
1633 /* Disable compression */
1634 dpfc_ctl = I915_READ(DPFC_CONTROL);
1635 if (dpfc_ctl & DPFC_CTL_EN) {
1636 dpfc_ctl &= ~DPFC_CTL_EN;
1637 I915_WRITE(DPFC_CONTROL, dpfc_ctl);
1638
1639 DRM_DEBUG_KMS("disabled FBC\n");
1640 }
1641}
1642
1643static bool g4x_fbc_enabled(struct drm_device *dev)
1644{
1645 struct drm_i915_private *dev_priv = dev->dev_private;
1646
1647 return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
1648}
1649
1650static void sandybridge_blit_fbc_update(struct drm_device *dev)
1651{
1652 struct drm_i915_private *dev_priv = dev->dev_private;
1653 u32 blt_ecoskpd;
1654
1655 /* Make sure blitter notifies FBC of writes */
1656 gen6_gt_force_wake_get(dev_priv);
1657 blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
1658 blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
1659 GEN6_BLITTER_LOCK_SHIFT;
1660 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
1661 blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
1662 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
1663 blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
1664 GEN6_BLITTER_LOCK_SHIFT);
1665 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
1666 POSTING_READ(GEN6_BLITTER_ECOSKPD);
1667 gen6_gt_force_wake_put(dev_priv);
1668}
1669
1670static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1671{
1672 struct drm_device *dev = crtc->dev;
1673 struct drm_i915_private *dev_priv = dev->dev_private;
1674 struct drm_framebuffer *fb = crtc->fb;
1675 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1676 struct drm_i915_gem_object *obj = intel_fb->obj;
1677 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1678 int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
1679 unsigned long stall_watermark = 200;
1680 u32 dpfc_ctl;
1681
1682 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
1683 dpfc_ctl &= DPFC_RESERVED;
1684 dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
1685 /* Set persistent mode for front-buffer rendering, ala X. */
1686 dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
1687 dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
1688 I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
1689
1690 I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
1691 (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
1692 (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
1693 I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
1694 I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
1695 /* enable it... */
1696 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
1697
1698 if (IS_GEN6(dev)) {
1699 I915_WRITE(SNB_DPFC_CTL_SA,
1700 SNB_CPU_FENCE_ENABLE | obj->fence_reg);
1701 I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
1702 sandybridge_blit_fbc_update(dev);
1703 }
1704
1705 DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
1706}
1707
1708static void ironlake_disable_fbc(struct drm_device *dev)
1709{
1710 struct drm_i915_private *dev_priv = dev->dev_private;
1711 u32 dpfc_ctl;
1712
1713 /* Disable compression */
1714 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
1715 if (dpfc_ctl & DPFC_CTL_EN) {
1716 dpfc_ctl &= ~DPFC_CTL_EN;
1717 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
1718
1719 DRM_DEBUG_KMS("disabled FBC\n");
1720 }
1721}
1722
1723static bool ironlake_fbc_enabled(struct drm_device *dev)
1724{
1725 struct drm_i915_private *dev_priv = dev->dev_private;
1726
1727 return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
1728}
1729
1730bool intel_fbc_enabled(struct drm_device *dev)
1731{
1732 struct drm_i915_private *dev_priv = dev->dev_private;
1733
1734 if (!dev_priv->display.fbc_enabled)
1735 return false;
1736
1737 return dev_priv->display.fbc_enabled(dev);
1738}
1739
1740static void intel_fbc_work_fn(struct work_struct *__work)
1741{
1742 struct intel_fbc_work *work =
1743 container_of(to_delayed_work(__work),
1744 struct intel_fbc_work, work);
1745 struct drm_device *dev = work->crtc->dev;
1746 struct drm_i915_private *dev_priv = dev->dev_private;
1747
1748 mutex_lock(&dev->struct_mutex);
1749 if (work == dev_priv->fbc_work) {
1750 /* Double check that we haven't switched fb without cancelling
1751 * the prior work.
1752 */
1753 if (work->crtc->fb == work->fb) {
1754 dev_priv->display.enable_fbc(work->crtc,
1755 work->interval);
1756
1757 dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
1758 dev_priv->cfb_fb = work->crtc->fb->base.id;
1759 dev_priv->cfb_y = work->crtc->y;
1760 }
1761
1762 dev_priv->fbc_work = NULL;
1763 }
1764 mutex_unlock(&dev->struct_mutex);
1765
1766 kfree(work);
1767}
1768
1769static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
1770{
1771 if (dev_priv->fbc_work == NULL)
1772 return;
1773
1774 DRM_DEBUG_KMS("cancelling pending FBC enable\n");
1775
1776 /* Synchronisation is provided by struct_mutex and checking of
1777 * dev_priv->fbc_work, so we can perform the cancellation
1778 * entirely asynchronously.
1779 */
1780 if (cancel_delayed_work(&dev_priv->fbc_work->work))
1781 /* tasklet was killed before being run, clean up */
1782 kfree(dev_priv->fbc_work);
1783
1784 /* Mark the work as no longer wanted so that if it does
1785 * wake-up (because the work was already running and waiting
1786 * for our mutex), it will discover that is no longer
1787 * necessary to run.
1788 */
1789 dev_priv->fbc_work = NULL;
1790}
1791
1792static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1793{
1794 struct intel_fbc_work *work;
1795 struct drm_device *dev = crtc->dev;
1796 struct drm_i915_private *dev_priv = dev->dev_private;
1797
1798 if (!dev_priv->display.enable_fbc)
1799 return;
1800
1801 intel_cancel_fbc_work(dev_priv);
1802
1803 work = kzalloc(sizeof *work, GFP_KERNEL);
1804 if (work == NULL) {
1805 dev_priv->display.enable_fbc(crtc, interval);
1806 return;
1807 }
1808
1809 work->crtc = crtc;
1810 work->fb = crtc->fb;
1811 work->interval = interval;
1812 INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
1813
1814 dev_priv->fbc_work = work;
1815
1816 DRM_DEBUG_KMS("scheduling delayed FBC enable\n");
1817
1818 /* Delay the actual enabling to let pageflipping cease and the
1819 * display to settle before starting the compression. Note that
1820 * this delay also serves a second purpose: it allows for a
1821 * vblank to pass after disabling the FBC before we attempt
1822 * to modify the control registers.
1823 *
1824 * A more complicated solution would involve tracking vblanks
1825 * following the termination of the page-flipping sequence
1826 * and indeed performing the enable as a co-routine and not
1827 * waiting synchronously upon the vblank.
1828 */
1829 schedule_delayed_work(&work->work, msecs_to_jiffies(50));
1830}
1831
1832void intel_disable_fbc(struct drm_device *dev)
1833{
1834 struct drm_i915_private *dev_priv = dev->dev_private;
1835
1836 intel_cancel_fbc_work(dev_priv);
1837
1838 if (!dev_priv->display.disable_fbc)
1839 return;
1840
1841 dev_priv->display.disable_fbc(dev);
1842 dev_priv->cfb_plane = -1;
1843}
1844
1845/**
1846 * intel_update_fbc - enable/disable FBC as needed
1847 * @dev: the drm_device
1848 *
1849 * Set up the framebuffer compression hardware at mode set time. We
1850 * enable it if possible:
1851 * - plane A only (on pre-965)
1852 * - no pixel mulitply/line duplication
1853 * - no alpha buffer discard
1854 * - no dual wide
1855 * - framebuffer <= 2048 in width, 1536 in height
1856 *
1857 * We can't assume that any compression will take place (worst case),
1858 * so the compressed buffer has to be the same size as the uncompressed
1859 * one. It also must reside (along with the line length buffer) in
1860 * stolen memory.
1861 *
1862 * We need to enable/disable FBC on a global basis.
1863 */
1864static void intel_update_fbc(struct drm_device *dev)
1865{
1866 struct drm_i915_private *dev_priv = dev->dev_private;
1867 struct drm_crtc *crtc = NULL, *tmp_crtc;
1868 struct intel_crtc *intel_crtc;
1869 struct drm_framebuffer *fb;
1870 struct intel_framebuffer *intel_fb;
1871 struct drm_i915_gem_object *obj;
1872 int enable_fbc;
1873
1874 DRM_DEBUG_KMS("\n");
1875
1876 if (!i915_powersave)
1877 return;
1878
1879 if (!I915_HAS_FBC(dev))
1880 return;
1881
1882 /*
1883 * If FBC is already on, we just have to verify that we can
1884 * keep it that way...
1885 * Need to disable if:
1886 * - more than one pipe is active
1887 * - changing FBC params (stride, fence, mode)
1888 * - new fb is too large to fit in compressed buffer
1889 * - going to an unsupported config (interlace, pixel multiply, etc.)
1890 */
1891 list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
1892 if (tmp_crtc->enabled && tmp_crtc->fb) {
1893 if (crtc) {
1894 DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
1895 dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
1896 goto out_disable;
1897 }
1898 crtc = tmp_crtc;
1899 }
1900 }
1901
1902 if (!crtc || crtc->fb == NULL) {
1903 DRM_DEBUG_KMS("no output, disabling\n");
1904 dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
1905 goto out_disable;
1906 }
1907
1908 intel_crtc = to_intel_crtc(crtc);
1909 fb = crtc->fb;
1910 intel_fb = to_intel_framebuffer(fb);
1911 obj = intel_fb->obj;
1912
1913 enable_fbc = i915_enable_fbc;
1914 if (enable_fbc < 0) {
1915 DRM_DEBUG_KMS("fbc set to per-chip default\n");
1916 enable_fbc = 1;
1917 if (INTEL_INFO(dev)->gen <= 6)
1918 enable_fbc = 0;
1919 }
1920 if (!enable_fbc) {
1921 DRM_DEBUG_KMS("fbc disabled per module param\n");
1922 dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
1923 goto out_disable;
1924 }
1925 if (intel_fb->obj->base.size > dev_priv->cfb_size) {
1926 DRM_DEBUG_KMS("framebuffer too large, disabling "
1927 "compression\n");
1928 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
1929 goto out_disable;
1930 }
1931 if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
1932 (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
1933 DRM_DEBUG_KMS("mode incompatible with compression, "
1934 "disabling\n");
1935 dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
1936 goto out_disable;
1937 }
1938 if ((crtc->mode.hdisplay > 2048) ||
1939 (crtc->mode.vdisplay > 1536)) {
1940 DRM_DEBUG_KMS("mode too large for compression, disabling\n");
1941 dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
1942 goto out_disable;
1943 }
1944 if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
1945 DRM_DEBUG_KMS("plane not 0, disabling compression\n");
1946 dev_priv->no_fbc_reason = FBC_BAD_PLANE;
1947 goto out_disable;
1948 }
1949
1950 /* The use of a CPU fence is mandatory in order to detect writes
1951 * by the CPU to the scanout and trigger updates to the FBC.
1952 */
1953 if (obj->tiling_mode != I915_TILING_X ||
1954 obj->fence_reg == I915_FENCE_REG_NONE) {
1955 DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
1956 dev_priv->no_fbc_reason = FBC_NOT_TILED;
1957 goto out_disable;
1958 }
1959
1960 /* If the kernel debugger is active, always disable compression */
1961 if (in_dbg_master())
1962 goto out_disable;
1963
1964 /* If the scanout has not changed, don't modify the FBC settings.
1965 * Note that we make the fundamental assumption that the fb->obj
1966 * cannot be unpinned (and have its GTT offset and fence revoked)
1967 * without first being decoupled from the scanout and FBC disabled.
1968 */
1969 if (dev_priv->cfb_plane == intel_crtc->plane &&
1970 dev_priv->cfb_fb == fb->base.id &&
1971 dev_priv->cfb_y == crtc->y)
1972 return;
1973
1974 if (intel_fbc_enabled(dev)) {
1975 /* We update FBC along two paths, after changing fb/crtc
1976 * configuration (modeswitching) and after page-flipping
1977 * finishes. For the latter, we know that not only did
1978 * we disable the FBC at the start of the page-flip
1979 * sequence, but also more than one vblank has passed.
1980 *
1981 * For the former case of modeswitching, it is possible
1982 * to switch between two FBC valid configurations
1983 * instantaneously so we do need to disable the FBC
1984 * before we can modify its control registers. We also
1985 * have to wait for the next vblank for that to take
1986 * effect. However, since we delay enabling FBC we can
1987 * assume that a vblank has passed since disabling and
1988 * that we can safely alter the registers in the deferred
1989 * callback.
1990 *
1991 * In the scenario that we go from a valid to invalid
1992 * and then back to valid FBC configuration we have
1993 * no strict enforcement that a vblank occurred since
1994 * disabling the FBC. However, along all current pipe
1995 * disabling paths we do need to wait for a vblank at
1996 * some point. And we wait before enabling FBC anyway.
1997 */
1998 DRM_DEBUG_KMS("disabling active FBC for update\n");
1999 intel_disable_fbc(dev);
2000 }
2001
2002 intel_enable_fbc(crtc, 500);
2003 return;
2004
2005out_disable:
2006 /* Multiple disables should be harmless */
2007 if (intel_fbc_enabled(dev)) {
2008 DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
2009 intel_disable_fbc(dev);
2010 }
2011}
2012
2013int 1755int
2014intel_pin_and_fence_fb_obj(struct drm_device *dev, 1756intel_pin_and_fence_fb_obj(struct drm_device *dev,
2015 struct drm_i915_gem_object *obj, 1757 struct drm_i915_gem_object *obj,
@@ -2050,13 +1792,11 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
2050 * framebuffer compression. For simplicity, we always install 1792 * framebuffer compression. For simplicity, we always install
2051 * a fence as the cost is not that onerous. 1793 * a fence as the cost is not that onerous.
2052 */ 1794 */
2053 if (obj->tiling_mode != I915_TILING_NONE) { 1795 ret = i915_gem_object_get_fence(obj);
2054 ret = i915_gem_object_get_fence(obj, pipelined); 1796 if (ret)
2055 if (ret) 1797 goto err_unpin;
2056 goto err_unpin;
2057 1798
2058 i915_gem_object_pin_fence(obj); 1799 i915_gem_object_pin_fence(obj);
2059 }
2060 1800
2061 dev_priv->mm.interruptible = true; 1801 dev_priv->mm.interruptible = true;
2062 return 0; 1802 return 0;
@@ -2137,7 +1877,7 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2137 Start, Offset, x, y, fb->pitches[0]); 1877 Start, Offset, x, y, fb->pitches[0]);
2138 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); 1878 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2139 if (INTEL_INFO(dev)->gen >= 4) { 1879 if (INTEL_INFO(dev)->gen >= 4) {
2140 I915_WRITE(DSPSURF(plane), Start); 1880 I915_MODIFY_DISPBASE(DSPSURF(plane), Start);
2141 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); 1881 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2142 I915_WRITE(DSPADDR(plane), Offset); 1882 I915_WRITE(DSPADDR(plane), Offset);
2143 } else 1883 } else
@@ -2217,7 +1957,7 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
2217 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", 1957 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2218 Start, Offset, x, y, fb->pitches[0]); 1958 Start, Offset, x, y, fb->pitches[0]);
2219 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); 1959 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2220 I915_WRITE(DSPSURF(plane), Start); 1960 I915_MODIFY_DISPBASE(DSPSURF(plane), Start);
2221 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); 1961 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2222 I915_WRITE(DSPADDR(plane), Offset); 1962 I915_WRITE(DSPADDR(plane), Offset);
2223 POSTING_READ(reg); 1963 POSTING_READ(reg);
@@ -2232,16 +1972,12 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2232{ 1972{
2233 struct drm_device *dev = crtc->dev; 1973 struct drm_device *dev = crtc->dev;
2234 struct drm_i915_private *dev_priv = dev->dev_private; 1974 struct drm_i915_private *dev_priv = dev->dev_private;
2235 int ret;
2236
2237 ret = dev_priv->display.update_plane(crtc, fb, x, y);
2238 if (ret)
2239 return ret;
2240 1975
2241 intel_update_fbc(dev); 1976 if (dev_priv->display.disable_fbc)
1977 dev_priv->display.disable_fbc(dev);
2242 intel_increase_pllclock(crtc); 1978 intel_increase_pllclock(crtc);
2243 1979
2244 return 0; 1980 return dev_priv->display.update_plane(crtc, fb, x, y);
2245} 1981}
2246 1982
2247static int 1983static int
@@ -2276,6 +2012,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2276 struct drm_framebuffer *old_fb) 2012 struct drm_framebuffer *old_fb)
2277{ 2013{
2278 struct drm_device *dev = crtc->dev; 2014 struct drm_device *dev = crtc->dev;
2015 struct drm_i915_private *dev_priv = dev->dev_private;
2279 struct drm_i915_master_private *master_priv; 2016 struct drm_i915_master_private *master_priv;
2280 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2017 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2281 int ret; 2018 int ret;
@@ -2286,16 +2023,10 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2286 return 0; 2023 return 0;
2287 } 2024 }
2288 2025
 2289	switch (intel_crtc->plane) {			 2026	if (intel_crtc->plane > dev_priv->num_pipe) {
2290 case 0: 2027 DRM_ERROR("no plane for crtc: plane %d, num_pipes %d\n",
2291 case 1: 2028 intel_crtc->plane,
2292 break; 2029 dev_priv->num_pipe);
2293 case 2:
2294 if (IS_IVYBRIDGE(dev))
2295 break;
2296 /* fall through otherwise */
2297 default:
2298 DRM_ERROR("no plane for crtc\n");
2299 return -EINVAL; 2030 return -EINVAL;
2300 } 2031 }
2301 2032
@@ -2312,8 +2043,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2312 if (old_fb) 2043 if (old_fb)
2313 intel_finish_fb(old_fb); 2044 intel_finish_fb(old_fb);
2314 2045
2315 ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y, 2046 ret = dev_priv->display.update_plane(crtc, crtc->fb, x, y);
2316 LEAVE_ATOMIC_MODE_SET);
2317 if (ret) { 2047 if (ret) {
2318 intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj); 2048 intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
2319 mutex_unlock(&dev->struct_mutex); 2049 mutex_unlock(&dev->struct_mutex);
@@ -2326,6 +2056,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2326 intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj); 2056 intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
2327 } 2057 }
2328 2058
2059 intel_update_fbc(dev);
2329 mutex_unlock(&dev->struct_mutex); 2060 mutex_unlock(&dev->struct_mutex);
2330 2061
2331 if (!dev->primary->master) 2062 if (!dev->primary->master)
@@ -2547,7 +2278,7 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
2547 struct drm_i915_private *dev_priv = dev->dev_private; 2278 struct drm_i915_private *dev_priv = dev->dev_private;
2548 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2279 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2549 int pipe = intel_crtc->pipe; 2280 int pipe = intel_crtc->pipe;
2550 u32 reg, temp, i; 2281 u32 reg, temp, i, retry;
2551 2282
 2552	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit 2283
2553 for train result */ 2284 for train result */
@@ -2599,15 +2330,19 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
2599 POSTING_READ(reg); 2330 POSTING_READ(reg);
2600 udelay(500); 2331 udelay(500);
2601 2332
2602 reg = FDI_RX_IIR(pipe); 2333 for (retry = 0; retry < 5; retry++) {
2603 temp = I915_READ(reg); 2334 reg = FDI_RX_IIR(pipe);
2604 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 2335 temp = I915_READ(reg);
2605 2336 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2606 if (temp & FDI_RX_BIT_LOCK) { 2337 if (temp & FDI_RX_BIT_LOCK) {
2607 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); 2338 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2608 DRM_DEBUG_KMS("FDI train 1 done.\n"); 2339 DRM_DEBUG_KMS("FDI train 1 done.\n");
2609 break; 2340 break;
2341 }
2342 udelay(50);
2610 } 2343 }
2344 if (retry < 5)
2345 break;
2611 } 2346 }
2612 if (i == 4) 2347 if (i == 4)
2613 DRM_ERROR("FDI train 1 fail!\n"); 2348 DRM_ERROR("FDI train 1 fail!\n");
@@ -2648,15 +2383,19 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
2648 POSTING_READ(reg); 2383 POSTING_READ(reg);
2649 udelay(500); 2384 udelay(500);
2650 2385
2651 reg = FDI_RX_IIR(pipe); 2386 for (retry = 0; retry < 5; retry++) {
2652 temp = I915_READ(reg); 2387 reg = FDI_RX_IIR(pipe);
2653 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 2388 temp = I915_READ(reg);
2654 2389 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2655 if (temp & FDI_RX_SYMBOL_LOCK) { 2390 if (temp & FDI_RX_SYMBOL_LOCK) {
2656 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); 2391 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2657 DRM_DEBUG_KMS("FDI train 2 done.\n"); 2392 DRM_DEBUG_KMS("FDI train 2 done.\n");
2658 break; 2393 break;
2394 }
2395 udelay(50);
2659 } 2396 }
2397 if (retry < 5)
2398 break;
2660 } 2399 }
2661 if (i == 4) 2400 if (i == 4)
2662 DRM_ERROR("FDI train 2 fail!\n"); 2401 DRM_ERROR("FDI train 2 fail!\n");
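Both training hunks above replace a single FDI_RX_IIR sample with a bounded poll: up to five reads spaced 50 us apart per voltage/emphasis step, with the retry < 5 check propagating success out of the outer loop. The open-coded loop is equivalent in shape to a helper like this (a sketch with made-up names, assuming the usual i915 register accessors; the ack write stays with the caller):

static bool fdi_wait_for_iir_bit(struct drm_i915_private *dev_priv,
				 u32 reg, u32 bit, int tries)
{
	while (tries--) {
		if (I915_READ(reg) & bit)
			return true;	/* caller writes the bit back to ack */
		udelay(50);
	}
	return false;
}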
@@ -2808,14 +2547,18 @@ static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
2808 POSTING_READ(reg); 2547 POSTING_READ(reg);
2809 udelay(200); 2548 udelay(200);
2810 2549
2811 /* Enable CPU FDI TX PLL, always on for Ironlake */ 2550 /* On Haswell, the PLL configuration for ports and pipes is handled
2812 reg = FDI_TX_CTL(pipe); 2551 * separately, as part of DDI setup */
2813 temp = I915_READ(reg); 2552 if (!IS_HASWELL(dev)) {
2814 if ((temp & FDI_TX_PLL_ENABLE) == 0) { 2553 /* Enable CPU FDI TX PLL, always on for Ironlake */
2815 I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE); 2554 reg = FDI_TX_CTL(pipe);
2555 temp = I915_READ(reg);
2556 if ((temp & FDI_TX_PLL_ENABLE) == 0) {
2557 I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
2816 2558
2817 POSTING_READ(reg); 2559 POSTING_READ(reg);
2818 udelay(100); 2560 udelay(100);
2561 }
2819 } 2562 }
2820} 2563}
2821 2564
@@ -2888,38 +2631,16 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
2888 udelay(100); 2631 udelay(100);
2889} 2632}
2890 2633
2891/*
2892 * When we disable a pipe, we need to clear any pending scanline wait events
2893 * to avoid hanging the ring, which we assume we are waiting on.
2894 */
2895static void intel_clear_scanline_wait(struct drm_device *dev)
2896{
2897 struct drm_i915_private *dev_priv = dev->dev_private;
2898 struct intel_ring_buffer *ring;
2899 u32 tmp;
2900
2901 if (IS_GEN2(dev))
2902 /* Can't break the hang on i8xx */
2903 return;
2904
2905 ring = LP_RING(dev_priv);
2906 tmp = I915_READ_CTL(ring);
2907 if (tmp & RING_WAIT)
2908 I915_WRITE_CTL(ring, tmp);
2909}
2910
2911static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) 2634static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
2912{ 2635{
2913 struct drm_i915_gem_object *obj; 2636 struct drm_device *dev = crtc->dev;
2914 struct drm_i915_private *dev_priv;
2915 2637
2916 if (crtc->fb == NULL) 2638 if (crtc->fb == NULL)
2917 return; 2639 return;
2918 2640
2919 obj = to_intel_framebuffer(crtc->fb)->obj; 2641 mutex_lock(&dev->struct_mutex);
2920 dev_priv = crtc->dev->dev_private; 2642 intel_finish_fb(crtc->fb);
2921 wait_event(dev_priv->pending_flip_queue, 2643 mutex_unlock(&dev->struct_mutex);
2922 atomic_read(&obj->pending_flip) == 0);
2923} 2644}
2924 2645
2925static bool intel_crtc_driving_pch(struct drm_crtc *crtc) 2646static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
@@ -2936,6 +2657,22 @@ static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
2936 if (encoder->base.crtc != crtc) 2657 if (encoder->base.crtc != crtc)
2937 continue; 2658 continue;
2938 2659
2660 /* On Haswell, LPT PCH handles the VGA connection via FDI, and Haswell
2661 * CPU handles all others */
2662 if (IS_HASWELL(dev)) {
2663 /* It is still unclear how this will work on PPT, so throw up a warning */
2664 WARN_ON(!HAS_PCH_LPT(dev));
2665
2666 if (encoder->type == DRM_MODE_ENCODER_DAC) {
2667 DRM_DEBUG_KMS("Haswell detected DAC encoder, assuming is PCH\n");
2668 return true;
2669 } else {
2670 DRM_DEBUG_KMS("Haswell detected encoder %d, assuming is CPU\n",
2671 encoder->type);
2672 return false;
2673 }
2674 }
2675
2939 switch (encoder->type) { 2676 switch (encoder->type) {
2940 case INTEL_OUTPUT_EDP: 2677 case INTEL_OUTPUT_EDP:
2941 if (!intel_encoder_is_pch_edp(&encoder->base)) 2678 if (!intel_encoder_is_pch_edp(&encoder->base))
@@ -2947,6 +2684,97 @@ static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
2947 return true; 2684 return true;
2948} 2685}
2949 2686
2687/* Program iCLKIP clock to the desired frequency */
2688static void lpt_program_iclkip(struct drm_crtc *crtc)
2689{
2690 struct drm_device *dev = crtc->dev;
2691 struct drm_i915_private *dev_priv = dev->dev_private;
2692 u32 divsel, phaseinc, auxdiv, phasedir = 0;
2693 u32 temp;
2694
2695 /* It is necessary to ungate the pixclk gate prior to programming
2696 * the divisors, and gate it back when it is done.
2697 */
2698 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
2699
2700 /* Disable SSCCTL */
2701 intel_sbi_write(dev_priv, SBI_SSCCTL6,
2702 intel_sbi_read(dev_priv, SBI_SSCCTL6) |
2703 SBI_SSCCTL_DISABLE);
2704
2705 /* 20MHz is a corner case which is out of range for the 7-bit divisor */
2706 if (crtc->mode.clock == 20000) {
2707 auxdiv = 1;
2708 divsel = 0x41;
2709 phaseinc = 0x20;
2710 } else {
2711 /* The iCLK virtual clock root frequency is in MHz,
 2712 * but the crtc->mode.clock is in KHz. To get the divisors,
2713 * it is necessary to divide one by another, so we
2714 * convert the virtual clock precision to KHz here for higher
2715 * precision.
2716 */
2717 u32 iclk_virtual_root_freq = 172800 * 1000;
2718 u32 iclk_pi_range = 64;
2719 u32 desired_divisor, msb_divisor_value, pi_value;
2720
2721 desired_divisor = (iclk_virtual_root_freq / crtc->mode.clock);
2722 msb_divisor_value = desired_divisor / iclk_pi_range;
2723 pi_value = desired_divisor % iclk_pi_range;
2724
2725 auxdiv = 0;
2726 divsel = msb_divisor_value - 2;
2727 phaseinc = pi_value;
2728 }
2729
2730 /* This should not happen with any sane values */
2731 WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
2732 ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
2733 WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
2734 ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
2735
2736 DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
2737 crtc->mode.clock,
2738 auxdiv,
2739 divsel,
2740 phasedir,
2741 phaseinc);
2742
2743 /* Program SSCDIVINTPHASE6 */
2744 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6);
2745 temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
2746 temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
2747 temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
2748 temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
2749 temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
2750 temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
2751
2752 intel_sbi_write(dev_priv,
2753 SBI_SSCDIVINTPHASE6,
2754 temp);
2755
2756 /* Program SSCAUXDIV */
2757 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6);
2758 temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
2759 temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
2760 intel_sbi_write(dev_priv,
2761 SBI_SSCAUXDIV6,
2762 temp);
2763
2764
2765 /* Enable modulator and associated divider */
2766 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6);
2767 temp &= ~SBI_SSCCTL_DISABLE;
2768 intel_sbi_write(dev_priv,
2769 SBI_SSCCTL6,
2770 temp);
2771
2772 /* Wait for initialization time */
2773 udelay(24);
2774
2775 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
2776}
2777
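The divisor math in lpt_program_iclkip() is easiest to see with concrete numbers. A worked sketch for a hypothetical 148500 kHz (1080p) pixel clock, using the virtual root frequency and 64-step phase-increment range from the function above:

	u32 iclk_virtual_root_freq = 172800 * 1000;	/* kHz */
	u32 iclk_pi_range = 64;
	u32 clock = 148500;				/* crtc->mode.clock, kHz */

	u32 desired_divisor = iclk_virtual_root_freq / clock;	 /* = 1163 */
	u32 msb_divisor_value = desired_divisor / iclk_pi_range; /* = 18 */
	u32 pi_value = desired_divisor % iclk_pi_range;		 /* = 11 */
	/* programmed as divsel = 18 - 2 = 16, phaseinc = 11, auxdiv = 0 */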
2950/* 2778/*
2951 * Enable PCH resources required for PCH ports: 2779 * Enable PCH resources required for PCH ports:
2952 * - PCH PLLs 2780 * - PCH PLLs
@@ -2961,29 +2789,41 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
2961 struct drm_i915_private *dev_priv = dev->dev_private; 2789 struct drm_i915_private *dev_priv = dev->dev_private;
2962 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2790 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2963 int pipe = intel_crtc->pipe; 2791 int pipe = intel_crtc->pipe;
2964 u32 reg, temp, transc_sel; 2792 u32 reg, temp;
2793
2794 assert_transcoder_disabled(dev_priv, pipe);
2965 2795
2966 /* For PCH output, training FDI link */ 2796 /* For PCH output, training FDI link */
2967 dev_priv->display.fdi_link_train(crtc); 2797 dev_priv->display.fdi_link_train(crtc);
2968 2798
2969 intel_enable_pch_pll(dev_priv, pipe); 2799 intel_enable_pch_pll(intel_crtc);
2970 2800
2971 if (HAS_PCH_CPT(dev)) { 2801 if (HAS_PCH_LPT(dev)) {
2972 transc_sel = intel_crtc->use_pll_a ? TRANSC_DPLLA_SEL : 2802 DRM_DEBUG_KMS("LPT detected: programming iCLKIP\n");
2973 TRANSC_DPLLB_SEL; 2803 lpt_program_iclkip(crtc);
2804 } else if (HAS_PCH_CPT(dev)) {
2805 u32 sel;
2974 2806
2975 /* Be sure PCH DPLL SEL is set */
2976 temp = I915_READ(PCH_DPLL_SEL); 2807 temp = I915_READ(PCH_DPLL_SEL);
2977 if (pipe == 0) { 2808 switch (pipe) {
2978 temp &= ~(TRANSA_DPLLB_SEL); 2809 default:
2979 temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL); 2810 case 0:
2980 } else if (pipe == 1) { 2811 temp |= TRANSA_DPLL_ENABLE;
2981 temp &= ~(TRANSB_DPLLB_SEL); 2812 sel = TRANSA_DPLLB_SEL;
2982 temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); 2813 break;
2983 } else if (pipe == 2) { 2814 case 1:
2984 temp &= ~(TRANSC_DPLLB_SEL); 2815 temp |= TRANSB_DPLL_ENABLE;
2985 temp |= (TRANSC_DPLL_ENABLE | transc_sel); 2816 sel = TRANSB_DPLLB_SEL;
2817 break;
2818 case 2:
2819 temp |= TRANSC_DPLL_ENABLE;
2820 sel = TRANSC_DPLLB_SEL;
2821 break;
2986 } 2822 }
2823 if (intel_crtc->pch_pll->pll_reg == _PCH_DPLL_B)
2824 temp |= sel;
2825 else
2826 temp &= ~sel;
2987 I915_WRITE(PCH_DPLL_SEL, temp); 2827 I915_WRITE(PCH_DPLL_SEL, temp);
2988 } 2828 }
2989 2829
@@ -2998,7 +2838,8 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
2998 I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe))); 2838 I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe)));
2999 I915_WRITE(TRANS_VSYNCSHIFT(pipe), I915_READ(VSYNCSHIFT(pipe))); 2839 I915_WRITE(TRANS_VSYNCSHIFT(pipe), I915_READ(VSYNCSHIFT(pipe)));
3000 2840
3001 intel_fdi_normal_train(crtc); 2841 if (!IS_HASWELL(dev))
2842 intel_fdi_normal_train(crtc);
3002 2843
3003 /* For PCH DP, enable TRANS_DP_CTL */ 2844 /* For PCH DP, enable TRANS_DP_CTL */
3004 if (HAS_PCH_CPT(dev) && 2845 if (HAS_PCH_CPT(dev) &&
@@ -3041,6 +2882,93 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
3041 intel_enable_transcoder(dev_priv, pipe); 2882 intel_enable_transcoder(dev_priv, pipe);
3042} 2883}
3043 2884
2885static void intel_put_pch_pll(struct intel_crtc *intel_crtc)
2886{
2887 struct intel_pch_pll *pll = intel_crtc->pch_pll;
2888
2889 if (pll == NULL)
2890 return;
2891
2892 if (pll->refcount == 0) {
2893 WARN(1, "bad PCH PLL refcount\n");
2894 return;
2895 }
2896
2897 --pll->refcount;
2898 intel_crtc->pch_pll = NULL;
2899}
2900
2901static struct intel_pch_pll *intel_get_pch_pll(struct intel_crtc *intel_crtc, u32 dpll, u32 fp)
2902{
2903 struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
2904 struct intel_pch_pll *pll;
2905 int i;
2906
2907 pll = intel_crtc->pch_pll;
2908 if (pll) {
2909 DRM_DEBUG_KMS("CRTC:%d reusing existing PCH PLL %x\n",
2910 intel_crtc->base.base.id, pll->pll_reg);
2911 goto prepare;
2912 }
2913
2914 if (HAS_PCH_IBX(dev_priv->dev)) {
2915 /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
2916 i = intel_crtc->pipe;
2917 pll = &dev_priv->pch_plls[i];
2918
2919 DRM_DEBUG_KMS("CRTC:%d using pre-allocated PCH PLL %x\n",
2920 intel_crtc->base.base.id, pll->pll_reg);
2921
2922 goto found;
2923 }
2924
2925 for (i = 0; i < dev_priv->num_pch_pll; i++) {
2926 pll = &dev_priv->pch_plls[i];
2927
2928 /* Only want to check enabled timings first */
2929 if (pll->refcount == 0)
2930 continue;
2931
2932 if (dpll == (I915_READ(pll->pll_reg) & 0x7fffffff) &&
2933 fp == I915_READ(pll->fp0_reg)) {
2934 DRM_DEBUG_KMS("CRTC:%d sharing existing PCH PLL %x (refcount %d, ative %d)\n",
2935 intel_crtc->base.base.id,
2936 pll->pll_reg, pll->refcount, pll->active);
2937
2938 goto found;
2939 }
2940 }
2941
2942 /* Ok no matching timings, maybe there's a free one? */
2943 for (i = 0; i < dev_priv->num_pch_pll; i++) {
2944 pll = &dev_priv->pch_plls[i];
2945 if (pll->refcount == 0) {
2946 DRM_DEBUG_KMS("CRTC:%d allocated PCH PLL %x\n",
2947 intel_crtc->base.base.id, pll->pll_reg);
2948 goto found;
2949 }
2950 }
2951
2952 return NULL;
2953
2954found:
2955 intel_crtc->pch_pll = pll;
2956 pll->refcount++;
2957 DRM_DEBUG_DRIVER("using pll %d for pipe %d\n", i, intel_crtc->pipe);
2958prepare: /* separate function? */
2959 DRM_DEBUG_DRIVER("switching PLL %x off\n", pll->pll_reg);
2960
2961 /* Wait for the clocks to stabilize before rewriting the regs */
2962 I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE);
2963 POSTING_READ(pll->pll_reg);
2964 udelay(150);
2965
2966 I915_WRITE(pll->fp0_reg, fp);
2967 I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE);
2968 pll->on = false;
2969 return pll;
2970}
2971
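The helpers above implement refcounted sharing of the limited PCH PLLs: reuse the crtc's current PLL if possible, then share an enabled PLL whose dpll/fp values match, then claim a free one. A hedged sketch of a caller; the actual mode-set call site and its error handling live outside this hunk, so the names below are only illustrative:

	/* Illustrative only: intel_get_pch_pll() stores the PLL in
	 * intel_crtc->pch_pll and bumps its refcount on success. */
	if (intel_get_pch_pll(intel_crtc, dpll, fp) == NULL) {
		DRM_DEBUG_DRIVER("no PCH PLL available for pipe %d\n",
				 intel_crtc->pipe);
		return -EINVAL;		/* hypothetical error path */
	}
	/* ... mode set runs with the shared PLL ... */
	intel_put_pch_pll(intel_crtc);	/* later, from ironlake_crtc_off() */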
3044void intel_cpt_verify_modeset(struct drm_device *dev, int pipe) 2972void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
3045{ 2973{
3046 struct drm_i915_private *dev_priv = dev->dev_private; 2974 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3185,8 +3113,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
3185 } 3113 }
3186 3114
3187 /* disable PCH DPLL */ 3115 /* disable PCH DPLL */
3188 if (!intel_crtc->no_pll) 3116 intel_disable_pch_pll(intel_crtc);
3189 intel_disable_pch_pll(dev_priv, pipe);
3190 3117
3191 /* Switch from PCDclk to Rawclk */ 3118 /* Switch from PCDclk to Rawclk */
3192 reg = FDI_RX_CTL(pipe); 3119 reg = FDI_RX_CTL(pipe);
@@ -3214,7 +3141,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
3214 3141
3215 mutex_lock(&dev->struct_mutex); 3142 mutex_lock(&dev->struct_mutex);
3216 intel_update_fbc(dev); 3143 intel_update_fbc(dev);
3217 intel_clear_scanline_wait(dev);
3218 mutex_unlock(&dev->struct_mutex); 3144 mutex_unlock(&dev->struct_mutex);
3219} 3145}
3220 3146
@@ -3242,6 +3168,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
3242 } 3168 }
3243} 3169}
3244 3170
3171static void ironlake_crtc_off(struct drm_crtc *crtc)
3172{
3173 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3174 intel_put_pch_pll(intel_crtc);
3175}
3176
3245static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable) 3177static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
3246{ 3178{
3247 if (!enable && intel_crtc->overlay) { 3179 if (!enable && intel_crtc->overlay) {
@@ -3313,7 +3245,6 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
3313 intel_crtc->active = false; 3245 intel_crtc->active = false;
3314 intel_update_fbc(dev); 3246 intel_update_fbc(dev);
3315 intel_update_watermarks(dev); 3247 intel_update_watermarks(dev);
3316 intel_clear_scanline_wait(dev);
3317} 3248}
3318 3249
3319static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) 3250static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -3333,6 +3264,10 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
3333 } 3264 }
3334} 3265}
3335 3266
3267static void i9xx_crtc_off(struct drm_crtc *crtc)
3268{
3269}
3270
3336/** 3271/**
3337 * Sets the power management mode of the pipe and plane. 3272 * Sets the power management mode of the pipe and plane.
3338 */ 3273 */
@@ -3380,25 +3315,11 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
3380{ 3315{
3381 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; 3316 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
3382 struct drm_device *dev = crtc->dev; 3317 struct drm_device *dev = crtc->dev;
3383 3318 struct drm_i915_private *dev_priv = dev->dev_private;
3384 /* Flush any pending WAITs before we disable the pipe. Note that
3385 * we need to drop the struct_mutex in order to acquire it again
3386 * during the lowlevel dpms routines around a couple of the
3387 * operations. It does not look trivial nor desirable to move
3388 * that locking higher. So instead we leave a window for the
3389 * submission of further commands on the fb before we can actually
3390 * disable it. This race with userspace exists anyway, and we can
3391 * only rely on the pipe being disabled by userspace after it
3392 * receives the hotplug notification and has flushed any pending
3393 * batches.
3394 */
3395 if (crtc->fb) {
3396 mutex_lock(&dev->struct_mutex);
3397 intel_finish_fb(crtc->fb);
3398 mutex_unlock(&dev->struct_mutex);
3399 }
3400 3319
3401 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); 3320 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
3321 dev_priv->display.off(crtc);
3322
3402 assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane); 3323 assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
3403 assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe); 3324 assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
3404 3325
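intel_crtc_disable() now funnels platform-specific teardown through the new dev_priv->display.off() hook; on PCH platforms this is where the shared PLL reference is dropped. A hedged sketch of the wiring (the real assignments happen in intel_init_display(), outside this hunk):

	if (HAS_PCH_SPLIT(dev))
		dev_priv->display.off = ironlake_crtc_off;	/* puts the PCH PLL */
	else
		dev_priv->display.off = i9xx_crtc_off;		/* no-op */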
@@ -3448,8 +3369,7 @@ void intel_encoder_commit(struct drm_encoder *encoder)
3448{ 3369{
3449 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; 3370 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
3450 struct drm_device *dev = encoder->dev; 3371 struct drm_device *dev = encoder->dev;
3451 struct intel_encoder *intel_encoder = to_intel_encoder(encoder); 3372 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
3452 struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
3453 3373
3454 /* lvds has its own version of commit see intel_lvds_commit */ 3374 /* lvds has its own version of commit see intel_lvds_commit */
3455 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); 3375 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
@@ -3487,6 +3407,11 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
3487 return true; 3407 return true;
3488} 3408}
3489 3409
3410static int valleyview_get_display_clock_speed(struct drm_device *dev)
3411{
3412 return 400000; /* FIXME */
3413}
3414
3490static int i945_get_display_clock_speed(struct drm_device *dev) 3415static int i945_get_display_clock_speed(struct drm_device *dev)
3491{ 3416{
3492 return 400000; 3417 return 400000;
@@ -3584,1342 +3509,6 @@ ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
3584 fdi_reduce_ratio(&m_n->link_m, &m_n->link_n); 3509 fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
3585} 3510}
3586 3511
3587
3588struct intel_watermark_params {
3589 unsigned long fifo_size;
3590 unsigned long max_wm;
3591 unsigned long default_wm;
3592 unsigned long guard_size;
3593 unsigned long cacheline_size;
3594};
3595
3596/* Pineview has different values for various configs */
3597static const struct intel_watermark_params pineview_display_wm = {
3598 PINEVIEW_DISPLAY_FIFO,
3599 PINEVIEW_MAX_WM,
3600 PINEVIEW_DFT_WM,
3601 PINEVIEW_GUARD_WM,
3602 PINEVIEW_FIFO_LINE_SIZE
3603};
3604static const struct intel_watermark_params pineview_display_hplloff_wm = {
3605 PINEVIEW_DISPLAY_FIFO,
3606 PINEVIEW_MAX_WM,
3607 PINEVIEW_DFT_HPLLOFF_WM,
3608 PINEVIEW_GUARD_WM,
3609 PINEVIEW_FIFO_LINE_SIZE
3610};
3611static const struct intel_watermark_params pineview_cursor_wm = {
3612 PINEVIEW_CURSOR_FIFO,
3613 PINEVIEW_CURSOR_MAX_WM,
3614 PINEVIEW_CURSOR_DFT_WM,
3615 PINEVIEW_CURSOR_GUARD_WM,
3616 PINEVIEW_FIFO_LINE_SIZE,
3617};
3618static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
3619 PINEVIEW_CURSOR_FIFO,
3620 PINEVIEW_CURSOR_MAX_WM,
3621 PINEVIEW_CURSOR_DFT_WM,
3622 PINEVIEW_CURSOR_GUARD_WM,
3623 PINEVIEW_FIFO_LINE_SIZE
3624};
3625static const struct intel_watermark_params g4x_wm_info = {
3626 G4X_FIFO_SIZE,
3627 G4X_MAX_WM,
3628 G4X_MAX_WM,
3629 2,
3630 G4X_FIFO_LINE_SIZE,
3631};
3632static const struct intel_watermark_params g4x_cursor_wm_info = {
3633 I965_CURSOR_FIFO,
3634 I965_CURSOR_MAX_WM,
3635 I965_CURSOR_DFT_WM,
3636 2,
3637 G4X_FIFO_LINE_SIZE,
3638};
3639static const struct intel_watermark_params i965_cursor_wm_info = {
3640 I965_CURSOR_FIFO,
3641 I965_CURSOR_MAX_WM,
3642 I965_CURSOR_DFT_WM,
3643 2,
3644 I915_FIFO_LINE_SIZE,
3645};
3646static const struct intel_watermark_params i945_wm_info = {
3647 I945_FIFO_SIZE,
3648 I915_MAX_WM,
3649 1,
3650 2,
3651 I915_FIFO_LINE_SIZE
3652};
3653static const struct intel_watermark_params i915_wm_info = {
3654 I915_FIFO_SIZE,
3655 I915_MAX_WM,
3656 1,
3657 2,
3658 I915_FIFO_LINE_SIZE
3659};
3660static const struct intel_watermark_params i855_wm_info = {
3661 I855GM_FIFO_SIZE,
3662 I915_MAX_WM,
3663 1,
3664 2,
3665 I830_FIFO_LINE_SIZE
3666};
3667static const struct intel_watermark_params i830_wm_info = {
3668 I830_FIFO_SIZE,
3669 I915_MAX_WM,
3670 1,
3671 2,
3672 I830_FIFO_LINE_SIZE
3673};
3674
3675static const struct intel_watermark_params ironlake_display_wm_info = {
3676 ILK_DISPLAY_FIFO,
3677 ILK_DISPLAY_MAXWM,
3678 ILK_DISPLAY_DFTWM,
3679 2,
3680 ILK_FIFO_LINE_SIZE
3681};
3682static const struct intel_watermark_params ironlake_cursor_wm_info = {
3683 ILK_CURSOR_FIFO,
3684 ILK_CURSOR_MAXWM,
3685 ILK_CURSOR_DFTWM,
3686 2,
3687 ILK_FIFO_LINE_SIZE
3688};
3689static const struct intel_watermark_params ironlake_display_srwm_info = {
3690 ILK_DISPLAY_SR_FIFO,
3691 ILK_DISPLAY_MAX_SRWM,
3692 ILK_DISPLAY_DFT_SRWM,
3693 2,
3694 ILK_FIFO_LINE_SIZE
3695};
3696static const struct intel_watermark_params ironlake_cursor_srwm_info = {
3697 ILK_CURSOR_SR_FIFO,
3698 ILK_CURSOR_MAX_SRWM,
3699 ILK_CURSOR_DFT_SRWM,
3700 2,
3701 ILK_FIFO_LINE_SIZE
3702};
3703
3704static const struct intel_watermark_params sandybridge_display_wm_info = {
3705 SNB_DISPLAY_FIFO,
3706 SNB_DISPLAY_MAXWM,
3707 SNB_DISPLAY_DFTWM,
3708 2,
3709 SNB_FIFO_LINE_SIZE
3710};
3711static const struct intel_watermark_params sandybridge_cursor_wm_info = {
3712 SNB_CURSOR_FIFO,
3713 SNB_CURSOR_MAXWM,
3714 SNB_CURSOR_DFTWM,
3715 2,
3716 SNB_FIFO_LINE_SIZE
3717};
3718static const struct intel_watermark_params sandybridge_display_srwm_info = {
3719 SNB_DISPLAY_SR_FIFO,
3720 SNB_DISPLAY_MAX_SRWM,
3721 SNB_DISPLAY_DFT_SRWM,
3722 2,
3723 SNB_FIFO_LINE_SIZE
3724};
3725static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
3726 SNB_CURSOR_SR_FIFO,
3727 SNB_CURSOR_MAX_SRWM,
3728 SNB_CURSOR_DFT_SRWM,
3729 2,
3730 SNB_FIFO_LINE_SIZE
3731};
3732
3733
3734/**
3735 * intel_calculate_wm - calculate watermark level
3736 * @clock_in_khz: pixel clock
3737 * @wm: chip FIFO params
3738 * @pixel_size: display pixel size
3739 * @latency_ns: memory latency for the platform
3740 *
3741 * Calculate the watermark level (the level at which the display plane will
3742 * start fetching from memory again). Each chip has a different display
3743 * FIFO size and allocation, so the caller needs to figure that out and pass
3744 * in the correct intel_watermark_params structure.
3745 *
3746 * As the pixel clock runs, the FIFO will be drained at a rate that depends
3747 * on the pixel size. When it reaches the watermark level, it'll start
 3748 * fetching FIFO-line-sized chunks from memory until the FIFO fills
3749 * past the watermark point. If the FIFO drains completely, a FIFO underrun
3750 * will occur, and a display engine hang could result.
3751 */
3752static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
3753 const struct intel_watermark_params *wm,
3754 int fifo_size,
3755 int pixel_size,
3756 unsigned long latency_ns)
3757{
3758 long entries_required, wm_size;
3759
3760 /*
3761 * Note: we need to make sure we don't overflow for various clock &
3762 * latency values.
3763 * clocks go from a few thousand to several hundred thousand.
3764 * latency is usually a few thousand
3765 */
3766 entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
3767 1000;
3768 entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);
3769
3770 DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);
3771
3772 wm_size = fifo_size - (entries_required + wm->guard_size);
3773
3774 DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);
3775
3776 /* Don't promote wm_size to unsigned... */
3777 if (wm_size > (long)wm->max_wm)
3778 wm_size = wm->max_wm;
3779 if (wm_size <= 0)
3780 wm_size = wm->default_wm;
3781 return wm_size;
3782}
3783
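A worked pass through intel_calculate_wm() with illustrative numbers (100000 kHz dotclock, 4 bytes per pixel, latency_ns = 5000, 64-byte cachelines, a 96-entry FIFO and guard_size = 2):

	/*
	 * entries_required = (100000 / 1000) * 4 * 5000 / 1000 = 2000 bytes
	 *                  = DIV_ROUND_UP(2000, 64)            = 32 cachelines
	 * wm_size          = 96 - (32 + 2)                     = 62
	 *
	 * i.e. the plane starts refilling once the FIFO drains to 62 entries.
	 */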
3784struct cxsr_latency {
3785 int is_desktop;
3786 int is_ddr3;
3787 unsigned long fsb_freq;
3788 unsigned long mem_freq;
3789 unsigned long display_sr;
3790 unsigned long display_hpll_disable;
3791 unsigned long cursor_sr;
3792 unsigned long cursor_hpll_disable;
3793};
3794
3795static const struct cxsr_latency cxsr_latency_table[] = {
3796 {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */
3797 {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */
3798 {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */
3799 {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */
3800 {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */
3801
3802 {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */
3803 {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */
3804 {1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */
3805 {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */
3806 {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */
3807
3808 {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */
3809 {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */
3810 {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */
3811 {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */
3812 {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */
3813
3814 {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */
3815 {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */
3816 {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */
3817 {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */
3818 {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */
3819
3820 {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */
3821 {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */
3822 {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */
3823 {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */
3824 {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */
3825
3826 {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */
3827 {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */
3828 {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */
3829 {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */
3830 {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
3831};
3832
3833static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
3834 int is_ddr3,
3835 int fsb,
3836 int mem)
3837{
3838 const struct cxsr_latency *latency;
3839 int i;
3840
3841 if (fsb == 0 || mem == 0)
3842 return NULL;
3843
3844 for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
3845 latency = &cxsr_latency_table[i];
3846 if (is_desktop == latency->is_desktop &&
3847 is_ddr3 == latency->is_ddr3 &&
3848 fsb == latency->fsb_freq && mem == latency->mem_freq)
3849 return latency;
3850 }
3851
3852 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
3853
3854 return NULL;
3855}
3856
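An illustrative lookup against the table above: a desktop Pineview-G part (is_desktop = 1) with DDR2 memory on an 800 MHz FSB and 667 MHz memory resolves to the {1, 0, 800, 667, ...} row:

	const struct cxsr_latency *lat =
		intel_get_cxsr_latency(1, 0, 800, 667);
	/* lat->display_sr == 3354, lat->cursor_sr == 3807 */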
3857static void pineview_disable_cxsr(struct drm_device *dev)
3858{
3859 struct drm_i915_private *dev_priv = dev->dev_private;
3860
3861 /* deactivate cxsr */
3862 I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
3863}
3864
3865/*
3866 * Latency for FIFO fetches is dependent on several factors:
3867 * - memory configuration (speed, channels)
3868 * - chipset
3869 * - current MCH state
3870 * It can be fairly high in some situations, so here we assume a fairly
3871 * pessimal value. It's a tradeoff between extra memory fetches (if we
3872 * set this value too high, the FIFO will fetch frequently to stay full)
3873 * and power consumption (set it too low to save power and we might see
3874 * FIFO underruns and display "flicker").
3875 *
3876 * A value of 5us seems to be a good balance; safe for very low end
3877 * platforms but not overly aggressive on lower latency configs.
3878 */
3879static const int latency_ns = 5000;
3880
3881static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
3882{
3883 struct drm_i915_private *dev_priv = dev->dev_private;
3884 uint32_t dsparb = I915_READ(DSPARB);
3885 int size;
3886
3887 size = dsparb & 0x7f;
3888 if (plane)
3889 size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
3890
3891 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3892 plane ? "B" : "A", size);
3893
3894 return size;
3895}
3896
3897static int i85x_get_fifo_size(struct drm_device *dev, int plane)
3898{
3899 struct drm_i915_private *dev_priv = dev->dev_private;
3900 uint32_t dsparb = I915_READ(DSPARB);
3901 int size;
3902
3903 size = dsparb & 0x1ff;
3904 if (plane)
3905 size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
3906 size >>= 1; /* Convert to cachelines */
3907
3908 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3909 plane ? "B" : "A", size);
3910
3911 return size;
3912}
3913
3914static int i845_get_fifo_size(struct drm_device *dev, int plane)
3915{
3916 struct drm_i915_private *dev_priv = dev->dev_private;
3917 uint32_t dsparb = I915_READ(DSPARB);
3918 int size;
3919
3920 size = dsparb & 0x7f;
3921 size >>= 2; /* Convert to cachelines */
3922
3923 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3924 plane ? "B" : "A",
3925 size);
3926
3927 return size;
3928}
3929
3930static int i830_get_fifo_size(struct drm_device *dev, int plane)
3931{
3932 struct drm_i915_private *dev_priv = dev->dev_private;
3933 uint32_t dsparb = I915_READ(DSPARB);
3934 int size;
3935
3936 size = dsparb & 0x7f;
3937 size >>= 1; /* Convert to cachelines */
3938
3939 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3940 plane ? "B" : "A", size);
3941
3942 return size;
3943}
3944
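A worked decode for the i9xx variant above, assuming the usual 7-bit DSPARB split fields: a hypothetical DSPARB value of 0x3030 puts plane A's FIFO end at 0x30 (48) and the B start at 0x60 (96), so the helper reports 48 entries for plane A and 96 - 48 = 48 for plane B.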
3945static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
3946{
3947 struct drm_crtc *crtc, *enabled = NULL;
3948
3949 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3950 if (crtc->enabled && crtc->fb) {
3951 if (enabled)
3952 return NULL;
3953 enabled = crtc;
3954 }
3955 }
3956
3957 return enabled;
3958}
3959
3960static void pineview_update_wm(struct drm_device *dev)
3961{
3962 struct drm_i915_private *dev_priv = dev->dev_private;
3963 struct drm_crtc *crtc;
3964 const struct cxsr_latency *latency;
3965 u32 reg;
3966 unsigned long wm;
3967
3968 latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
3969 dev_priv->fsb_freq, dev_priv->mem_freq);
3970 if (!latency) {
3971 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
3972 pineview_disable_cxsr(dev);
3973 return;
3974 }
3975
3976 crtc = single_enabled_crtc(dev);
3977 if (crtc) {
3978 int clock = crtc->mode.clock;
3979 int pixel_size = crtc->fb->bits_per_pixel / 8;
3980
3981 /* Display SR */
3982 wm = intel_calculate_wm(clock, &pineview_display_wm,
3983 pineview_display_wm.fifo_size,
3984 pixel_size, latency->display_sr);
3985 reg = I915_READ(DSPFW1);
3986 reg &= ~DSPFW_SR_MASK;
3987 reg |= wm << DSPFW_SR_SHIFT;
3988 I915_WRITE(DSPFW1, reg);
3989 DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
3990
3991 /* cursor SR */
3992 wm = intel_calculate_wm(clock, &pineview_cursor_wm,
3993 pineview_display_wm.fifo_size,
3994 pixel_size, latency->cursor_sr);
3995 reg = I915_READ(DSPFW3);
3996 reg &= ~DSPFW_CURSOR_SR_MASK;
3997 reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
3998 I915_WRITE(DSPFW3, reg);
3999
4000 /* Display HPLL off SR */
4001 wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
4002 pineview_display_hplloff_wm.fifo_size,
4003 pixel_size, latency->display_hpll_disable);
4004 reg = I915_READ(DSPFW3);
4005 reg &= ~DSPFW_HPLL_SR_MASK;
4006 reg |= wm & DSPFW_HPLL_SR_MASK;
4007 I915_WRITE(DSPFW3, reg);
4008
4009 /* cursor HPLL off SR */
4010 wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
4011 pineview_display_hplloff_wm.fifo_size,
4012 pixel_size, latency->cursor_hpll_disable);
4013 reg = I915_READ(DSPFW3);
4014 reg &= ~DSPFW_HPLL_CURSOR_MASK;
4015 reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
4016 I915_WRITE(DSPFW3, reg);
4017 DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
4018
4019 /* activate cxsr */
4020 I915_WRITE(DSPFW3,
4021 I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
4022 DRM_DEBUG_KMS("Self-refresh is enabled\n");
4023 } else {
4024 pineview_disable_cxsr(dev);
4025 DRM_DEBUG_KMS("Self-refresh is disabled\n");
4026 }
4027}
4028
4029static bool g4x_compute_wm0(struct drm_device *dev,
4030 int plane,
4031 const struct intel_watermark_params *display,
4032 int display_latency_ns,
4033 const struct intel_watermark_params *cursor,
4034 int cursor_latency_ns,
4035 int *plane_wm,
4036 int *cursor_wm)
4037{
4038 struct drm_crtc *crtc;
4039 int htotal, hdisplay, clock, pixel_size;
4040 int line_time_us, line_count;
4041 int entries, tlb_miss;
4042
4043 crtc = intel_get_crtc_for_plane(dev, plane);
4044 if (crtc->fb == NULL || !crtc->enabled) {
4045 *cursor_wm = cursor->guard_size;
4046 *plane_wm = display->guard_size;
4047 return false;
4048 }
4049
4050 htotal = crtc->mode.htotal;
4051 hdisplay = crtc->mode.hdisplay;
4052 clock = crtc->mode.clock;
4053 pixel_size = crtc->fb->bits_per_pixel / 8;
4054
4055 /* Use the small buffer method to calculate plane watermark */
4056 entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
4057 tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
4058 if (tlb_miss > 0)
4059 entries += tlb_miss;
4060 entries = DIV_ROUND_UP(entries, display->cacheline_size);
4061 *plane_wm = entries + display->guard_size;
4062 if (*plane_wm > (int)display->max_wm)
4063 *plane_wm = display->max_wm;
4064
4065 /* Use the large buffer method to calculate cursor watermark */
4066 line_time_us = ((htotal * 1000) / clock);
4067 line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
4068 entries = line_count * 64 * pixel_size;
4069 tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
4070 if (tlb_miss > 0)
4071 entries += tlb_miss;
4072 entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
4073 *cursor_wm = entries + cursor->guard_size;
4074 if (*cursor_wm > (int)cursor->max_wm)
4075 *cursor_wm = (int)cursor->max_wm;
4076
4077 return true;
4078}
4079
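Worked wm0 numbers for g4x_compute_wm0() (illustrative: a 100000 kHz mode, 1920 pixels wide, 4 bytes per pixel, 5000 ns latency, a 127-entry FIFO with 64-byte cachelines and guard_size = 2):

	/*
	 * entries  = (100000 * 4 / 1000) * 5000 / 1000 = 2000 bytes
	 * tlb_miss = 127 * 64 - 1920 * 8 < 0, so no adjustment
	 * entries  = DIV_ROUND_UP(2000, 64)            = 32
	 * plane_wm = 32 + 2                            = 34
	 */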
4080/*
4081 * Check the wm result.
4082 *
 4083 * If any calculated watermark value is larger than the maximum value that
4084 * can be programmed into the associated watermark register, that watermark
4085 * must be disabled.
4086 */
4087static bool g4x_check_srwm(struct drm_device *dev,
4088 int display_wm, int cursor_wm,
4089 const struct intel_watermark_params *display,
4090 const struct intel_watermark_params *cursor)
4091{
4092 DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
4093 display_wm, cursor_wm);
4094
4095 if (display_wm > display->max_wm) {
4096 DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
4097 display_wm, display->max_wm);
4098 return false;
4099 }
4100
4101 if (cursor_wm > cursor->max_wm) {
4102 DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
4103 cursor_wm, cursor->max_wm);
4104 return false;
4105 }
4106
4107 if (!(display_wm || cursor_wm)) {
4108 DRM_DEBUG_KMS("SR latency is 0, disabling\n");
4109 return false;
4110 }
4111
4112 return true;
4113}
4114
4115static bool g4x_compute_srwm(struct drm_device *dev,
4116 int plane,
4117 int latency_ns,
4118 const struct intel_watermark_params *display,
4119 const struct intel_watermark_params *cursor,
4120 int *display_wm, int *cursor_wm)
4121{
4122 struct drm_crtc *crtc;
4123 int hdisplay, htotal, pixel_size, clock;
4124 unsigned long line_time_us;
4125 int line_count, line_size;
4126 int small, large;
4127 int entries;
4128
4129 if (!latency_ns) {
4130 *display_wm = *cursor_wm = 0;
4131 return false;
4132 }
4133
4134 crtc = intel_get_crtc_for_plane(dev, plane);
4135 hdisplay = crtc->mode.hdisplay;
4136 htotal = crtc->mode.htotal;
4137 clock = crtc->mode.clock;
4138 pixel_size = crtc->fb->bits_per_pixel / 8;
4139
4140 line_time_us = (htotal * 1000) / clock;
4141 line_count = (latency_ns / line_time_us + 1000) / 1000;
4142 line_size = hdisplay * pixel_size;
4143
4144 /* Use the minimum of the small and large buffer method for primary */
4145 small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
4146 large = line_count * line_size;
4147
4148 entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
4149 *display_wm = entries + display->guard_size;
4150
4151 /* calculate the self-refresh watermark for display cursor */
4152 entries = line_count * pixel_size * 64;
4153 entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
4154 *cursor_wm = entries + cursor->guard_size;
4155
4156 return g4x_check_srwm(dev,
4157 *display_wm, *cursor_wm,
4158 display, cursor);
4159}
4160
4161#define single_plane_enabled(mask) is_power_of_2(mask)
4162
4163static void g4x_update_wm(struct drm_device *dev)
4164{
4165 static const int sr_latency_ns = 12000;
4166 struct drm_i915_private *dev_priv = dev->dev_private;
4167 int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
4168 int plane_sr, cursor_sr;
4169 unsigned int enabled = 0;
4170
4171 if (g4x_compute_wm0(dev, 0,
4172 &g4x_wm_info, latency_ns,
4173 &g4x_cursor_wm_info, latency_ns,
4174 &planea_wm, &cursora_wm))
4175 enabled |= 1;
4176
4177 if (g4x_compute_wm0(dev, 1,
4178 &g4x_wm_info, latency_ns,
4179 &g4x_cursor_wm_info, latency_ns,
4180 &planeb_wm, &cursorb_wm))
4181 enabled |= 2;
4182
4183 plane_sr = cursor_sr = 0;
4184 if (single_plane_enabled(enabled) &&
4185 g4x_compute_srwm(dev, ffs(enabled) - 1,
4186 sr_latency_ns,
4187 &g4x_wm_info,
4188 &g4x_cursor_wm_info,
4189 &plane_sr, &cursor_sr))
4190 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
4191 else
4192 I915_WRITE(FW_BLC_SELF,
4193 I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
4194
4195 DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
4196 planea_wm, cursora_wm,
4197 planeb_wm, cursorb_wm,
4198 plane_sr, cursor_sr);
4199
4200 I915_WRITE(DSPFW1,
4201 (plane_sr << DSPFW_SR_SHIFT) |
4202 (cursorb_wm << DSPFW_CURSORB_SHIFT) |
4203 (planeb_wm << DSPFW_PLANEB_SHIFT) |
4204 planea_wm);
4205 I915_WRITE(DSPFW2,
4206 (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
4207 (cursora_wm << DSPFW_CURSORA_SHIFT));
4208 /* HPLL off in SR has some issues on G4x... disable it */
4209 I915_WRITE(DSPFW3,
4210 (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
4211 (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
4212}
4213
4214static void i965_update_wm(struct drm_device *dev)
4215{
4216 struct drm_i915_private *dev_priv = dev->dev_private;
4217 struct drm_crtc *crtc;
4218 int srwm = 1;
4219 int cursor_sr = 16;
4220
4221 /* Calc sr entries for one plane configs */
4222 crtc = single_enabled_crtc(dev);
4223 if (crtc) {
4224 /* self-refresh has much higher latency */
4225 static const int sr_latency_ns = 12000;
4226 int clock = crtc->mode.clock;
4227 int htotal = crtc->mode.htotal;
4228 int hdisplay = crtc->mode.hdisplay;
4229 int pixel_size = crtc->fb->bits_per_pixel / 8;
4230 unsigned long line_time_us;
4231 int entries;
4232
4233 line_time_us = ((htotal * 1000) / clock);
4234
4235 /* Use ns/us then divide to preserve precision */
4236 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
4237 pixel_size * hdisplay;
4238 entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
4239 srwm = I965_FIFO_SIZE - entries;
4240 if (srwm < 0)
4241 srwm = 1;
4242 srwm &= 0x1ff;
4243 DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
4244 entries, srwm);
4245
4246 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
4247 pixel_size * 64;
4248 entries = DIV_ROUND_UP(entries,
4249 i965_cursor_wm_info.cacheline_size);
4250 cursor_sr = i965_cursor_wm_info.fifo_size -
4251 (entries + i965_cursor_wm_info.guard_size);
4252
4253 if (cursor_sr > i965_cursor_wm_info.max_wm)
4254 cursor_sr = i965_cursor_wm_info.max_wm;
4255
4256 DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
4257 "cursor %d\n", srwm, cursor_sr);
4258
4259 if (IS_CRESTLINE(dev))
4260 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
4261 } else {
4262 /* Turn off self refresh if both pipes are enabled */
4263 if (IS_CRESTLINE(dev))
4264 I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
4265 & ~FW_BLC_SELF_EN);
4266 }
4267
4268 DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
4269 srwm);
4270
4271 /* 965 has limitations... */
4272 I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
4273 (8 << 16) | (8 << 8) | (8 << 0));
4274 I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
4275 /* update cursor SR watermark */
4276 I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
4277}
4278
4279static void i9xx_update_wm(struct drm_device *dev)
4280{
4281 struct drm_i915_private *dev_priv = dev->dev_private;
4282 const struct intel_watermark_params *wm_info;
4283 uint32_t fwater_lo;
4284 uint32_t fwater_hi;
4285 int cwm, srwm = 1;
4286 int fifo_size;
4287 int planea_wm, planeb_wm;
4288 struct drm_crtc *crtc, *enabled = NULL;
4289
4290 if (IS_I945GM(dev))
4291 wm_info = &i945_wm_info;
4292 else if (!IS_GEN2(dev))
4293 wm_info = &i915_wm_info;
4294 else
4295 wm_info = &i855_wm_info;
4296
4297 fifo_size = dev_priv->display.get_fifo_size(dev, 0);
4298 crtc = intel_get_crtc_for_plane(dev, 0);
4299 if (crtc->enabled && crtc->fb) {
4300 planea_wm = intel_calculate_wm(crtc->mode.clock,
4301 wm_info, fifo_size,
4302 crtc->fb->bits_per_pixel / 8,
4303 latency_ns);
4304 enabled = crtc;
4305 } else
4306 planea_wm = fifo_size - wm_info->guard_size;
4307
4308 fifo_size = dev_priv->display.get_fifo_size(dev, 1);
4309 crtc = intel_get_crtc_for_plane(dev, 1);
4310 if (crtc->enabled && crtc->fb) {
4311 planeb_wm = intel_calculate_wm(crtc->mode.clock,
4312 wm_info, fifo_size,
4313 crtc->fb->bits_per_pixel / 8,
4314 latency_ns);
4315 if (enabled == NULL)
4316 enabled = crtc;
4317 else
4318 enabled = NULL;
4319 } else
4320 planeb_wm = fifo_size - wm_info->guard_size;
4321
4322 DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
4323
4324 /*
4325 * Overlay gets an aggressive default since video jitter is bad.
4326 */
4327 cwm = 2;
4328
4329 /* Play safe and disable self-refresh before adjusting watermarks. */
4330 if (IS_I945G(dev) || IS_I945GM(dev))
4331 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
4332 else if (IS_I915GM(dev))
4333 I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
4334
4335 /* Calc sr entries for one plane configs */
4336 if (HAS_FW_BLC(dev) && enabled) {
4337 /* self-refresh has much higher latency */
4338 static const int sr_latency_ns = 6000;
4339 int clock = enabled->mode.clock;
4340 int htotal = enabled->mode.htotal;
4341 int hdisplay = enabled->mode.hdisplay;
4342 int pixel_size = enabled->fb->bits_per_pixel / 8;
4343 unsigned long line_time_us;
4344 int entries;
4345
4346 line_time_us = (htotal * 1000) / clock;
4347
4348 /* Use ns/us then divide to preserve precision */
4349 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
4350 pixel_size * hdisplay;
4351 entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
4352 DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
4353 srwm = wm_info->fifo_size - entries;
4354 if (srwm < 0)
4355 srwm = 1;
4356
4357 if (IS_I945G(dev) || IS_I945GM(dev))
4358 I915_WRITE(FW_BLC_SELF,
4359 FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
4360 else if (IS_I915GM(dev))
4361 I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
4362 }
4363
4364 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
4365 planea_wm, planeb_wm, cwm, srwm);
4366
4367 fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
4368 fwater_hi = (cwm & 0x1f);
4369
4370 /* Set request length to 8 cachelines per fetch */
4371 fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
4372 fwater_hi = fwater_hi | (1 << 8);
4373
4374 I915_WRITE(FW_BLC, fwater_lo);
4375 I915_WRITE(FW_BLC2, fwater_hi);
4376
4377 if (HAS_FW_BLC(dev)) {
4378 if (enabled) {
4379 if (IS_I945G(dev) || IS_I945GM(dev))
4380 I915_WRITE(FW_BLC_SELF,
4381 FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
4382 else if (IS_I915GM(dev))
4383 I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
4384 DRM_DEBUG_KMS("memory self refresh enabled\n");
4385 } else
4386 DRM_DEBUG_KMS("memory self refresh disabled\n");
4387 }
4388}
4389
4390static void i830_update_wm(struct drm_device *dev)
4391{
4392 struct drm_i915_private *dev_priv = dev->dev_private;
4393 struct drm_crtc *crtc;
4394 uint32_t fwater_lo;
4395 int planea_wm;
4396
4397 crtc = single_enabled_crtc(dev);
4398 if (crtc == NULL)
4399 return;
4400
4401 planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
4402 dev_priv->display.get_fifo_size(dev, 0),
4403 crtc->fb->bits_per_pixel / 8,
4404 latency_ns);
4405 fwater_lo = I915_READ(FW_BLC) & ~0xfff;
4406 fwater_lo |= (3<<8) | planea_wm;
4407
4408 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
4409
4410 I915_WRITE(FW_BLC, fwater_lo);
4411}
4412
4413#define ILK_LP0_PLANE_LATENCY 700
4414#define ILK_LP0_CURSOR_LATENCY 1300
4415
4416/*
4417 * Check the wm result.
4418 *
 4419 * If any calculated watermark value is larger than the maximum value that
4420 * can be programmed into the associated watermark register, that watermark
4421 * must be disabled.
4422 */
4423static bool ironlake_check_srwm(struct drm_device *dev, int level,
4424 int fbc_wm, int display_wm, int cursor_wm,
4425 const struct intel_watermark_params *display,
4426 const struct intel_watermark_params *cursor)
4427{
4428 struct drm_i915_private *dev_priv = dev->dev_private;
4429
4430 DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
4431 " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
4432
4433 if (fbc_wm > SNB_FBC_MAX_SRWM) {
4434 DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
4435 fbc_wm, SNB_FBC_MAX_SRWM, level);
4436
 4437 /* fbc has its own way to disable the FBC WM */
4438 I915_WRITE(DISP_ARB_CTL,
4439 I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
4440 return false;
4441 }
4442
4443 if (display_wm > display->max_wm) {
4444 DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
4445 display_wm, SNB_DISPLAY_MAX_SRWM, level);
4446 return false;
4447 }
4448
4449 if (cursor_wm > cursor->max_wm) {
4450 DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
4451 cursor_wm, SNB_CURSOR_MAX_SRWM, level);
4452 return false;
4453 }
4454
4455 if (!(fbc_wm || display_wm || cursor_wm)) {
4456 DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
4457 return false;
4458 }
4459
4460 return true;
4461}
4462
4463/*
4464 * Compute watermark values of WM[1-3],
4465 */
4466static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
4467 int latency_ns,
4468 const struct intel_watermark_params *display,
4469 const struct intel_watermark_params *cursor,
4470 int *fbc_wm, int *display_wm, int *cursor_wm)
4471{
4472 struct drm_crtc *crtc;
4473 unsigned long line_time_us;
4474 int hdisplay, htotal, pixel_size, clock;
4475 int line_count, line_size;
4476 int small, large;
4477 int entries;
4478
4479 if (!latency_ns) {
4480 *fbc_wm = *display_wm = *cursor_wm = 0;
4481 return false;
4482 }
4483
4484 crtc = intel_get_crtc_for_plane(dev, plane);
4485 hdisplay = crtc->mode.hdisplay;
4486 htotal = crtc->mode.htotal;
4487 clock = crtc->mode.clock;
4488 pixel_size = crtc->fb->bits_per_pixel / 8;
4489
4490 line_time_us = (htotal * 1000) / clock;
4491 line_count = (latency_ns / line_time_us + 1000) / 1000;
4492 line_size = hdisplay * pixel_size;
4493
4494 /* Use the minimum of the small and large buffer method for primary */
4495 small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
4496 large = line_count * line_size;
4497
4498 entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
4499 *display_wm = entries + display->guard_size;
4500
4501 /*
4502 * Spec says:
4503 * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
4504 */
4505 *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
4506
4507 /* calculate the self-refresh watermark for display cursor */
4508 entries = line_count * pixel_size * 64;
4509 entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
4510 *cursor_wm = entries + cursor->guard_size;
4511
4512 return ironlake_check_srwm(dev, level,
4513 *fbc_wm, *display_wm, *cursor_wm,
4514 display, cursor);
4515}
4516
4517static void ironlake_update_wm(struct drm_device *dev)
4518{
4519 struct drm_i915_private *dev_priv = dev->dev_private;
4520 int fbc_wm, plane_wm, cursor_wm;
4521 unsigned int enabled;
4522
4523 enabled = 0;
4524 if (g4x_compute_wm0(dev, 0,
4525 &ironlake_display_wm_info,
4526 ILK_LP0_PLANE_LATENCY,
4527 &ironlake_cursor_wm_info,
4528 ILK_LP0_CURSOR_LATENCY,
4529 &plane_wm, &cursor_wm)) {
4530 I915_WRITE(WM0_PIPEA_ILK,
4531 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
4532 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
4533 " plane %d, " "cursor: %d\n",
4534 plane_wm, cursor_wm);
4535 enabled |= 1;
4536 }
4537
4538 if (g4x_compute_wm0(dev, 1,
4539 &ironlake_display_wm_info,
4540 ILK_LP0_PLANE_LATENCY,
4541 &ironlake_cursor_wm_info,
4542 ILK_LP0_CURSOR_LATENCY,
4543 &plane_wm, &cursor_wm)) {
4544 I915_WRITE(WM0_PIPEB_ILK,
4545 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
4546 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
4547 " plane %d, cursor: %d\n",
4548 plane_wm, cursor_wm);
4549 enabled |= 2;
4550 }
4551
4552 /*
4553 * Calculate and update the self-refresh watermark only when one
4554 * display plane is used.
4555 */
4556 I915_WRITE(WM3_LP_ILK, 0);
4557 I915_WRITE(WM2_LP_ILK, 0);
4558 I915_WRITE(WM1_LP_ILK, 0);
4559
4560 if (!single_plane_enabled(enabled))
4561 return;
4562 enabled = ffs(enabled) - 1;
4563
4564 /* WM1 */
4565 if (!ironlake_compute_srwm(dev, 1, enabled,
4566 ILK_READ_WM1_LATENCY() * 500,
4567 &ironlake_display_srwm_info,
4568 &ironlake_cursor_srwm_info,
4569 &fbc_wm, &plane_wm, &cursor_wm))
4570 return;
4571
4572 I915_WRITE(WM1_LP_ILK,
4573 WM1_LP_SR_EN |
4574 (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4575 (fbc_wm << WM1_LP_FBC_SHIFT) |
4576 (plane_wm << WM1_LP_SR_SHIFT) |
4577 cursor_wm);
4578
4579 /* WM2 */
4580 if (!ironlake_compute_srwm(dev, 2, enabled,
4581 ILK_READ_WM2_LATENCY() * 500,
4582 &ironlake_display_srwm_info,
4583 &ironlake_cursor_srwm_info,
4584 &fbc_wm, &plane_wm, &cursor_wm))
4585 return;
4586
4587 I915_WRITE(WM2_LP_ILK,
4588 WM2_LP_EN |
4589 (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4590 (fbc_wm << WM1_LP_FBC_SHIFT) |
4591 (plane_wm << WM1_LP_SR_SHIFT) |
4592 cursor_wm);
4593
4594 /*
4595 * WM3 is unsupported on ILK, probably because we don't have latency
4596 * data for that power state
4597 */
4598}
4599
4600void sandybridge_update_wm(struct drm_device *dev)
4601{
4602 struct drm_i915_private *dev_priv = dev->dev_private;
4603 int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
4604 u32 val;
4605 int fbc_wm, plane_wm, cursor_wm;
4606 unsigned int enabled;
4607
4608 enabled = 0;
4609 if (g4x_compute_wm0(dev, 0,
4610 &sandybridge_display_wm_info, latency,
4611 &sandybridge_cursor_wm_info, latency,
4612 &plane_wm, &cursor_wm)) {
4613 val = I915_READ(WM0_PIPEA_ILK);
4614 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
4615 I915_WRITE(WM0_PIPEA_ILK, val |
4616 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
4617 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
4618 " plane %d, " "cursor: %d\n",
4619 plane_wm, cursor_wm);
4620 enabled |= 1;
4621 }
4622
4623 if (g4x_compute_wm0(dev, 1,
4624 &sandybridge_display_wm_info, latency,
4625 &sandybridge_cursor_wm_info, latency,
4626 &plane_wm, &cursor_wm)) {
4627 val = I915_READ(WM0_PIPEB_ILK);
4628 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
4629 I915_WRITE(WM0_PIPEB_ILK, val |
4630 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
4631 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
4632 " plane %d, cursor: %d\n",
4633 plane_wm, cursor_wm);
4634 enabled |= 2;
4635 }
4636
4637 /* IVB has 3 pipes */
4638 if (IS_IVYBRIDGE(dev) &&
4639 g4x_compute_wm0(dev, 2,
4640 &sandybridge_display_wm_info, latency,
4641 &sandybridge_cursor_wm_info, latency,
4642 &plane_wm, &cursor_wm)) {
4643 val = I915_READ(WM0_PIPEC_IVB);
4644 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
4645 I915_WRITE(WM0_PIPEC_IVB, val |
4646 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
4647 DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
4648 " plane %d, cursor: %d\n",
4649 plane_wm, cursor_wm);
 4651 enabled |= 1 << 2;
4651 }
4652
4653 /*
4654 * Calculate and update the self-refresh watermark only when one
4655 * display plane is used.
4656 *
 4657 * SNB supports 3 levels of watermarks.
 4658 *
 4659 * WM1/WM2/WM3 watermarks have to be enabled in ascending order,
 4660 * and disabled in descending order.
4661 *
4662 */
4663 I915_WRITE(WM3_LP_ILK, 0);
4664 I915_WRITE(WM2_LP_ILK, 0);
4665 I915_WRITE(WM1_LP_ILK, 0);
4666
4667 if (!single_plane_enabled(enabled) ||
4668 dev_priv->sprite_scaling_enabled)
4669 return;
4670 enabled = ffs(enabled) - 1;
4671
4672 /* WM1 */
4673 if (!ironlake_compute_srwm(dev, 1, enabled,
4674 SNB_READ_WM1_LATENCY() * 500,
4675 &sandybridge_display_srwm_info,
4676 &sandybridge_cursor_srwm_info,
4677 &fbc_wm, &plane_wm, &cursor_wm))
4678 return;
4679
4680 I915_WRITE(WM1_LP_ILK,
4681 WM1_LP_SR_EN |
4682 (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4683 (fbc_wm << WM1_LP_FBC_SHIFT) |
4684 (plane_wm << WM1_LP_SR_SHIFT) |
4685 cursor_wm);
4686
4687 /* WM2 */
4688 if (!ironlake_compute_srwm(dev, 2, enabled,
4689 SNB_READ_WM2_LATENCY() * 500,
4690 &sandybridge_display_srwm_info,
4691 &sandybridge_cursor_srwm_info,
4692 &fbc_wm, &plane_wm, &cursor_wm))
4693 return;
4694
4695 I915_WRITE(WM2_LP_ILK,
4696 WM2_LP_EN |
4697 (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4698 (fbc_wm << WM1_LP_FBC_SHIFT) |
4699 (plane_wm << WM1_LP_SR_SHIFT) |
4700 cursor_wm);
4701
4702 /* WM3 */
4703 if (!ironlake_compute_srwm(dev, 3, enabled,
4704 SNB_READ_WM3_LATENCY() * 500,
4705 &sandybridge_display_srwm_info,
4706 &sandybridge_cursor_srwm_info,
4707 &fbc_wm, &plane_wm, &cursor_wm))
4708 return;
4709
4710 I915_WRITE(WM3_LP_ILK,
4711 WM3_LP_EN |
4712 (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4713 (fbc_wm << WM1_LP_FBC_SHIFT) |
4714 (plane_wm << WM1_LP_SR_SHIFT) |
4715 cursor_wm);
4716}
4717
4718static bool
4719sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
4720 uint32_t sprite_width, int pixel_size,
4721 const struct intel_watermark_params *display,
4722 int display_latency_ns, int *sprite_wm)
4723{
4724 struct drm_crtc *crtc;
4725 int clock;
4726 int entries, tlb_miss;
4727
4728 crtc = intel_get_crtc_for_plane(dev, plane);
4729 if (crtc->fb == NULL || !crtc->enabled) {
4730 *sprite_wm = display->guard_size;
4731 return false;
4732 }
4733
4734 clock = crtc->mode.clock;
4735
4736 /* Use the small buffer method to calculate the sprite watermark */
4737 entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
4738 tlb_miss = display->fifo_size*display->cacheline_size -
4739 sprite_width * 8;
4740 if (tlb_miss > 0)
4741 entries += tlb_miss;
4742 entries = DIV_ROUND_UP(entries, display->cacheline_size);
4743 *sprite_wm = entries + display->guard_size;
4744 if (*sprite_wm > (int)display->max_wm)
4745 *sprite_wm = display->max_wm;
4746
4747 return true;
4748}
4749
4750static bool
4751sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
4752 uint32_t sprite_width, int pixel_size,
4753 const struct intel_watermark_params *display,
4754 int latency_ns, int *sprite_wm)
4755{
4756 struct drm_crtc *crtc;
4757 unsigned long line_time_us;
4758 int clock;
4759 int line_count, line_size;
4760 int small, large;
4761 int entries;
4762
4763 if (!latency_ns) {
4764 *sprite_wm = 0;
4765 return false;
4766 }
4767
4768 crtc = intel_get_crtc_for_plane(dev, plane);
4769 clock = crtc->mode.clock;
4770 if (!clock) {
4771 *sprite_wm = 0;
4772 return false;
4773 }
4774
4775 line_time_us = (sprite_width * 1000) / clock;
4776 if (!line_time_us) {
4777 *sprite_wm = 0;
4778 return false;
4779 }
4780
4781 line_count = (latency_ns / line_time_us + 1000) / 1000;
4782 line_size = sprite_width * pixel_size;
4783
4784 /* Use the minimum of the small and large buffer method for primary */
4785 small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
4786 large = line_count * line_size;
4787
4788 entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
4789 *sprite_wm = entries + display->guard_size;
4790
 4791 return *sprite_wm <= 0x3ff;
4792}
4793
4794static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
4795 uint32_t sprite_width, int pixel_size)
4796{
4797 struct drm_i915_private *dev_priv = dev->dev_private;
4798 int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
4799 u32 val;
4800 int sprite_wm, reg;
4801 int ret;
4802
4803 switch (pipe) {
4804 case 0:
4805 reg = WM0_PIPEA_ILK;
4806 break;
4807 case 1:
4808 reg = WM0_PIPEB_ILK;
4809 break;
4810 case 2:
4811 reg = WM0_PIPEC_IVB;
4812 break;
4813 default:
4814 return; /* bad pipe */
4815 }
4816
4817 ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
4818 &sandybridge_display_wm_info,
4819 latency, &sprite_wm);
4820 if (!ret) {
4821 DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
4822 pipe);
4823 return;
4824 }
4825
4826 val = I915_READ(reg);
4827 val &= ~WM0_PIPE_SPRITE_MASK;
4828 I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
4829 DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm);
4830
4831
4832 ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
4833 pixel_size,
4834 &sandybridge_display_srwm_info,
4835 SNB_READ_WM1_LATENCY() * 500,
4836 &sprite_wm);
4837 if (!ret) {
4838 DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
4839 pipe);
4840 return;
4841 }
4842 I915_WRITE(WM1S_LP_ILK, sprite_wm);
4843
4844 /* Only IVB has two more LP watermarks for sprite */
4845 if (!IS_IVYBRIDGE(dev))
4846 return;
4847
4848 ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
4849 pixel_size,
4850 &sandybridge_display_srwm_info,
4851 SNB_READ_WM2_LATENCY() * 500,
4852 &sprite_wm);
4853 if (!ret) {
4854 DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
4855 pipe);
4856 return;
4857 }
4858 I915_WRITE(WM2S_LP_IVB, sprite_wm);
4859
4860 ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
4861 pixel_size,
4862 &sandybridge_display_srwm_info,
4863 SNB_READ_WM3_LATENCY() * 500,
4864 &sprite_wm);
4865 if (!ret) {
4866 DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
4867 pipe);
4868 return;
4869 }
4870 I915_WRITE(WM3S_LP_IVB, sprite_wm);
4871}
4872
4873/**
4874 * intel_update_watermarks - update FIFO watermark values based on current modes
4875 *
4876 * Calculate watermark values for the various WM regs based on current mode
4877 * and plane configuration.
4878 *
4879 * There are several cases to deal with here:
4880 * - normal (i.e. non-self-refresh)
4881 * - self-refresh (SR) mode
4882 * - lines are large relative to FIFO size (buffer can hold up to 2)
4883 * - lines are small relative to FIFO size (buffer can hold more than 2
4884 * lines), so need to account for TLB latency
4885 *
4886 * The normal calculation is:
4887 * watermark = dotclock * bytes per pixel * latency
4888 * where latency is platform & configuration dependent (we assume pessimal
4889 * values here).
4890 *
4891 * The SR calculation is:
4892 * watermark = (trunc(latency/line time)+1) * surface width *
4893 * bytes per pixel
4894 * where
4895 * line time = htotal / dotclock
4896 * surface width = hdisplay for normal plane and 64 for cursor
4897 * and latency is assumed to be high, as above.
4898 *
4899 * The final value programmed to the register should always be rounded up,
4900 * and include an extra 2 entries to account for clock crossings.
4901 *
4902 * We don't use the sprite, so we can ignore that. And on Crestline we have
4903 * to set the non-SR watermarks to 8.
4904 */
4905static void intel_update_watermarks(struct drm_device *dev)
4906{
4907 struct drm_i915_private *dev_priv = dev->dev_private;
4908
4909 if (dev_priv->display.update_wm)
4910 dev_priv->display.update_wm(dev);
4911}
4912
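Plugging illustrative numbers into the SR formula from the comment above (1920x1080 with htotal = 2200 at 148500 kHz, 32bpp, 12000 ns latency):

	/*
	 * line time = 2200 * 1000 / 148500                ~= 14.8 us
	 * watermark = (trunc(12.0 / 14.8) + 1) * 1920 * 4  = 7680 bytes
	 *
	 * which is then rounded up to cachelines, plus the extra entries
	 * for clock crossings, before being programmed.
	 */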
4913void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
4914 uint32_t sprite_width, int pixel_size)
4915{
4916 struct drm_i915_private *dev_priv = dev->dev_private;
4917
4918 if (dev_priv->display.update_sprite_wm)
4919 dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
4920 pixel_size);
4921}
4922
4923static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) 3512static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
4924{ 3513{
4925 if (i915_panel_use_ssc >= 0) 3514 if (i915_panel_use_ssc >= 0)
@@ -5143,6 +3732,222 @@ static void i9xx_update_pll_dividers(struct drm_crtc *crtc,
5143 } 3732 }
5144} 3733}
5145 3734
3735static void intel_update_lvds(struct drm_crtc *crtc, intel_clock_t *clock,
3736 struct drm_display_mode *adjusted_mode)
3737{
3738 struct drm_device *dev = crtc->dev;
3739 struct drm_i915_private *dev_priv = dev->dev_private;
3740 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3741 int pipe = intel_crtc->pipe;
3742 u32 temp;
3743
3744 temp = I915_READ(LVDS);
3745 temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
3746 if (pipe == 1) {
3747 temp |= LVDS_PIPEB_SELECT;
3748 } else {
3749 temp &= ~LVDS_PIPEB_SELECT;
3750 }
3751	/* set the corresponding LVDS_BORDER bit */
3752 temp |= dev_priv->lvds_border_bits;
3753 /* Set the B0-B3 data pairs corresponding to whether we're going to
3754 * set the DPLLs for dual-channel mode or not.
3755 */
3756 if (clock->p2 == 7)
3757 temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
3758 else
3759 temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
3760
3761 /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
3762 * appropriately here, but we need to look more thoroughly into how
3763 * panels behave in the two modes.
3764 */
3765 /* set the dithering flag on LVDS as needed */
3766 if (INTEL_INFO(dev)->gen >= 4) {
3767 if (dev_priv->lvds_dither)
3768 temp |= LVDS_ENABLE_DITHER;
3769 else
3770 temp &= ~LVDS_ENABLE_DITHER;
3771 }
3772 temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
3773 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
3774 temp |= LVDS_HSYNC_POLARITY;
3775 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
3776 temp |= LVDS_VSYNC_POLARITY;
3777 I915_WRITE(LVDS, temp);
3778}
3779
3780static void i9xx_update_pll(struct drm_crtc *crtc,
3781 struct drm_display_mode *mode,
3782 struct drm_display_mode *adjusted_mode,
3783 intel_clock_t *clock, intel_clock_t *reduced_clock,
3784 int num_connectors)
3785{
3786 struct drm_device *dev = crtc->dev;
3787 struct drm_i915_private *dev_priv = dev->dev_private;
3788 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3789 int pipe = intel_crtc->pipe;
3790 u32 dpll;
3791 bool is_sdvo;
3792
3793 is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
3794 intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
3795
3796 dpll = DPLL_VGA_MODE_DIS;
3797
3798 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
3799 dpll |= DPLLB_MODE_LVDS;
3800 else
3801 dpll |= DPLLB_MODE_DAC_SERIAL;
3802 if (is_sdvo) {
3803 int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
3804 if (pixel_multiplier > 1) {
3805 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
3806 dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
3807 }
3808 dpll |= DPLL_DVO_HIGH_SPEED;
3809 }
3810 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
3811 dpll |= DPLL_DVO_HIGH_SPEED;
3812
3813 /* compute bitmask from p1 value */
3814 if (IS_PINEVIEW(dev))
3815 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
3816 else {
3817 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
3818 if (IS_G4X(dev) && reduced_clock)
3819 dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
3820 }
3821 switch (clock->p2) {
3822 case 5:
3823 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
3824 break;
3825 case 7:
3826 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
3827 break;
3828 case 10:
3829 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
3830 break;
3831 case 14:
3832 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
3833 break;
3834 }
3835 if (INTEL_INFO(dev)->gen >= 4)
3836 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
3837
3838 if (is_sdvo && intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
3839 dpll |= PLL_REF_INPUT_TVCLKINBC;
3840 else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
3841 /* XXX: just matching BIOS for now */
3842 /* dpll |= PLL_REF_INPUT_TVCLKINBC; */
3843 dpll |= 3;
3844 else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
3845 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
3846 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
3847 else
3848 dpll |= PLL_REF_INPUT_DREFCLK;
3849
3850 dpll |= DPLL_VCO_ENABLE;
3851 I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
3852 POSTING_READ(DPLL(pipe));
3853 udelay(150);
3854
3855 /* The LVDS pin pair needs to be on before the DPLLs are enabled.
3856 * This is an exception to the general rule that mode_set doesn't turn
3857 * things on.
3858 */
3859 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
3860 intel_update_lvds(crtc, clock, adjusted_mode);
3861
3862 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
3863 intel_dp_set_m_n(crtc, mode, adjusted_mode);
3864
3865 I915_WRITE(DPLL(pipe), dpll);
3866
3867 /* Wait for the clocks to stabilize. */
3868 POSTING_READ(DPLL(pipe));
3869 udelay(150);
3870
3871 if (INTEL_INFO(dev)->gen >= 4) {
3872 u32 temp = 0;
3873 if (is_sdvo) {
3874 temp = intel_mode_get_pixel_multiplier(adjusted_mode);
3875 if (temp > 1)
3876 temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
3877 else
3878 temp = 0;
3879 }
3880 I915_WRITE(DPLL_MD(pipe), temp);
3881 } else {
3882 /* The pixel multiplier can only be updated once the
3883 * DPLL is enabled and the clocks are stable.
3884 *
3885 * So write it again.
3886 */
3887 I915_WRITE(DPLL(pipe), dpll);
3888 }
3889}
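/* Editor's note: standalone sketch of the post-divider encodings built in
 * i9xx_update_pll() above: p1 is stored as a one-hot bitfield, while p2
 * selects one of four fixed divide ratios via the switch statement. The
 * shift value here is an illustrative assumption standing in for
 * DPLL_FPA01_P1_POST_DIV_SHIFT. */

#include <stdint.h>

#define P1_SHIFT 16	/* assumed position of the P1 field */

static uint32_t encode_p1(int p1)	/* valid p1 is 1..8 */
{
	/* e.g. p1 = 3 sets bit 2 of the field: 1 << (3 - 1) = 0x4 */
	return (uint32_t)(1u << (p1 - 1)) << P1_SHIFT;
}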
3890
3891static void i8xx_update_pll(struct drm_crtc *crtc,
3892 struct drm_display_mode *adjusted_mode,
3893 intel_clock_t *clock,
3894 int num_connectors)
3895{
3896 struct drm_device *dev = crtc->dev;
3897 struct drm_i915_private *dev_priv = dev->dev_private;
3898 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3899 int pipe = intel_crtc->pipe;
3900 u32 dpll;
3901
3902 dpll = DPLL_VGA_MODE_DIS;
3903
3904 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
3905 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
3906 } else {
3907 if (clock->p1 == 2)
3908 dpll |= PLL_P1_DIVIDE_BY_TWO;
3909 else
3910 dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
3911 if (clock->p2 == 4)
3912 dpll |= PLL_P2_DIVIDE_BY_4;
3913 }
3914
3915 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
3916 /* XXX: just matching BIOS for now */
3917 /* dpll |= PLL_REF_INPUT_TVCLKINBC; */
3918 dpll |= 3;
3919 else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
3920 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
3921 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
3922 else
3923 dpll |= PLL_REF_INPUT_DREFCLK;
3924
3925 dpll |= DPLL_VCO_ENABLE;
3926 I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
3927 POSTING_READ(DPLL(pipe));
3928 udelay(150);
3929
3930 I915_WRITE(DPLL(pipe), dpll);
3931
3932 /* Wait for the clocks to stabilize. */
3933 POSTING_READ(DPLL(pipe));
3934 udelay(150);
3935
3936 /* The LVDS pin pair needs to be on before the DPLLs are enabled.
3937 * This is an exception to the general rule that mode_set doesn't turn
3938 * things on.
3939 */
3940 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
3941 intel_update_lvds(crtc, clock, adjusted_mode);
3942
3943 /* The pixel multiplier can only be updated once the
3944 * DPLL is enabled and the clocks are stable.
3945 *
3946 * So write it again.
3947 */
3948 I915_WRITE(DPLL(pipe), dpll);
3949}
3950
5146static int i9xx_crtc_mode_set(struct drm_crtc *crtc, 3951static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
5147 struct drm_display_mode *mode, 3952 struct drm_display_mode *mode,
5148 struct drm_display_mode *adjusted_mode, 3953 struct drm_display_mode *adjusted_mode,
@@ -5156,15 +3961,13 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
5156 int plane = intel_crtc->plane; 3961 int plane = intel_crtc->plane;
5157 int refclk, num_connectors = 0; 3962 int refclk, num_connectors = 0;
5158 intel_clock_t clock, reduced_clock; 3963 intel_clock_t clock, reduced_clock;
5159 u32 dpll, dspcntr, pipeconf, vsyncshift; 3964 u32 dspcntr, pipeconf, vsyncshift;
5160 bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false; 3965 bool ok, has_reduced_clock = false, is_sdvo = false;
5161 bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; 3966 bool is_lvds = false, is_tv = false, is_dp = false;
5162 struct drm_mode_config *mode_config = &dev->mode_config; 3967 struct drm_mode_config *mode_config = &dev->mode_config;
5163 struct intel_encoder *encoder; 3968 struct intel_encoder *encoder;
5164 const intel_limit_t *limit; 3969 const intel_limit_t *limit;
5165 int ret; 3970 int ret;
5166 u32 temp;
5167 u32 lvds_sync = 0;
5168 3971
5169 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { 3972 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
5170 if (encoder->base.crtc != crtc) 3973 if (encoder->base.crtc != crtc)
@@ -5180,15 +3983,9 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
5180 if (encoder->needs_tv_clock) 3983 if (encoder->needs_tv_clock)
5181 is_tv = true; 3984 is_tv = true;
5182 break; 3985 break;
5183 case INTEL_OUTPUT_DVO:
5184 is_dvo = true;
5185 break;
5186 case INTEL_OUTPUT_TVOUT: 3986 case INTEL_OUTPUT_TVOUT:
5187 is_tv = true; 3987 is_tv = true;
5188 break; 3988 break;
5189 case INTEL_OUTPUT_ANALOG:
5190 is_crt = true;
5191 break;
5192 case INTEL_OUTPUT_DISPLAYPORT: 3989 case INTEL_OUTPUT_DISPLAYPORT:
5193 is_dp = true; 3990 is_dp = true;
5194 break; 3991 break;
@@ -5235,71 +4032,12 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
5235 i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ? 4032 i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ?
5236 &reduced_clock : NULL); 4033 &reduced_clock : NULL);
5237 4034
5238 dpll = DPLL_VGA_MODE_DIS; 4035 if (IS_GEN2(dev))
5239 4036 i8xx_update_pll(crtc, adjusted_mode, &clock, num_connectors);
5240 if (!IS_GEN2(dev)) {
5241 if (is_lvds)
5242 dpll |= DPLLB_MODE_LVDS;
5243 else
5244 dpll |= DPLLB_MODE_DAC_SERIAL;
5245 if (is_sdvo) {
5246 int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
5247 if (pixel_multiplier > 1) {
5248 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
5249 dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
5250 }
5251 dpll |= DPLL_DVO_HIGH_SPEED;
5252 }
5253 if (is_dp)
5254 dpll |= DPLL_DVO_HIGH_SPEED;
5255
5256 /* compute bitmask from p1 value */
5257 if (IS_PINEVIEW(dev))
5258 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
5259 else {
5260 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5261 if (IS_G4X(dev) && has_reduced_clock)
5262 dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
5263 }
5264 switch (clock.p2) {
5265 case 5:
5266 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
5267 break;
5268 case 7:
5269 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
5270 break;
5271 case 10:
5272 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
5273 break;
5274 case 14:
5275 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
5276 break;
5277 }
5278 if (INTEL_INFO(dev)->gen >= 4)
5279 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
5280 } else {
5281 if (is_lvds) {
5282 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5283 } else {
5284 if (clock.p1 == 2)
5285 dpll |= PLL_P1_DIVIDE_BY_TWO;
5286 else
5287 dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5288 if (clock.p2 == 4)
5289 dpll |= PLL_P2_DIVIDE_BY_4;
5290 }
5291 }
5292
5293 if (is_sdvo && is_tv)
5294 dpll |= PLL_REF_INPUT_TVCLKINBC;
5295 else if (is_tv)
5296 /* XXX: just matching BIOS for now */
5297 /* dpll |= PLL_REF_INPUT_TVCLKINBC; */
5298 dpll |= 3;
5299 else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
5300 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
5301 else 4037 else
5302 dpll |= PLL_REF_INPUT_DREFCLK; 4038 i9xx_update_pll(crtc, mode, adjusted_mode, &clock,
4039 has_reduced_clock ? &reduced_clock : NULL,
4040 num_connectors);
5303 4041
5304 /* setup pipeconf */ 4042 /* setup pipeconf */
5305 pipeconf = I915_READ(PIPECONF(pipe)); 4043 pipeconf = I915_READ(PIPECONF(pipe));
@@ -5336,97 +4074,9 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
5336 } 4074 }
5337 } 4075 }
5338 4076
5339 dpll |= DPLL_VCO_ENABLE;
5340
5341 DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); 4077 DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
5342 drm_mode_debug_printmodeline(mode); 4078 drm_mode_debug_printmodeline(mode);
5343 4079
5344 I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
5345
5346 POSTING_READ(DPLL(pipe));
5347 udelay(150);
5348
5349 /* The LVDS pin pair needs to be on before the DPLLs are enabled.
5350 * This is an exception to the general rule that mode_set doesn't turn
5351 * things on.
5352 */
5353 if (is_lvds) {
5354 temp = I915_READ(LVDS);
5355 temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
5356 if (pipe == 1) {
5357 temp |= LVDS_PIPEB_SELECT;
5358 } else {
5359 temp &= ~LVDS_PIPEB_SELECT;
5360 }
5361	/* set the corresponding LVDS_BORDER bit */
5362 temp |= dev_priv->lvds_border_bits;
5363 /* Set the B0-B3 data pairs corresponding to whether we're going to
5364 * set the DPLLs for dual-channel mode or not.
5365 */
5366 if (clock.p2 == 7)
5367 temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
5368 else
5369 temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
5370
5371 /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
5372 * appropriately here, but we need to look more thoroughly into how
5373 * panels behave in the two modes.
5374 */
5375 /* set the dithering flag on LVDS as needed */
5376 if (INTEL_INFO(dev)->gen >= 4) {
5377 if (dev_priv->lvds_dither)
5378 temp |= LVDS_ENABLE_DITHER;
5379 else
5380 temp &= ~LVDS_ENABLE_DITHER;
5381 }
5382 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
5383 lvds_sync |= LVDS_HSYNC_POLARITY;
5384 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
5385 lvds_sync |= LVDS_VSYNC_POLARITY;
5386 if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
5387 != lvds_sync) {
5388 char flags[2] = "-+";
5389 DRM_INFO("Changing LVDS panel from "
5390 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
5391 flags[!(temp & LVDS_HSYNC_POLARITY)],
5392 flags[!(temp & LVDS_VSYNC_POLARITY)],
5393 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
5394 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
5395 temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
5396 temp |= lvds_sync;
5397 }
5398 I915_WRITE(LVDS, temp);
5399 }
5400
5401 if (is_dp) {
5402 intel_dp_set_m_n(crtc, mode, adjusted_mode);
5403 }
5404
5405 I915_WRITE(DPLL(pipe), dpll);
5406
5407 /* Wait for the clocks to stabilize. */
5408 POSTING_READ(DPLL(pipe));
5409 udelay(150);
5410
5411 if (INTEL_INFO(dev)->gen >= 4) {
5412 temp = 0;
5413 if (is_sdvo) {
5414 temp = intel_mode_get_pixel_multiplier(adjusted_mode);
5415 if (temp > 1)
5416 temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
5417 else
5418 temp = 0;
5419 }
5420 I915_WRITE(DPLL_MD(pipe), temp);
5421 } else {
5422 /* The pixel multiplier can only be updated once the
5423 * DPLL is enabled and the clocks are stable.
5424 *
5425 * So write it again.
5426 */
5427 I915_WRITE(DPLL(pipe), dpll);
5428 }
5429
5430 if (HAS_PIPE_CXSR(dev)) { 4080 if (HAS_PIPE_CXSR(dev)) {
5431 if (intel_crtc->lowfreq_avail) { 4081 if (intel_crtc->lowfreq_avail) {
5432 DRM_DEBUG_KMS("enabling CxSR downclocking\n"); 4082 DRM_DEBUG_KMS("enabling CxSR downclocking\n");
@@ -5492,7 +4142,6 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
5492 4142
5493 I915_WRITE(DSPCNTR(plane), dspcntr); 4143 I915_WRITE(DSPCNTR(plane), dspcntr);
5494 POSTING_READ(DSPCNTR(plane)); 4144 POSTING_READ(DSPCNTR(plane));
5495 intel_enable_plane(dev_priv, plane, pipe);
5496 4145
5497 ret = intel_pipe_set_base(crtc, x, y, old_fb); 4146 ret = intel_pipe_set_base(crtc, x, y, old_fb);
5498 4147
@@ -5668,17 +4317,16 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5668 u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf; 4317 u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
5669 bool ok, has_reduced_clock = false, is_sdvo = false; 4318 bool ok, has_reduced_clock = false, is_sdvo = false;
5670 bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; 4319 bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
5671 struct intel_encoder *has_edp_encoder = NULL;
5672 struct drm_mode_config *mode_config = &dev->mode_config; 4320 struct drm_mode_config *mode_config = &dev->mode_config;
5673 struct intel_encoder *encoder; 4321 struct intel_encoder *encoder, *edp_encoder = NULL;
5674 const intel_limit_t *limit; 4322 const intel_limit_t *limit;
5675 int ret; 4323 int ret;
5676 struct fdi_m_n m_n = {0}; 4324 struct fdi_m_n m_n = {0};
5677 u32 temp; 4325 u32 temp;
5678 u32 lvds_sync = 0;
5679 int target_clock, pixel_multiplier, lane, link_bw, factor; 4326 int target_clock, pixel_multiplier, lane, link_bw, factor;
5680 unsigned int pipe_bpp; 4327 unsigned int pipe_bpp;
5681 bool dither; 4328 bool dither;
4329 bool is_cpu_edp = false, is_pch_edp = false;
5682 4330
5683 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { 4331 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
5684 if (encoder->base.crtc != crtc) 4332 if (encoder->base.crtc != crtc)
@@ -5704,7 +4352,12 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5704 is_dp = true; 4352 is_dp = true;
5705 break; 4353 break;
5706 case INTEL_OUTPUT_EDP: 4354 case INTEL_OUTPUT_EDP:
5707 has_edp_encoder = encoder; 4355 is_dp = true;
4356 if (intel_encoder_is_pch_edp(&encoder->base))
4357 is_pch_edp = true;
4358 else
4359 is_cpu_edp = true;
4360 edp_encoder = encoder;
5708 break; 4361 break;
5709 } 4362 }
5710 4363
@@ -5767,15 +4420,13 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5767 lane = 0; 4420 lane = 0;
5768 /* CPU eDP doesn't require FDI link, so just set DP M/N 4421 /* CPU eDP doesn't require FDI link, so just set DP M/N
5769 according to current link config */ 4422 according to current link config */
5770 if (has_edp_encoder && 4423 if (is_cpu_edp) {
5771 !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
5772 target_clock = mode->clock; 4424 target_clock = mode->clock;
5773 intel_edp_link_config(has_edp_encoder, 4425 intel_edp_link_config(edp_encoder, &lane, &link_bw);
5774 &lane, &link_bw);
5775 } else { 4426 } else {
5776 /* [e]DP over FDI requires target mode clock 4427 /* [e]DP over FDI requires target mode clock
5777 instead of link clock */ 4428 instead of link clock */
5778 if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) 4429 if (is_dp)
5779 target_clock = mode->clock; 4430 target_clock = mode->clock;
5780 else 4431 else
5781 target_clock = adjusted_mode->clock; 4432 target_clock = adjusted_mode->clock;
@@ -5866,7 +4517,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5866 } 4517 }
5867 dpll |= DPLL_DVO_HIGH_SPEED; 4518 dpll |= DPLL_DVO_HIGH_SPEED;
5868 } 4519 }
5869 if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) 4520 if (is_dp && !is_cpu_edp)
5870 dpll |= DPLL_DVO_HIGH_SPEED; 4521 dpll |= DPLL_DVO_HIGH_SPEED;
5871 4522
5872 /* compute bitmask from p1 value */ 4523 /* compute bitmask from p1 value */
@@ -5909,30 +4560,22 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5909 DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe); 4560 DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
5910 drm_mode_debug_printmodeline(mode); 4561 drm_mode_debug_printmodeline(mode);
5911 4562
5912 /* PCH eDP needs FDI, but CPU eDP does not */ 4563 /* CPU eDP is the only output that doesn't need a PCH PLL of its own on
5913 if (!intel_crtc->no_pll) { 4564 * pre-Haswell/LPT generation */
5914 if (!has_edp_encoder || 4565 if (HAS_PCH_LPT(dev)) {
5915 intel_encoder_is_pch_edp(&has_edp_encoder->base)) { 4566 DRM_DEBUG_KMS("LPT detected: no PLL for pipe %d necessary\n",
5916 I915_WRITE(PCH_FP0(pipe), fp); 4567 pipe);
5917 I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE); 4568 } else if (!is_cpu_edp) {
5918 4569 struct intel_pch_pll *pll;
5919 POSTING_READ(PCH_DPLL(pipe)); 4570
5920 udelay(150); 4571 pll = intel_get_pch_pll(intel_crtc, dpll, fp);
5921 } 4572 if (pll == NULL) {
5922 } else { 4573 DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n",
5923 if (dpll == (I915_READ(PCH_DPLL(0)) & 0x7fffffff) && 4574 pipe);
5924 fp == I915_READ(PCH_FP0(0))) {
5925 intel_crtc->use_pll_a = true;
5926 DRM_DEBUG_KMS("using pipe a dpll\n");
5927 } else if (dpll == (I915_READ(PCH_DPLL(1)) & 0x7fffffff) &&
5928 fp == I915_READ(PCH_FP0(1))) {
5929 intel_crtc->use_pll_a = false;
5930 DRM_DEBUG_KMS("using pipe b dpll\n");
5931 } else {
5932 DRM_DEBUG_KMS("no matching PLL configuration for pipe 2\n");
5933 return -EINVAL; 4575 return -EINVAL;
5934 } 4576 }
5935 } 4577 } else
4578 intel_put_pch_pll(intel_crtc);
5936 4579
5937 /* The LVDS pin pair needs to be on before the DPLLs are enabled. 4580 /* The LVDS pin pair needs to be on before the DPLLs are enabled.
5938 * This is an exception to the general rule that mode_set doesn't turn 4581 * This is an exception to the general rule that mode_set doesn't turn
@@ -5965,22 +4608,11 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5965 * appropriately here, but we need to look more thoroughly into how 4608 * appropriately here, but we need to look more thoroughly into how
5966 * panels behave in the two modes. 4609 * panels behave in the two modes.
5967 */ 4610 */
4611 temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
5968 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) 4612 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
5969 lvds_sync |= LVDS_HSYNC_POLARITY; 4613 temp |= LVDS_HSYNC_POLARITY;
5970 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) 4614 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
5971 lvds_sync |= LVDS_VSYNC_POLARITY; 4615 temp |= LVDS_VSYNC_POLARITY;
5972 if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
5973 != lvds_sync) {
5974 char flags[2] = "-+";
5975 DRM_INFO("Changing LVDS panel from "
5976 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
5977 flags[!(temp & LVDS_HSYNC_POLARITY)],
5978 flags[!(temp & LVDS_VSYNC_POLARITY)],
5979 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
5980 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
5981 temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
5982 temp |= lvds_sync;
5983 }
5984 I915_WRITE(PCH_LVDS, temp); 4616 I915_WRITE(PCH_LVDS, temp);
5985 } 4617 }
5986 4618
@@ -5990,7 +4622,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5990 pipeconf |= PIPECONF_DITHER_EN; 4622 pipeconf |= PIPECONF_DITHER_EN;
5991 pipeconf |= PIPECONF_DITHER_TYPE_SP; 4623 pipeconf |= PIPECONF_DITHER_TYPE_SP;
5992 } 4624 }
5993 if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { 4625 if (is_dp && !is_cpu_edp) {
5994 intel_dp_set_m_n(crtc, mode, adjusted_mode); 4626 intel_dp_set_m_n(crtc, mode, adjusted_mode);
5995 } else { 4627 } else {
5996 /* For non-DP output, clear any trans DP clock recovery setting.*/ 4628 /* For non-DP output, clear any trans DP clock recovery setting.*/
@@ -6000,13 +4632,11 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
6000 I915_WRITE(TRANSDPLINK_N1(pipe), 0); 4632 I915_WRITE(TRANSDPLINK_N1(pipe), 0);
6001 } 4633 }
6002 4634
6003 if (!intel_crtc->no_pll && 4635 if (intel_crtc->pch_pll) {
6004 (!has_edp_encoder || 4636 I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
6005 intel_encoder_is_pch_edp(&has_edp_encoder->base))) {
6006 I915_WRITE(PCH_DPLL(pipe), dpll);
6007 4637
6008 /* Wait for the clocks to stabilize. */ 4638 /* Wait for the clocks to stabilize. */
6009 POSTING_READ(PCH_DPLL(pipe)); 4639 POSTING_READ(intel_crtc->pch_pll->pll_reg);
6010 udelay(150); 4640 udelay(150);
6011 4641
6012 /* The pixel multiplier can only be updated once the 4642 /* The pixel multiplier can only be updated once the
@@ -6014,20 +4644,20 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
6014 * 4644 *
6015 * So write it again. 4645 * So write it again.
6016 */ 4646 */
6017 I915_WRITE(PCH_DPLL(pipe), dpll); 4647 I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
6018 } 4648 }
6019 4649
6020 intel_crtc->lowfreq_avail = false; 4650 intel_crtc->lowfreq_avail = false;
6021 if (!intel_crtc->no_pll) { 4651 if (intel_crtc->pch_pll) {
6022 if (is_lvds && has_reduced_clock && i915_powersave) { 4652 if (is_lvds && has_reduced_clock && i915_powersave) {
6023 I915_WRITE(PCH_FP1(pipe), fp2); 4653 I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2);
6024 intel_crtc->lowfreq_avail = true; 4654 intel_crtc->lowfreq_avail = true;
6025 if (HAS_PIPE_CXSR(dev)) { 4655 if (HAS_PIPE_CXSR(dev)) {
6026 DRM_DEBUG_KMS("enabling CxSR downclocking\n"); 4656 DRM_DEBUG_KMS("enabling CxSR downclocking\n");
6027 pipeconf |= PIPECONF_CXSR_DOWNCLOCK; 4657 pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
6028 } 4658 }
6029 } else { 4659 } else {
6030 I915_WRITE(PCH_FP1(pipe), fp); 4660 I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp);
6031 if (HAS_PIPE_CXSR(dev)) { 4661 if (HAS_PIPE_CXSR(dev)) {
6032 DRM_DEBUG_KMS("disabling CxSR downclocking\n"); 4662 DRM_DEBUG_KMS("disabling CxSR downclocking\n");
6033 pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK; 4663 pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
@@ -6080,10 +4710,8 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
6080 I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m); 4710 I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
6081 I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n); 4711 I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
6082 4712
6083 if (has_edp_encoder && 4713 if (is_cpu_edp)
6084 !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
6085 ironlake_set_pll_edp(crtc, adjusted_mode->clock); 4714 ironlake_set_pll_edp(crtc, adjusted_mode->clock);
6086 }
6087 4715
6088 I915_WRITE(PIPECONF(pipe), pipeconf); 4716 I915_WRITE(PIPECONF(pipe), pipeconf);
6089 POSTING_READ(PIPECONF(pipe)); 4717 POSTING_READ(PIPECONF(pipe));
@@ -6097,6 +4725,8 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
6097 4725
6098 intel_update_watermarks(dev); 4726 intel_update_watermarks(dev);
6099 4727
4728 intel_update_linetime_watermarks(dev, pipe, adjusted_mode);
4729
6100 return ret; 4730 return ret;
6101} 4731}
6102 4732
@@ -6451,7 +5081,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
6451 if (!visible && !intel_crtc->cursor_visible) 5081 if (!visible && !intel_crtc->cursor_visible)
6452 return; 5082 return;
6453 5083
6454 if (IS_IVYBRIDGE(dev)) { 5084 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
6455 I915_WRITE(CURPOS_IVB(pipe), pos); 5085 I915_WRITE(CURPOS_IVB(pipe), pos);
6456 ivb_update_cursor(crtc, base); 5086 ivb_update_cursor(crtc, base);
6457 } else { 5087 } else {
@@ -6461,9 +5091,6 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
6461 else 5091 else
6462 i9xx_update_cursor(crtc, base); 5092 i9xx_update_cursor(crtc, base);
6463 } 5093 }
6464
6465 if (visible)
6466 intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj);
6467} 5094}
6468 5095
6469static int intel_crtc_cursor_set(struct drm_crtc *crtc, 5096static int intel_crtc_cursor_set(struct drm_crtc *crtc,
@@ -6987,7 +5614,6 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
6987 mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1; 5614 mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
6988 5615
6989 drm_mode_set_name(mode); 5616 drm_mode_set_name(mode);
6990 drm_mode_set_crtcinfo(mode, 0);
6991 5617
6992 return mode; 5618 return mode;
6993} 5619}
@@ -7086,7 +5712,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
7086 if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) { 5712 if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
7087 int pipe = intel_crtc->pipe; 5713 int pipe = intel_crtc->pipe;
7088 int dpll_reg = DPLL(pipe); 5714 int dpll_reg = DPLL(pipe);
7089 u32 dpll; 5715 int dpll;
7090 5716
7091 DRM_DEBUG_DRIVER("downclocking LVDS\n"); 5717 DRM_DEBUG_DRIVER("downclocking LVDS\n");
7092 5718
@@ -7100,6 +5726,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
7100 if (!(dpll & DISPLAY_RATE_SELECT_FPA1)) 5726 if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
7101 DRM_DEBUG_DRIVER("failed to downclock LVDS!\n"); 5727 DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
7102 } 5728 }
5729
7103} 5730}
7104 5731
7105/** 5732/**
@@ -7158,12 +5785,16 @@ void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
7158 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 5785 if (!drm_core_check_feature(dev, DRIVER_MODESET))
7159 return; 5786 return;
7160 5787
7161 if (!dev_priv->busy) 5788 if (!dev_priv->busy) {
5789 intel_sanitize_pm(dev);
7162 dev_priv->busy = true; 5790 dev_priv->busy = true;
7163 else 5791 } else
7164 mod_timer(&dev_priv->idle_timer, jiffies + 5792 mod_timer(&dev_priv->idle_timer, jiffies +
7165 msecs_to_jiffies(GPU_IDLE_TIMEOUT)); 5793 msecs_to_jiffies(GPU_IDLE_TIMEOUT));
7166 5794
5795 if (obj == NULL)
5796 return;
5797
7167 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 5798 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7168 if (!crtc->fb) 5799 if (!crtc->fb)
7169 continue; 5800 continue;
@@ -7336,18 +5967,19 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
7336 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5967 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7337 unsigned long offset; 5968 unsigned long offset;
7338 u32 flip_mask; 5969 u32 flip_mask;
5970 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
7339 int ret; 5971 int ret;
7340 5972
7341 ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv)); 5973 ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
7342 if (ret) 5974 if (ret)
7343 goto out; 5975 goto err;
7344 5976
7345 /* Offset into the new buffer for cases of shared fbs between CRTCs */ 5977 /* Offset into the new buffer for cases of shared fbs between CRTCs */
7346 offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8; 5978 offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
7347 5979
7348 ret = BEGIN_LP_RING(6); 5980 ret = intel_ring_begin(ring, 6);
7349 if (ret) 5981 if (ret)
7350 goto out; 5982 goto err_unpin;
7351 5983
7352 /* Can't queue multiple flips, so wait for the previous 5984 /* Can't queue multiple flips, so wait for the previous
7353 * one to finish before executing the next. 5985 * one to finish before executing the next.
@@ -7356,15 +5988,19 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
7356 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; 5988 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
7357 else 5989 else
7358 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; 5990 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
7359 OUT_RING(MI_WAIT_FOR_EVENT | flip_mask); 5991 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
7360 OUT_RING(MI_NOOP); 5992 intel_ring_emit(ring, MI_NOOP);
7361 OUT_RING(MI_DISPLAY_FLIP | 5993 intel_ring_emit(ring, MI_DISPLAY_FLIP |
7362 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 5994 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
7363 OUT_RING(fb->pitches[0]); 5995 intel_ring_emit(ring, fb->pitches[0]);
7364 OUT_RING(obj->gtt_offset + offset); 5996 intel_ring_emit(ring, obj->gtt_offset + offset);
7365 OUT_RING(0); /* aux display base address, unused */ 5997 intel_ring_emit(ring, 0); /* aux display base address, unused */
7366 ADVANCE_LP_RING(); 5998 intel_ring_advance(ring);
7367out: 5999 return 0;
6000
6001err_unpin:
6002 intel_unpin_fb_obj(obj);
6003err:
7368 return ret; 6004 return ret;
7369} 6005}
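/* Editor's note: all of the converted flip paths below share this shape; a
 * condensed skeleton (not compilable on its own) to make the control flow
 * explicit. Note the conversion also gains an unpin on intel_ring_begin()
 * failure, which the old single "out:" label did not perform. */

static int queue_flip_skeleton(struct drm_device *dev,
			       struct intel_ring_buffer *ring,
			       struct drm_i915_gem_object *obj)
{
	int ret;

	ret = intel_pin_and_fence_fb_obj(dev, obj, ring); /* pin the fb first */
	if (ret)
		goto err;

	ret = intel_ring_begin(ring, 4);	/* reserve dwords up front */
	if (ret)
		goto err_unpin;			/* unwind the pin on failure */

	intel_ring_emit(ring, MI_NOOP);		/* flip commands go here */
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);		/* kick the tail pointer */
	return 0;

err_unpin:
	intel_unpin_fb_obj(obj);
err:
	return ret;
}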
7370 6006
@@ -7377,33 +6013,38 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
7377 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6013 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7378 unsigned long offset; 6014 unsigned long offset;
7379 u32 flip_mask; 6015 u32 flip_mask;
6016 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
7380 int ret; 6017 int ret;
7381 6018
7382 ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv)); 6019 ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
7383 if (ret) 6020 if (ret)
7384 goto out; 6021 goto err;
7385 6022
7386 /* Offset into the new buffer for cases of shared fbs between CRTCs */ 6023 /* Offset into the new buffer for cases of shared fbs between CRTCs */
7387 offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8; 6024 offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
7388 6025
7389 ret = BEGIN_LP_RING(6); 6026 ret = intel_ring_begin(ring, 6);
7390 if (ret) 6027 if (ret)
7391 goto out; 6028 goto err_unpin;
7392 6029
7393 if (intel_crtc->plane) 6030 if (intel_crtc->plane)
7394 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; 6031 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
7395 else 6032 else
7396 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; 6033 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
7397 OUT_RING(MI_WAIT_FOR_EVENT | flip_mask); 6034 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
7398 OUT_RING(MI_NOOP); 6035 intel_ring_emit(ring, MI_NOOP);
7399 OUT_RING(MI_DISPLAY_FLIP_I915 | 6036 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
7400 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 6037 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
7401 OUT_RING(fb->pitches[0]); 6038 intel_ring_emit(ring, fb->pitches[0]);
7402 OUT_RING(obj->gtt_offset + offset); 6039 intel_ring_emit(ring, obj->gtt_offset + offset);
7403 OUT_RING(MI_NOOP); 6040 intel_ring_emit(ring, MI_NOOP);
7404 6041
7405 ADVANCE_LP_RING(); 6042 intel_ring_advance(ring);
7406out: 6043 return 0;
6044
6045err_unpin:
6046 intel_unpin_fb_obj(obj);
6047err:
7407 return ret; 6048 return ret;
7408} 6049}
7409 6050
@@ -7415,24 +6056,25 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
7415 struct drm_i915_private *dev_priv = dev->dev_private; 6056 struct drm_i915_private *dev_priv = dev->dev_private;
7416 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6057 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7417 uint32_t pf, pipesrc; 6058 uint32_t pf, pipesrc;
6059 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
7418 int ret; 6060 int ret;
7419 6061
7420 ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv)); 6062 ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
7421 if (ret) 6063 if (ret)
7422 goto out; 6064 goto err;
7423 6065
7424 ret = BEGIN_LP_RING(4); 6066 ret = intel_ring_begin(ring, 4);
7425 if (ret) 6067 if (ret)
7426 goto out; 6068 goto err_unpin;
7427 6069
7428 /* i965+ uses the linear or tiled offsets from the 6070 /* i965+ uses the linear or tiled offsets from the
7429 * Display Registers (which do not change across a page-flip) 6071 * Display Registers (which do not change across a page-flip)
7430 * so we need only reprogram the base address. 6072 * so we need only reprogram the base address.
7431 */ 6073 */
7432 OUT_RING(MI_DISPLAY_FLIP | 6074 intel_ring_emit(ring, MI_DISPLAY_FLIP |
7433 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 6075 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
7434 OUT_RING(fb->pitches[0]); 6076 intel_ring_emit(ring, fb->pitches[0]);
7435 OUT_RING(obj->gtt_offset | obj->tiling_mode); 6077 intel_ring_emit(ring, obj->gtt_offset | obj->tiling_mode);
7436 6078
7437 /* XXX Enabling the panel-fitter across page-flip is so far 6079 /* XXX Enabling the panel-fitter across page-flip is so far
7438 * untested on non-native modes, so ignore it for now. 6080 * untested on non-native modes, so ignore it for now.
@@ -7440,9 +6082,13 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
7440 */ 6082 */
7441 pf = 0; 6083 pf = 0;
7442 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; 6084 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
7443 OUT_RING(pf | pipesrc); 6085 intel_ring_emit(ring, pf | pipesrc);
7444 ADVANCE_LP_RING(); 6086 intel_ring_advance(ring);
7445out: 6087 return 0;
6088
6089err_unpin:
6090 intel_unpin_fb_obj(obj);
6091err:
7446 return ret; 6092 return ret;
7447} 6093}
7448 6094
@@ -7453,21 +6099,22 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
7453{ 6099{
7454 struct drm_i915_private *dev_priv = dev->dev_private; 6100 struct drm_i915_private *dev_priv = dev->dev_private;
7455 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6101 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6102 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
7456 uint32_t pf, pipesrc; 6103 uint32_t pf, pipesrc;
7457 int ret; 6104 int ret;
7458 6105
7459 ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv)); 6106 ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
7460 if (ret) 6107 if (ret)
7461 goto out; 6108 goto err;
7462 6109
7463 ret = BEGIN_LP_RING(4); 6110 ret = intel_ring_begin(ring, 4);
7464 if (ret) 6111 if (ret)
7465 goto out; 6112 goto err_unpin;
7466 6113
7467 OUT_RING(MI_DISPLAY_FLIP | 6114 intel_ring_emit(ring, MI_DISPLAY_FLIP |
7468 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 6115 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
7469 OUT_RING(fb->pitches[0] | obj->tiling_mode); 6116 intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
7470 OUT_RING(obj->gtt_offset); 6117 intel_ring_emit(ring, obj->gtt_offset);
7471 6118
7472 /* Contrary to the suggestions in the documentation, 6119 /* Contrary to the suggestions in the documentation,
7473 * "Enable Panel Fitter" does not seem to be required when page 6120 * "Enable Panel Fitter" does not seem to be required when page
@@ -7477,9 +6124,13 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
7477 */ 6124 */
7478 pf = 0; 6125 pf = 0;
7479 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; 6126 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
7480 OUT_RING(pf | pipesrc); 6127 intel_ring_emit(ring, pf | pipesrc);
7481 ADVANCE_LP_RING(); 6128 intel_ring_advance(ring);
7482out: 6129 return 0;
6130
6131err_unpin:
6132 intel_unpin_fb_obj(obj);
6133err:
7483 return ret; 6134 return ret;
7484} 6135}
7485 6136
@@ -7501,18 +6152,22 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
7501 6152
7502 ret = intel_pin_and_fence_fb_obj(dev, obj, ring); 6153 ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
7503 if (ret) 6154 if (ret)
7504 goto out; 6155 goto err;
7505 6156
7506 ret = intel_ring_begin(ring, 4); 6157 ret = intel_ring_begin(ring, 4);
7507 if (ret) 6158 if (ret)
7508 goto out; 6159 goto err_unpin;
7509 6160
7510 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19)); 6161 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19));
7511 intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode)); 6162 intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
7512 intel_ring_emit(ring, (obj->gtt_offset)); 6163 intel_ring_emit(ring, (obj->gtt_offset));
7513 intel_ring_emit(ring, (MI_NOOP)); 6164 intel_ring_emit(ring, (MI_NOOP));
7514 intel_ring_advance(ring); 6165 intel_ring_advance(ring);
7515out: 6166 return 0;
6167
6168err_unpin:
6169 intel_unpin_fb_obj(obj);
6170err:
7516 return ret; 6171 return ret;
7517} 6172}
7518 6173
@@ -7589,6 +6244,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
7589 goto cleanup_pending; 6244 goto cleanup_pending;
7590 6245
7591 intel_disable_fbc(dev); 6246 intel_disable_fbc(dev);
6247 intel_mark_busy(dev, obj);
7592 mutex_unlock(&dev->struct_mutex); 6248 mutex_unlock(&dev->struct_mutex);
7593 6249
7594 trace_i915_flip_request(intel_crtc->plane, obj); 6250 trace_i915_flip_request(intel_crtc->plane, obj);
@@ -7617,10 +6273,11 @@ static void intel_sanitize_modesetting(struct drm_device *dev,
7617{ 6273{
7618 struct drm_i915_private *dev_priv = dev->dev_private; 6274 struct drm_i915_private *dev_priv = dev->dev_private;
7619 u32 reg, val; 6275 u32 reg, val;
6276 int i;
7620 6277
7621 /* Clear any frame start delays used for debugging left by the BIOS */ 6278 /* Clear any frame start delays used for debugging left by the BIOS */
7622 for_each_pipe(pipe) { 6279 for_each_pipe(i) {
7623 reg = PIPECONF(pipe); 6280 reg = PIPECONF(i);
7624 I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); 6281 I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
7625 } 6282 }
7626 6283
@@ -7690,6 +6347,23 @@ static const struct drm_crtc_funcs intel_crtc_funcs = {
7690 .page_flip = intel_crtc_page_flip, 6347 .page_flip = intel_crtc_page_flip,
7691}; 6348};
7692 6349
6350static void intel_pch_pll_init(struct drm_device *dev)
6351{
6352 drm_i915_private_t *dev_priv = dev->dev_private;
6353 int i;
6354
6355 if (dev_priv->num_pch_pll == 0) {
6356 DRM_DEBUG_KMS("No PCH PLLs on this hardware, skipping initialisation\n");
6357 return;
6358 }
6359
6360 for (i = 0; i < dev_priv->num_pch_pll; i++) {
6361 dev_priv->pch_plls[i].pll_reg = _PCH_DPLL(i);
6362 dev_priv->pch_plls[i].fp0_reg = _PCH_FP0(i);
6363 dev_priv->pch_plls[i].fp1_reg = _PCH_FP1(i);
6364 }
6365}
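/* Editor's note: intel_get_pch_pll()/intel_put_pch_pll() live outside this
 * hunk; what follows is a hypothetical, standalone sketch of how a shared-PLL
 * lookup of this shape typically works (reuse a PLL already programmed with
 * the same values, else claim a free one), not the driver's implementation. */

#include <stdint.h>
#include <stddef.h>

struct pch_pll_slot {
	uint32_t dpll, fp0;	/* cached programming */
	int refcount;		/* 0 == free */
};

static struct pch_pll_slot *get_pch_pll(struct pch_pll_slot *plls, int n,
					uint32_t dpll, uint32_t fp0)
{
	int i;

	for (i = 0; i < n; i++)		/* reuse a matching, live PLL */
		if (plls[i].refcount && plls[i].dpll == dpll &&
		    plls[i].fp0 == fp0) {
			plls[i].refcount++;
			return &plls[i];
		}

	for (i = 0; i < n; i++)		/* otherwise claim a free slot */
		if (!plls[i].refcount) {
			plls[i].dpll = dpll;
			plls[i].fp0 = fp0;
			plls[i].refcount = 1;
			return &plls[i];
		}

	return NULL;	/* caller fails with -EINVAL, as above */
}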
6366
7693static void intel_crtc_init(struct drm_device *dev, int pipe) 6367static void intel_crtc_init(struct drm_device *dev, int pipe)
7694{ 6368{
7695 drm_i915_private_t *dev_priv = dev->dev_private; 6369 drm_i915_private_t *dev_priv = dev->dev_private;
@@ -7727,8 +6401,6 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
7727 intel_crtc->bpp = 24; /* default for pre-Ironlake */ 6401 intel_crtc->bpp = 24; /* default for pre-Ironlake */
7728 6402
7729 if (HAS_PCH_SPLIT(dev)) { 6403 if (HAS_PCH_SPLIT(dev)) {
7730 if (pipe == 2 && IS_IVYBRIDGE(dev))
7731 intel_crtc->no_pll = true;
7732 intel_helper_funcs.prepare = ironlake_crtc_prepare; 6404 intel_helper_funcs.prepare = ironlake_crtc_prepare;
7733 intel_helper_funcs.commit = ironlake_crtc_commit; 6405 intel_helper_funcs.commit = ironlake_crtc_commit;
7734 } else { 6406 } else {
@@ -7747,15 +6419,12 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
7747int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, 6419int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
7748 struct drm_file *file) 6420 struct drm_file *file)
7749{ 6421{
7750 drm_i915_private_t *dev_priv = dev->dev_private;
7751 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; 6422 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
7752 struct drm_mode_object *drmmode_obj; 6423 struct drm_mode_object *drmmode_obj;
7753 struct intel_crtc *crtc; 6424 struct intel_crtc *crtc;
7754 6425
7755 if (!dev_priv) { 6426 if (!drm_core_check_feature(dev, DRIVER_MODESET))
7756 DRM_ERROR("called with no initialization\n"); 6427 return -ENODEV;
7757 return -EINVAL;
7758 }
7759 6428
7760 drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id, 6429 drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
7761 DRM_MODE_OBJECT_CRTC); 6430 DRM_MODE_OBJECT_CRTC);
@@ -7828,12 +6497,31 @@ static void intel_setup_outputs(struct drm_device *dev)
7828 6497
7829 intel_crt_init(dev); 6498 intel_crt_init(dev);
7830 6499
7831 if (HAS_PCH_SPLIT(dev)) { 6500 if (IS_HASWELL(dev)) {
6501 int found;
6502
6503 /* Haswell uses DDI functions to detect digital outputs */
6504 found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
6505 /* DDI A only supports eDP */
6506 if (found)
6507 intel_ddi_init(dev, PORT_A);
6508
6509 /* DDI B, C and D detection is indicated by the SFUSE_STRAP
6510 * register */
6511 found = I915_READ(SFUSE_STRAP);
6512
6513 if (found & SFUSE_STRAP_DDIB_DETECTED)
6514 intel_ddi_init(dev, PORT_B);
6515 if (found & SFUSE_STRAP_DDIC_DETECTED)
6516 intel_ddi_init(dev, PORT_C);
6517 if (found & SFUSE_STRAP_DDID_DETECTED)
6518 intel_ddi_init(dev, PORT_D);
6519 } else if (HAS_PCH_SPLIT(dev)) {
7832 int found; 6520 int found;
7833 6521
7834 if (I915_READ(HDMIB) & PORT_DETECTED) { 6522 if (I915_READ(HDMIB) & PORT_DETECTED) {
7835 /* PCH SDVOB multiplex with HDMIB */ 6523 /* PCH SDVOB multiplex with HDMIB */
7836 found = intel_sdvo_init(dev, PCH_SDVOB); 6524 found = intel_sdvo_init(dev, PCH_SDVOB, true);
7837 if (!found) 6525 if (!found)
7838 intel_hdmi_init(dev, HDMIB); 6526 intel_hdmi_init(dev, HDMIB);
7839 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) 6527 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
@@ -7857,7 +6545,7 @@ static void intel_setup_outputs(struct drm_device *dev)
7857 6545
7858 if (I915_READ(SDVOB) & SDVO_DETECTED) { 6546 if (I915_READ(SDVOB) & SDVO_DETECTED) {
7859 DRM_DEBUG_KMS("probing SDVOB\n"); 6547 DRM_DEBUG_KMS("probing SDVOB\n");
7860 found = intel_sdvo_init(dev, SDVOB); 6548 found = intel_sdvo_init(dev, SDVOB, true);
7861 if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) { 6549 if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
7862 DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); 6550 DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
7863 intel_hdmi_init(dev, SDVOB); 6551 intel_hdmi_init(dev, SDVOB);
@@ -7873,7 +6561,7 @@ static void intel_setup_outputs(struct drm_device *dev)
7873 6561
7874 if (I915_READ(SDVOB) & SDVO_DETECTED) { 6562 if (I915_READ(SDVOB) & SDVO_DETECTED) {
7875 DRM_DEBUG_KMS("probing SDVOC\n"); 6563 DRM_DEBUG_KMS("probing SDVOC\n");
7876 found = intel_sdvo_init(dev, SDVOC); 6564 found = intel_sdvo_init(dev, SDVOC, false);
7877 } 6565 }
7878 6566
7879 if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) { 6567 if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {
@@ -8002,882 +6690,6 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
8002 .output_poll_changed = intel_fb_output_poll_changed, 6690 .output_poll_changed = intel_fb_output_poll_changed,
8003}; 6691};
8004 6692
8005static struct drm_i915_gem_object *
8006intel_alloc_context_page(struct drm_device *dev)
8007{
8008 struct drm_i915_gem_object *ctx;
8009 int ret;
8010
8011 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
8012
8013 ctx = i915_gem_alloc_object(dev, 4096);
8014 if (!ctx) {
8015 DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
8016 return NULL;
8017 }
8018
8019 ret = i915_gem_object_pin(ctx, 4096, true);
8020 if (ret) {
8021 DRM_ERROR("failed to pin power context: %d\n", ret);
8022 goto err_unref;
8023 }
8024
8025 ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
8026 if (ret) {
8027 DRM_ERROR("failed to set-domain on power context: %d\n", ret);
8028 goto err_unpin;
8029 }
8030
8031 return ctx;
8032
8033err_unpin:
8034 i915_gem_object_unpin(ctx);
8035err_unref:
8036 drm_gem_object_unreference(&ctx->base);
8037 mutex_unlock(&dev->struct_mutex);
8038 return NULL;
8039}
8040
8041bool ironlake_set_drps(struct drm_device *dev, u8 val)
8042{
8043 struct drm_i915_private *dev_priv = dev->dev_private;
8044 u16 rgvswctl;
8045
8046 rgvswctl = I915_READ16(MEMSWCTL);
8047 if (rgvswctl & MEMCTL_CMD_STS) {
8048 DRM_DEBUG("gpu busy, RCS change rejected\n");
8049 return false; /* still busy with another command */
8050 }
8051
8052 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
8053 (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
8054 I915_WRITE16(MEMSWCTL, rgvswctl);
8055 POSTING_READ16(MEMSWCTL);
8056
8057 rgvswctl |= MEMCTL_CMD_STS;
8058 I915_WRITE16(MEMSWCTL, rgvswctl);
8059
8060 return true;
8061}
8062
8063void ironlake_enable_drps(struct drm_device *dev)
8064{
8065 struct drm_i915_private *dev_priv = dev->dev_private;
8066 u32 rgvmodectl = I915_READ(MEMMODECTL);
8067 u8 fmax, fmin, fstart, vstart;
8068
8069 /* Enable temp reporting */
8070 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
8071 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
8072
8073 /* 100ms RC evaluation intervals */
8074 I915_WRITE(RCUPEI, 100000);
8075 I915_WRITE(RCDNEI, 100000);
8076
8077 /* Set max/min thresholds to 90ms and 80ms respectively */
8078 I915_WRITE(RCBMAXAVG, 90000);
8079 I915_WRITE(RCBMINAVG, 80000);
8080
8081 I915_WRITE(MEMIHYST, 1);
8082
8083 /* Set up min, max, and cur for interrupt handling */
8084 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
8085 fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
8086 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
8087 MEMMODE_FSTART_SHIFT;
8088
8089 vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
8090 PXVFREQ_PX_SHIFT;
8091
8092 dev_priv->fmax = fmax; /* IPS callback will increase this */
8093 dev_priv->fstart = fstart;
8094
8095 dev_priv->max_delay = fstart;
8096 dev_priv->min_delay = fmin;
8097 dev_priv->cur_delay = fstart;
8098
8099 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
8100 fmax, fmin, fstart);
8101
8102 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
8103
8104 /*
8105 * Interrupts will be enabled in ironlake_irq_postinstall
8106 */
8107
8108 I915_WRITE(VIDSTART, vstart);
8109 POSTING_READ(VIDSTART);
8110
8111 rgvmodectl |= MEMMODE_SWMODE_EN;
8112 I915_WRITE(MEMMODECTL, rgvmodectl);
8113
8114 if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
8115 DRM_ERROR("stuck trying to change perf mode\n");
8116 msleep(1);
8117
8118 ironlake_set_drps(dev, fstart);
8119
8120 dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
8121 I915_READ(0x112e0);
8122 dev_priv->last_time1 = jiffies_to_msecs(jiffies);
8123 dev_priv->last_count2 = I915_READ(0x112f4);
8124 getrawmonotonic(&dev_priv->last_time2);
8125}
8126
8127void ironlake_disable_drps(struct drm_device *dev)
8128{
8129 struct drm_i915_private *dev_priv = dev->dev_private;
8130 u16 rgvswctl = I915_READ16(MEMSWCTL);
8131
8132 /* Ack interrupts, disable EFC interrupt */
8133 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
8134 I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
8135 I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
8136 I915_WRITE(DEIIR, DE_PCU_EVENT);
8137 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
8138
8139 /* Go back to the starting frequency */
8140 ironlake_set_drps(dev, dev_priv->fstart);
8141 msleep(1);
8142 rgvswctl |= MEMCTL_CMD_STS;
8143 I915_WRITE(MEMSWCTL, rgvswctl);
8144 msleep(1);
8145
8146}
8147
8148void gen6_set_rps(struct drm_device *dev, u8 val)
8149{
8150 struct drm_i915_private *dev_priv = dev->dev_private;
8151 u32 swreq;
8152
8153 swreq = (val & 0x3ff) << 25;
8154 I915_WRITE(GEN6_RPNSWREQ, swreq);
8155}
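/* Editor's note: the one-liner above is worth spelling out -- it packs a
 * frequency ratio into the top bits of RPNSWREQ. Judging by the
 * "pcu_mbox * 50" debug print further down, one ratio step appears to be
 * 50 MHz on this hardware (an inference, not stated in this diff).
 * Standalone sketch: */

#include <stdint.h>

static uint32_t rps_swreq(uint8_t ratio)
{
	return ((uint32_t)ratio & 0x3ff) << 25;	/* ratio 10 -> ~500 MHz */
}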
8156
8157void gen6_disable_rps(struct drm_device *dev)
8158{
8159 struct drm_i915_private *dev_priv = dev->dev_private;
8160
8161 I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
8162 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
8163 I915_WRITE(GEN6_PMIER, 0);
8164 /* Complete PM interrupt masking here doesn't race with the rps work
8165 * item again unmasking PM interrupts because that is using a different
8166 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
8167 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
8168
8169 spin_lock_irq(&dev_priv->rps_lock);
8170 dev_priv->pm_iir = 0;
8171 spin_unlock_irq(&dev_priv->rps_lock);
8172
8173 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
8174}
8175
8176static unsigned long intel_pxfreq(u32 vidfreq)
8177{
8178 unsigned long freq;
8179 int div = (vidfreq & 0x3f0000) >> 16;
8180 int post = (vidfreq & 0x3000) >> 12;
8181 int pre = (vidfreq & 0x7);
8182
8183 if (!pre)
8184 return 0;
8185
8186 freq = ((div * 133333) / ((1<<post) * pre));
8187
8188 return freq;
8189}
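/* Editor's note: standalone restatement of the decode above with one worked
 * example. The 133333 base and the field layout are taken directly from the
 * function; the sample register value is made up. */

static unsigned long pxfreq_example(void)
{
	unsigned int vidfreq = 0x0f1001;	/* div=15, post=1, pre=1 */
	int div = (vidfreq & 0x3f0000) >> 16;
	int post = (vidfreq & 0x3000) >> 12;
	int pre = vidfreq & 0x7;

	/* 15 * 133333 / ((1 << 1) * 1) = 999997, i.e. ~1 GHz in kHz */
	return (div * 133333ul) / ((1u << post) * pre);
}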
8190
8191void intel_init_emon(struct drm_device *dev)
8192{
8193 struct drm_i915_private *dev_priv = dev->dev_private;
8194 u32 lcfuse;
8195 u8 pxw[16];
8196 int i;
8197
8198 /* Disable to program */
8199 I915_WRITE(ECR, 0);
8200 POSTING_READ(ECR);
8201
8202 /* Program energy weights for various events */
8203 I915_WRITE(SDEW, 0x15040d00);
8204 I915_WRITE(CSIEW0, 0x007f0000);
8205 I915_WRITE(CSIEW1, 0x1e220004);
8206 I915_WRITE(CSIEW2, 0x04000004);
8207
8208 for (i = 0; i < 5; i++)
8209 I915_WRITE(PEW + (i * 4), 0);
8210 for (i = 0; i < 3; i++)
8211 I915_WRITE(DEW + (i * 4), 0);
8212
8213 /* Program P-state weights to account for frequency power adjustment */
8214 for (i = 0; i < 16; i++) {
8215 u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
8216 unsigned long freq = intel_pxfreq(pxvidfreq);
8217 unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
8218 PXVFREQ_PX_SHIFT;
8219 unsigned long val;
8220
8221 val = vid * vid;
8222 val *= (freq / 1000);
8223 val *= 255;
8224 val /= (127*127*900);
8225 if (val > 0xff)
8226 DRM_ERROR("bad pxval: %ld\n", val);
8227 pxw[i] = val;
8228 }
8229 /* Render standby states get 0 weight */
8230 pxw[14] = 0;
8231 pxw[15] = 0;
8232
8233 for (i = 0; i < 4; i++) {
8234 u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
8235 (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
8236 I915_WRITE(PXW + (i * 4), val);
8237 }
8238
8239 /* Adjust magic regs to magic values (more experimental results) */
8240 I915_WRITE(OGW0, 0);
8241 I915_WRITE(OGW1, 0);
8242 I915_WRITE(EG0, 0x00007f00);
8243 I915_WRITE(EG1, 0x0000000e);
8244 I915_WRITE(EG2, 0x000e0000);
8245 I915_WRITE(EG3, 0x68000300);
8246 I915_WRITE(EG4, 0x42000000);
8247 I915_WRITE(EG5, 0x00140031);
8248 I915_WRITE(EG6, 0);
8249 I915_WRITE(EG7, 0);
8250
8251 for (i = 0; i < 8; i++)
8252 I915_WRITE(PXWL + (i * 4), 0);
8253
8254 /* Enable PMON + select events */
8255 I915_WRITE(ECR, 0x80000019);
8256
8257 lcfuse = I915_READ(LCFUSE02);
8258
8259 dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
8260}
8261
8262static int intel_enable_rc6(struct drm_device *dev)
8263{
8264 /*
8265 * Respect the kernel parameter if it is set
8266 */
8267 if (i915_enable_rc6 >= 0)
8268 return i915_enable_rc6;
8269
8270 /*
8271 * Disable RC6 on Ironlake
8272 */
8273 if (INTEL_INFO(dev)->gen == 5)
8274 return 0;
8275
8276 /*
8277 * Disable rc6 on Sandybridge
8278 */
8279 if (INTEL_INFO(dev)->gen == 6) {
8280 DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
8281 return INTEL_RC6_ENABLE;
8282 }
8283 DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
8284 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
8285}
8286
8287void gen6_enable_rps(struct drm_i915_private *dev_priv)
8288{
8289 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
8290 u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
8291 u32 pcu_mbox, rc6_mask = 0;
8292 u32 gtfifodbg;
8293 int cur_freq, min_freq, max_freq;
8294 int rc6_mode;
8295 int i;
8296
8297 /* Here begins a magic sequence of register writes to enable
8298 * auto-downclocking.
8299 *
8300 * Perhaps there might be some value in exposing these to
8301 * userspace...
8302 */
8303 I915_WRITE(GEN6_RC_STATE, 0);
8304 mutex_lock(&dev_priv->dev->struct_mutex);
8305
8306 /* Clear the DBG now so we don't confuse earlier errors */
8307 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
8308 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
8309 I915_WRITE(GTFIFODBG, gtfifodbg);
8310 }
8311
8312 gen6_gt_force_wake_get(dev_priv);
8313
8314 /* disable the counters and set deterministic thresholds */
8315 I915_WRITE(GEN6_RC_CONTROL, 0);
8316
8317 I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
8318 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
8319 I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
8320 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
8321 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
8322
8323 for (i = 0; i < I915_NUM_RINGS; i++)
8324 I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10);
8325
8326 I915_WRITE(GEN6_RC_SLEEP, 0);
8327 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
8328 I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
8329 I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
8330 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
8331
8332 rc6_mode = intel_enable_rc6(dev_priv->dev);
8333 if (rc6_mode & INTEL_RC6_ENABLE)
8334 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
8335
8336 if (rc6_mode & INTEL_RC6p_ENABLE)
8337 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
8338
8339 if (rc6_mode & INTEL_RC6pp_ENABLE)
8340 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
8341
8342 DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
8343 (rc6_mode & INTEL_RC6_ENABLE) ? "on" : "off",
8344 (rc6_mode & INTEL_RC6p_ENABLE) ? "on" : "off",
8345 (rc6_mode & INTEL_RC6pp_ENABLE) ? "on" : "off");
8346
8347 I915_WRITE(GEN6_RC_CONTROL,
8348 rc6_mask |
8349 GEN6_RC_CTL_EI_MODE(1) |
8350 GEN6_RC_CTL_HW_ENABLE);
8351
8352 I915_WRITE(GEN6_RPNSWREQ,
8353 GEN6_FREQUENCY(10) |
8354 GEN6_OFFSET(0) |
8355 GEN6_AGGRESSIVE_TURBO);
8356 I915_WRITE(GEN6_RC_VIDEO_FREQ,
8357 GEN6_FREQUENCY(12));
8358
8359 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
8360 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
8361 18 << 24 |
8362 6 << 16);
8363 I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
8364 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
8365 I915_WRITE(GEN6_RP_UP_EI, 100000);
8366 I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
8367 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
8368 I915_WRITE(GEN6_RP_CONTROL,
8369 GEN6_RP_MEDIA_TURBO |
8370 GEN6_RP_MEDIA_HW_MODE |
8371 GEN6_RP_MEDIA_IS_GFX |
8372 GEN6_RP_ENABLE |
8373 GEN6_RP_UP_BUSY_AVG |
8374 GEN6_RP_DOWN_IDLE_CONT);
8375
8376 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
8377 500))
8378 DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
8379
8380 I915_WRITE(GEN6_PCODE_DATA, 0);
8381 I915_WRITE(GEN6_PCODE_MAILBOX,
8382 GEN6_PCODE_READY |
8383 GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
8384 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
8385 500))
8386 DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
8387
8388 min_freq = (rp_state_cap & 0xff0000) >> 16;
8389 max_freq = rp_state_cap & 0xff;
8390 cur_freq = (gt_perf_status & 0xff00) >> 8;
8391
8392 /* Check for overclock support */
8393 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
8394 500))
8395 DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
8396 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
8397 pcu_mbox = I915_READ(GEN6_PCODE_DATA);
8398 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
8399 500))
8400 DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
8401 if (pcu_mbox & (1<<31)) { /* OC supported */
8402 max_freq = pcu_mbox & 0xff;
8403		DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", max_freq * 50);
8404 }
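	/* Worked example (assumed value): pcu_mbox == 0x8000001e has bit 31
	 * set, so overclocking is supported and the low byte (0x1e == 30)
	 * becomes the new max ratio, i.e. 30 * 50 = 1500MHz in 50MHz steps.
	 */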
8405
8406	/* Ratio values, in units of 50MHz (see the *50 in the debug print above) */
8407 dev_priv->max_delay = max_freq;
8408 dev_priv->min_delay = min_freq;
8409 dev_priv->cur_delay = cur_freq;
8410
8411 /* requires MSI enabled */
8412 I915_WRITE(GEN6_PMIER,
8413 GEN6_PM_MBOX_EVENT |
8414 GEN6_PM_THERMAL_EVENT |
8415 GEN6_PM_RP_DOWN_TIMEOUT |
8416 GEN6_PM_RP_UP_THRESHOLD |
8417 GEN6_PM_RP_DOWN_THRESHOLD |
8418 GEN6_PM_RP_UP_EI_EXPIRED |
8419 GEN6_PM_RP_DOWN_EI_EXPIRED);
8420 spin_lock_irq(&dev_priv->rps_lock);
8421 WARN_ON(dev_priv->pm_iir != 0);
8422 I915_WRITE(GEN6_PMIMR, 0);
8423 spin_unlock_irq(&dev_priv->rps_lock);
8424 /* enable all PM interrupts */
8425 I915_WRITE(GEN6_PMINTRMSK, 0);
8426
8427 gen6_gt_force_wake_put(dev_priv);
8428 mutex_unlock(&dev_priv->dev->struct_mutex);
8429}
8430
8431void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
8432{
8433 int min_freq = 15;
8434 int gpu_freq, ia_freq, max_ia_freq;
8435 int scaling_factor = 180;
8436
8437 max_ia_freq = cpufreq_quick_get_max(0);
8438 /*
8439	 * Default to the measured frequency if none is found; the PCU will
8440	 * ensure we don't go over.
8441 */
8442 if (!max_ia_freq)
8443 max_ia_freq = tsc_khz;
8444
8445 /* Convert from kHz to MHz */
8446 max_ia_freq /= 1000;
8447
8448 mutex_lock(&dev_priv->dev->struct_mutex);
8449
8450 /*
8451 * For each potential GPU frequency, load a ring frequency we'd like
8452 * to use for memory access. We do this by specifying the IA frequency
8453 * the PCU should use as a reference to determine the ring frequency.
8454 */
8455 for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
8456 gpu_freq--) {
8457 int diff = dev_priv->max_delay - gpu_freq;
8458
8459 /*
8460 * For GPU frequencies less than 750MHz, just use the lowest
8461 * ring freq.
8462 */
8463 if (gpu_freq < min_freq)
8464 ia_freq = 800;
8465 else
8466 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
8467 ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
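		/* Worked example (assumed numbers): with max_ia_freq == 3000MHz
		 * and a GPU ratio 4 steps below max (diff == 4), ia_freq =
		 * 3000 - (4 * 180) / 2 = 2640MHz, which DIV_ROUND_CLOSEST
		 * turns into an IA ratio of 26.
		 */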
8468
8469 I915_WRITE(GEN6_PCODE_DATA,
8470 (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
8471 gpu_freq);
8472 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
8473 GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
8474 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
8475 GEN6_PCODE_READY) == 0, 10)) {
8476 DRM_ERROR("pcode write of freq table timed out\n");
8477 continue;
8478 }
8479 }
8480
8481 mutex_unlock(&dev_priv->dev->struct_mutex);
8482}
8483
8484static void ironlake_init_clock_gating(struct drm_device *dev)
8485{
8486 struct drm_i915_private *dev_priv = dev->dev_private;
8487 uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
8488
8489 /* Required for FBC */
8490 dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
8491 DPFCRUNIT_CLOCK_GATE_DISABLE |
8492 DPFDUNIT_CLOCK_GATE_DISABLE;
8493 /* Required for CxSR */
8494 dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;
8495
8496 I915_WRITE(PCH_3DCGDIS0,
8497 MARIUNIT_CLOCK_GATE_DISABLE |
8498 SVSMUNIT_CLOCK_GATE_DISABLE);
8499 I915_WRITE(PCH_3DCGDIS1,
8500 VFMUNIT_CLOCK_GATE_DISABLE);
8501
8502 I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
8503
8504 /*
8505	 * According to the spec, the following bits should be set in
8506	 * order to enable memory self-refresh.
8507 * The bit 22/21 of 0x42004
8508 * The bit 5 of 0x42020
8509 * The bit 15 of 0x45000
8510 */
8511 I915_WRITE(ILK_DISPLAY_CHICKEN2,
8512 (I915_READ(ILK_DISPLAY_CHICKEN2) |
8513 ILK_DPARB_GATE | ILK_VSDPFD_FULL));
8514 I915_WRITE(ILK_DSPCLK_GATE,
8515 (I915_READ(ILK_DSPCLK_GATE) |
8516 ILK_DPARB_CLK_GATE));
8517 I915_WRITE(DISP_ARB_CTL,
8518 (I915_READ(DISP_ARB_CTL) |
8519 DISP_FBC_WM_DIS));
8520 I915_WRITE(WM3_LP_ILK, 0);
8521 I915_WRITE(WM2_LP_ILK, 0);
8522 I915_WRITE(WM1_LP_ILK, 0);
8523
8524 /*
8525	 * Based on the hardware documentation, the following bits
8526	 * should be set unconditionally in order to enable FBC.
8527 * The bit 22 of 0x42000
8528 * The bit 22 of 0x42004
8529 * The bit 7,8,9 of 0x42020.
8530 */
8531 if (IS_IRONLAKE_M(dev)) {
8532 I915_WRITE(ILK_DISPLAY_CHICKEN1,
8533 I915_READ(ILK_DISPLAY_CHICKEN1) |
8534 ILK_FBCQ_DIS);
8535 I915_WRITE(ILK_DISPLAY_CHICKEN2,
8536 I915_READ(ILK_DISPLAY_CHICKEN2) |
8537 ILK_DPARB_GATE);
8538 I915_WRITE(ILK_DSPCLK_GATE,
8539 I915_READ(ILK_DSPCLK_GATE) |
8540 ILK_DPFC_DIS1 |
8541 ILK_DPFC_DIS2 |
8542 ILK_CLK_FBC);
8543 }
8544
8545 I915_WRITE(ILK_DISPLAY_CHICKEN2,
8546 I915_READ(ILK_DISPLAY_CHICKEN2) |
8547 ILK_ELPIN_409_SELECT);
8548 I915_WRITE(_3D_CHICKEN2,
8549 _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
8550 _3D_CHICKEN2_WM_READ_PIPELINED);
8551}
8552
8553static void gen6_init_clock_gating(struct drm_device *dev)
8554{
8555 struct drm_i915_private *dev_priv = dev->dev_private;
8556 int pipe;
8557 uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
8558
8559 I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
8560
8561 I915_WRITE(ILK_DISPLAY_CHICKEN2,
8562 I915_READ(ILK_DISPLAY_CHICKEN2) |
8563 ILK_ELPIN_409_SELECT);
8564
8565 I915_WRITE(WM3_LP_ILK, 0);
8566 I915_WRITE(WM2_LP_ILK, 0);
8567 I915_WRITE(WM1_LP_ILK, 0);
8568
8569 I915_WRITE(GEN6_UCGCTL1,
8570 I915_READ(GEN6_UCGCTL1) |
8571 GEN6_BLBUNIT_CLOCK_GATE_DISABLE);
8572
8573 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
8574 * gating disable must be set. Failure to set it results in
8575 * flickering pixels due to Z write ordering failures after
8576 * some amount of runtime in the Mesa "fire" demo, and Unigine
8577 * Sanctuary and Tropics, and apparently anything else with
8578 * alpha test or pixel discard.
8579 *
8580 * According to the spec, bit 11 (RCCUNIT) must also be set,
8581	 * but we didn't debug actual testcases to confirm it.
8582 */
8583 I915_WRITE(GEN6_UCGCTL2,
8584 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
8585 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
8586
8587 /*
8588	 * According to the spec, the following bits should be
8589	 * set in order to enable memory self-refresh and FBC:
8590 * The bit21 and bit22 of 0x42000
8591 * The bit21 and bit22 of 0x42004
8592 * The bit5 and bit7 of 0x42020
8593 * The bit14 of 0x70180
8594 * The bit14 of 0x71180
8595 */
8596 I915_WRITE(ILK_DISPLAY_CHICKEN1,
8597 I915_READ(ILK_DISPLAY_CHICKEN1) |
8598 ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
8599 I915_WRITE(ILK_DISPLAY_CHICKEN2,
8600 I915_READ(ILK_DISPLAY_CHICKEN2) |
8601 ILK_DPARB_GATE | ILK_VSDPFD_FULL);
8602 I915_WRITE(ILK_DSPCLK_GATE,
8603 I915_READ(ILK_DSPCLK_GATE) |
8604 ILK_DPARB_CLK_GATE |
8605 ILK_DPFD_CLK_GATE);
8606
8607 for_each_pipe(pipe) {
8608 I915_WRITE(DSPCNTR(pipe),
8609 I915_READ(DSPCNTR(pipe)) |
8610 DISPPLANE_TRICKLE_FEED_DISABLE);
8611 intel_flush_display_plane(dev_priv, pipe);
8612 }
8613}
8614
8615static void ivybridge_init_clock_gating(struct drm_device *dev)
8616{
8617 struct drm_i915_private *dev_priv = dev->dev_private;
8618 int pipe;
8619 uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
8620
8621 I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
8622
8623 I915_WRITE(WM3_LP_ILK, 0);
8624 I915_WRITE(WM2_LP_ILK, 0);
8625 I915_WRITE(WM1_LP_ILK, 0);
8626
8627 /* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
8628 * This implements the WaDisableRCZUnitClockGating workaround.
8629 */
8630 I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
8631
8632 I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
8633
8634 I915_WRITE(IVB_CHICKEN3,
8635 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
8636 CHICKEN3_DGMG_DONE_FIX_DISABLE);
8637
8638 /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
8639 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
8640 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
8641
8642 /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
8643 I915_WRITE(GEN7_L3CNTLREG1,
8644 GEN7_WA_FOR_GEN7_L3_CONTROL);
8645 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
8646 GEN7_WA_L3_CHICKEN_MODE);
8647
8648 /* This is required by WaCatErrorRejectionIssue */
8649 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
8650 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
8651 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
8652
8653 for_each_pipe(pipe) {
8654 I915_WRITE(DSPCNTR(pipe),
8655 I915_READ(DSPCNTR(pipe)) |
8656 DISPPLANE_TRICKLE_FEED_DISABLE);
8657 intel_flush_display_plane(dev_priv, pipe);
8658 }
8659}
8660
8661static void g4x_init_clock_gating(struct drm_device *dev)
8662{
8663 struct drm_i915_private *dev_priv = dev->dev_private;
8664 uint32_t dspclk_gate;
8665
8666 I915_WRITE(RENCLK_GATE_D1, 0);
8667 I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
8668 GS_UNIT_CLOCK_GATE_DISABLE |
8669 CL_UNIT_CLOCK_GATE_DISABLE);
8670 I915_WRITE(RAMCLK_GATE_D, 0);
8671 dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
8672 OVRUNIT_CLOCK_GATE_DISABLE |
8673 OVCUNIT_CLOCK_GATE_DISABLE;
8674 if (IS_GM45(dev))
8675 dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
8676 I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
8677}
8678
8679static void crestline_init_clock_gating(struct drm_device *dev)
8680{
8681 struct drm_i915_private *dev_priv = dev->dev_private;
8682
8683 I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
8684 I915_WRITE(RENCLK_GATE_D2, 0);
8685 I915_WRITE(DSPCLK_GATE_D, 0);
8686 I915_WRITE(RAMCLK_GATE_D, 0);
8687 I915_WRITE16(DEUC, 0);
8688}
8689
8690static void broadwater_init_clock_gating(struct drm_device *dev)
8691{
8692 struct drm_i915_private *dev_priv = dev->dev_private;
8693
8694 I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
8695 I965_RCC_CLOCK_GATE_DISABLE |
8696 I965_RCPB_CLOCK_GATE_DISABLE |
8697 I965_ISC_CLOCK_GATE_DISABLE |
8698 I965_FBC_CLOCK_GATE_DISABLE);
8699 I915_WRITE(RENCLK_GATE_D2, 0);
8700}
8701
8702static void gen3_init_clock_gating(struct drm_device *dev)
8703{
8704 struct drm_i915_private *dev_priv = dev->dev_private;
8705 u32 dstate = I915_READ(D_STATE);
8706
8707 dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
8708 DSTATE_DOT_CLOCK_GATING;
8709 I915_WRITE(D_STATE, dstate);
8710}
8711
8712static void i85x_init_clock_gating(struct drm_device *dev)
8713{
8714 struct drm_i915_private *dev_priv = dev->dev_private;
8715
8716 I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
8717}
8718
8719static void i830_init_clock_gating(struct drm_device *dev)
8720{
8721 struct drm_i915_private *dev_priv = dev->dev_private;
8722
8723 I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
8724}
8725
8726static void ibx_init_clock_gating(struct drm_device *dev)
8727{
8728 struct drm_i915_private *dev_priv = dev->dev_private;
8729
8730 /*
8731 * On Ibex Peak and Cougar Point, we need to disable clock
8732 * gating for the panel power sequencer or it will fail to
8733 * start up when no ports are active.
8734 */
8735 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
8736}
8737
8738static void cpt_init_clock_gating(struct drm_device *dev)
8739{
8740 struct drm_i915_private *dev_priv = dev->dev_private;
8741 int pipe;
8742
8743 /*
8744 * On Ibex Peak and Cougar Point, we need to disable clock
8745 * gating for the panel power sequencer or it will fail to
8746 * start up when no ports are active.
8747 */
8748 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
8749 I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
8750 DPLS_EDP_PPS_FIX_DIS);
8751 /* Without this, mode sets may fail silently on FDI */
8752 for_each_pipe(pipe)
8753 I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
8754}
8755
8756static void ironlake_teardown_rc6(struct drm_device *dev)
8757{
8758 struct drm_i915_private *dev_priv = dev->dev_private;
8759
8760 if (dev_priv->renderctx) {
8761 i915_gem_object_unpin(dev_priv->renderctx);
8762 drm_gem_object_unreference(&dev_priv->renderctx->base);
8763 dev_priv->renderctx = NULL;
8764 }
8765
8766 if (dev_priv->pwrctx) {
8767 i915_gem_object_unpin(dev_priv->pwrctx);
8768 drm_gem_object_unreference(&dev_priv->pwrctx->base);
8769 dev_priv->pwrctx = NULL;
8770 }
8771}
8772
8773static void ironlake_disable_rc6(struct drm_device *dev)
8774{
8775 struct drm_i915_private *dev_priv = dev->dev_private;
8776
8777 if (I915_READ(PWRCTXA)) {
8778 /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
8779 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
8780 wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
8781 50);
8782
8783 I915_WRITE(PWRCTXA, 0);
8784 POSTING_READ(PWRCTXA);
8785
8786 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
8787 POSTING_READ(RSTDBYCTL);
8788 }
8789
8790 ironlake_teardown_rc6(dev);
8791}
8792
8793static int ironlake_setup_rc6(struct drm_device *dev)
8794{
8795 struct drm_i915_private *dev_priv = dev->dev_private;
8796
8797 if (dev_priv->renderctx == NULL)
8798 dev_priv->renderctx = intel_alloc_context_page(dev);
8799 if (!dev_priv->renderctx)
8800 return -ENOMEM;
8801
8802 if (dev_priv->pwrctx == NULL)
8803 dev_priv->pwrctx = intel_alloc_context_page(dev);
8804 if (!dev_priv->pwrctx) {
8805 ironlake_teardown_rc6(dev);
8806 return -ENOMEM;
8807 }
8808
8809 return 0;
8810}
8811
8812void ironlake_enable_rc6(struct drm_device *dev)
8813{
8814 struct drm_i915_private *dev_priv = dev->dev_private;
8815 int ret;
8816
8817 /* rc6 disabled by default due to repeated reports of hanging during
8818 * boot and resume.
8819 */
8820 if (!intel_enable_rc6(dev))
8821 return;
8822
8823 mutex_lock(&dev->struct_mutex);
8824 ret = ironlake_setup_rc6(dev);
8825 if (ret) {
8826 mutex_unlock(&dev->struct_mutex);
8827 return;
8828 }
8829
8830 /*
8831 * GPU can automatically power down the render unit if given a page
8832 * to save state.
8833 */
8834 ret = BEGIN_LP_RING(6);
8835 if (ret) {
8836 ironlake_teardown_rc6(dev);
8837 mutex_unlock(&dev->struct_mutex);
8838 return;
8839 }
8840
8841 OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
8842 OUT_RING(MI_SET_CONTEXT);
8843 OUT_RING(dev_priv->renderctx->gtt_offset |
8844 MI_MM_SPACE_GTT |
8845 MI_SAVE_EXT_STATE_EN |
8846 MI_RESTORE_EXT_STATE_EN |
8847 MI_RESTORE_INHIBIT);
8848 OUT_RING(MI_SUSPEND_FLUSH);
8849 OUT_RING(MI_NOOP);
8850 OUT_RING(MI_FLUSH);
8851 ADVANCE_LP_RING();
8852
8853 /*
8854	 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
8855	 * does an implicit flush; combined with the MI_FLUSH above, it should
8856	 * be safe to assume that renderctx is valid.
8857 */
8858 ret = intel_wait_ring_idle(LP_RING(dev_priv));
8859 if (ret) {
8860		DRM_ERROR("failed to enable ironlake power savings\n");
8861 ironlake_teardown_rc6(dev);
8862 mutex_unlock(&dev->struct_mutex);
8863 return;
8864 }
8865
8866 I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
8867 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
8868 mutex_unlock(&dev->struct_mutex);
8869}
8870
8871void intel_init_clock_gating(struct drm_device *dev)
8872{
8873 struct drm_i915_private *dev_priv = dev->dev_private;
8874
8875 dev_priv->display.init_clock_gating(dev);
8876
8877 if (dev_priv->display.init_pch_clock_gating)
8878 dev_priv->display.init_pch_clock_gating(dev);
8879}
8880
8881 6693 /* Set up chip specific display functions */
8882 6694 static void intel_init_display(struct drm_device *dev)
8883 6695 {
@@ -8887,32 +6699,20 @@ static void intel_init_display(struct drm_device *dev)
8887 6699 	if (HAS_PCH_SPLIT(dev)) {
8888 6700 		dev_priv->display.dpms = ironlake_crtc_dpms;
8889 6701 		dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
6702 dev_priv->display.off = ironlake_crtc_off;
8890 6703 		dev_priv->display.update_plane = ironlake_update_plane;
8891 6704 	} else {
8892 6705 		dev_priv->display.dpms = i9xx_crtc_dpms;
8893 6706 		dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
6707 dev_priv->display.off = i9xx_crtc_off;
8894 6708 		dev_priv->display.update_plane = i9xx_update_plane;
8895 6709 	}
8896 6710
8897 if (I915_HAS_FBC(dev)) {
8898 if (HAS_PCH_SPLIT(dev)) {
8899 dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
8900 dev_priv->display.enable_fbc = ironlake_enable_fbc;
8901 dev_priv->display.disable_fbc = ironlake_disable_fbc;
8902 } else if (IS_GM45(dev)) {
8903 dev_priv->display.fbc_enabled = g4x_fbc_enabled;
8904 dev_priv->display.enable_fbc = g4x_enable_fbc;
8905 dev_priv->display.disable_fbc = g4x_disable_fbc;
8906 } else if (IS_CRESTLINE(dev)) {
8907 dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
8908 dev_priv->display.enable_fbc = i8xx_enable_fbc;
8909 dev_priv->display.disable_fbc = i8xx_disable_fbc;
8910 }
8911 /* 855GM needs testing */
8912 }
8913
8914 6711 	/* Returns the core display clock speed */
8915 	if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
6712 	if (IS_VALLEYVIEW(dev))
6713 dev_priv->display.get_display_clock_speed =
6714 valleyview_get_display_clock_speed;
6715 else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
8916 6716 		dev_priv->display.get_display_clock_speed =
8917 6717 			i945_get_display_clock_speed;
8918 6718 	else if (IS_I915G(dev))
@@ -8934,124 +6734,27 @@ static void intel_init_display(struct drm_device *dev)
8934 6734 		dev_priv->display.get_display_clock_speed =
8935 6735 			i830_get_display_clock_speed;
8936 6736
8937 /* For FIFO watermark updates */
8938 6737 	if (HAS_PCH_SPLIT(dev)) {
8939 dev_priv->display.force_wake_get = __gen6_gt_force_wake_get;
8940 dev_priv->display.force_wake_put = __gen6_gt_force_wake_put;
8941
8942 /* IVB configs may use multi-threaded forcewake */
8943 if (IS_IVYBRIDGE(dev)) {
8944 u32 ecobus;
8945
8946 /* A small trick here - if the bios hasn't configured MT forcewake,
8947 * and if the device is in RC6, then force_wake_mt_get will not wake
8948 * the device and the ECOBUS read will return zero. Which will be
8949 * (correctly) interpreted by the test below as MT forcewake being
8950 * disabled.
8951 */
8952 mutex_lock(&dev->struct_mutex);
8953 __gen6_gt_force_wake_mt_get(dev_priv);
8954 ecobus = I915_READ_NOTRACE(ECOBUS);
8955 __gen6_gt_force_wake_mt_put(dev_priv);
8956 mutex_unlock(&dev->struct_mutex);
8957
8958 if (ecobus & FORCEWAKE_MT_ENABLE) {
8959 DRM_DEBUG_KMS("Using MT version of forcewake\n");
8960 dev_priv->display.force_wake_get =
8961 __gen6_gt_force_wake_mt_get;
8962 dev_priv->display.force_wake_put =
8963 __gen6_gt_force_wake_mt_put;
8964 }
8965 }
8966
8967 if (HAS_PCH_IBX(dev))
8968 dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
8969 else if (HAS_PCH_CPT(dev))
8970 dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating;
8971
8972 6738 		if (IS_GEN5(dev)) {
8973 if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
8974 dev_priv->display.update_wm = ironlake_update_wm;
8975 else {
8976 DRM_DEBUG_KMS("Failed to get proper latency. "
8977 "Disable CxSR\n");
8978 dev_priv->display.update_wm = NULL;
8979 }
8980 6739 			dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
8981 dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
8982 6740 			dev_priv->display.write_eld = ironlake_write_eld;
8983 6741 		} else if (IS_GEN6(dev)) {
8984 if (SNB_READ_WM0_LATENCY()) {
8985 dev_priv->display.update_wm = sandybridge_update_wm;
8986 dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
8987 } else {
8988 DRM_DEBUG_KMS("Failed to read display plane latency. "
8989 "Disable CxSR\n");
8990 dev_priv->display.update_wm = NULL;
8991 }
8992 6742 			dev_priv->display.fdi_link_train = gen6_fdi_link_train;
8993 dev_priv->display.init_clock_gating = gen6_init_clock_gating;
8994 6743 			dev_priv->display.write_eld = ironlake_write_eld;
8995 6744 		} else if (IS_IVYBRIDGE(dev)) {
8996 6745 			/* FIXME: detect B0+ stepping and use auto training */
8997 6746 			dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
8998 			if (SNB_READ_WM0_LATENCY()) {
8999 				dev_priv->display.update_wm = sandybridge_update_wm;
9000 				dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
9001 			} else {
9002 				DRM_DEBUG_KMS("Failed to read display plane latency. "
9003 					      "Disable CxSR\n");
9004 				dev_priv->display.update_wm = NULL;
9005 			}
9006 			dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
6747 			dev_priv->display.write_eld = ironlake_write_eld;
6748 		} else if (IS_HASWELL(dev)) {
6749 			dev_priv->display.fdi_link_train = hsw_fdi_link_train;
9007 6750 			dev_priv->display.write_eld = ironlake_write_eld;
9008 6751 		} else
9009 6752 			dev_priv->display.update_wm = NULL;
9010 	} else if (IS_PINEVIEW(dev)) {
9011 		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
9012 					    dev_priv->is_ddr3,
9013 					    dev_priv->fsb_freq,
9014 					    dev_priv->mem_freq)) {
9015 			DRM_INFO("failed to find known CxSR latency "
9016 				 "(found ddr%s fsb freq %d, mem freq %d), "
9017 				 "disabling CxSR\n",
9018 				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
9019 				 dev_priv->fsb_freq, dev_priv->mem_freq);
9020 			/* Disable CxSR and never update its watermark again */
9021 			pineview_disable_cxsr(dev);
9022 			dev_priv->display.update_wm = NULL;
9023 		} else
9024 			dev_priv->display.update_wm = pineview_update_wm;
9025 		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
6753 	} else if (IS_VALLEYVIEW(dev)) {
6754 		dev_priv->display.force_wake_get = vlv_force_wake_get;
6755 		dev_priv->display.force_wake_put = vlv_force_wake_put;
9026 6756 	} else if (IS_G4X(dev)) {
9027 6757 		dev_priv->display.write_eld = g4x_write_eld;
9028 dev_priv->display.update_wm = g4x_update_wm;
9029 dev_priv->display.init_clock_gating = g4x_init_clock_gating;
9030 } else if (IS_GEN4(dev)) {
9031 dev_priv->display.update_wm = i965_update_wm;
9032 if (IS_CRESTLINE(dev))
9033 dev_priv->display.init_clock_gating = crestline_init_clock_gating;
9034 else if (IS_BROADWATER(dev))
9035 dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
9036 } else if (IS_GEN3(dev)) {
9037 dev_priv->display.update_wm = i9xx_update_wm;
9038 dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
9039 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
9040 } else if (IS_I865G(dev)) {
9041 dev_priv->display.update_wm = i830_update_wm;
9042 dev_priv->display.init_clock_gating = i85x_init_clock_gating;
9043 dev_priv->display.get_fifo_size = i830_get_fifo_size;
9044 } else if (IS_I85X(dev)) {
9045 dev_priv->display.update_wm = i9xx_update_wm;
9046 dev_priv->display.get_fifo_size = i85x_get_fifo_size;
9047 dev_priv->display.init_clock_gating = i85x_init_clock_gating;
9048 } else {
9049 dev_priv->display.update_wm = i830_update_wm;
9050 dev_priv->display.init_clock_gating = i830_init_clock_gating;
9051 if (IS_845G(dev))
9052 dev_priv->display.get_fifo_size = i845_get_fifo_size;
9053 else
9054 dev_priv->display.get_fifo_size = i830_get_fifo_size;
9055 6758 	}
9056 6759
9057 6760 	/* Default just returns -ENODEV to indicate unsupported */
@@ -9090,7 +6793,7 @@ static void quirk_pipea_force(struct drm_device *dev)
9090 6793 	struct drm_i915_private *dev_priv = dev->dev_private;
9091 6794
9092 6795 	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
9093 	DRM_DEBUG_DRIVER("applying pipe a force quirk\n");
6796 	DRM_INFO("applying pipe a force quirk\n");
9094 6797 }
9095 6798
9096 6799 /*
@@ -9100,6 +6803,18 @@ static void quirk_ssc_force_disable(struct drm_device *dev)
9100 6803 {
9101 6804 	struct drm_i915_private *dev_priv = dev->dev_private;
9102 6805 	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
6806 DRM_INFO("applying lvds SSC disable quirk\n");
6807}
6808
6809/*
6810 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
6811 * brightness value
6812 */
6813static void quirk_invert_brightness(struct drm_device *dev)
6814{
6815 struct drm_i915_private *dev_priv = dev->dev_private;
6816 dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
6817 DRM_INFO("applying inverted panel brightness quirk\n");
9103 6818 }
9104 6819
9105 6820 struct intel_quirk {
@@ -9109,7 +6824,7 @@ struct intel_quirk {
9109 6824 	void (*hook)(struct drm_device *dev);
9110 6825 };
9111 6826
9112 struct intel_quirk intel_quirks[] = {
6827 static struct intel_quirk intel_quirks[] = {
9113 6828 	/* HP Mini needs pipe A force quirk (LP: #322104) */
9114 6829 	{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
9115 6830
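/* Each intel_quirk entry matches a PCI device ID plus subsystem vendor
 * and subsystem device ID, so a quirk can be scoped to one OEM machine
 * rather than to every part with the same GPU.
 */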
@@ -9134,6 +6849,9 @@ struct intel_quirk intel_quirks[] = {
9134 6849
9135 6850 	/* Sony Vaio Y cannot use SSC on LVDS */
9136 6851 	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
6852
6853 /* Acer Aspire 5734Z must invert backlight brightness */
6854 { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
9137 6855 };
9138 6856
9139 6857 static void intel_init_quirks(struct drm_device *dev)
@@ -9166,7 +6884,7 @@ static void i915_disable_vga(struct drm_device *dev)
9166 6884 		vga_reg = VGACNTRL;
9167 6885
9168 6886 	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
9169 	outb(1, VGA_SR_INDEX);
6887 	outb(SR01, VGA_SR_INDEX);
9170 6888 	sr1 = inb(VGA_SR_DATA);
9171 6889 	outb(sr1 | 1<<5, VGA_SR_DATA);
9172 6890 	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
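/* SR01 is the VGA sequencer clocking register; setting bit 5 turns the
 * screen off, which is what makes it safe to disable the VGA plane via
 * vga_reg just below.
 */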
@@ -9176,6 +6894,40 @@ static void i915_disable_vga(struct drm_device *dev)
9176 6894 	POSTING_READ(vga_reg);
9177 6895 }
9178 6896
6897static void ivb_pch_pwm_override(struct drm_device *dev)
6898{
6899 struct drm_i915_private *dev_priv = dev->dev_private;
6900
6901 /*
6902 * IVB has CPU eDP backlight regs too, set things up to let the
6903 * PCH regs control the backlight
6904 */
6905 I915_WRITE(BLC_PWM_CPU_CTL2, PWM_ENABLE);
6906 I915_WRITE(BLC_PWM_CPU_CTL, 0);
6907 I915_WRITE(BLC_PWM_PCH_CTL1, PWM_ENABLE | (1<<30));
6908}
6909
6910void intel_modeset_init_hw(struct drm_device *dev)
6911{
6912 struct drm_i915_private *dev_priv = dev->dev_private;
6913
6914 intel_init_clock_gating(dev);
6915
6916 if (IS_IRONLAKE_M(dev)) {
6917 ironlake_enable_drps(dev);
6918 ironlake_enable_rc6(dev);
6919 intel_init_emon(dev);
6920 }
6921
6922 if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
6923 gen6_enable_rps(dev_priv);
6924 gen6_update_ring_freq(dev_priv);
6925 }
6926
6927 if (IS_IVYBRIDGE(dev))
6928 ivb_pch_pwm_override(dev);
6929}
6930
9179 6931 void intel_modeset_init(struct drm_device *dev)
9180 6932 {
9181 6933 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -9189,10 +6941,14 @@ void intel_modeset_init(struct drm_device *dev)
9189 6941 	dev->mode_config.preferred_depth = 24;
9190 6942 	dev->mode_config.prefer_shadow = 1;
9191 6943
9192 	dev->mode_config.funcs = (void *)&intel_mode_funcs;
6944 	dev->mode_config.funcs = &intel_mode_funcs;
9193 6945
9194 6946 	intel_init_quirks(dev);
9195 6947
6948 intel_init_pm(dev);
6949
6950 intel_prepare_ddi(dev);
6951
9196 6952 	intel_init_display(dev);
9197 6953
9198 6954 	if (IS_GEN2(dev)) {
@@ -9217,22 +6973,12 @@ void intel_modeset_init(struct drm_device *dev)
9217 6973 			DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret);
9218 6974 	}
9219 6975
6976 intel_pch_pll_init(dev);
6977
9220 6978 	/* Just disable it once at startup */
9221 6979 	i915_disable_vga(dev);
9222 6980 	intel_setup_outputs(dev);
9223 6981
9224 intel_init_clock_gating(dev);
9225
9226 if (IS_IRONLAKE_M(dev)) {
9227 ironlake_enable_drps(dev);
9228 intel_init_emon(dev);
9229 }
9230
9231 if (IS_GEN6(dev) || IS_GEN7(dev)) {
9232 gen6_enable_rps(dev_priv);
9233 gen6_update_ring_freq(dev_priv);
9234 }
9235
9236 6982 	INIT_WORK(&dev_priv->idle_work, intel_idle_update);
9237 6983 	setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
9238 6984 		    (unsigned long)dev);
@@ -9240,8 +6986,7 @@ void intel_modeset_init(struct drm_device *dev)
9240 6986
9241 6987 void intel_modeset_gem_init(struct drm_device *dev)
9242 6988 {
9243 	if (IS_IRONLAKE_M(dev))
9244 		ironlake_enable_rc6(dev);
6989 	intel_modeset_init_hw(dev);
9245 6990
9246 6991 	intel_setup_overlay(dev);
9247 6992 }
@@ -9271,12 +7016,15 @@ void intel_modeset_cleanup(struct drm_device *dev)
9271 7016
9272 7017 	if (IS_IRONLAKE_M(dev))
9273 7018 		ironlake_disable_drps(dev);
9274 	if (IS_GEN6(dev) || IS_GEN7(dev))
7019 	if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev))
9275 7020 		gen6_disable_rps(dev);
9276 7021
9277 7022 	if (IS_IRONLAKE_M(dev))
9278 7023 		ironlake_disable_rc6(dev);
9279 7024
7025 	if (IS_VALLEYVIEW(dev))
7026 		vlv_init_dpio(dev);
7027
9280 7028 	mutex_unlock(&dev->struct_mutex);
9281 7029
9282 7030 	/* Disable the irq before mode object teardown, for the irq might
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 4b637919f74f..71c7096e3869 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -688,7 +688,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
688 688 	int lane_count, clock;
689 689 	int max_lane_count = intel_dp_max_lane_count(intel_dp);
690 690 	int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
691 	int bpp;
691 	int bpp, mode_rate;
692 692 	static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
693 693
694 694 	if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
@@ -702,24 +702,30 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
702 702 		mode->clock = intel_dp->panel_fixed_mode->clock;
703 703 	}
704 704
705 	DRM_DEBUG_KMS("DP link computation with max lane count %i "
706 		      "max bw %02x pixel clock %iKHz\n",
707 		      max_lane_count, bws[max_clock], mode->clock);
708
705 709 	if (!intel_dp_adjust_dithering(intel_dp, mode, adjusted_mode))
706 710 		return false;
707 711
708 712 	bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
713 	mode_rate = intel_dp_link_required(mode->clock, bpp);
709 714
710 715 	for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
711 716 		for (clock = 0; clock <= max_clock; clock++) {
712 717 			int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
713 718
714 			if (intel_dp_link_required(mode->clock, bpp)
715 			    <= link_avail) {
719 			if (mode_rate <= link_avail) {
716 720 				intel_dp->link_bw = bws[clock];
717 721 				intel_dp->lane_count = lane_count;
718 722 				adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
719 				DRM_DEBUG_KMS("Display port link bw %02x lane "
720 					      "count %d clock %d\n",
723 				DRM_DEBUG_KMS("DP link bw %02x lane "
724 					      "count %d clock %d bpp %d\n",
721 725 					      intel_dp->link_bw, intel_dp->lane_count,
722 					      adjusted_mode->clock);
726 					      adjusted_mode->clock, bpp);
727 				DRM_DEBUG_KMS("DP link bw required %i available %i\n",
728 					      mode_rate, link_avail);
723 729 				return true;
724 730 			}
725 731 		}
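/* Rough arithmetic behind the loop above, assuming the usual 8b/10b DP
 * accounting: a 2.7GHz lane carries about 270MB/s of pixel data and
 * mode_rate is pixel clock * bpp / 8. E.g. 1080p60 at 24bpp needs about
 * 445MB/s, which fits on two 2.7GHz lanes (540MB/s) but not on two
 * 1.62GHz lanes (324MB/s).
 */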
@@ -1149,6 +1155,7 @@ static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
1149 1155 	DRM_DEBUG_KMS("Turn eDP power off\n");
1150 1156
1151 1157 	WARN(intel_dp->want_panel_vdd, "Cannot turn power off while VDD is on\n");
1158 	ironlake_panel_vdd_off_sync(intel_dp); /* finish any pending work */
1152 1159
1153 1160 	pp = ironlake_get_pp_control(dev_priv);
1154 1161 	pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
@@ -1954,6 +1961,23 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
1954 1961 	return false;
1955 1962 }
1956 1963
1964static void
1965intel_dp_probe_oui(struct intel_dp *intel_dp)
1966{
1967 u8 buf[3];
1968
1969 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
1970 return;
1971
1972 if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
1973 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
1974 buf[0], buf[1], buf[2]);
1975
1976 if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3))
1977 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
1978 buf[0], buf[1], buf[2]);
1979}
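/* The OUI read back here is a 3-byte IEEE Organizationally Unique
 * Identifier, so these debug lines effectively name the vendor of the
 * sink and of any branch device in the path.
 */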
1980
1957 1981 static bool
1958 1982 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
1959 1983 {
@@ -2137,6 +2161,8 @@ intel_dp_detect(struct drm_connector *connector, bool force)
2137 2161 	if (status != connector_status_connected)
2138 2162 		return status;
2139 2163
2164 intel_dp_probe_oui(intel_dp);
2165
2140 2166 	if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
2141 2167 		intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
2142 2168 	} else {
@@ -2438,6 +2464,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
2438 2464 	}
2439 2465
2440 2466 	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
2467
2441 2468 	connector->interlace_allowed = true;
2442 2469 	connector->doublescan_allowed = 0;
2443 2470
@@ -2483,6 +2510,13 @@ intel_dp_init(struct drm_device *dev, int output_reg)
2483 2510 	pp_off = I915_READ(PCH_PP_OFF_DELAYS);
2484 2511 	pp_div = I915_READ(PCH_PP_DIVISOR);
2485 2512
2513 if (!pp_on || !pp_off || !pp_div) {
2514 DRM_INFO("bad panel power sequencing delays, disabling panel\n");
2515 intel_dp_encoder_destroy(&intel_dp->base.base);
2516 intel_dp_destroy(&intel_connector->base);
2517 return;
2518 }
2519
2486 2520 	/* Pull timing values out of registers */
2487 2521 	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
2488 2522 		PANEL_POWER_UP_DELAY_SHIFT;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 715afa153025..3e0918834e7e 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -45,6 +45,18 @@
45 45 	ret__; \
46 46 })
47 47
48#define wait_for_atomic_us(COND, US) ({ \
49 int i, ret__ = -ETIMEDOUT; \
50 for (i = 0; i < (US); i++) { \
51 if ((COND)) { \
52 ret__ = 0; \
53 break; \
54 } \
55 udelay(1); \
56 } \
57 ret__; \
58})
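A minimal usage sketch for the new macro (the register and bit names here
are hypothetical, not from this patch):

	/* Poll a status bit for up to 100us without sleeping. */
	if (wait_for_atomic_us(I915_READ(EXAMPLE_STATUS_REG) &
			       EXAMPLE_DONE_BIT, 100))
		DRM_ERROR("example status bit did not assert\n");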
59
48 60 #define wait_for(COND, MS) _wait_for(COND, MS, 1)
49 61 #define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
50 62
@@ -171,8 +183,8 @@ struct intel_crtc {
171 183 	bool cursor_visible;
172 184 	unsigned int bpp;
173 185
174 	bool no_pll; /* tertiary pipe for IVB */
175 	bool use_pll_a;
186 	/* We can share PLLs across outputs if the timings match */
187 	struct intel_pch_pll *pch_pll;
176 188 };
177 189
178 190 struct intel_plane {
@@ -196,6 +208,25 @@ struct intel_plane {
196 208 			     struct drm_intel_sprite_colorkey *key);
197 209 };
198 210
211struct intel_watermark_params {
212 unsigned long fifo_size;
213 unsigned long max_wm;
214 unsigned long default_wm;
215 unsigned long guard_size;
216 unsigned long cacheline_size;
217};
218
219struct cxsr_latency {
220 int is_desktop;
221 int is_ddr3;
222 unsigned long fsb_freq;
223 unsigned long mem_freq;
224 unsigned long display_sr;
225 unsigned long display_hpll_disable;
226 unsigned long cursor_sr;
227 unsigned long cursor_hpll_disable;
228};
229
199 230 #define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
200 231 #define to_intel_connector(x) container_of(x, struct intel_connector, base)
201 232 #define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
@@ -207,6 +238,8 @@ struct intel_plane {
207 238 #define DIP_TYPE_AVI 0x82
208 239 #define DIP_VERSION_AVI 0x2
209 240 #define DIP_LEN_AVI 13
241 #define DIP_AVI_PR_1 0
242 #define DIP_AVI_PR_2 1
210 243
211 244 #define DIP_TYPE_SPD 0x83
212 245 #define DIP_VERSION_SPD 0x1
@@ -240,23 +273,36 @@ struct dip_infoframe {
240 273 		uint8_t ITC_EC_Q_SC;
241 274 		/* PB4 - VIC 6:0 */
242 275 		uint8_t VIC;
243 		/* PB5 - PR 3:0 */
244 		uint8_t PR;
276 		/* PB5 - YQ 7:6, CN 5:4, PR 3:0 */
277 		uint8_t YQ_CN_PR;
245 278 		/* PB6 to PB13 */
246 279 		uint16_t top_bar_end;
247 280 		uint16_t bottom_bar_start;
248 281 		uint16_t left_bar_end;
249 282 		uint16_t right_bar_start;
250 		} avi;
283 		} __attribute__ ((packed)) avi;
251 284 		struct {
252 285 			uint8_t vn[8];
253 286 			uint8_t pd[16];
254 287 			uint8_t sdi;
255 		} spd;
288 		} __attribute__ ((packed)) spd;
256 289 		uint8_t payload[27];
257 290 	} __attribute__ ((packed)) body;
258 291 } __attribute__((packed));
259 292
293struct intel_hdmi {
294 struct intel_encoder base;
295 u32 sdvox_reg;
296 int ddc_bus;
297 int ddi_port;
298 uint32_t color_range;
299 bool has_hdmi_sink;
300 bool has_audio;
301 enum hdmi_force_audio force_audio;
302 void (*write_infoframe)(struct drm_encoder *encoder,
303 struct dip_infoframe *frame);
304};
305
260 306 static inline struct drm_crtc *
261 307 intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
262 308 {
@@ -296,8 +342,13 @@ extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector)
296 342
297 343 extern void intel_crt_init(struct drm_device *dev);
298 344 extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
299 void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
300 extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
345 extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
346 extern void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
347 					 struct drm_display_mode *adjusted_mode);
348 extern void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder);
349 extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
350 extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
351 			    bool is_sdvob);
301 352 extern void intel_dvo_init(struct drm_device *dev);
302 353 extern void intel_tv_init(struct drm_device *dev);
303 354 extern void intel_mark_busy(struct drm_device *dev,
@@ -311,6 +362,10 @@ extern bool intel_dpd_is_edp(struct drm_device *dev);
311 362 extern void intel_edp_link_config(struct intel_encoder *, int *, int *);
312 363 extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder);
313 364 extern int intel_plane_init(struct drm_device *dev, enum pipe pipe);
365extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
366 enum plane plane);
367
368void intel_sanitize_pm(struct drm_device *dev);
314 369
315 370 /* intel_panel.c */
316 371 extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
@@ -368,12 +423,9 @@ extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
368 423 extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
369 424 				    u16 *blue, int regno);
370 425 extern void intel_enable_clock_gating(struct drm_device *dev);
426 extern void ironlake_disable_rc6(struct drm_device *dev);
371 427 extern void ironlake_enable_drps(struct drm_device *dev);
372 428 extern void ironlake_disable_drps(struct drm_device *dev);
373 extern void gen6_enable_rps(struct drm_i915_private *dev_priv);
374 extern void gen6_update_ring_freq(struct drm_i915_private *dev_priv);
375 extern void gen6_disable_rps(struct drm_device *dev);
376 extern void intel_init_emon(struct drm_device *dev);
377 429
378 430 extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
379 431 				      struct drm_i915_gem_object *obj,
@@ -411,16 +463,43 @@ extern void intel_init_clock_gating(struct drm_device *dev);
411 463 extern void intel_write_eld(struct drm_encoder *encoder,
412 464 			    struct drm_display_mode *mode);
413 465 extern void intel_cpt_verify_modeset(struct drm_device *dev, int pipe);
466extern void intel_prepare_ddi(struct drm_device *dev);
467extern void hsw_fdi_link_train(struct drm_crtc *crtc);
468extern void intel_ddi_init(struct drm_device *dev, enum port port);
414 469
415 470 /* For use by IVB LP watermark workaround in intel_sprite.c */
416 extern void sandybridge_update_wm(struct drm_device *dev);
471 extern void intel_update_watermarks(struct drm_device *dev);
417 472 extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
418 473 					   uint32_t sprite_width,
419 474 					   int pixel_size);
475extern void intel_update_linetime_watermarks(struct drm_device *dev, int pipe,
476 struct drm_display_mode *mode);
420 477
421 478 extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
422 479 				     struct drm_file *file_priv);
423 480 extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
424 481 				     struct drm_file *file_priv);
425 482
483extern u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg);
484
485/* Power-related functions, located in intel_pm.c */
486extern void intel_init_pm(struct drm_device *dev);
487/* FBC */
488extern bool intel_fbc_enabled(struct drm_device *dev);
489extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
490extern void intel_update_fbc(struct drm_device *dev);
491/* IPS */
492extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
493extern void intel_gpu_ips_teardown(void);
494
495extern void gen6_enable_rps(struct drm_i915_private *dev_priv);
496extern void gen6_update_ring_freq(struct drm_i915_private *dev_priv);
497extern void gen6_disable_rps(struct drm_device *dev);
498extern void intel_init_emon(struct drm_device *dev);
499
500extern void intel_ddi_dpms(struct drm_encoder *encoder, int mode);
501extern void intel_ddi_mode_set(struct drm_encoder *encoder,
502 struct drm_display_mode *mode,
503 struct drm_display_mode *adjusted_mode);
504
426 505 #endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 020a7d7f744d..60ba50b956f2 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -243,7 +243,7 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
243 243 	 * that's not the case.
244 244 	 */
245 245 	intel_ddc_get_modes(connector,
246 			    &dev_priv->gmbus[GMBUS_PORT_DPC].adapter);
246 			    intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPC));
247 247 	if (!list_empty(&connector->probed_modes))
248 248 		return 1;
249 249
@@ -375,7 +375,7 @@ void intel_dvo_init(struct drm_device *dev)
375 375 	 * special cases, but otherwise default to what's defined
376 376 	 * in the spec.
377 377 	 */
378 	if (dvo->gpio != 0)
378 	if (intel_gmbus_is_port_valid(dvo->gpio))
379 379 		gpio = dvo->gpio;
380 380 	else if (dvo->type == INTEL_DVO_CHIP_LVDS)
381 381 		gpio = GMBUS_PORT_SSC;
@@ -386,7 +386,7 @@ void intel_dvo_init(struct drm_device *dev)
386 386 	 * It appears that everything is on GPIOE except for panels
387 387 	 * on i830 laptops, which are on GPIOB (DVOA).
388 388 	 */
389 	i2c = &dev_priv->gmbus[gpio].adapter;
389 	i2c = intel_gmbus_get_adapter(dev_priv, gpio);
390 390
391 391 	intel_dvo->dev = *dvo;
392 392 	if (!dvo->dev_ops->init(&intel_dvo->dev, i2c))
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 6e9ee33fd412..bf8690720a0c 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -94,7 +94,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
94 94 	mutex_lock(&dev->struct_mutex);
95 95
96 96 	/* Flush everything out, we'll be doing GTT only from now on */
97 	ret = intel_pin_and_fence_fb_obj(dev, obj, false);
97 	ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
98 98 	if (ret) {
99 99 		DRM_ERROR("failed to pin fb: %d\n", ret);
100 100 		goto out_unref;
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 2d7f47b56b6a..2ead3bf7c21d 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -37,19 +37,7 @@
37#include "i915_drm.h" 37#include "i915_drm.h"
38#include "i915_drv.h" 38#include "i915_drv.h"
39 39
40 struct intel_hdmi {
40 struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
41 struct intel_encoder base;
42 u32 sdvox_reg;
43 int ddc_bus;
44 uint32_t color_range;
45 bool has_hdmi_sink;
46 bool has_audio;
47 enum hdmi_force_audio force_audio;
48 void (*write_infoframe)(struct drm_encoder *encoder,
49 struct dip_infoframe *frame);
50};
51
52static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
53 41 {
54 42 	return container_of(encoder, struct intel_hdmi, base.base);
55 43 }
@@ -75,108 +63,246 @@ void intel_dip_infoframe_csum(struct dip_infoframe *frame)
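/* For the checksum line below: HDMI infoframe checksums are defined so
 * that the header and payload bytes sum to zero mod 256, hence
 * checksum = 0x100 - (sum of the remaining bytes).
 */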
75 63 	frame->checksum = 0x100 - sum;
76 64 }
77 65
78 static u32 intel_infoframe_index(struct dip_infoframe *frame)
66 static u32 g4x_infoframe_index(struct dip_infoframe *frame)
79 67 {
80 	u32 flags = 0;
81
82 68 	switch (frame->type) {
83 69 	case DIP_TYPE_AVI:
84 		flags |= VIDEO_DIP_SELECT_AVI;
85 		break;
70 		return VIDEO_DIP_SELECT_AVI;
86 71 	case DIP_TYPE_SPD:
87 		flags |= VIDEO_DIP_SELECT_SPD;
88 		break;
72 		return VIDEO_DIP_SELECT_SPD;
89 73 	default:
90 74 		DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
91 		break;
75 		return 0;
92 76 	}
93
94 	return flags;
95 77 }
96 78
97 static u32 intel_infoframe_flags(struct dip_infoframe *frame)
79 static u32 g4x_infoframe_enable(struct dip_infoframe *frame)
98 80 {
99 	u32 flags = 0;
81 	switch (frame->type) {
82 case DIP_TYPE_AVI:
83 return VIDEO_DIP_ENABLE_AVI;
84 case DIP_TYPE_SPD:
85 return VIDEO_DIP_ENABLE_SPD;
86 default:
87 DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
88 return 0;
89 }
90}
100 91
92static u32 hsw_infoframe_enable(struct dip_infoframe *frame)
93{
101 94 	switch (frame->type) {
102 95 	case DIP_TYPE_AVI:
103 		flags |= VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_FREQ_VSYNC;
104 		break;
96 		return VIDEO_DIP_ENABLE_AVI_HSW;
105 97 	case DIP_TYPE_SPD:
106 		flags |= VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_FREQ_VSYNC;
107 		break;
98 		return VIDEO_DIP_ENABLE_SPD_HSW;
108 99 	default:
109 100 		DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
110 		break;
101 		return 0;
111 102 	}
103 }
112 104
113 	return flags;
105 static u32 hsw_infoframe_data_reg(struct dip_infoframe *frame, enum pipe pipe)
106{
107 switch (frame->type) {
108 case DIP_TYPE_AVI:
109 return HSW_TVIDEO_DIP_AVI_DATA(pipe);
110 case DIP_TYPE_SPD:
111 return HSW_TVIDEO_DIP_SPD_DATA(pipe);
112 default:
113 DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
114 return 0;
115 }
114 116 }
115 117
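/* All the *_write_infoframe() variants below follow one hardware recipe:
 * select the DIP buffer in the control register, stream the frame into
 * the data register 32 bits at a time, then re-enable the frame with a
 * once-per-vsync send frequency. The HSW path differs only in using
 * per-frame-type enable bits and dedicated per-pipe data registers.
 */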
116 static void i9xx_write_infoframe(struct drm_encoder *encoder,
118 static void g4x_write_infoframe(struct drm_encoder *encoder,
117 119 				struct dip_infoframe *frame)
118 120 {
119 121 	uint32_t *data = (uint32_t *)frame;
120 122 	struct drm_device *dev = encoder->dev;
121 123 	struct drm_i915_private *dev_priv = dev->dev_private;
122 124 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
123 	u32 port, flags, val = I915_READ(VIDEO_DIP_CTL);
125 	u32 val = I915_READ(VIDEO_DIP_CTL);
124 126 	unsigned i, len = DIP_HEADER_SIZE + frame->len;
125 127
126
127 	/* XXX first guess at handling video port, is this correct? */
128 	val &= ~VIDEO_DIP_PORT_MASK;
128 129 	if (intel_hdmi->sdvox_reg == SDVOB)
129 		port = VIDEO_DIP_PORT_B;
130 		val |= VIDEO_DIP_PORT_B;
130 131 	else if (intel_hdmi->sdvox_reg == SDVOC)
131 		port = VIDEO_DIP_PORT_C;
132 		val |= VIDEO_DIP_PORT_C;
132 133 	else
133 134 		return;
134 135
135 	flags = intel_infoframe_index(frame);
136 	val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
137 	val |= g4x_infoframe_index(frame);
136 138
137 	val &= ~VIDEO_DIP_SELECT_MASK;
139 	val &= ~g4x_infoframe_enable(frame);
140 	val |= VIDEO_DIP_ENABLE;
138 141
139 	I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | val | port | flags);
142 	I915_WRITE(VIDEO_DIP_CTL, val);
140 143
141 144 	for (i = 0; i < len; i += 4) {
142 145 		I915_WRITE(VIDEO_DIP_DATA, *data);
143 146 		data++;
144 147 	}
145 148
146 	flags |= intel_infoframe_flags(frame);
149 	val |= g4x_infoframe_enable(frame);
150 	val &= ~VIDEO_DIP_FREQ_MASK;
151 	val |= VIDEO_DIP_FREQ_VSYNC;
147 152
148 	I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | val | port | flags);
153 	I915_WRITE(VIDEO_DIP_CTL, val);
149 154 }
150 155
151 static void ironlake_write_infoframe(struct drm_encoder *encoder,
156 static void ibx_write_infoframe(struct drm_encoder *encoder,
152 157 				struct dip_infoframe *frame)
153 158 {
154 159 	uint32_t *data = (uint32_t *)frame;
155 160 	struct drm_device *dev = encoder->dev;
156 161 	struct drm_i915_private *dev_priv = dev->dev_private;
157 	struct drm_crtc *crtc = encoder->crtc;
158 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
162 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
163 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
159 164 	int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
160 165 	unsigned i, len = DIP_HEADER_SIZE + frame->len;
161 	u32 flags, val = I915_READ(reg);
166 	u32 val = I915_READ(reg);
167
168 val &= ~VIDEO_DIP_PORT_MASK;
169 switch (intel_hdmi->sdvox_reg) {
170 case HDMIB:
171 val |= VIDEO_DIP_PORT_B;
172 break;
173 case HDMIC:
174 val |= VIDEO_DIP_PORT_C;
175 break;
176 case HDMID:
177 val |= VIDEO_DIP_PORT_D;
178 break;
179 default:
180 return;
181 }
162 182
163 183 	intel_wait_for_vblank(dev, intel_crtc->pipe);
164 184
165 	flags = intel_infoframe_index(frame);
185 	val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
186 val |= g4x_infoframe_index(frame);
187
188 val &= ~g4x_infoframe_enable(frame);
189 val |= VIDEO_DIP_ENABLE;
190
191 I915_WRITE(reg, val);
192
193 for (i = 0; i < len; i += 4) {
194 I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
195 data++;
196 }
197
198 val |= g4x_infoframe_enable(frame);
199 val &= ~VIDEO_DIP_FREQ_MASK;
200 val |= VIDEO_DIP_FREQ_VSYNC;
201
202 I915_WRITE(reg, val);
203}
204
205static void cpt_write_infoframe(struct drm_encoder *encoder,
206 struct dip_infoframe *frame)
207{
208 uint32_t *data = (uint32_t *)frame;
209 struct drm_device *dev = encoder->dev;
210 struct drm_i915_private *dev_priv = dev->dev_private;
211 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
212 int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
213 unsigned i, len = DIP_HEADER_SIZE + frame->len;
214 u32 val = I915_READ(reg);
215
216 intel_wait_for_vblank(dev, intel_crtc->pipe);
166 217
167 val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ 218 val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
219 val |= g4x_infoframe_index(frame);
220
221 /* The DIP control register spec says that we need to update the AVI
222 * infoframe without clearing its enable bit */
223 if (frame->type == DIP_TYPE_AVI)
224 val |= VIDEO_DIP_ENABLE_AVI;
225 else
226 val &= ~g4x_infoframe_enable(frame);
227
228 val |= VIDEO_DIP_ENABLE;
168 229
169 I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags); 230 I915_WRITE(reg, val);
170 231
171 for (i = 0; i < len; i += 4) { 232 for (i = 0; i < len; i += 4) {
172 I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data); 233 I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
173 data++; 234 data++;
174 } 235 }
175 236
176 flags |= intel_infoframe_flags(frame); 237 val |= g4x_infoframe_enable(frame);
238 val &= ~VIDEO_DIP_FREQ_MASK;
239 val |= VIDEO_DIP_FREQ_VSYNC;
177 240
178 I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags); 241 I915_WRITE(reg, val);
179} 242}
243
244static void vlv_write_infoframe(struct drm_encoder *encoder,
245 struct dip_infoframe *frame)
246{
247 uint32_t *data = (uint32_t *)frame;
248 struct drm_device *dev = encoder->dev;
249 struct drm_i915_private *dev_priv = dev->dev_private;
250 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
251 int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
252 unsigned i, len = DIP_HEADER_SIZE + frame->len;
253 u32 val = I915_READ(reg);
254
255 intel_wait_for_vblank(dev, intel_crtc->pipe);
256
257 val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
258 val |= g4x_infoframe_index(frame);
259
260 val &= ~g4x_infoframe_enable(frame);
261 val |= VIDEO_DIP_ENABLE;
262
263 I915_WRITE(reg, val);
264
265 for (i = 0; i < len; i += 4) {
266 I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
267 data++;
268 }
269
270 val |= g4x_infoframe_enable(frame);
271 val &= ~VIDEO_DIP_FREQ_MASK;
272 val |= VIDEO_DIP_FREQ_VSYNC;
273
274 I915_WRITE(reg, val);
275}
276
277static void hsw_write_infoframe(struct drm_encoder *encoder,
278 struct dip_infoframe *frame)
279{
280 uint32_t *data = (uint32_t *)frame;
281 struct drm_device *dev = encoder->dev;
282 struct drm_i915_private *dev_priv = dev->dev_private;
283 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
284 u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->pipe);
285 u32 data_reg = hsw_infoframe_data_reg(frame, intel_crtc->pipe);
286 unsigned int i, len = DIP_HEADER_SIZE + frame->len;
287 u32 val = I915_READ(ctl_reg);
288
289 if (data_reg == 0)
290 return;
291
292 intel_wait_for_vblank(dev, intel_crtc->pipe);
293
294 val &= ~hsw_infoframe_enable(frame);
295 I915_WRITE(ctl_reg, val);
296
297 for (i = 0; i < len; i += 4) {
298 I915_WRITE(data_reg + i, *data);
299 data++;
300 }
301
302 val |= hsw_infoframe_enable(frame);
303 I915_WRITE(ctl_reg, val);
304}
305
180static void intel_set_infoframe(struct drm_encoder *encoder, 306static void intel_set_infoframe(struct drm_encoder *encoder,
181 struct dip_infoframe *frame) 307 struct dip_infoframe *frame)
182{ 308{
@@ -189,7 +315,8 @@ static void intel_set_infoframe(struct drm_encoder *encoder,
189 intel_hdmi->write_infoframe(encoder, frame); 315 intel_hdmi->write_infoframe(encoder, frame);
190} 316}
191 317
192static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder) 318void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
319 struct drm_display_mode *adjusted_mode)
193{ 320{
194 struct dip_infoframe avi_if = { 321 struct dip_infoframe avi_if = {
195 .type = DIP_TYPE_AVI, 322 .type = DIP_TYPE_AVI,
@@ -197,10 +324,13 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
197 .len = DIP_LEN_AVI, 324 .len = DIP_LEN_AVI,
198 }; 325 };
199 326
327 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
328 avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2;
329
200 intel_set_infoframe(encoder, &avi_if); 330 intel_set_infoframe(encoder, &avi_if);
201} 331}
202 332
203static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder) 333void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
204{ 334{
205 struct dip_infoframe spd_if; 335 struct dip_infoframe spd_if;
206 336
@@ -221,8 +351,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
221{ 351{
222 struct drm_device *dev = encoder->dev; 352 struct drm_device *dev = encoder->dev;
223 struct drm_i915_private *dev_priv = dev->dev_private; 353 struct drm_i915_private *dev_priv = dev->dev_private;
224 struct drm_crtc *crtc = encoder->crtc; 354 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
225 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
226 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 355 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
227 u32 sdvox; 356 u32 sdvox;
228 357
@@ -259,7 +388,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
259 I915_WRITE(intel_hdmi->sdvox_reg, sdvox); 388 I915_WRITE(intel_hdmi->sdvox_reg, sdvox);
260 POSTING_READ(intel_hdmi->sdvox_reg); 389 POSTING_READ(intel_hdmi->sdvox_reg);
261 390
262 intel_hdmi_set_avi_infoframe(encoder); 391 intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
263 intel_hdmi_set_spd_infoframe(encoder); 392 intel_hdmi_set_spd_infoframe(encoder);
264} 393}
265 394
@@ -334,7 +463,8 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
334 intel_hdmi->has_hdmi_sink = false; 463 intel_hdmi->has_hdmi_sink = false;
335 intel_hdmi->has_audio = false; 464 intel_hdmi->has_audio = false;
336 edid = drm_get_edid(connector, 465 edid = drm_get_edid(connector,
337 &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter); 466 intel_gmbus_get_adapter(dev_priv,
467 intel_hdmi->ddc_bus));
338 468
339 if (edid) { 469 if (edid) {
340 if (edid->input & DRM_EDID_INPUT_DIGITAL) { 470 if (edid->input & DRM_EDID_INPUT_DIGITAL) {
@@ -367,7 +497,8 @@ static int intel_hdmi_get_modes(struct drm_connector *connector)
367 */ 497 */
368 498
369 return intel_ddc_get_modes(connector, 499 return intel_ddc_get_modes(connector,
370 &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter); 500 intel_gmbus_get_adapter(dev_priv,
501 intel_hdmi->ddc_bus));
371} 502}
372 503
373static bool 504static bool
@@ -379,7 +510,8 @@ intel_hdmi_detect_audio(struct drm_connector *connector)
379 bool has_audio = false; 510 bool has_audio = false;
380 511
381 edid = drm_get_edid(connector, 512 edid = drm_get_edid(connector,
382 &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter); 513 intel_gmbus_get_adapter(dev_priv,
514 intel_hdmi->ddc_bus));
383 if (edid) { 515 if (edid) {
384 if (edid->input & DRM_EDID_INPUT_DIGITAL) 516 if (edid->input & DRM_EDID_INPUT_DIGITAL)
385 has_audio = drm_detect_monitor_audio(edid); 517 has_audio = drm_detect_monitor_audio(edid);
@@ -393,8 +525,8 @@ intel_hdmi_detect_audio(struct drm_connector *connector)
393 525
394static int 526static int
395intel_hdmi_set_property(struct drm_connector *connector, 527intel_hdmi_set_property(struct drm_connector *connector,
396 struct drm_property *property, 528 struct drm_property *property,
397 uint64_t val) 529 uint64_t val)
398{ 530{
399 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); 531 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
400 struct drm_i915_private *dev_priv = connector->dev->dev_private; 532 struct drm_i915_private *dev_priv = connector->dev->dev_private;
@@ -453,6 +585,14 @@ static void intel_hdmi_destroy(struct drm_connector *connector)
453 kfree(connector); 585 kfree(connector);
454} 586}
455 587
588static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs_hsw = {
589 .dpms = intel_ddi_dpms,
590 .mode_fixup = intel_hdmi_mode_fixup,
591 .prepare = intel_encoder_prepare,
592 .mode_set = intel_ddi_mode_set,
593 .commit = intel_encoder_commit,
594};
595
456static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = { 596static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
457 .dpms = intel_hdmi_dpms, 597 .dpms = intel_hdmi_dpms,
458 .mode_fixup = intel_hdmi_mode_fixup, 598 .mode_fixup = intel_hdmi_mode_fixup,
@@ -542,20 +682,60 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
542 intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT); 682 intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
543 intel_hdmi->ddc_bus = GMBUS_PORT_DPD; 683 intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
544 dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS; 684 dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
685 } else if (sdvox_reg == DDI_BUF_CTL(PORT_B)) {
686 DRM_DEBUG_DRIVER("LPT: detected output on DDI B\n");
687 intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
688 intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
689 intel_hdmi->ddi_port = PORT_B;
690 dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
691 } else if (sdvox_reg == DDI_BUF_CTL(PORT_C)) {
692 DRM_DEBUG_DRIVER("LPT: detected output on DDI C\n");
693 intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
694 intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
695 intel_hdmi->ddi_port = PORT_C;
696 dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
697 } else if (sdvox_reg == DDI_BUF_CTL(PORT_D)) {
698 DRM_DEBUG_DRIVER("LPT: detected output on DDI D\n");
699 intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
700 intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
701 intel_hdmi->ddi_port = PORT_D;
702 dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
703 } else {
704 /* If we get an unknown sdvox_reg, things are pretty much broken
705 * in a way that we should let the kernel know about it */
706 BUG();
545 } 707 }
546 708
547 intel_hdmi->sdvox_reg = sdvox_reg; 709 intel_hdmi->sdvox_reg = sdvox_reg;
548 710
549 if (!HAS_PCH_SPLIT(dev)) { 711 if (!HAS_PCH_SPLIT(dev)) {
550 intel_hdmi->write_infoframe = i9xx_write_infoframe; 712 intel_hdmi->write_infoframe = g4x_write_infoframe;
551 I915_WRITE(VIDEO_DIP_CTL, 0); 713 I915_WRITE(VIDEO_DIP_CTL, 0);
714 } else if (IS_VALLEYVIEW(dev)) {
715 intel_hdmi->write_infoframe = vlv_write_infoframe;
716 for_each_pipe(i)
717 I915_WRITE(VLV_TVIDEO_DIP_CTL(i), 0);
718 } else if (IS_HASWELL(dev)) {
719 /* FIXME: Haswell has a new set of DIP frame registers, but we are
720 * just doing the minimal required for HDMI to work at this stage.
721 */
722 intel_hdmi->write_infoframe = hsw_write_infoframe;
723 for_each_pipe(i)
724 I915_WRITE(HSW_TVIDEO_DIP_CTL(i), 0);
725 } else if (HAS_PCH_IBX(dev)) {
726 intel_hdmi->write_infoframe = ibx_write_infoframe;
727 for_each_pipe(i)
728 I915_WRITE(TVIDEO_DIP_CTL(i), 0);
552 } else { 729 } else {
553 intel_hdmi->write_infoframe = ironlake_write_infoframe; 730 intel_hdmi->write_infoframe = cpt_write_infoframe;
554 for_each_pipe(i) 731 for_each_pipe(i)
555 I915_WRITE(TVIDEO_DIP_CTL(i), 0); 732 I915_WRITE(TVIDEO_DIP_CTL(i), 0);
556 } 733 }
557 734
558 drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs); 735 if (IS_HASWELL(dev))
736 drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs_hsw);
737 else
738 drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
559 739
560 intel_hdmi_add_properties(intel_hdmi, connector); 740 intel_hdmi_add_properties(intel_hdmi, connector);
561 741
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 8fdc95700218..4a9707dd0f9c 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -35,6 +35,20 @@
35#include "i915_drm.h" 35#include "i915_drm.h"
36#include "i915_drv.h" 36#include "i915_drv.h"
37 37
38struct gmbus_port {
39 const char *name;
40 int reg;
41};
42
43static const struct gmbus_port gmbus_ports[] = {
44 { "ssc", GPIOB },
45 { "vga", GPIOA },
46 { "panel", GPIOC },
47 { "dpc", GPIOD },
48 { "dpb", GPIOE },
49 { "dpd", GPIOF },
50};
51
38/* Intel GPIO access functions */ 52/* Intel GPIO access functions */
39 53
40#define I2C_RISEFALL_TIME 10 54#define I2C_RISEFALL_TIME 10
@@ -49,10 +63,7 @@ void
49intel_i2c_reset(struct drm_device *dev) 63intel_i2c_reset(struct drm_device *dev)
50{ 64{
51 struct drm_i915_private *dev_priv = dev->dev_private; 65 struct drm_i915_private *dev_priv = dev->dev_private;
52 if (HAS_PCH_SPLIT(dev)) 66 I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0);
53 I915_WRITE(PCH_GMBUS0, 0);
54 else
55 I915_WRITE(GMBUS0, 0);
56} 67}
57 68
58static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable) 69static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
@@ -140,63 +151,173 @@ static void set_data(void *data, int state_high)
140 POSTING_READ(bus->gpio_reg); 151 POSTING_READ(bus->gpio_reg);
141} 152}
142 153
143static bool 154static int
155intel_gpio_pre_xfer(struct i2c_adapter *adapter)
156{
157 struct intel_gmbus *bus = container_of(adapter,
158 struct intel_gmbus,
159 adapter);
160 struct drm_i915_private *dev_priv = bus->dev_priv;
161
162 intel_i2c_reset(dev_priv->dev);
163 intel_i2c_quirk_set(dev_priv, true);
164 set_data(bus, 1);
165 set_clock(bus, 1);
166 udelay(I2C_RISEFALL_TIME);
167 return 0;
168}
169
170static void
171intel_gpio_post_xfer(struct i2c_adapter *adapter)
172{
173 struct intel_gmbus *bus = container_of(adapter,
174 struct intel_gmbus,
175 adapter);
176 struct drm_i915_private *dev_priv = bus->dev_priv;
177
178 set_data(bus, 1);
179 set_clock(bus, 1);
180 intel_i2c_quirk_set(dev_priv, false);
181}
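The old intel_i2c_quirk_xfer() wrapper disappears because i2c-algo-bit already brackets every transfer with pre_xfer/post_xfer hooks; the quirk enable/disable simply moves into those callbacks. A kernel-style sketch of the hookup (not standalone; wire_up_quirk_hooks() is a hypothetical helper, the hook fields come from <linux/i2c-algo-bit.h>):

    #include <linux/i2c-algo-bit.h>

    /* i2c_bit_algo.master_xfer() calls pre_xfer before and post_xfer
     * after each transfer, so the GPIO quirk setup/teardown now lives
     * in the algorithm layer instead of a bespoke xfer wrapper. */
    static void wire_up_quirk_hooks(struct i2c_algo_bit_data *algo)
    {
    	algo->pre_xfer  = intel_gpio_pre_xfer;  /* quirk on, lines high */
    	algo->post_xfer = intel_gpio_post_xfer; /* lines high, quirk off */
    }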
182
183static void
144intel_gpio_setup(struct intel_gmbus *bus, u32 pin) 184intel_gpio_setup(struct intel_gmbus *bus, u32 pin)
145{ 185{
146 struct drm_i915_private *dev_priv = bus->dev_priv; 186 struct drm_i915_private *dev_priv = bus->dev_priv;
147 static const int map_pin_to_reg[] = {
148 0,
149 GPIOB,
150 GPIOA,
151 GPIOC,
152 GPIOD,
153 GPIOE,
154 0,
155 GPIOF,
156 };
157 struct i2c_algo_bit_data *algo; 187 struct i2c_algo_bit_data *algo;
158 188
159 if (pin >= ARRAY_SIZE(map_pin_to_reg) || !map_pin_to_reg[pin])
160 return false;
161
162 algo = &bus->bit_algo; 189 algo = &bus->bit_algo;
163 190
164 bus->gpio_reg = map_pin_to_reg[pin]; 191 /* -1 to map pin pair to gmbus index */
165 if (HAS_PCH_SPLIT(dev_priv->dev)) 192 bus->gpio_reg = dev_priv->gpio_mmio_base + gmbus_ports[pin - 1].reg;
166 bus->gpio_reg += PCH_GPIOA - GPIOA;
167 193
168 bus->adapter.algo_data = algo; 194 bus->adapter.algo_data = algo;
169 algo->setsda = set_data; 195 algo->setsda = set_data;
170 algo->setscl = set_clock; 196 algo->setscl = set_clock;
171 algo->getsda = get_data; 197 algo->getsda = get_data;
172 algo->getscl = get_clock; 198 algo->getscl = get_clock;
199 algo->pre_xfer = intel_gpio_pre_xfer;
200 algo->post_xfer = intel_gpio_post_xfer;
173 algo->udelay = I2C_RISEFALL_TIME; 201 algo->udelay = I2C_RISEFALL_TIME;
174 algo->timeout = usecs_to_jiffies(2200); 202 algo->timeout = usecs_to_jiffies(2200);
175 algo->data = bus; 203 algo->data = bus;
204}
205
206static int
207gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
208 u32 gmbus1_index)
209{
210 int reg_offset = dev_priv->gpio_mmio_base;
211 u16 len = msg->len;
212 u8 *buf = msg->buf;
213
214 I915_WRITE(GMBUS1 + reg_offset,
215 gmbus1_index |
216 GMBUS_CYCLE_WAIT |
217 (len << GMBUS_BYTE_COUNT_SHIFT) |
218 (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
219 GMBUS_SLAVE_READ | GMBUS_SW_RDY);
220 while (len) {
221 int ret;
222 u32 val, loop = 0;
223 u32 gmbus2;
224
225 ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) &
226 (GMBUS_SATOER | GMBUS_HW_RDY),
227 50);
228 if (ret)
229 return -ETIMEDOUT;
230 if (gmbus2 & GMBUS_SATOER)
231 return -ENXIO;
232
233 val = I915_READ(GMBUS3 + reg_offset);
234 do {
235 *buf++ = val & 0xff;
236 val >>= 8;
237 } while (--len && ++loop < 4);
238 }
176 239
177 return true; 240 return 0;
178} 241}
179 242
180static int 243static int
181intel_i2c_quirk_xfer(struct intel_gmbus *bus, 244gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
182 struct i2c_msg *msgs,
183 int num)
184{ 245{
185 struct drm_i915_private *dev_priv = bus->dev_priv; 246 int reg_offset = dev_priv->gpio_mmio_base;
247 u16 len = msg->len;
248 u8 *buf = msg->buf;
249 u32 val, loop;
250
251 val = loop = 0;
252 while (len && loop < 4) {
253 val |= *buf++ << (8 * loop++);
254 len -= 1;
255 }
256
257 I915_WRITE(GMBUS3 + reg_offset, val);
258 I915_WRITE(GMBUS1 + reg_offset,
259 GMBUS_CYCLE_WAIT |
260 (msg->len << GMBUS_BYTE_COUNT_SHIFT) |
261 (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
262 GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
263 while (len) {
264 int ret;
265 u32 gmbus2;
266
267 val = loop = 0;
268 do {
269 val |= *buf++ << (8 * loop);
270 } while (--len && ++loop < 4);
271
272 I915_WRITE(GMBUS3 + reg_offset, val);
273
274 ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) &
275 (GMBUS_SATOER | GMBUS_HW_RDY),
276 50);
277 if (ret)
278 return -ETIMEDOUT;
279 if (gmbus2 & GMBUS_SATOER)
280 return -ENXIO;
281 }
282 return 0;
283}
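GMBUS3 is a 4-byte data window: writes pack up to four message bytes into one dword, least-significant byte first, and reads unpack in the same order. A standalone round-trip sketch of the packing used by gmbus_xfer_write() and gmbus_xfer_read():

    #include <stdint.h>
    #include <stdio.h>

    /* Pack up to 4 bytes into a GMBUS3-style dword, LSB first. */
    static uint32_t gmbus3_pack(const uint8_t *buf, unsigned len)
    {
    	uint32_t val = 0;
    	unsigned loop = 0;

    	while (len-- && loop < 4)
    		val |= (uint32_t)*buf++ << (8 * loop++);
    	return val;
    }

    /* Unpack a dword back into bytes, mirroring the read loop. */
    static void gmbus3_unpack(uint32_t val, uint8_t *buf, unsigned len)
    {
    	unsigned loop = 0;

    	while (len-- && loop++ < 4) {
    		*buf++ = val & 0xff;
    		val >>= 8;
    	}
    }

    int main(void)
    {
    	const uint8_t msg[3] = { 0xaa, 0xbb, 0xcc };
    	uint8_t out[3] = { 0 };
    	uint32_t val = gmbus3_pack(msg, 3);

    	gmbus3_unpack(val, out, 3);
    	printf("%#010x -> %02x %02x %02x\n",
    	       (unsigned)val, out[0], out[1], out[2]);
    	return 0;
    }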
284
285/*
286 * The gmbus controller can combine a 1 or 2 byte write with a read that
287 * immediately follows it by using an "INDEX" cycle.
288 */
289static bool
290gmbus_is_index_read(struct i2c_msg *msgs, int i, int num)
291{
292 return (i + 1 < num &&
293 !(msgs[i].flags & I2C_M_RD) && msgs[i].len <= 2 &&
294 (msgs[i + 1].flags & I2C_M_RD));
295}
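The predicate above treats a 1- or 2-byte write immediately followed by a read as a single fused INDEX transaction, which is exactly the shape of a typical EDID fetch. A standalone demo with a struct modeled on <linux/i2c.h> (only the two fields the check needs):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define I2C_M_RD 0x0001 /* same value as <linux/i2c.h> */

    struct msg { uint16_t flags; uint16_t len; };

    /* Mirror of gmbus_is_index_read(): short write + read = INDEX cycle. */
    static bool is_index_read(const struct msg *msgs, int i, int num)
    {
    	return i + 1 < num &&
    	       !(msgs[i].flags & I2C_M_RD) && msgs[i].len <= 2 &&
    	       (msgs[i + 1].flags & I2C_M_RD);
    }

    int main(void)
    {
    	/* EDID access: write one offset byte, then read a 128-byte block. */
    	struct msg edid[2] = { { 0, 1 }, { I2C_M_RD, 128 } };

    	printf("fuse as index read: %s\n",
    	       is_index_read(edid, 0, 2) ? "yes" : "no");
    	return 0;
    }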
296
297static int
298gmbus_xfer_index_read(struct drm_i915_private *dev_priv, struct i2c_msg *msgs)
299{
300 int reg_offset = dev_priv->gpio_mmio_base;
301 u32 gmbus1_index = 0;
302 u32 gmbus5 = 0;
186 int ret; 303 int ret;
187 304
188 intel_i2c_reset(dev_priv->dev); 305 if (msgs[0].len == 2)
306 gmbus5 = GMBUS_2BYTE_INDEX_EN |
307 msgs[0].buf[1] | (msgs[0].buf[0] << 8);
308 if (msgs[0].len == 1)
309 gmbus1_index = GMBUS_CYCLE_INDEX |
310 (msgs[0].buf[0] << GMBUS_SLAVE_INDEX_SHIFT);
189 311
190 intel_i2c_quirk_set(dev_priv, true); 312 /* GMBUS5 holds 16-bit index */
191 set_data(bus, 1); 313 if (gmbus5)
192 set_clock(bus, 1); 314 I915_WRITE(GMBUS5 + reg_offset, gmbus5);
193 udelay(I2C_RISEFALL_TIME);
194 315
195 ret = i2c_bit_algo.master_xfer(&bus->adapter, msgs, num); 316 ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus1_index);
196 317
197 set_data(bus, 1); 318 /* Clear GMBUS5 after each index transfer */
198 set_clock(bus, 1); 319 if (gmbus5)
199 intel_i2c_quirk_set(dev_priv, false); 320 I915_WRITE(GMBUS5 + reg_offset, 0);
200 321
201 return ret; 322 return ret;
202} 323}
@@ -210,117 +331,108 @@ gmbus_xfer(struct i2c_adapter *adapter,
210 struct intel_gmbus, 331 struct intel_gmbus,
211 adapter); 332 adapter);
212 struct drm_i915_private *dev_priv = bus->dev_priv; 333 struct drm_i915_private *dev_priv = bus->dev_priv;
213 int i, reg_offset, ret; 334 int i, reg_offset;
335 int ret = 0;
214 336
215 mutex_lock(&dev_priv->gmbus_mutex); 337 mutex_lock(&dev_priv->gmbus_mutex);
216 338
217 if (bus->force_bit) { 339 if (bus->force_bit) {
218 ret = intel_i2c_quirk_xfer(bus, msgs, num); 340 ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
219 goto out; 341 goto out;
220 } 342 }
221 343
222 reg_offset = HAS_PCH_SPLIT(dev_priv->dev) ? PCH_GMBUS0 - GMBUS0 : 0; 344 reg_offset = dev_priv->gpio_mmio_base;
223 345
224 I915_WRITE(GMBUS0 + reg_offset, bus->reg0); 346 I915_WRITE(GMBUS0 + reg_offset, bus->reg0);
225 347
226 for (i = 0; i < num; i++) { 348 for (i = 0; i < num; i++) {
227 u16 len = msgs[i].len; 349 u32 gmbus2;
228 u8 *buf = msgs[i].buf; 350
229 351 if (gmbus_is_index_read(msgs, i, num)) {
230 if (msgs[i].flags & I2C_M_RD) { 352 ret = gmbus_xfer_index_read(dev_priv, &msgs[i]);
231 I915_WRITE(GMBUS1 + reg_offset, 353 i += 1; /* set i to the index of the read xfer */
232 GMBUS_CYCLE_WAIT | 354 } else if (msgs[i].flags & I2C_M_RD) {
233 (i + 1 == num ? GMBUS_CYCLE_STOP : 0) | 355 ret = gmbus_xfer_read(dev_priv, &msgs[i], 0);
234 (len << GMBUS_BYTE_COUNT_SHIFT) |
235 (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
236 GMBUS_SLAVE_READ | GMBUS_SW_RDY);
237 POSTING_READ(GMBUS2+reg_offset);
238 do {
239 u32 val, loop = 0;
240
241 if (wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
242 goto timeout;
243 if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
244 goto clear_err;
245
246 val = I915_READ(GMBUS3 + reg_offset);
247 do {
248 *buf++ = val & 0xff;
249 val >>= 8;
250 } while (--len && ++loop < 4);
251 } while (len);
252 } else { 356 } else {
253 u32 val, loop; 357 ret = gmbus_xfer_write(dev_priv, &msgs[i]);
254
255 val = loop = 0;
256 do {
257 val |= *buf++ << (8 * loop);
258 } while (--len && ++loop < 4);
259
260 I915_WRITE(GMBUS3 + reg_offset, val);
261 I915_WRITE(GMBUS1 + reg_offset,
262 GMBUS_CYCLE_WAIT |
263 (i + 1 == num ? GMBUS_CYCLE_STOP : 0) |
264 (msgs[i].len << GMBUS_BYTE_COUNT_SHIFT) |
265 (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
266 GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
267 POSTING_READ(GMBUS2+reg_offset);
268
269 while (len) {
270 if (wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
271 goto timeout;
272 if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
273 goto clear_err;
274
275 val = loop = 0;
276 do {
277 val |= *buf++ << (8 * loop);
278 } while (--len && ++loop < 4);
279
280 I915_WRITE(GMBUS3 + reg_offset, val);
281 POSTING_READ(GMBUS2+reg_offset);
282 }
283 } 358 }
284 359
285 if (i + 1 < num && wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), 50)) 360 if (ret == -ETIMEDOUT)
361 goto timeout;
362 if (ret == -ENXIO)
363 goto clear_err;
364
365 ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) &
366 (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE),
367 50);
368 if (ret)
286 goto timeout; 369 goto timeout;
287 if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER) 370 if (gmbus2 & GMBUS_SATOER)
288 goto clear_err; 371 goto clear_err;
289 } 372 }
290 373
291 goto done; 374 /* Generate a STOP condition on the bus. Note that gmbus can't generate
375 * a STOP on the very first cycle. To simplify the code we
376 * unconditionally generate the STOP condition with an additional gmbus
377 * cycle. */
378 I915_WRITE(GMBUS1 + reg_offset, GMBUS_CYCLE_STOP | GMBUS_SW_RDY);
379
380 /* Mark the GMBUS interface as disabled after waiting for idle.
381 * We will re-enable it at the start of the next xfer,
382 * till then let it sleep.
383 */
384 if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0,
385 10)) {
386 DRM_DEBUG_KMS("GMBUS [%s] timed out waiting for idle\n",
387 adapter->name);
388 ret = -ETIMEDOUT;
389 }
390 I915_WRITE(GMBUS0 + reg_offset, 0);
391 ret = ret ?: i;
392 goto out;
292 393
293clear_err: 394clear_err:
395 /*
396 * Wait for bus to IDLE before clearing NAK.
397 * If we clear the NAK while bus is still active, then it will stay
398 * active and the next transaction may fail.
399 */
400 if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0,
401 10))
402 DRM_DEBUG_KMS("GMBUS [%s] timed out after NAK\n",
403 adapter->name);
404
294 /* Toggle the Software Clear Interrupt bit. This has the effect 405 /* Toggle the Software Clear Interrupt bit. This has the effect
295 * of resetting the GMBUS controller and so clearing the 406 * of resetting the GMBUS controller and so clearing the
296 * BUS_ERROR raised by the slave's NAK. 407 * BUS_ERROR raised by the slave's NAK.
297 */ 408 */
298 I915_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT); 409 I915_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT);
299 I915_WRITE(GMBUS1 + reg_offset, 0); 410 I915_WRITE(GMBUS1 + reg_offset, 0);
411 I915_WRITE(GMBUS0 + reg_offset, 0);
300 412
301done: 413 DRM_DEBUG_KMS("GMBUS [%s] NAK for addr: %04x %c(%d)\n",
302 /* Mark the GMBUS interface as disabled after waiting for idle. 414 adapter->name, msgs[i].addr,
303 * We will re-enable it at the start of the next xfer, 415 (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len);
304 * till then let it sleep. 416
417 /*
418 * If no ACK is received during the address phase of a transaction,
419 * the adapter must report -ENXIO.
420 * It is not clear what to return if no ACK is received at other times.
421 * So, we always return -ENXIO in all NAK cases, to ensure we send
422 * it at least during the one case that is specified.
305 */ 423 */
306 if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0, 10)) 424 ret = -ENXIO;
307 DRM_INFO("GMBUS timed out waiting for idle\n");
308 I915_WRITE(GMBUS0 + reg_offset, 0);
309 ret = i;
310 goto out; 425 goto out;
311 426
312timeout: 427timeout:
313 DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d [%s]\n", 428 DRM_INFO("GMBUS [%s] timed out, falling back to bit banging on pin %d\n",
314 bus->reg0 & 0xff, bus->adapter.name); 429 bus->adapter.name, bus->reg0 & 0xff);
315 I915_WRITE(GMBUS0 + reg_offset, 0); 430 I915_WRITE(GMBUS0 + reg_offset, 0);
316 431
317 /* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */ 432 /* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
318 if (!bus->has_gpio) { 433 bus->force_bit = true;
319 ret = -EIO; 434 ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
320 } else { 435
321 bus->force_bit = true;
322 ret = intel_i2c_quirk_xfer(bus, msgs, num);
323 }
324out: 436out:
325 mutex_unlock(&dev_priv->gmbus_mutex); 437 mutex_unlock(&dev_priv->gmbus_mutex);
326 return ret; 438 return ret;
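The consolidated error paths above funnel everything into two errnos: a GMBUS that never raises HW_RDY yields -ETIMEDOUT and triggers the bit-bang fallback, while SATOER (a slave NAK) yields -ENXIO, matching what the I2C core specifies for an address-phase NAK. A standalone sketch of that mapping; the GMBUS2 bit value is a placeholder, the real one lives in i915_reg.h:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    #define GMBUS_SATOER (1u << 10) /* placeholder: slave NAKed */

    /* Mirror of the helpers' policy: timeout first, then NAK, then OK. */
    static int gmbus_status_to_errno(int timed_out, uint32_t gmbus2)
    {
    	if (timed_out)
    		return -ETIMEDOUT; /* caller falls back to bit-banging */
    	if (gmbus2 & GMBUS_SATOER)
    		return -ENXIO;     /* what i2c adapters must report on NAK */
    	return 0;
    }

    int main(void)
    {
    	printf("%d %d %d\n",
    	       gmbus_status_to_errno(1, 0),
    	       gmbus_status_to_errno(0, GMBUS_SATOER),
    	       gmbus_status_to_errno(0, 0));
    	return 0;
    }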
@@ -346,35 +458,26 @@ static const struct i2c_algorithm gmbus_algorithm = {
346 */ 458 */
347int intel_setup_gmbus(struct drm_device *dev) 459int intel_setup_gmbus(struct drm_device *dev)
348{ 460{
349 static const char *names[GMBUS_NUM_PORTS] = {
350 "disabled",
351 "ssc",
352 "vga",
353 "panel",
354 "dpc",
355 "dpb",
356 "reserved",
357 "dpd",
358 };
359 struct drm_i915_private *dev_priv = dev->dev_private; 461 struct drm_i915_private *dev_priv = dev->dev_private;
360 int ret, i; 462 int ret, i;
361 463
362 dev_priv->gmbus = kcalloc(GMBUS_NUM_PORTS, sizeof(struct intel_gmbus), 464 if (HAS_PCH_SPLIT(dev))
363 GFP_KERNEL); 465 dev_priv->gpio_mmio_base = PCH_GPIOA - GPIOA;
364 if (dev_priv->gmbus == NULL) 466 else
365 return -ENOMEM; 467 dev_priv->gpio_mmio_base = 0;
366 468
367 mutex_init(&dev_priv->gmbus_mutex); 469 mutex_init(&dev_priv->gmbus_mutex);
368 470
369 for (i = 0; i < GMBUS_NUM_PORTS; i++) { 471 for (i = 0; i < GMBUS_NUM_PORTS; i++) {
370 struct intel_gmbus *bus = &dev_priv->gmbus[i]; 472 struct intel_gmbus *bus = &dev_priv->gmbus[i];
473 u32 port = i + 1; /* +1 to map gmbus index to pin pair */
371 474
372 bus->adapter.owner = THIS_MODULE; 475 bus->adapter.owner = THIS_MODULE;
373 bus->adapter.class = I2C_CLASS_DDC; 476 bus->adapter.class = I2C_CLASS_DDC;
374 snprintf(bus->adapter.name, 477 snprintf(bus->adapter.name,
375 sizeof(bus->adapter.name), 478 sizeof(bus->adapter.name),
376 "i915 gmbus %s", 479 "i915 gmbus %s",
377 names[i]); 480 gmbus_ports[i].name);
378 481
379 bus->adapter.dev.parent = &dev->pdev->dev; 482 bus->adapter.dev.parent = &dev->pdev->dev;
380 bus->dev_priv = dev_priv; 483 bus->dev_priv = dev_priv;
@@ -385,13 +488,13 @@ int intel_setup_gmbus(struct drm_device *dev)
385 goto err; 488 goto err;
386 489
387 /* By default use a conservative clock rate */ 490 /* By default use a conservative clock rate */
388 bus->reg0 = i | GMBUS_RATE_100KHZ; 491 bus->reg0 = port | GMBUS_RATE_100KHZ;
389 492
390 bus->has_gpio = intel_gpio_setup(bus, i); 493 /* gmbus seems to be broken on i830 */
391 494 if (IS_I830(dev))
392 /* XXX force bit banging until GMBUS is fully debugged */
393 if (bus->has_gpio)
394 bus->force_bit = true; 495 bus->force_bit = true;
496
497 intel_gpio_setup(bus, port);
395 } 498 }
396 499
397 intel_i2c_reset(dev_priv->dev); 500 intel_i2c_reset(dev_priv->dev);
@@ -403,11 +506,18 @@ err:
403 struct intel_gmbus *bus = &dev_priv->gmbus[i]; 506 struct intel_gmbus *bus = &dev_priv->gmbus[i];
404 i2c_del_adapter(&bus->adapter); 507 i2c_del_adapter(&bus->adapter);
405 } 508 }
406 kfree(dev_priv->gmbus);
407 dev_priv->gmbus = NULL;
408 return ret; 509 return ret;
409} 510}
410 511
512struct i2c_adapter *intel_gmbus_get_adapter(struct drm_i915_private *dev_priv,
513 unsigned port)
514{
515 WARN_ON(!intel_gmbus_is_port_valid(port));
516 /* -1 to map pin pair to gmbus index */
517 return (intel_gmbus_is_port_valid(port)) ?
518 &dev_priv->gmbus[port - 1].adapter : NULL;
519}
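The paired "+1"/"-1" comments exist because VBT pin pair numbers are 1-based while the gmbus array is 0-based; a port is valid exactly when it maps into the six-entry gmbus_ports table. A standalone sketch, assuming intel_gmbus_is_port_valid() has the obvious range-check shape (the real macro is in the driver headers):

    #include <stdbool.h>
    #include <stdio.h>

    #define GMBUS_NUM_PORTS 6 /* ssc, vga, panel, dpc, dpb, dpd */

    /* Assumed shape of intel_gmbus_is_port_valid(): pins 1..6 are valid. */
    static bool port_valid(unsigned port)
    {
    	return port >= 1 && port <= GMBUS_NUM_PORTS;
    }

    static int port_to_index(unsigned port)
    {
    	return port - 1; /* the "-1 to map pin pair to gmbus index" */
    }

    int main(void)
    {
    	for (unsigned port = 0; port <= GMBUS_NUM_PORTS + 1; port++)
    		printf("port %u -> %s (index %d)\n", port,
    		       port_valid(port) ? "valid" : "invalid",
    		       port_valid(port) ? port_to_index(port) : -1);
    	return 0;
    }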
520
411void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed) 521void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed)
412{ 522{
413 struct intel_gmbus *bus = to_intel_gmbus(adapter); 523 struct intel_gmbus *bus = to_intel_gmbus(adapter);
@@ -419,8 +529,7 @@ void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
419{ 529{
420 struct intel_gmbus *bus = to_intel_gmbus(adapter); 530 struct intel_gmbus *bus = to_intel_gmbus(adapter);
421 531
422 if (bus->has_gpio) 532 bus->force_bit = force_bit;
423 bus->force_bit = force_bit;
424} 533}
425 534
426void intel_teardown_gmbus(struct drm_device *dev) 535void intel_teardown_gmbus(struct drm_device *dev)
@@ -435,7 +544,4 @@ void intel_teardown_gmbus(struct drm_device *dev)
435 struct intel_gmbus *bus = &dev_priv->gmbus[i]; 544 struct intel_gmbus *bus = &dev_priv->gmbus[i];
436 i2c_del_adapter(&bus->adapter); 545 i2c_del_adapter(&bus->adapter);
437 } 546 }
438
439 kfree(dev_priv->gmbus);
440 dev_priv->gmbus = NULL;
441} 547}
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 9c71183629c2..9dee82350def 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -480,7 +480,7 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
480 480
481static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id) 481static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id)
482{ 482{
483 DRM_DEBUG_KMS("Skipping forced modeset for %s\n", id->ident); 483 DRM_INFO("Skipping forced modeset for %s\n", id->ident);
484 return 1; 484 return 1;
485} 485}
486 486
@@ -628,7 +628,7 @@ static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
628 628
629static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id) 629static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
630{ 630{
631 DRM_DEBUG_KMS("Skipping LVDS initialization for %s\n", id->ident); 631 DRM_INFO("Skipping LVDS initialization for %s\n", id->ident);
632 return 1; 632 return 1;
633} 633}
634 634
@@ -851,8 +851,8 @@ static bool lvds_is_present_in_vbt(struct drm_device *dev,
851 child->device_type != DEVICE_TYPE_LFP) 851 child->device_type != DEVICE_TYPE_LFP)
852 continue; 852 continue;
853 853
854 if (child->i2c_pin) 854 if (intel_gmbus_is_port_valid(child->i2c_pin))
855 *i2c_pin = child->i2c_pin; 855 *i2c_pin = child->i2c_pin;
856 856
857 /* However, we cannot trust the BIOS writers to populate 857 /* However, we cannot trust the BIOS writers to populate
858 * the VBT correctly. Since LVDS requires additional 858 * the VBT correctly. Since LVDS requires additional
@@ -993,7 +993,8 @@ bool intel_lvds_init(struct drm_device *dev)
993 * preferred mode is the right one. 993 * preferred mode is the right one.
994 */ 994 */
995 intel_lvds->edid = drm_get_edid(connector, 995 intel_lvds->edid = drm_get_edid(connector,
996 &dev_priv->gmbus[pin].adapter); 996 intel_gmbus_get_adapter(dev_priv,
997 pin));
997 if (intel_lvds->edid) { 998 if (intel_lvds->edid) {
998 if (drm_add_edid_modes(connector, 999 if (drm_add_edid_modes(connector,
999 intel_lvds->edid)) { 1000 intel_lvds->edid)) {
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index d1928e79d9b6..d67ec3a51e42 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -56,7 +56,8 @@ bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus)
56 } 56 }
57 }; 57 };
58 58
59 return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 2) == 2; 59 return i2c_transfer(intel_gmbus_get_adapter(dev_priv, ddc_bus),
60 msgs, 2) == 2;
60} 61}
61 62
62/** 63/**
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 289140bc83cb..18bd0af855dc 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -25,6 +25,8 @@
25 * 25 *
26 */ 26 */
27 27
28#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
29
28#include <linux/acpi.h> 30#include <linux/acpi.h>
29#include <linux/acpi_io.h> 31#include <linux/acpi_io.h>
30#include <acpi/video.h> 32#include <acpi/video.h>
@@ -149,7 +151,7 @@ struct opregion_asle {
149static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) 151static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
150{ 152{
151 struct drm_i915_private *dev_priv = dev->dev_private; 153 struct drm_i915_private *dev_priv = dev->dev_private;
152 struct opregion_asle *asle = dev_priv->opregion.asle; 154 struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
153 u32 max; 155 u32 max;
154 156
155 if (!(bclp & ASLE_BCLP_VALID)) 157 if (!(bclp & ASLE_BCLP_VALID))
@@ -161,7 +163,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
161 163
162 max = intel_panel_get_max_backlight(dev); 164 max = intel_panel_get_max_backlight(dev);
163 intel_panel_set_backlight(dev, bclp * max / 255); 165 intel_panel_set_backlight(dev, bclp * max / 255);
164 asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID; 166 iowrite32((bclp*0x64)/0xff | ASLE_CBLV_VALID, &asle->cblv);
165 167
166 return 0; 168 return 0;
167} 169}
@@ -198,14 +200,14 @@ static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
198void intel_opregion_asle_intr(struct drm_device *dev) 200void intel_opregion_asle_intr(struct drm_device *dev)
199{ 201{
200 struct drm_i915_private *dev_priv = dev->dev_private; 202 struct drm_i915_private *dev_priv = dev->dev_private;
201 struct opregion_asle *asle = dev_priv->opregion.asle; 203 struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
202 u32 asle_stat = 0; 204 u32 asle_stat = 0;
203 u32 asle_req; 205 u32 asle_req;
204 206
205 if (!asle) 207 if (!asle)
206 return; 208 return;
207 209
208 asle_req = asle->aslc & ASLE_REQ_MSK; 210 asle_req = ioread32(&asle->aslc) & ASLE_REQ_MSK;
209 211
210 if (!asle_req) { 212 if (!asle_req) {
211 DRM_DEBUG_DRIVER("non asle set request??\n"); 213 DRM_DEBUG_DRIVER("non asle set request??\n");
@@ -213,31 +215,31 @@ void intel_opregion_asle_intr(struct drm_device *dev)
213 } 215 }
214 216
215 if (asle_req & ASLE_SET_ALS_ILLUM) 217 if (asle_req & ASLE_SET_ALS_ILLUM)
216 asle_stat |= asle_set_als_illum(dev, asle->alsi); 218 asle_stat |= asle_set_als_illum(dev, ioread32(&asle->alsi));
217 219
218 if (asle_req & ASLE_SET_BACKLIGHT) 220 if (asle_req & ASLE_SET_BACKLIGHT)
219 asle_stat |= asle_set_backlight(dev, asle->bclp); 221 asle_stat |= asle_set_backlight(dev, ioread32(&asle->bclp));
220 222
221 if (asle_req & ASLE_SET_PFIT) 223 if (asle_req & ASLE_SET_PFIT)
222 asle_stat |= asle_set_pfit(dev, asle->pfit); 224 asle_stat |= asle_set_pfit(dev, ioread32(&asle->pfit));
223 225
224 if (asle_req & ASLE_SET_PWM_FREQ) 226 if (asle_req & ASLE_SET_PWM_FREQ)
225 asle_stat |= asle_set_pwm_freq(dev, asle->pfmb); 227 asle_stat |= asle_set_pwm_freq(dev, ioread32(&asle->pfmb));
226 228
227 asle->aslc = asle_stat; 229 iowrite32(asle_stat, &asle->aslc);
228} 230}
229 231
230void intel_opregion_gse_intr(struct drm_device *dev) 232void intel_opregion_gse_intr(struct drm_device *dev)
231{ 233{
232 struct drm_i915_private *dev_priv = dev->dev_private; 234 struct drm_i915_private *dev_priv = dev->dev_private;
233 struct opregion_asle *asle = dev_priv->opregion.asle; 235 struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
234 u32 asle_stat = 0; 236 u32 asle_stat = 0;
235 u32 asle_req; 237 u32 asle_req;
236 238
237 if (!asle) 239 if (!asle)
238 return; 240 return;
239 241
240 asle_req = asle->aslc & ASLE_REQ_MSK; 242 asle_req = ioread32(&asle->aslc) & ASLE_REQ_MSK;
241 243
242 if (!asle_req) { 244 if (!asle_req) {
243 DRM_DEBUG_DRIVER("non asle set request??\n"); 245 DRM_DEBUG_DRIVER("non asle set request??\n");
@@ -250,7 +252,7 @@ void intel_opregion_gse_intr(struct drm_device *dev)
250 } 252 }
251 253
252 if (asle_req & ASLE_SET_BACKLIGHT) 254 if (asle_req & ASLE_SET_BACKLIGHT)
253 asle_stat |= asle_set_backlight(dev, asle->bclp); 255 asle_stat |= asle_set_backlight(dev, ioread32(&asle->bclp));
254 256
255 if (asle_req & ASLE_SET_PFIT) { 257 if (asle_req & ASLE_SET_PFIT) {
256 DRM_DEBUG_DRIVER("Pfit is not supported\n"); 258 DRM_DEBUG_DRIVER("Pfit is not supported\n");
@@ -262,7 +264,7 @@ void intel_opregion_gse_intr(struct drm_device *dev)
262 asle_stat |= ASLE_PWM_FREQ_FAILED; 264 asle_stat |= ASLE_PWM_FREQ_FAILED;
263 } 265 }
264 266
265 asle->aslc = asle_stat; 267 iowrite32(asle_stat, &asle->aslc);
266} 268}
267#define ASLE_ALS_EN (1<<0) 269#define ASLE_ALS_EN (1<<0)
268#define ASLE_BLC_EN (1<<1) 270#define ASLE_BLC_EN (1<<1)
@@ -272,15 +274,16 @@ void intel_opregion_gse_intr(struct drm_device *dev)
272void intel_opregion_enable_asle(struct drm_device *dev) 274void intel_opregion_enable_asle(struct drm_device *dev)
273{ 275{
274 struct drm_i915_private *dev_priv = dev->dev_private; 276 struct drm_i915_private *dev_priv = dev->dev_private;
275 struct opregion_asle *asle = dev_priv->opregion.asle; 277 struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
276 278
277 if (asle) { 279 if (asle) {
278 if (IS_MOBILE(dev)) 280 if (IS_MOBILE(dev))
279 intel_enable_asle(dev); 281 intel_enable_asle(dev);
280 282
281 asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN | 283 iowrite32(ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN |
282 ASLE_PFMB_EN; 284 ASLE_PFMB_EN,
283 asle->ardy = 1; 285 &asle->tche);
286 iowrite32(1, &asle->ardy);
284 } 287 }
285} 288}
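The opregion changes are a sparse-annotation pass: the region is ioremapped BIOS memory, so every pointer into it becomes __iomem and every field access goes through ioread32()/iowrite32() (or memcpy_fromio() for the signature). A kernel-style sketch of the before/after shape using the ardy field from the hunk above; not standalone code, it assumes the driver's struct opregion_asle:

    #include <linux/io.h>

    /* Before: asle->ardy = 1;  (plain store into ioremapped memory)
     * After:  an explicit, width-sized MMIO write that sparse can check. */
    static void opregion_mark_ready(struct opregion_asle __iomem *asle)
    {
    	iowrite32(1, &asle->ardy);
    }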
286 289
@@ -298,7 +301,7 @@ static int intel_opregion_video_event(struct notifier_block *nb,
298 Linux, these are handled by the dock, button and video drivers. 301 Linux, these are handled by the dock, button and video drivers.
299 */ 302 */
300 303
301 struct opregion_acpi *acpi; 304 struct opregion_acpi __iomem *acpi;
302 struct acpi_bus_event *event = data; 305 struct acpi_bus_event *event = data;
303 int ret = NOTIFY_OK; 306 int ret = NOTIFY_OK;
304 307
@@ -310,10 +313,11 @@ static int intel_opregion_video_event(struct notifier_block *nb,
310 313
311 acpi = system_opregion->acpi; 314 acpi = system_opregion->acpi;
312 315
313 if (event->type == 0x80 && !(acpi->cevt & 0x1)) 316 if (event->type == 0x80 &&
317 (ioread32(&acpi->cevt) & 1) == 0)
314 ret = NOTIFY_BAD; 318 ret = NOTIFY_BAD;
315 319
316 acpi->csts = 0; 320 iowrite32(0, &acpi->csts);
317 321
318 return ret; 322 return ret;
319} 323}
@@ -337,6 +341,7 @@ static void intel_didl_outputs(struct drm_device *dev)
337 struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL; 341 struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
338 unsigned long long device_id; 342 unsigned long long device_id;
339 acpi_status status; 343 acpi_status status;
344 u32 temp;
340 int i = 0; 345 int i = 0;
341 346
342 handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev); 347 handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
@@ -355,7 +360,7 @@ static void intel_didl_outputs(struct drm_device *dev)
355 } 360 }
356 361
357 if (!acpi_video_bus) { 362 if (!acpi_video_bus) {
358 printk(KERN_WARNING "No ACPI video bus found\n"); 363 pr_warn("No ACPI video bus found\n");
359 return; 364 return;
360 } 365 }
361 366
@@ -371,7 +376,8 @@ static void intel_didl_outputs(struct drm_device *dev)
371 if (ACPI_SUCCESS(status)) { 376 if (ACPI_SUCCESS(status)) {
372 if (!device_id) 377 if (!device_id)
373 goto blind_set; 378 goto blind_set;
374 opregion->acpi->didl[i] = (u32)(device_id & 0x0f0f); 379 iowrite32((u32)(device_id & 0x0f0f),
380 &opregion->acpi->didl[i]);
375 i++; 381 i++;
376 } 382 }
377 } 383 }
@@ -379,7 +385,7 @@ static void intel_didl_outputs(struct drm_device *dev)
379end: 385end:
380 /* If fewer than 8 outputs, the list must be null terminated */ 386 /* If fewer than 8 outputs, the list must be null terminated */
381 if (i < 8) 387 if (i < 8)
382 opregion->acpi->didl[i] = 0; 388 iowrite32(0, &opregion->acpi->didl[i]);
383 return; 389 return;
384 390
385blind_set: 391blind_set:
@@ -413,7 +419,9 @@ blind_set:
413 output_type = ACPI_LVDS_OUTPUT; 419 output_type = ACPI_LVDS_OUTPUT;
414 break; 420 break;
415 } 421 }
416 opregion->acpi->didl[i] |= (1<<31) | output_type | i; 422 temp = ioread32(&opregion->acpi->didl[i]);
423 iowrite32(temp | (1<<31) | output_type | i,
424 &opregion->acpi->didl[i]);
417 i++; 425 i++;
418 } 426 }
419 goto end; 427 goto end;
@@ -434,8 +442,8 @@ void intel_opregion_init(struct drm_device *dev)
434 /* Notify BIOS we are ready to handle ACPI video ext notifs. 442 /* Notify BIOS we are ready to handle ACPI video ext notifs.
435 * Right now, all the events are handled by the ACPI video module. 443 * Right now, all the events are handled by the ACPI video module.
436 * We don't actually need to do anything with them. */ 444 * We don't actually need to do anything with them. */
437 opregion->acpi->csts = 0; 445 iowrite32(0, &opregion->acpi->csts);
438 opregion->acpi->drdy = 1; 446 iowrite32(1, &opregion->acpi->drdy);
439 447
440 system_opregion = opregion; 448 system_opregion = opregion;
441 register_acpi_notifier(&intel_opregion_notifier); 449 register_acpi_notifier(&intel_opregion_notifier);
@@ -454,7 +462,7 @@ void intel_opregion_fini(struct drm_device *dev)
454 return; 462 return;
455 463
456 if (opregion->acpi) { 464 if (opregion->acpi) {
457 opregion->acpi->drdy = 0; 465 iowrite32(0, &opregion->acpi->drdy);
458 466
459 system_opregion = NULL; 467 system_opregion = NULL;
460 unregister_acpi_notifier(&intel_opregion_notifier); 468 unregister_acpi_notifier(&intel_opregion_notifier);
@@ -474,8 +482,9 @@ int intel_opregion_setup(struct drm_device *dev)
474{ 482{
475 struct drm_i915_private *dev_priv = dev->dev_private; 483 struct drm_i915_private *dev_priv = dev->dev_private;
476 struct intel_opregion *opregion = &dev_priv->opregion; 484 struct intel_opregion *opregion = &dev_priv->opregion;
477 void *base; 485 void __iomem *base;
478 u32 asls, mboxes; 486 u32 asls, mboxes;
487 char buf[sizeof(OPREGION_SIGNATURE)];
479 int err = 0; 488 int err = 0;
480 489
481 pci_read_config_dword(dev->pdev, PCI_ASLS, &asls); 490 pci_read_config_dword(dev->pdev, PCI_ASLS, &asls);
@@ -489,7 +498,9 @@ int intel_opregion_setup(struct drm_device *dev)
489 if (!base) 498 if (!base)
490 return -ENOMEM; 499 return -ENOMEM;
491 500
492 if (memcmp(base, OPREGION_SIGNATURE, 16)) { 501 memcpy_fromio(buf, base, sizeof(buf));
502
503 if (memcmp(buf, OPREGION_SIGNATURE, 16)) {
493 DRM_DEBUG_DRIVER("opregion signature mismatch\n"); 504 DRM_DEBUG_DRIVER("opregion signature mismatch\n");
494 err = -EINVAL; 505 err = -EINVAL;
495 goto err_out; 506 goto err_out;
@@ -499,7 +510,7 @@ int intel_opregion_setup(struct drm_device *dev)
499 510
500 opregion->lid_state = base + ACPI_CLID; 511 opregion->lid_state = base + ACPI_CLID;
501 512
502 mboxes = opregion->header->mboxes; 513 mboxes = ioread32(&opregion->header->mboxes);
503 if (mboxes & MBOX_ACPI) { 514 if (mboxes & MBOX_ACPI) {
504 DRM_DEBUG_DRIVER("Public ACPI methods supported\n"); 515 DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
505 opregion->acpi = base + OPREGION_ACPI_OFFSET; 516 opregion->acpi = base + OPREGION_ACPI_OFFSET;
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 80b331c322fb..458743da3774 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -187,14 +187,14 @@ struct intel_overlay {
187 void (*flip_tail)(struct intel_overlay *); 187 void (*flip_tail)(struct intel_overlay *);
188}; 188};
189 189
190static struct overlay_registers * 190static struct overlay_registers __iomem *
191intel_overlay_map_regs(struct intel_overlay *overlay) 191intel_overlay_map_regs(struct intel_overlay *overlay)
192{ 192{
193 drm_i915_private_t *dev_priv = overlay->dev->dev_private; 193 drm_i915_private_t *dev_priv = overlay->dev->dev_private;
194 struct overlay_registers *regs; 194 struct overlay_registers __iomem *regs;
195 195
196 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) 196 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
197 regs = overlay->reg_bo->phys_obj->handle->vaddr; 197 regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr;
198 else 198 else
199 regs = io_mapping_map_wc(dev_priv->mm.gtt_mapping, 199 regs = io_mapping_map_wc(dev_priv->mm.gtt_mapping,
200 overlay->reg_bo->gtt_offset); 200 overlay->reg_bo->gtt_offset);
@@ -203,7 +203,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
203} 203}
204 204
205static void intel_overlay_unmap_regs(struct intel_overlay *overlay, 205static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
206 struct overlay_registers *regs) 206 struct overlay_registers __iomem *regs)
207{ 207{
208 if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev)) 208 if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
209 io_mapping_unmap(regs); 209 io_mapping_unmap(regs);
@@ -215,20 +215,21 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
215{ 215{
216 struct drm_device *dev = overlay->dev; 216 struct drm_device *dev = overlay->dev;
217 drm_i915_private_t *dev_priv = dev->dev_private; 217 drm_i915_private_t *dev_priv = dev->dev_private;
218 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
218 int ret; 219 int ret;
219 220
220 BUG_ON(overlay->last_flip_req); 221 BUG_ON(overlay->last_flip_req);
221 ret = i915_add_request(LP_RING(dev_priv), NULL, request); 222 ret = i915_add_request(ring, NULL, request);
222 if (ret) { 223 if (ret) {
223 kfree(request); 224 kfree(request);
224 return ret; 225 return ret;
225 } 226 }
226 overlay->last_flip_req = request->seqno; 227 overlay->last_flip_req = request->seqno;
227 overlay->flip_tail = tail; 228 overlay->flip_tail = tail;
228 ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req, 229 ret = i915_wait_request(ring, overlay->last_flip_req);
229 true);
230 if (ret) 230 if (ret)
231 return ret; 231 return ret;
232 i915_gem_retire_requests(dev);
232 233
233 overlay->last_flip_req = 0; 234 overlay->last_flip_req = 0;
234 return 0; 235 return 0;
@@ -262,7 +263,7 @@ i830_activate_pipe_a(struct drm_device *dev)
262 DRM_DEBUG_DRIVER("Enabling pipe A in order to enable overlay\n"); 263 DRM_DEBUG_DRIVER("Enabling pipe A in order to enable overlay\n");
263 264
264 mode = drm_mode_duplicate(dev, &vesa_640x480); 265 mode = drm_mode_duplicate(dev, &vesa_640x480);
265 drm_mode_set_crtcinfo(mode, 0); 266
266 if (!drm_crtc_helper_set_mode(&crtc->base, mode, 267 if (!drm_crtc_helper_set_mode(&crtc->base, mode,
267 crtc->base.x, crtc->base.y, 268 crtc->base.x, crtc->base.y,
268 crtc->base.fb)) 269 crtc->base.fb))
@@ -287,6 +288,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
287{ 288{
288 struct drm_device *dev = overlay->dev; 289 struct drm_device *dev = overlay->dev;
289 struct drm_i915_private *dev_priv = dev->dev_private; 290 struct drm_i915_private *dev_priv = dev->dev_private;
291 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
290 struct drm_i915_gem_request *request; 292 struct drm_i915_gem_request *request;
291 int pipe_a_quirk = 0; 293 int pipe_a_quirk = 0;
292 int ret; 294 int ret;
@@ -306,17 +308,17 @@ static int intel_overlay_on(struct intel_overlay *overlay)
306 goto out; 308 goto out;
307 } 309 }
308 310
309 ret = BEGIN_LP_RING(4); 311 ret = intel_ring_begin(ring, 4);
310 if (ret) { 312 if (ret) {
311 kfree(request); 313 kfree(request);
312 goto out; 314 goto out;
313 } 315 }
314 316
315 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON); 317 intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
316 OUT_RING(overlay->flip_addr | OFC_UPDATE); 318 intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
317 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); 319 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
318 OUT_RING(MI_NOOP); 320 intel_ring_emit(ring, MI_NOOP);
319 ADVANCE_LP_RING(); 321 intel_ring_advance(ring);
320 322
321 ret = intel_overlay_do_wait_request(overlay, request, NULL); 323 ret = intel_overlay_do_wait_request(overlay, request, NULL);
322out: 324out:
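The overlay conversion drops the LP_RING macros for calls that name the render ring (RCS) explicitly and expose the reserve/emit/advance contract. A sketch of that contract with a hypothetical emit_dwords() helper (not in the driver) that makes the invariant visible: the dword count passed to intel_ring_begin() must equal the number of intel_ring_emit() calls before intel_ring_advance():

    /* Hypothetical wrapper (not part of i915) making the contract explicit. */
    static int emit_dwords(struct intel_ring_buffer *ring,
    		       const u32 *cmds, int n)
    {
    	int ret, i;

    	ret = intel_ring_begin(ring, n); /* reserves exactly n dwords */
    	if (ret)
    		return ret;	/* e.g. wedged GPU: nothing was reserved */
    	for (i = 0; i < n; i++)
    		intel_ring_emit(ring, cmds[i]);
    	intel_ring_advance(ring);	/* publish to the hardware */
    	return 0;
    }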
@@ -332,6 +334,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
332{ 334{
333 struct drm_device *dev = overlay->dev; 335 struct drm_device *dev = overlay->dev;
334 drm_i915_private_t *dev_priv = dev->dev_private; 336 drm_i915_private_t *dev_priv = dev->dev_private;
337 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
335 struct drm_i915_gem_request *request; 338 struct drm_i915_gem_request *request;
336 u32 flip_addr = overlay->flip_addr; 339 u32 flip_addr = overlay->flip_addr;
337 u32 tmp; 340 u32 tmp;
@@ -351,16 +354,16 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
351 if (tmp & (1 << 17)) 354 if (tmp & (1 << 17))
352 DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp); 355 DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
353 356
354 ret = BEGIN_LP_RING(2); 357 ret = intel_ring_begin(ring, 2);
355 if (ret) { 358 if (ret) {
356 kfree(request); 359 kfree(request);
357 return ret; 360 return ret;
358 } 361 }
359 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); 362 intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
360 OUT_RING(flip_addr); 363 intel_ring_emit(ring, flip_addr);
361 ADVANCE_LP_RING(); 364 intel_ring_advance(ring);
362 365
363 ret = i915_add_request(LP_RING(dev_priv), NULL, request); 366 ret = i915_add_request(ring, NULL, request);
364 if (ret) { 367 if (ret) {
365 kfree(request); 368 kfree(request);
366 return ret; 369 return ret;
@@ -401,6 +404,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
401{ 404{
402 struct drm_device *dev = overlay->dev; 405 struct drm_device *dev = overlay->dev;
403 struct drm_i915_private *dev_priv = dev->dev_private; 406 struct drm_i915_private *dev_priv = dev->dev_private;
407 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
404 u32 flip_addr = overlay->flip_addr; 408 u32 flip_addr = overlay->flip_addr;
405 struct drm_i915_gem_request *request; 409 struct drm_i915_gem_request *request;
406 int ret; 410 int ret;
@@ -417,20 +421,20 @@ static int intel_overlay_off(struct intel_overlay *overlay)
417 * of the hw. Do it in both cases */ 421 * of the hw. Do it in both cases */
418 flip_addr |= OFC_UPDATE; 422 flip_addr |= OFC_UPDATE;
419 423
420 ret = BEGIN_LP_RING(6); 424 ret = intel_ring_begin(ring, 6);
421 if (ret) { 425 if (ret) {
422 kfree(request); 426 kfree(request);
423 return ret; 427 return ret;
424 } 428 }
425 /* wait for overlay to go idle */ 429 /* wait for overlay to go idle */
426 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); 430 intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
427 OUT_RING(flip_addr); 431 intel_ring_emit(ring, flip_addr);
428 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); 432 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
429 /* turn overlay off */ 433 /* turn overlay off */
430 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF); 434 intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
431 OUT_RING(flip_addr); 435 intel_ring_emit(ring, flip_addr);
432 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); 436 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
433 ADVANCE_LP_RING(); 437 intel_ring_advance(ring);
434 438
435 return intel_overlay_do_wait_request(overlay, request, 439 return intel_overlay_do_wait_request(overlay, request,
436 intel_overlay_off_tail); 440 intel_overlay_off_tail);
@@ -442,15 +446,16 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
442{ 446{
443 struct drm_device *dev = overlay->dev; 447 struct drm_device *dev = overlay->dev;
444 drm_i915_private_t *dev_priv = dev->dev_private; 448 drm_i915_private_t *dev_priv = dev->dev_private;
449 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
445 int ret; 450 int ret;
446 451
447 if (overlay->last_flip_req == 0) 452 if (overlay->last_flip_req == 0)
448 return 0; 453 return 0;
449 454
450 ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req, 455 ret = i915_wait_request(ring, overlay->last_flip_req);
451 true);
452 if (ret) 456 if (ret)
453 return ret; 457 return ret;
458 i915_gem_retire_requests(dev);
454 459
455 if (overlay->flip_tail) 460 if (overlay->flip_tail)
456 overlay->flip_tail(overlay); 461 overlay->flip_tail(overlay);
@@ -467,6 +472,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
467{ 472{
468 struct drm_device *dev = overlay->dev; 473 struct drm_device *dev = overlay->dev;
469 drm_i915_private_t *dev_priv = dev->dev_private; 474 drm_i915_private_t *dev_priv = dev->dev_private;
475 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
470 int ret; 476 int ret;
471 477
472 /* Only wait if there is actually an old frame to release to 478 /* Only wait if there is actually an old frame to release to
@@ -483,15 +489,15 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
483 if (request == NULL) 489 if (request == NULL)
484 return -ENOMEM; 490 return -ENOMEM;
485 491
486 ret = BEGIN_LP_RING(2); 492 ret = intel_ring_begin(ring, 2);
487 if (ret) { 493 if (ret) {
488 kfree(request); 494 kfree(request);
489 return ret; 495 return ret;
490 } 496 }
491 497
492 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); 498 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
493 OUT_RING(MI_NOOP); 499 intel_ring_emit(ring, MI_NOOP);
494 ADVANCE_LP_RING(); 500 intel_ring_advance(ring);
495 501
496 ret = intel_overlay_do_wait_request(overlay, request, 502 ret = intel_overlay_do_wait_request(overlay, request,
497 intel_overlay_release_old_vid_tail); 503 intel_overlay_release_old_vid_tail);
@@ -619,14 +625,15 @@ static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = {
619 0x3000, 0x0800, 0x3000 625 0x3000, 0x0800, 0x3000
620}; 626};
621 627
622static void update_polyphase_filter(struct overlay_registers *regs) 628static void update_polyphase_filter(struct overlay_registers __iomem *regs)
623{ 629{
624 memcpy(regs->Y_HCOEFS, y_static_hcoeffs, sizeof(y_static_hcoeffs)); 630 memcpy_toio(regs->Y_HCOEFS, y_static_hcoeffs, sizeof(y_static_hcoeffs));
625 memcpy(regs->UV_HCOEFS, uv_static_hcoeffs, sizeof(uv_static_hcoeffs)); 631 memcpy_toio(regs->UV_HCOEFS, uv_static_hcoeffs,
632 sizeof(uv_static_hcoeffs));
626} 633}
627 634
628static bool update_scaling_factors(struct intel_overlay *overlay, 635static bool update_scaling_factors(struct intel_overlay *overlay,
629 struct overlay_registers *regs, 636 struct overlay_registers __iomem *regs,
630 struct put_image_params *params) 637 struct put_image_params *params)
631{ 638{
632 /* fixed point with a 12 bit shift */ 639 /* fixed point with a 12 bit shift */
@@ -665,16 +672,19 @@ static bool update_scaling_factors(struct intel_overlay *overlay,
665 overlay->old_xscale = xscale; 672 overlay->old_xscale = xscale;
666 overlay->old_yscale = yscale; 673 overlay->old_yscale = yscale;
667 674
668 regs->YRGBSCALE = (((yscale & FRACT_MASK) << 20) | 675 iowrite32(((yscale & FRACT_MASK) << 20) |
669 ((xscale >> FP_SHIFT) << 16) | 676 ((xscale >> FP_SHIFT) << 16) |
670 ((xscale & FRACT_MASK) << 3)); 677 ((xscale & FRACT_MASK) << 3),
678 &regs->YRGBSCALE);
671 679
672 regs->UVSCALE = (((yscale_UV & FRACT_MASK) << 20) | 680 iowrite32(((yscale_UV & FRACT_MASK) << 20) |
673 ((xscale_UV >> FP_SHIFT) << 16) | 681 ((xscale_UV >> FP_SHIFT) << 16) |
674 ((xscale_UV & FRACT_MASK) << 3)); 682 ((xscale_UV & FRACT_MASK) << 3),
683 &regs->UVSCALE);
675 684
676 regs->UVSCALEV = ((((yscale >> FP_SHIFT) << 16) | 685 iowrite32((((yscale >> FP_SHIFT) << 16) |
677 ((yscale_UV >> FP_SHIFT) << 0))); 686 ((yscale_UV >> FP_SHIFT) << 0)),
687 &regs->UVSCALEV);
678 688
679 if (scale_changed) 689 if (scale_changed)
680 update_polyphase_filter(regs); 690 update_polyphase_filter(regs);
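The scale factors are fixed point "with a 12 bit shift", i.e. 4.12 numbers whose integer and fraction parts land in separate bit fields of the register. A standalone demo of the YRGBSCALE packing used above, with FP_SHIFT/FRACT_MASK values taken from that comment:

    #include <stdint.h>
    #include <stdio.h>

    #define FP_SHIFT   12      /* "fixed point with a 12 bit shift" */
    #define FRACT_MASK 0xfff

    /* Pack a 4.12 x/y scale pair the way the driver fills YRGBSCALE:
     * y fraction at bit 20, x integer at bit 16, x fraction at bit 3. */
    static uint32_t pack_yrgbscale(uint32_t xscale, uint32_t yscale)
    {
    	return ((yscale & FRACT_MASK) << 20) |
    	       ((xscale >> FP_SHIFT) << 16) |
    	       ((xscale & FRACT_MASK) << 3);
    }

    int main(void)
    {
    	uint32_t scale_1_5 = 3 << (FP_SHIFT - 1); /* 1.5 in 4.12 = 0x1800 */

    	printf("YRGBSCALE for 1.5x/1.5x: %#010x\n",
    	       (unsigned)pack_yrgbscale(scale_1_5, scale_1_5));
    	return 0;
    }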
@@ -683,30 +693,32 @@ static bool update_scaling_factors(struct intel_overlay *overlay,
683} 693}
684 694
685static void update_colorkey(struct intel_overlay *overlay, 695static void update_colorkey(struct intel_overlay *overlay,
686 struct overlay_registers *regs) 696 struct overlay_registers __iomem *regs)
687{ 697{
688 u32 key = overlay->color_key; 698 u32 key = overlay->color_key;
689 699
690 switch (overlay->crtc->base.fb->bits_per_pixel) { 700 switch (overlay->crtc->base.fb->bits_per_pixel) {
691 case 8: 701 case 8:
692 regs->DCLRKV = 0; 702 iowrite32(0, &regs->DCLRKV);
693 regs->DCLRKM = CLK_RGB8I_MASK | DST_KEY_ENABLE; 703 iowrite32(CLK_RGB8I_MASK | DST_KEY_ENABLE, &regs->DCLRKM);
694 break; 704 break;
695 705
696 case 16: 706 case 16:
697 if (overlay->crtc->base.fb->depth == 15) { 707 if (overlay->crtc->base.fb->depth == 15) {
698 regs->DCLRKV = RGB15_TO_COLORKEY(key); 708 iowrite32(RGB15_TO_COLORKEY(key), &regs->DCLRKV);
699 regs->DCLRKM = CLK_RGB15_MASK | DST_KEY_ENABLE; 709 iowrite32(CLK_RGB15_MASK | DST_KEY_ENABLE,
710 &regs->DCLRKM);
700 } else { 711 } else {
701 regs->DCLRKV = RGB16_TO_COLORKEY(key); 712 iowrite32(RGB16_TO_COLORKEY(key), &regs->DCLRKV);
702 regs->DCLRKM = CLK_RGB16_MASK | DST_KEY_ENABLE; 713 iowrite32(CLK_RGB16_MASK | DST_KEY_ENABLE,
714 &regs->DCLRKM);
703 } 715 }
704 break; 716 break;
705 717
706 case 24: 718 case 24:
707 case 32: 719 case 32:
708 regs->DCLRKV = key; 720 iowrite32(key, &regs->DCLRKV);
709 regs->DCLRKM = CLK_RGB24_MASK | DST_KEY_ENABLE; 721 iowrite32(CLK_RGB24_MASK | DST_KEY_ENABLE, &regs->DCLRKM);
710 break; 722 break;
711 } 723 }
712} 724}
@@ -761,9 +773,10 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
761 struct put_image_params *params) 773 struct put_image_params *params)
762{ 774{
763 int ret, tmp_width; 775 int ret, tmp_width;
764 struct overlay_registers *regs; 776 struct overlay_registers __iomem *regs;
765 bool scale_changed = false; 777 bool scale_changed = false;
766 struct drm_device *dev = overlay->dev; 778 struct drm_device *dev = overlay->dev;
779 u32 swidth, swidthsw, sheight, ostride;
767 780
768 BUG_ON(!mutex_is_locked(&dev->struct_mutex)); 781 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
769 BUG_ON(!mutex_is_locked(&dev->mode_config.mutex)); 782 BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
@@ -782,16 +795,18 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
782 goto out_unpin; 795 goto out_unpin;
783 796
784 if (!overlay->active) { 797 if (!overlay->active) {
798 u32 oconfig;
785 regs = intel_overlay_map_regs(overlay); 799 regs = intel_overlay_map_regs(overlay);
786 if (!regs) { 800 if (!regs) {
787 ret = -ENOMEM; 801 ret = -ENOMEM;
788 goto out_unpin; 802 goto out_unpin;
789 } 803 }
790 regs->OCONFIG = OCONF_CC_OUT_8BIT; 804 oconfig = OCONF_CC_OUT_8BIT;
791 if (IS_GEN4(overlay->dev)) 805 if (IS_GEN4(overlay->dev))
792 regs->OCONFIG |= OCONF_CSC_MODE_BT709; 806 oconfig |= OCONF_CSC_MODE_BT709;
793 regs->OCONFIG |= overlay->crtc->pipe == 0 ? 807 oconfig |= overlay->crtc->pipe == 0 ?
794 OCONF_PIPE_A : OCONF_PIPE_B; 808 OCONF_PIPE_A : OCONF_PIPE_B;
809 iowrite32(oconfig, &regs->OCONFIG);
795 intel_overlay_unmap_regs(overlay, regs); 810 intel_overlay_unmap_regs(overlay, regs);
796 811
797 ret = intel_overlay_on(overlay); 812 ret = intel_overlay_on(overlay);
@@ -805,42 +820,46 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
805 goto out_unpin; 820 goto out_unpin;
806 } 821 }
807 822
808 regs->DWINPOS = (params->dst_y << 16) | params->dst_x; 823 iowrite32((params->dst_y << 16) | params->dst_x, &regs->DWINPOS);
809 regs->DWINSZ = (params->dst_h << 16) | params->dst_w; 824 iowrite32((params->dst_h << 16) | params->dst_w, &regs->DWINSZ);
810 825
811 if (params->format & I915_OVERLAY_YUV_PACKED) 826 if (params->format & I915_OVERLAY_YUV_PACKED)
812 tmp_width = packed_width_bytes(params->format, params->src_w); 827 tmp_width = packed_width_bytes(params->format, params->src_w);
813 else 828 else
814 tmp_width = params->src_w; 829 tmp_width = params->src_w;
815 830
816 regs->SWIDTH = params->src_w; 831 swidth = params->src_w;
817 regs->SWIDTHSW = calc_swidthsw(overlay->dev, 832 swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width);
818 params->offset_Y, tmp_width); 833 sheight = params->src_h;
819 regs->SHEIGHT = params->src_h; 834 iowrite32(new_bo->gtt_offset + params->offset_Y, &regs->OBUF_0Y);
820 regs->OBUF_0Y = new_bo->gtt_offset + params->offset_Y; 835 ostride = params->stride_Y;
821 regs->OSTRIDE = params->stride_Y;
822 836
823 if (params->format & I915_OVERLAY_YUV_PLANAR) { 837 if (params->format & I915_OVERLAY_YUV_PLANAR) {
824 int uv_hscale = uv_hsubsampling(params->format); 838 int uv_hscale = uv_hsubsampling(params->format);
825 int uv_vscale = uv_vsubsampling(params->format); 839 int uv_vscale = uv_vsubsampling(params->format);
826 u32 tmp_U, tmp_V; 840 u32 tmp_U, tmp_V;
827 regs->SWIDTH |= (params->src_w/uv_hscale) << 16; 841 swidth |= (params->src_w/uv_hscale) << 16;
828 tmp_U = calc_swidthsw(overlay->dev, params->offset_U, 842 tmp_U = calc_swidthsw(overlay->dev, params->offset_U,
829 params->src_w/uv_hscale); 843 params->src_w/uv_hscale);
830 tmp_V = calc_swidthsw(overlay->dev, params->offset_V, 844 tmp_V = calc_swidthsw(overlay->dev, params->offset_V,
831 params->src_w/uv_hscale); 845 params->src_w/uv_hscale);
832 regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16; 846 swidthsw |= max_t(u32, tmp_U, tmp_V) << 16;
833 regs->SHEIGHT |= (params->src_h/uv_vscale) << 16; 847 sheight |= (params->src_h/uv_vscale) << 16;
834 regs->OBUF_0U = new_bo->gtt_offset + params->offset_U; 848 iowrite32(new_bo->gtt_offset + params->offset_U, &regs->OBUF_0U);
835 regs->OBUF_0V = new_bo->gtt_offset + params->offset_V; 849 iowrite32(new_bo->gtt_offset + params->offset_V, &regs->OBUF_0V);
836 regs->OSTRIDE |= params->stride_UV << 16; 850 ostride |= params->stride_UV << 16;
837 } 851 }
838 852
853 iowrite32(swidth, &regs->SWIDTH);
854 iowrite32(swidthsw, &regs->SWIDTHSW);
855 iowrite32(sheight, &regs->SHEIGHT);
856 iowrite32(ostride, &regs->OSTRIDE);
857
839 scale_changed = update_scaling_factors(overlay, regs, params); 858 scale_changed = update_scaling_factors(overlay, regs, params);
840 859
841 update_colorkey(overlay, regs); 860 update_colorkey(overlay, regs);
842 861
843 regs->OCMD = overlay_cmd_reg(params); 862 iowrite32(overlay_cmd_reg(params), &regs->OCMD);
844 863
845 intel_overlay_unmap_regs(overlay, regs); 864 intel_overlay_unmap_regs(overlay, regs);
846 865
@@ -860,7 +879,7 @@ out_unpin:
860 879
861int intel_overlay_switch_off(struct intel_overlay *overlay) 880int intel_overlay_switch_off(struct intel_overlay *overlay)
862{ 881{
863 struct overlay_registers *regs; 882 struct overlay_registers __iomem *regs;
864 struct drm_device *dev = overlay->dev; 883 struct drm_device *dev = overlay->dev;
865 int ret; 884 int ret;
866 885
@@ -879,7 +898,7 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
879 return ret; 898 return ret;
880 899
881 regs = intel_overlay_map_regs(overlay); 900 regs = intel_overlay_map_regs(overlay);
882 regs->OCMD = 0; 901 iowrite32(0, &regs->OCMD);
883 intel_overlay_unmap_regs(overlay, regs); 902 intel_overlay_unmap_regs(overlay, regs);
884 903
885 ret = intel_overlay_off(overlay); 904 ret = intel_overlay_off(overlay);
@@ -1109,11 +1128,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
1109 struct put_image_params *params; 1128 struct put_image_params *params;
1110 int ret; 1129 int ret;
1111 1130
1112 if (!dev_priv) { 1131 /* No need to check for DRIVER_MODESET - we don't set it up then. */
1113 DRM_ERROR("called with no initialization\n");
1114 return -EINVAL;
1115 }
1116
1117 overlay = dev_priv->overlay; 1132 overlay = dev_priv->overlay;
1118 if (!overlay) { 1133 if (!overlay) {
1119 DRM_DEBUG("userspace bug: no overlay\n"); 1134 DRM_DEBUG("userspace bug: no overlay\n");
@@ -1250,10 +1265,11 @@ out_free:
1250} 1265}
1251 1266
1252static void update_reg_attrs(struct intel_overlay *overlay, 1267static void update_reg_attrs(struct intel_overlay *overlay,
1253 struct overlay_registers *regs) 1268 struct overlay_registers __iomem *regs)
1254{ 1269{
1255 regs->OCLRC0 = (overlay->contrast << 18) | (overlay->brightness & 0xff); 1270 iowrite32((overlay->contrast << 18) | (overlay->brightness & 0xff),
1256 regs->OCLRC1 = overlay->saturation; 1271 &regs->OCLRC0);
1272 iowrite32(overlay->saturation, &regs->OCLRC1);
1257} 1273}
1258 1274
1259static bool check_gamma_bounds(u32 gamma1, u32 gamma2) 1275static bool check_gamma_bounds(u32 gamma1, u32 gamma2)
@@ -1306,14 +1322,10 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
1306 struct drm_intel_overlay_attrs *attrs = data; 1322 struct drm_intel_overlay_attrs *attrs = data;
1307 drm_i915_private_t *dev_priv = dev->dev_private; 1323 drm_i915_private_t *dev_priv = dev->dev_private;
1308 struct intel_overlay *overlay; 1324 struct intel_overlay *overlay;
1309 struct overlay_registers *regs; 1325 struct overlay_registers __iomem *regs;
1310 int ret; 1326 int ret;
1311 1327
1312 if (!dev_priv) { 1328 /* No need to check for DRIVER_MODESET - we don't set it up then. */
1313 DRM_ERROR("called with no initialization\n");
1314 return -EINVAL;
1315 }
1316
1317 overlay = dev_priv->overlay; 1329 overlay = dev_priv->overlay;
1318 if (!overlay) { 1330 if (!overlay) {
1319 DRM_DEBUG("userspace bug: no overlay\n"); 1331 DRM_DEBUG("userspace bug: no overlay\n");
@@ -1396,7 +1408,7 @@ void intel_setup_overlay(struct drm_device *dev)
1396 drm_i915_private_t *dev_priv = dev->dev_private; 1408 drm_i915_private_t *dev_priv = dev->dev_private;
1397 struct intel_overlay *overlay; 1409 struct intel_overlay *overlay;
1398 struct drm_i915_gem_object *reg_bo; 1410 struct drm_i915_gem_object *reg_bo;
1399 struct overlay_registers *regs; 1411 struct overlay_registers __iomem *regs;
1400 int ret; 1412 int ret;
1401 1413
1402 if (!HAS_OVERLAY(dev)) 1414 if (!HAS_OVERLAY(dev))
@@ -1451,7 +1463,7 @@ void intel_setup_overlay(struct drm_device *dev)
1451 if (!regs) 1463 if (!regs)
1452 goto out_unpin_bo; 1464 goto out_unpin_bo;
1453 1465
1454 memset(regs, 0, sizeof(struct overlay_registers)); 1466 memset_io(regs, 0, sizeof(struct overlay_registers));
1455 update_polyphase_filter(regs); 1467 update_polyphase_filter(regs);
1456 update_reg_attrs(overlay, regs); 1468 update_reg_attrs(overlay, regs);
1457 1469
@@ -1499,14 +1511,17 @@ struct intel_overlay_error_state {
1499 u32 isr; 1511 u32 isr;
1500}; 1512};
1501 1513
1502static struct overlay_registers * 1514static struct overlay_registers __iomem *
1503intel_overlay_map_regs_atomic(struct intel_overlay *overlay) 1515intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
1504{ 1516{
1505 drm_i915_private_t *dev_priv = overlay->dev->dev_private; 1517 drm_i915_private_t *dev_priv = overlay->dev->dev_private;
1506 struct overlay_registers *regs; 1518 struct overlay_registers __iomem *regs;
1507 1519
1508 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) 1520 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
1509 regs = overlay->reg_bo->phys_obj->handle->vaddr; 1521 /* Cast to make sparse happy, but it's wc memory anyway, so
1522 * equivalent to the wc io mapping on X86. */
1523 regs = (struct overlay_registers __iomem *)
1524 overlay->reg_bo->phys_obj->handle->vaddr;
1510 else 1525 else
1511 regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, 1526 regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
1512 overlay->reg_bo->gtt_offset); 1527 overlay->reg_bo->gtt_offset);
@@ -1515,7 +1530,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
1515} 1530}
1516 1531
1517static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay, 1532static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay,
1518 struct overlay_registers *regs) 1533 struct overlay_registers __iomem *regs)
1519{ 1534{
1520 if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev)) 1535 if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
1521 io_mapping_unmap_atomic(regs); 1536 io_mapping_unmap_atomic(regs);
@@ -1540,9 +1555,9 @@ intel_overlay_capture_error_state(struct drm_device *dev)
1540 error->dovsta = I915_READ(DOVSTA); 1555 error->dovsta = I915_READ(DOVSTA);
1541 error->isr = I915_READ(ISR); 1556 error->isr = I915_READ(ISR);
1542 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) 1557 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
1543 error->base = (long) overlay->reg_bo->phys_obj->handle->vaddr; 1558 error->base = (__force long)overlay->reg_bo->phys_obj->handle->vaddr;
1544 else 1559 else
1545 error->base = (long) overlay->reg_bo->gtt_offset; 1560 error->base = overlay->reg_bo->gtt_offset;
1546 1561
1547 regs = intel_overlay_map_regs_atomic(overlay); 1562 regs = intel_overlay_map_regs_atomic(overlay);
1548 if (!regs) 1563 if (!regs)
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 48177ec4720e..2a1625d84a69 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -28,6 +28,9 @@
  * Chris Wilson <chris@chris-wilson.co.uk>
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/moduleparam.h>
 #include "intel_drv.h"
 
 #define PCI_LBPC 0xf4 /* legacy/combination backlight modes */
@@ -169,7 +172,7 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev)
 	/* XXX add code here to query mode clock or hardware clock
 	 * and program max PWM appropriately.
 	 */
-	printk_once(KERN_WARNING "fixme: max PWM is zero.\n");
+	pr_warn_once("fixme: max PWM is zero\n");
 	return 1;
 }
 
@@ -189,6 +192,27 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev)
 	return max;
 }
 
+static int i915_panel_invert_brightness;
+MODULE_PARM_DESC(invert_brightness, "Invert backlight brightness "
+	"(-1 force normal, 0 machine defaults, 1 force inversion), please "
+	"report PCI device ID, subsystem vendor and subsystem device ID "
+	"to dri-devel@lists.freedesktop.org, if your machine needs it. "
+	"It will then be included in an upcoming module version.");
+module_param_named(invert_brightness, i915_panel_invert_brightness, int, 0600);
+static u32 intel_panel_compute_brightness(struct drm_device *dev, u32 val)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (i915_panel_invert_brightness < 0)
+		return val;
+
+	if (i915_panel_invert_brightness > 0 ||
+	    dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS)
+		return intel_panel_get_max_backlight(dev) - val;
+
+	return val;
+}
+
 u32 intel_panel_get_backlight(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -209,6 +233,7 @@ u32 intel_panel_get_backlight(struct drm_device *dev)
 		}
 	}
 
+	val = intel_panel_compute_brightness(dev, val);
 	DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
 	return val;
 }
@@ -226,6 +251,7 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level
 	u32 tmp;
 
 	DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
+	level = intel_panel_compute_brightness(dev, level);
 
 	if (HAS_PCH_SPLIT(dev))
 		return intel_pch_panel_set_backlight(dev, level);
@@ -342,6 +368,7 @@ int intel_panel_setup_backlight(struct drm_device *dev)
 	else
 		return -ENODEV;
 
+	memset(&props, 0, sizeof(props));
 	props.type = BACKLIGHT_RAW;
 	props.max_brightness = intel_panel_get_max_backlight(dev);
 	dev_priv->backlight =
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
new file mode 100644
index 000000000000..8e79ff67ec98
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -0,0 +1,3796 @@
1/*
2 * Copyright © 2012 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eugeni Dodonov <eugeni.dodonov@intel.com>
25 *
26 */
27
28#include <linux/cpufreq.h>
29#include "i915_drv.h"
30#include "intel_drv.h"
31#include "../../../platform/x86/intel_ips.h"
32#include <linux/module.h>
33
34/* FBC, or Frame Buffer Compression, is a technique employed to compress the
35 * framebuffer contents in-memory, aiming at reducing the required bandwidth
 36 * during in-memory transfers and, therefore, reducing the power consumption.
37 *
38 * The benefits of FBC are mostly visible with solid backgrounds and
39 * variation-less patterns.
40 *
 41 * FBC-related functionality can be enabled by means of the
 42 * i915.i915_enable_fbc parameter.
43 */
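/* Usage sketch (illustrative, not part of this patch): with i915 built as a
 * module, FBC could be forced on or off at load time, e.g.
 *
 *   modprobe i915 i915_enable_fbc=1     # force enable
 *   modprobe i915 i915_enable_fbc=0     # force disable
 *
 * Any negative value keeps the per-chip default chosen in
 * intel_update_fbc() below.
 */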
44
45static void i8xx_disable_fbc(struct drm_device *dev)
46{
47 struct drm_i915_private *dev_priv = dev->dev_private;
48 u32 fbc_ctl;
49
50 /* Disable compression */
51 fbc_ctl = I915_READ(FBC_CONTROL);
52 if ((fbc_ctl & FBC_CTL_EN) == 0)
53 return;
54
55 fbc_ctl &= ~FBC_CTL_EN;
56 I915_WRITE(FBC_CONTROL, fbc_ctl);
57
58 /* Wait for compressing bit to clear */
59 if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
60 DRM_DEBUG_KMS("FBC idle timed out\n");
61 return;
62 }
63
64 DRM_DEBUG_KMS("disabled FBC\n");
65}
66
67static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
68{
69 struct drm_device *dev = crtc->dev;
70 struct drm_i915_private *dev_priv = dev->dev_private;
71 struct drm_framebuffer *fb = crtc->fb;
72 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
73 struct drm_i915_gem_object *obj = intel_fb->obj;
74 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
75 int cfb_pitch;
76 int plane, i;
77 u32 fbc_ctl, fbc_ctl2;
78
79 cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
80 if (fb->pitches[0] < cfb_pitch)
81 cfb_pitch = fb->pitches[0];
82
83 /* FBC_CTL wants 64B units */
84 cfb_pitch = (cfb_pitch / 64) - 1;
85 plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
86
87 /* Clear old tags */
88 for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
89 I915_WRITE(FBC_TAG + (i * 4), 0);
90
91 /* Set it up... */
92 fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
93 fbc_ctl2 |= plane;
94 I915_WRITE(FBC_CONTROL2, fbc_ctl2);
95 I915_WRITE(FBC_FENCE_OFF, crtc->y);
96
97 /* enable it... */
98 fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
99 if (IS_I945GM(dev))
100 fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
101 fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
102 fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
103 fbc_ctl |= obj->fence_reg;
104 I915_WRITE(FBC_CONTROL, fbc_ctl);
105
106 DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
107 cfb_pitch, crtc->y, intel_crtc->plane);
108}
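/* Worked example for the pitch encoding above (illustrative numbers): a
 * framebuffer stride of 4096 bytes, with a compressed buffer at least as
 * wide, gives cfb_pitch = (4096 / 64) - 1 = 63, which fits in the 8-bit
 * field selected by the 0xff mask before FBC_CTL_STRIDE_SHIFT. */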
109
110static bool i8xx_fbc_enabled(struct drm_device *dev)
111{
112 struct drm_i915_private *dev_priv = dev->dev_private;
113
114 return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
115}
116
117static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
118{
119 struct drm_device *dev = crtc->dev;
120 struct drm_i915_private *dev_priv = dev->dev_private;
121 struct drm_framebuffer *fb = crtc->fb;
122 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
123 struct drm_i915_gem_object *obj = intel_fb->obj;
124 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
125 int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
126 unsigned long stall_watermark = 200;
127 u32 dpfc_ctl;
128
129 dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
130 dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
131 I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
132
133 I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
134 (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
135 (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
136 I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
137
138 /* enable it... */
139 I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
140
141 DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
142}
143
144static void g4x_disable_fbc(struct drm_device *dev)
145{
146 struct drm_i915_private *dev_priv = dev->dev_private;
147 u32 dpfc_ctl;
148
149 /* Disable compression */
150 dpfc_ctl = I915_READ(DPFC_CONTROL);
151 if (dpfc_ctl & DPFC_CTL_EN) {
152 dpfc_ctl &= ~DPFC_CTL_EN;
153 I915_WRITE(DPFC_CONTROL, dpfc_ctl);
154
155 DRM_DEBUG_KMS("disabled FBC\n");
156 }
157}
158
159static bool g4x_fbc_enabled(struct drm_device *dev)
160{
161 struct drm_i915_private *dev_priv = dev->dev_private;
162
163 return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
164}
165
166static void sandybridge_blit_fbc_update(struct drm_device *dev)
167{
168 struct drm_i915_private *dev_priv = dev->dev_private;
169 u32 blt_ecoskpd;
170
171 /* Make sure blitter notifies FBC of writes */
172 gen6_gt_force_wake_get(dev_priv);
173 blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
174 blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
175 GEN6_BLITTER_LOCK_SHIFT;
176 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
177 blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
178 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
179 blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
180 GEN6_BLITTER_LOCK_SHIFT);
181 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
182 POSTING_READ(GEN6_BLITTER_ECOSKPD);
183 gen6_gt_force_wake_put(dev_priv);
184}
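/* Note on the triple write above: ECOSKPD appears to behave as a masked
 * register, with the bits at GEN6_BLITTER_LOCK_SHIFT acting as a
 * write-enable mask for the low half (an interpretation based on the
 * shifts used here, not documented in this file). Read that way, the
 * sequence unmasks FBC_NOTIFY, sets it, then re-locks it. */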
185
186static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
187{
188 struct drm_device *dev = crtc->dev;
189 struct drm_i915_private *dev_priv = dev->dev_private;
190 struct drm_framebuffer *fb = crtc->fb;
191 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
192 struct drm_i915_gem_object *obj = intel_fb->obj;
193 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
194 int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
195 unsigned long stall_watermark = 200;
196 u32 dpfc_ctl;
197
198 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
199 dpfc_ctl &= DPFC_RESERVED;
200 dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
201 /* Set persistent mode for front-buffer rendering, ala X. */
202 dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
203 dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
204 I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
205
206 I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
207 (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
208 (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
209 I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
210 I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
211 /* enable it... */
212 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
213
214 if (IS_GEN6(dev)) {
215 I915_WRITE(SNB_DPFC_CTL_SA,
216 SNB_CPU_FENCE_ENABLE | obj->fence_reg);
217 I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
218 sandybridge_blit_fbc_update(dev);
219 }
220
221 DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
222}
223
224static void ironlake_disable_fbc(struct drm_device *dev)
225{
226 struct drm_i915_private *dev_priv = dev->dev_private;
227 u32 dpfc_ctl;
228
229 /* Disable compression */
230 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
231 if (dpfc_ctl & DPFC_CTL_EN) {
232 dpfc_ctl &= ~DPFC_CTL_EN;
233 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
234
235 DRM_DEBUG_KMS("disabled FBC\n");
236 }
237}
238
239static bool ironlake_fbc_enabled(struct drm_device *dev)
240{
241 struct drm_i915_private *dev_priv = dev->dev_private;
242
243 return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
244}
245
246bool intel_fbc_enabled(struct drm_device *dev)
247{
248 struct drm_i915_private *dev_priv = dev->dev_private;
249
250 if (!dev_priv->display.fbc_enabled)
251 return false;
252
253 return dev_priv->display.fbc_enabled(dev);
254}
255
256static void intel_fbc_work_fn(struct work_struct *__work)
257{
258 struct intel_fbc_work *work =
259 container_of(to_delayed_work(__work),
260 struct intel_fbc_work, work);
261 struct drm_device *dev = work->crtc->dev;
262 struct drm_i915_private *dev_priv = dev->dev_private;
263
264 mutex_lock(&dev->struct_mutex);
265 if (work == dev_priv->fbc_work) {
266 /* Double check that we haven't switched fb without cancelling
267 * the prior work.
268 */
269 if (work->crtc->fb == work->fb) {
270 dev_priv->display.enable_fbc(work->crtc,
271 work->interval);
272
273 dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
274 dev_priv->cfb_fb = work->crtc->fb->base.id;
275 dev_priv->cfb_y = work->crtc->y;
276 }
277
278 dev_priv->fbc_work = NULL;
279 }
280 mutex_unlock(&dev->struct_mutex);
281
282 kfree(work);
283}
284
285static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
286{
287 if (dev_priv->fbc_work == NULL)
288 return;
289
290 DRM_DEBUG_KMS("cancelling pending FBC enable\n");
291
292 /* Synchronisation is provided by struct_mutex and checking of
293 * dev_priv->fbc_work, so we can perform the cancellation
294 * entirely asynchronously.
295 */
296 if (cancel_delayed_work(&dev_priv->fbc_work->work))
297 /* tasklet was killed before being run, clean up */
298 kfree(dev_priv->fbc_work);
299
300 /* Mark the work as no longer wanted so that if it does
301 * wake-up (because the work was already running and waiting
 302 * for our mutex), it will discover that it is no longer
303 * necessary to run.
304 */
305 dev_priv->fbc_work = NULL;
306}
307
308void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
309{
310 struct intel_fbc_work *work;
311 struct drm_device *dev = crtc->dev;
312 struct drm_i915_private *dev_priv = dev->dev_private;
313
314 if (!dev_priv->display.enable_fbc)
315 return;
316
317 intel_cancel_fbc_work(dev_priv);
318
319 work = kzalloc(sizeof *work, GFP_KERNEL);
320 if (work == NULL) {
321 dev_priv->display.enable_fbc(crtc, interval);
322 return;
323 }
324
325 work->crtc = crtc;
326 work->fb = crtc->fb;
327 work->interval = interval;
328 INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
329
330 dev_priv->fbc_work = work;
331
332 DRM_DEBUG_KMS("scheduling delayed FBC enable\n");
333
334 /* Delay the actual enabling to let pageflipping cease and the
335 * display to settle before starting the compression. Note that
336 * this delay also serves a second purpose: it allows for a
337 * vblank to pass after disabling the FBC before we attempt
338 * to modify the control registers.
339 *
340 * A more complicated solution would involve tracking vblanks
341 * following the termination of the page-flipping sequence
342 * and indeed performing the enable as a co-routine and not
343 * waiting synchronously upon the vblank.
344 */
345 schedule_delayed_work(&work->work, msecs_to_jiffies(50));
346}
347
348void intel_disable_fbc(struct drm_device *dev)
349{
350 struct drm_i915_private *dev_priv = dev->dev_private;
351
352 intel_cancel_fbc_work(dev_priv);
353
354 if (!dev_priv->display.disable_fbc)
355 return;
356
357 dev_priv->display.disable_fbc(dev);
358 dev_priv->cfb_plane = -1;
359}
360
361/**
362 * intel_update_fbc - enable/disable FBC as needed
363 * @dev: the drm_device
364 *
365 * Set up the framebuffer compression hardware at mode set time. We
366 * enable it if possible:
367 * - plane A only (on pre-965)
 368 * - no pixel multiply/line duplication
369 * - no alpha buffer discard
370 * - no dual wide
371 * - framebuffer <= 2048 in width, 1536 in height
372 *
373 * We can't assume that any compression will take place (worst case),
374 * so the compressed buffer has to be the same size as the uncompressed
375 * one. It also must reside (along with the line length buffer) in
376 * stolen memory.
377 *
378 * We need to enable/disable FBC on a global basis.
379 */
380void intel_update_fbc(struct drm_device *dev)
381{
382 struct drm_i915_private *dev_priv = dev->dev_private;
383 struct drm_crtc *crtc = NULL, *tmp_crtc;
384 struct intel_crtc *intel_crtc;
385 struct drm_framebuffer *fb;
386 struct intel_framebuffer *intel_fb;
387 struct drm_i915_gem_object *obj;
388 int enable_fbc;
389
390 DRM_DEBUG_KMS("\n");
391
392 if (!i915_powersave)
393 return;
394
395 if (!I915_HAS_FBC(dev))
396 return;
397
398 /*
399 * If FBC is already on, we just have to verify that we can
400 * keep it that way...
401 * Need to disable if:
402 * - more than one pipe is active
403 * - changing FBC params (stride, fence, mode)
404 * - new fb is too large to fit in compressed buffer
405 * - going to an unsupported config (interlace, pixel multiply, etc.)
406 */
407 list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
408 if (tmp_crtc->enabled && tmp_crtc->fb) {
409 if (crtc) {
410 DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
411 dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
412 goto out_disable;
413 }
414 crtc = tmp_crtc;
415 }
416 }
417
418 if (!crtc || crtc->fb == NULL) {
419 DRM_DEBUG_KMS("no output, disabling\n");
420 dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
421 goto out_disable;
422 }
423
424 intel_crtc = to_intel_crtc(crtc);
425 fb = crtc->fb;
426 intel_fb = to_intel_framebuffer(fb);
427 obj = intel_fb->obj;
428
429 enable_fbc = i915_enable_fbc;
430 if (enable_fbc < 0) {
431 DRM_DEBUG_KMS("fbc set to per-chip default\n");
432 enable_fbc = 1;
433 if (INTEL_INFO(dev)->gen <= 6)
434 enable_fbc = 0;
435 }
436 if (!enable_fbc) {
437 DRM_DEBUG_KMS("fbc disabled per module param\n");
438 dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
439 goto out_disable;
440 }
441 if (intel_fb->obj->base.size > dev_priv->cfb_size) {
442 DRM_DEBUG_KMS("framebuffer too large, disabling "
443 "compression\n");
444 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
445 goto out_disable;
446 }
447 if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
448 (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
449 DRM_DEBUG_KMS("mode incompatible with compression, "
450 "disabling\n");
451 dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
452 goto out_disable;
453 }
454 if ((crtc->mode.hdisplay > 2048) ||
455 (crtc->mode.vdisplay > 1536)) {
456 DRM_DEBUG_KMS("mode too large for compression, disabling\n");
457 dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
458 goto out_disable;
459 }
460 if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
461 DRM_DEBUG_KMS("plane not 0, disabling compression\n");
462 dev_priv->no_fbc_reason = FBC_BAD_PLANE;
463 goto out_disable;
464 }
465
466 /* The use of a CPU fence is mandatory in order to detect writes
467 * by the CPU to the scanout and trigger updates to the FBC.
468 */
469 if (obj->tiling_mode != I915_TILING_X ||
470 obj->fence_reg == I915_FENCE_REG_NONE) {
471 DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
472 dev_priv->no_fbc_reason = FBC_NOT_TILED;
473 goto out_disable;
474 }
475
476 /* If the kernel debugger is active, always disable compression */
477 if (in_dbg_master())
478 goto out_disable;
479
480 /* If the scanout has not changed, don't modify the FBC settings.
481 * Note that we make the fundamental assumption that the fb->obj
482 * cannot be unpinned (and have its GTT offset and fence revoked)
483 * without first being decoupled from the scanout and FBC disabled.
484 */
485 if (dev_priv->cfb_plane == intel_crtc->plane &&
486 dev_priv->cfb_fb == fb->base.id &&
487 dev_priv->cfb_y == crtc->y)
488 return;
489
490 if (intel_fbc_enabled(dev)) {
491 /* We update FBC along two paths, after changing fb/crtc
492 * configuration (modeswitching) and after page-flipping
493 * finishes. For the latter, we know that not only did
494 * we disable the FBC at the start of the page-flip
495 * sequence, but also more than one vblank has passed.
496 *
497 * For the former case of modeswitching, it is possible
498 * to switch between two FBC valid configurations
499 * instantaneously so we do need to disable the FBC
500 * before we can modify its control registers. We also
501 * have to wait for the next vblank for that to take
502 * effect. However, since we delay enabling FBC we can
503 * assume that a vblank has passed since disabling and
504 * that we can safely alter the registers in the deferred
505 * callback.
506 *
507 * In the scenario that we go from a valid to invalid
508 * and then back to valid FBC configuration we have
509 * no strict enforcement that a vblank occurred since
510 * disabling the FBC. However, along all current pipe
511 * disabling paths we do need to wait for a vblank at
512 * some point. And we wait before enabling FBC anyway.
513 */
514 DRM_DEBUG_KMS("disabling active FBC for update\n");
515 intel_disable_fbc(dev);
516 }
517
518 intel_enable_fbc(crtc, 500);
519 return;
520
521out_disable:
522 /* Multiple disables should be harmless */
523 if (intel_fbc_enabled(dev)) {
524 DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
525 intel_disable_fbc(dev);
526 }
527}
528
529static void i915_pineview_get_mem_freq(struct drm_device *dev)
530{
531 drm_i915_private_t *dev_priv = dev->dev_private;
532 u32 tmp;
533
534 tmp = I915_READ(CLKCFG);
535
536 switch (tmp & CLKCFG_FSB_MASK) {
537 case CLKCFG_FSB_533:
538 dev_priv->fsb_freq = 533; /* 133*4 */
539 break;
540 case CLKCFG_FSB_800:
541 dev_priv->fsb_freq = 800; /* 200*4 */
542 break;
543 case CLKCFG_FSB_667:
544 dev_priv->fsb_freq = 667; /* 167*4 */
545 break;
546 case CLKCFG_FSB_400:
547 dev_priv->fsb_freq = 400; /* 100*4 */
548 break;
549 }
550
551 switch (tmp & CLKCFG_MEM_MASK) {
552 case CLKCFG_MEM_533:
553 dev_priv->mem_freq = 533;
554 break;
555 case CLKCFG_MEM_667:
556 dev_priv->mem_freq = 667;
557 break;
558 case CLKCFG_MEM_800:
559 dev_priv->mem_freq = 800;
560 break;
561 }
562
563 /* detect pineview DDR3 setting */
564 tmp = I915_READ(CSHRDDR3CTL);
565 dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
566}
567
568static void i915_ironlake_get_mem_freq(struct drm_device *dev)
569{
570 drm_i915_private_t *dev_priv = dev->dev_private;
571 u16 ddrpll, csipll;
572
573 ddrpll = I915_READ16(DDRMPLL1);
574 csipll = I915_READ16(CSIPLL0);
575
576 switch (ddrpll & 0xff) {
577 case 0xc:
578 dev_priv->mem_freq = 800;
579 break;
580 case 0x10:
581 dev_priv->mem_freq = 1066;
582 break;
583 case 0x14:
584 dev_priv->mem_freq = 1333;
585 break;
586 case 0x18:
587 dev_priv->mem_freq = 1600;
588 break;
589 default:
590 DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
591 ddrpll & 0xff);
592 dev_priv->mem_freq = 0;
593 break;
594 }
595
596 dev_priv->r_t = dev_priv->mem_freq;
597
598 switch (csipll & 0x3ff) {
599 case 0x00c:
600 dev_priv->fsb_freq = 3200;
601 break;
602 case 0x00e:
603 dev_priv->fsb_freq = 3733;
604 break;
605 case 0x010:
606 dev_priv->fsb_freq = 4266;
607 break;
608 case 0x012:
609 dev_priv->fsb_freq = 4800;
610 break;
611 case 0x014:
612 dev_priv->fsb_freq = 5333;
613 break;
614 case 0x016:
615 dev_priv->fsb_freq = 5866;
616 break;
617 case 0x018:
618 dev_priv->fsb_freq = 6400;
619 break;
620 default:
621 DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
622 csipll & 0x3ff);
623 dev_priv->fsb_freq = 0;
624 break;
625 }
626
627 if (dev_priv->fsb_freq == 3200) {
628 dev_priv->c_m = 0;
629 } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
630 dev_priv->c_m = 1;
631 } else {
632 dev_priv->c_m = 2;
633 }
634}
635
636static const struct cxsr_latency cxsr_latency_table[] = {
637 {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */
638 {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */
639 {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */
640 {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */
641 {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */
642
643 {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */
644 {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */
645 {1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */
646 {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */
647 {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */
648
649 {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */
650 {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */
651 {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */
652 {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */
653 {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */
654
655 {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */
656 {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */
657 {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */
658 {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */
659 {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */
660
661 {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */
662 {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */
663 {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */
664 {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */
665 {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */
666
667 {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */
668 {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */
669 {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */
670 {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */
671 {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
672};
673
674static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
675 int is_ddr3,
676 int fsb,
677 int mem)
678{
679 const struct cxsr_latency *latency;
680 int i;
681
682 if (fsb == 0 || mem == 0)
683 return NULL;
684
685 for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
686 latency = &cxsr_latency_table[i];
687 if (is_desktop == latency->is_desktop &&
688 is_ddr3 == latency->is_ddr3 &&
689 fsb == latency->fsb_freq && mem == latency->mem_freq)
690 return latency;
691 }
692
693 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
694
695 return NULL;
696}
697
698static void pineview_disable_cxsr(struct drm_device *dev)
699{
700 struct drm_i915_private *dev_priv = dev->dev_private;
701
702 /* deactivate cxsr */
703 I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
704}
705
706/*
707 * Latency for FIFO fetches is dependent on several factors:
708 * - memory configuration (speed, channels)
709 * - chipset
710 * - current MCH state
711 * It can be fairly high in some situations, so here we assume a fairly
712 * pessimal value. It's a tradeoff between extra memory fetches (if we
713 * set this value too high, the FIFO will fetch frequently to stay full)
714 * and power consumption (set it too low to save power and we might see
715 * FIFO underruns and display "flicker").
716 *
717 * A value of 5us seems to be a good balance; safe for very low end
718 * platforms but not overly aggressive on lower latency configs.
719 */
720static const int latency_ns = 5000;
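/* Illustrative scale for the 5us choice (assumed figures, not from the
 * code): a 300 MHz dot clock at 4 bytes/pixel drains 1.2 GB/s, so a
 * 5000 ns memory stall consumes about 6000 bytes, i.e. roughly 94
 * 64-byte FIFO lines that must already be buffered to avoid underrun. */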
721
722static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
723{
724 struct drm_i915_private *dev_priv = dev->dev_private;
725 uint32_t dsparb = I915_READ(DSPARB);
726 int size;
727
728 size = dsparb & 0x7f;
729 if (plane)
730 size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
731
732 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
733 plane ? "B" : "A", size);
734
735 return size;
736}
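/* Hypothetical DSPARB decode for the function above: with DSPARB =
 * 0x00001820, plane A ends at entry 0x20 (size 32) and, if the cursor
 * start field reads 0x30, plane B gets 0x30 - 0x20 = 16 entries. The
 * actual field positions follow from the 0x7f masks and
 * DSPARB_CSTART_SHIFT used above. */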
737
738static int i85x_get_fifo_size(struct drm_device *dev, int plane)
739{
740 struct drm_i915_private *dev_priv = dev->dev_private;
741 uint32_t dsparb = I915_READ(DSPARB);
742 int size;
743
744 size = dsparb & 0x1ff;
745 if (plane)
746 size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
747 size >>= 1; /* Convert to cachelines */
748
749 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
750 plane ? "B" : "A", size);
751
752 return size;
753}
754
755static int i845_get_fifo_size(struct drm_device *dev, int plane)
756{
757 struct drm_i915_private *dev_priv = dev->dev_private;
758 uint32_t dsparb = I915_READ(DSPARB);
759 int size;
760
761 size = dsparb & 0x7f;
762 size >>= 2; /* Convert to cachelines */
763
764 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
765 plane ? "B" : "A",
766 size);
767
768 return size;
769}
770
771static int i830_get_fifo_size(struct drm_device *dev, int plane)
772{
773 struct drm_i915_private *dev_priv = dev->dev_private;
774 uint32_t dsparb = I915_READ(DSPARB);
775 int size;
776
777 size = dsparb & 0x7f;
778 size >>= 1; /* Convert to cachelines */
779
780 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
781 plane ? "B" : "A", size);
782
783 return size;
784}
785
786/* Pineview has different values for various configs */
787static const struct intel_watermark_params pineview_display_wm = {
788 PINEVIEW_DISPLAY_FIFO,
789 PINEVIEW_MAX_WM,
790 PINEVIEW_DFT_WM,
791 PINEVIEW_GUARD_WM,
792 PINEVIEW_FIFO_LINE_SIZE
793};
794static const struct intel_watermark_params pineview_display_hplloff_wm = {
795 PINEVIEW_DISPLAY_FIFO,
796 PINEVIEW_MAX_WM,
797 PINEVIEW_DFT_HPLLOFF_WM,
798 PINEVIEW_GUARD_WM,
799 PINEVIEW_FIFO_LINE_SIZE
800};
801static const struct intel_watermark_params pineview_cursor_wm = {
802 PINEVIEW_CURSOR_FIFO,
803 PINEVIEW_CURSOR_MAX_WM,
804 PINEVIEW_CURSOR_DFT_WM,
805 PINEVIEW_CURSOR_GUARD_WM,
806 PINEVIEW_FIFO_LINE_SIZE,
807};
808static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
809 PINEVIEW_CURSOR_FIFO,
810 PINEVIEW_CURSOR_MAX_WM,
811 PINEVIEW_CURSOR_DFT_WM,
812 PINEVIEW_CURSOR_GUARD_WM,
813 PINEVIEW_FIFO_LINE_SIZE
814};
815static const struct intel_watermark_params g4x_wm_info = {
816 G4X_FIFO_SIZE,
817 G4X_MAX_WM,
818 G4X_MAX_WM,
819 2,
820 G4X_FIFO_LINE_SIZE,
821};
822static const struct intel_watermark_params g4x_cursor_wm_info = {
823 I965_CURSOR_FIFO,
824 I965_CURSOR_MAX_WM,
825 I965_CURSOR_DFT_WM,
826 2,
827 G4X_FIFO_LINE_SIZE,
828};
829static const struct intel_watermark_params valleyview_wm_info = {
830 VALLEYVIEW_FIFO_SIZE,
831 VALLEYVIEW_MAX_WM,
832 VALLEYVIEW_MAX_WM,
833 2,
834 G4X_FIFO_LINE_SIZE,
835};
836static const struct intel_watermark_params valleyview_cursor_wm_info = {
837 I965_CURSOR_FIFO,
838 VALLEYVIEW_CURSOR_MAX_WM,
839 I965_CURSOR_DFT_WM,
840 2,
841 G4X_FIFO_LINE_SIZE,
842};
843static const struct intel_watermark_params i965_cursor_wm_info = {
844 I965_CURSOR_FIFO,
845 I965_CURSOR_MAX_WM,
846 I965_CURSOR_DFT_WM,
847 2,
848 I915_FIFO_LINE_SIZE,
849};
850static const struct intel_watermark_params i945_wm_info = {
851 I945_FIFO_SIZE,
852 I915_MAX_WM,
853 1,
854 2,
855 I915_FIFO_LINE_SIZE
856};
857static const struct intel_watermark_params i915_wm_info = {
858 I915_FIFO_SIZE,
859 I915_MAX_WM,
860 1,
861 2,
862 I915_FIFO_LINE_SIZE
863};
864static const struct intel_watermark_params i855_wm_info = {
865 I855GM_FIFO_SIZE,
866 I915_MAX_WM,
867 1,
868 2,
869 I830_FIFO_LINE_SIZE
870};
871static const struct intel_watermark_params i830_wm_info = {
872 I830_FIFO_SIZE,
873 I915_MAX_WM,
874 1,
875 2,
876 I830_FIFO_LINE_SIZE
877};
878
879static const struct intel_watermark_params ironlake_display_wm_info = {
880 ILK_DISPLAY_FIFO,
881 ILK_DISPLAY_MAXWM,
882 ILK_DISPLAY_DFTWM,
883 2,
884 ILK_FIFO_LINE_SIZE
885};
886static const struct intel_watermark_params ironlake_cursor_wm_info = {
887 ILK_CURSOR_FIFO,
888 ILK_CURSOR_MAXWM,
889 ILK_CURSOR_DFTWM,
890 2,
891 ILK_FIFO_LINE_SIZE
892};
893static const struct intel_watermark_params ironlake_display_srwm_info = {
894 ILK_DISPLAY_SR_FIFO,
895 ILK_DISPLAY_MAX_SRWM,
896 ILK_DISPLAY_DFT_SRWM,
897 2,
898 ILK_FIFO_LINE_SIZE
899};
900static const struct intel_watermark_params ironlake_cursor_srwm_info = {
901 ILK_CURSOR_SR_FIFO,
902 ILK_CURSOR_MAX_SRWM,
903 ILK_CURSOR_DFT_SRWM,
904 2,
905 ILK_FIFO_LINE_SIZE
906};
907
908static const struct intel_watermark_params sandybridge_display_wm_info = {
909 SNB_DISPLAY_FIFO,
910 SNB_DISPLAY_MAXWM,
911 SNB_DISPLAY_DFTWM,
912 2,
913 SNB_FIFO_LINE_SIZE
914};
915static const struct intel_watermark_params sandybridge_cursor_wm_info = {
916 SNB_CURSOR_FIFO,
917 SNB_CURSOR_MAXWM,
918 SNB_CURSOR_DFTWM,
919 2,
920 SNB_FIFO_LINE_SIZE
921};
922static const struct intel_watermark_params sandybridge_display_srwm_info = {
923 SNB_DISPLAY_SR_FIFO,
924 SNB_DISPLAY_MAX_SRWM,
925 SNB_DISPLAY_DFT_SRWM,
926 2,
927 SNB_FIFO_LINE_SIZE
928};
929static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
930 SNB_CURSOR_SR_FIFO,
931 SNB_CURSOR_MAX_SRWM,
932 SNB_CURSOR_DFT_SRWM,
933 2,
934 SNB_FIFO_LINE_SIZE
935};
936
937
938/**
939 * intel_calculate_wm - calculate watermark level
940 * @clock_in_khz: pixel clock
941 * @wm: chip FIFO params
942 * @pixel_size: display pixel size
943 * @latency_ns: memory latency for the platform
944 *
945 * Calculate the watermark level (the level at which the display plane will
946 * start fetching from memory again). Each chip has a different display
947 * FIFO size and allocation, so the caller needs to figure that out and pass
948 * in the correct intel_watermark_params structure.
949 *
950 * As the pixel clock runs, the FIFO will be drained at a rate that depends
951 * on the pixel size. When it reaches the watermark level, it'll start
 952 * fetching FIFO line sized chunks from memory until the FIFO fills
953 * past the watermark point. If the FIFO drains completely, a FIFO underrun
954 * will occur, and a display engine hang could result.
955 */
956static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
957 const struct intel_watermark_params *wm,
958 int fifo_size,
959 int pixel_size,
960 unsigned long latency_ns)
961{
962 long entries_required, wm_size;
963
964 /*
965 * Note: we need to make sure we don't overflow for various clock &
966 * latency values.
967 * clocks go from a few thousand to several hundred thousand.
968 * latency is usually a few thousand
969 */
970 entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
971 1000;
972 entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);
973
974 DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);
975
976 wm_size = fifo_size - (entries_required + wm->guard_size);
977
978 DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);
979
980 /* Don't promote wm_size to unsigned... */
981 if (wm_size > (long)wm->max_wm)
982 wm_size = wm->max_wm;
983 if (wm_size <= 0)
984 wm_size = wm->default_wm;
985 return wm_size;
986}
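/* Worked example (illustrative numbers): clock_in_khz = 108000,
 * pixel_size = 4 and latency_ns = 5000 give
 * entries_required = (108 * 4 * 5000) / 1000 = 2160 bytes, or
 * DIV_ROUND_UP(2160, 64) = 34 entries with a 64-byte cacheline; for a
 * hypothetical 96-entry FIFO with guard_size 2 the returned watermark
 * is 96 - (34 + 2) = 60. */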
987
988static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
989{
990 struct drm_crtc *crtc, *enabled = NULL;
991
992 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
993 if (crtc->enabled && crtc->fb) {
994 if (enabled)
995 return NULL;
996 enabled = crtc;
997 }
998 }
999
1000 return enabled;
1001}
1002
1003static void pineview_update_wm(struct drm_device *dev)
1004{
1005 struct drm_i915_private *dev_priv = dev->dev_private;
1006 struct drm_crtc *crtc;
1007 const struct cxsr_latency *latency;
1008 u32 reg;
1009 unsigned long wm;
1010
1011 latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
1012 dev_priv->fsb_freq, dev_priv->mem_freq);
1013 if (!latency) {
1014 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
1015 pineview_disable_cxsr(dev);
1016 return;
1017 }
1018
1019 crtc = single_enabled_crtc(dev);
1020 if (crtc) {
1021 int clock = crtc->mode.clock;
1022 int pixel_size = crtc->fb->bits_per_pixel / 8;
1023
1024 /* Display SR */
1025 wm = intel_calculate_wm(clock, &pineview_display_wm,
1026 pineview_display_wm.fifo_size,
1027 pixel_size, latency->display_sr);
1028 reg = I915_READ(DSPFW1);
1029 reg &= ~DSPFW_SR_MASK;
1030 reg |= wm << DSPFW_SR_SHIFT;
1031 I915_WRITE(DSPFW1, reg);
1032 DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
1033
1034 /* cursor SR */
1035 wm = intel_calculate_wm(clock, &pineview_cursor_wm,
1036 pineview_display_wm.fifo_size,
1037 pixel_size, latency->cursor_sr);
1038 reg = I915_READ(DSPFW3);
1039 reg &= ~DSPFW_CURSOR_SR_MASK;
1040 reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
1041 I915_WRITE(DSPFW3, reg);
1042
1043 /* Display HPLL off SR */
1044 wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
1045 pineview_display_hplloff_wm.fifo_size,
1046 pixel_size, latency->display_hpll_disable);
1047 reg = I915_READ(DSPFW3);
1048 reg &= ~DSPFW_HPLL_SR_MASK;
1049 reg |= wm & DSPFW_HPLL_SR_MASK;
1050 I915_WRITE(DSPFW3, reg);
1051
1052 /* cursor HPLL off SR */
1053 wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
1054 pineview_display_hplloff_wm.fifo_size,
1055 pixel_size, latency->cursor_hpll_disable);
1056 reg = I915_READ(DSPFW3);
1057 reg &= ~DSPFW_HPLL_CURSOR_MASK;
1058 reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
1059 I915_WRITE(DSPFW3, reg);
1060 DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
1061
1062 /* activate cxsr */
1063 I915_WRITE(DSPFW3,
1064 I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
1065 DRM_DEBUG_KMS("Self-refresh is enabled\n");
1066 } else {
1067 pineview_disable_cxsr(dev);
1068 DRM_DEBUG_KMS("Self-refresh is disabled\n");
1069 }
1070}
1071
1072static bool g4x_compute_wm0(struct drm_device *dev,
1073 int plane,
1074 const struct intel_watermark_params *display,
1075 int display_latency_ns,
1076 const struct intel_watermark_params *cursor,
1077 int cursor_latency_ns,
1078 int *plane_wm,
1079 int *cursor_wm)
1080{
1081 struct drm_crtc *crtc;
1082 int htotal, hdisplay, clock, pixel_size;
1083 int line_time_us, line_count;
1084 int entries, tlb_miss;
1085
1086 crtc = intel_get_crtc_for_plane(dev, plane);
1087 if (crtc->fb == NULL || !crtc->enabled) {
1088 *cursor_wm = cursor->guard_size;
1089 *plane_wm = display->guard_size;
1090 return false;
1091 }
1092
1093 htotal = crtc->mode.htotal;
1094 hdisplay = crtc->mode.hdisplay;
1095 clock = crtc->mode.clock;
1096 pixel_size = crtc->fb->bits_per_pixel / 8;
1097
1098 /* Use the small buffer method to calculate plane watermark */
1099 entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
1100 tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
1101 if (tlb_miss > 0)
1102 entries += tlb_miss;
1103 entries = DIV_ROUND_UP(entries, display->cacheline_size);
1104 *plane_wm = entries + display->guard_size;
1105 if (*plane_wm > (int)display->max_wm)
1106 *plane_wm = display->max_wm;
1107
1108 /* Use the large buffer method to calculate cursor watermark */
1109 line_time_us = ((htotal * 1000) / clock);
1110 line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
1111 entries = line_count * 64 * pixel_size;
1112 tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
1113 if (tlb_miss > 0)
1114 entries += tlb_miss;
1115 entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
1116 *cursor_wm = entries + cursor->guard_size;
1117 if (*cursor_wm > (int)cursor->max_wm)
1118 *cursor_wm = (int)cursor->max_wm;
1119
1120 return true;
1121}
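/* Illustrative run of the small-buffer plane path above: a 100000 kHz
 * clock, 4 bytes/pixel and a 700 ns latency yield
 * entries = ((100000 * 4 / 1000) * 700) / 1000 = 280 bytes before the
 * TLB-miss adjustment; assuming a 64-byte cacheline that rounds up to 5
 * entries, so *plane_wm = 5 + guard_size. */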
1122
1123/*
1124 * Check the wm result.
1125 *
1126 * If any calculated watermark values is larger than the maximum value that
1127 * can be programmed into the associated watermark register, that watermark
1128 * must be disabled.
1129 */
1130static bool g4x_check_srwm(struct drm_device *dev,
1131 int display_wm, int cursor_wm,
1132 const struct intel_watermark_params *display,
1133 const struct intel_watermark_params *cursor)
1134{
1135 DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
1136 display_wm, cursor_wm);
1137
1138 if (display_wm > display->max_wm) {
1139 DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
1140 display_wm, display->max_wm);
1141 return false;
1142 }
1143
1144 if (cursor_wm > cursor->max_wm) {
1145 DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
1146 cursor_wm, cursor->max_wm);
1147 return false;
1148 }
1149
1150 if (!(display_wm || cursor_wm)) {
1151 DRM_DEBUG_KMS("SR latency is 0, disabling\n");
1152 return false;
1153 }
1154
1155 return true;
1156}
1157
1158static bool g4x_compute_srwm(struct drm_device *dev,
1159 int plane,
1160 int latency_ns,
1161 const struct intel_watermark_params *display,
1162 const struct intel_watermark_params *cursor,
1163 int *display_wm, int *cursor_wm)
1164{
1165 struct drm_crtc *crtc;
1166 int hdisplay, htotal, pixel_size, clock;
1167 unsigned long line_time_us;
1168 int line_count, line_size;
1169 int small, large;
1170 int entries;
1171
1172 if (!latency_ns) {
1173 *display_wm = *cursor_wm = 0;
1174 return false;
1175 }
1176
1177 crtc = intel_get_crtc_for_plane(dev, plane);
1178 hdisplay = crtc->mode.hdisplay;
1179 htotal = crtc->mode.htotal;
1180 clock = crtc->mode.clock;
1181 pixel_size = crtc->fb->bits_per_pixel / 8;
1182
1183 line_time_us = (htotal * 1000) / clock;
1184 line_count = (latency_ns / line_time_us + 1000) / 1000;
1185 line_size = hdisplay * pixel_size;
1186
1187 /* Use the minimum of the small and large buffer method for primary */
1188 small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
1189 large = line_count * line_size;
1190
1191 entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
1192 *display_wm = entries + display->guard_size;
1193
1194 /* calculate the self-refresh watermark for display cursor */
1195 entries = line_count * pixel_size * 64;
1196 entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
1197 *cursor_wm = entries + cursor->guard_size;
1198
1199 return g4x_check_srwm(dev,
1200 *display_wm, *cursor_wm,
1201 display, cursor);
1202}
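/* Rough numbers for the large-buffer path above (illustrative): htotal =
 * 2200 at clock = 148500 kHz gives line_time_us = (2200 * 1000) / 148500
 * = 14 in integer math; with latency_ns = 12000 that makes
 * line_count = (12000 / 14 + 1000) / 1000 = 1, so the self-refresh
 * watermark covers about one scanline plus the guard. */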
1203
1204static bool vlv_compute_drain_latency(struct drm_device *dev,
1205 int plane,
1206 int *plane_prec_mult,
1207 int *plane_dl,
1208 int *cursor_prec_mult,
1209 int *cursor_dl)
1210{
1211 struct drm_crtc *crtc;
1212 int clock, pixel_size;
1213 int entries;
1214
1215 crtc = intel_get_crtc_for_plane(dev, plane);
1216 if (crtc->fb == NULL || !crtc->enabled)
1217 return false;
1218
1219 clock = crtc->mode.clock; /* VESA DOT Clock */
1220 pixel_size = crtc->fb->bits_per_pixel / 8; /* BPP */
1221
1222 entries = (clock / 1000) * pixel_size;
1223 *plane_prec_mult = (entries > 256) ?
1224 DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
1225 *plane_dl = (64 * (*plane_prec_mult) * 4) / ((clock / 1000) *
1226 pixel_size);
1227
1228 entries = (clock / 1000) * 4; /* BPP is always 4 for cursor */
1229 *cursor_prec_mult = (entries > 256) ?
1230 DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
1231 *cursor_dl = (64 * (*cursor_prec_mult) * 4) / ((clock / 1000) * 4);
1232
1233 return true;
1234}
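/* Illustrative drain-latency computation from the code above: a 148500 kHz
 * dot clock at 4 bytes/pixel gives entries = 148 * 4 = 592 > 256, so the
 * 32x precision multiplier is selected and
 * *plane_dl = (64 * 32 * 4) / (148 * 4) = 13 in integer math. */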
1235
1236/*
1237 * Update drain latency registers of memory arbiter
1238 *
1239 * Valleyview SoC has a new memory arbiter and needs drain latency registers
1240 * to be programmed. Each plane has a drain latency multiplier and a drain
1241 * latency value.
1242 */
1243
1244static void vlv_update_drain_latency(struct drm_device *dev)
1245{
1246 struct drm_i915_private *dev_priv = dev->dev_private;
1247 int planea_prec, planea_dl, planeb_prec, planeb_dl;
1248 int cursora_prec, cursora_dl, cursorb_prec, cursorb_dl;
1249 int plane_prec_mult, cursor_prec_mult; /* Precision multiplier is
1250 either 16 or 32 */
1251
1252 /* For plane A, Cursor A */
1253 if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl,
1254 &cursor_prec_mult, &cursora_dl)) {
1255 cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
1256 DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_16;
1257 planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
1258 DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_16;
1259
1260 I915_WRITE(VLV_DDL1, cursora_prec |
1261 (cursora_dl << DDL_CURSORA_SHIFT) |
1262 planea_prec | planea_dl);
1263 }
1264
1265 /* For plane B, Cursor B */
1266 if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl,
1267 &cursor_prec_mult, &cursorb_dl)) {
1268 cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
1269 DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_16;
1270 planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
1271 DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_16;
1272
1273 I915_WRITE(VLV_DDL2, cursorb_prec |
1274 (cursorb_dl << DDL_CURSORB_SHIFT) |
1275 planeb_prec | planeb_dl);
1276 }
1277}
1278
1279#define single_plane_enabled(mask) is_power_of_2(mask)
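/* is_power_of_2() is true exactly when a single bit is set, so this tests
 * that exactly one pipe contributed a valid watermark. */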
1280
1281static void valleyview_update_wm(struct drm_device *dev)
1282{
1283 static const int sr_latency_ns = 12000;
1284 struct drm_i915_private *dev_priv = dev->dev_private;
1285 int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
1286 int plane_sr, cursor_sr;
1287 unsigned int enabled = 0;
1288
1289 vlv_update_drain_latency(dev);
1290
1291 if (g4x_compute_wm0(dev, 0,
1292 &valleyview_wm_info, latency_ns,
1293 &valleyview_cursor_wm_info, latency_ns,
1294 &planea_wm, &cursora_wm))
1295 enabled |= 1;
1296
1297 if (g4x_compute_wm0(dev, 1,
1298 &valleyview_wm_info, latency_ns,
1299 &valleyview_cursor_wm_info, latency_ns,
1300 &planeb_wm, &cursorb_wm))
1301 enabled |= 2;
1302
1303 plane_sr = cursor_sr = 0;
1304 if (single_plane_enabled(enabled) &&
1305 g4x_compute_srwm(dev, ffs(enabled) - 1,
1306 sr_latency_ns,
1307 &valleyview_wm_info,
1308 &valleyview_cursor_wm_info,
1309 &plane_sr, &cursor_sr))
1310 I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
1311 else
1312 I915_WRITE(FW_BLC_SELF_VLV,
1313 I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);
1314
1315 DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
1316 planea_wm, cursora_wm,
1317 planeb_wm, cursorb_wm,
1318 plane_sr, cursor_sr);
1319
1320 I915_WRITE(DSPFW1,
1321 (plane_sr << DSPFW_SR_SHIFT) |
1322 (cursorb_wm << DSPFW_CURSORB_SHIFT) |
1323 (planeb_wm << DSPFW_PLANEB_SHIFT) |
1324 planea_wm);
1325 I915_WRITE(DSPFW2,
1326 (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
1327 (cursora_wm << DSPFW_CURSORA_SHIFT));
1328 I915_WRITE(DSPFW3,
1329 (I915_READ(DSPFW3) | (cursor_sr << DSPFW_CURSOR_SR_SHIFT)));
1330}
1331
1332static void g4x_update_wm(struct drm_device *dev)
1333{
1334 static const int sr_latency_ns = 12000;
1335 struct drm_i915_private *dev_priv = dev->dev_private;
1336 int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
1337 int plane_sr, cursor_sr;
1338 unsigned int enabled = 0;
1339
1340 if (g4x_compute_wm0(dev, 0,
1341 &g4x_wm_info, latency_ns,
1342 &g4x_cursor_wm_info, latency_ns,
1343 &planea_wm, &cursora_wm))
1344 enabled |= 1;
1345
1346 if (g4x_compute_wm0(dev, 1,
1347 &g4x_wm_info, latency_ns,
1348 &g4x_cursor_wm_info, latency_ns,
1349 &planeb_wm, &cursorb_wm))
1350 enabled |= 2;
1351
1352 plane_sr = cursor_sr = 0;
1353 if (single_plane_enabled(enabled) &&
1354 g4x_compute_srwm(dev, ffs(enabled) - 1,
1355 sr_latency_ns,
1356 &g4x_wm_info,
1357 &g4x_cursor_wm_info,
1358 &plane_sr, &cursor_sr))
1359 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
1360 else
1361 I915_WRITE(FW_BLC_SELF,
1362 I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
1363
1364 DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
1365 planea_wm, cursora_wm,
1366 planeb_wm, cursorb_wm,
1367 plane_sr, cursor_sr);
1368
1369 I915_WRITE(DSPFW1,
1370 (plane_sr << DSPFW_SR_SHIFT) |
1371 (cursorb_wm << DSPFW_CURSORB_SHIFT) |
1372 (planeb_wm << DSPFW_PLANEB_SHIFT) |
1373 planea_wm);
1374 I915_WRITE(DSPFW2,
1375 (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
1376 (cursora_wm << DSPFW_CURSORA_SHIFT));
1377 /* HPLL off in SR has some issues on G4x... disable it */
1378 I915_WRITE(DSPFW3,
1379 (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
1380 (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1381}
1382
1383static void i965_update_wm(struct drm_device *dev)
1384{
1385 struct drm_i915_private *dev_priv = dev->dev_private;
1386 struct drm_crtc *crtc;
1387 int srwm = 1;
1388 int cursor_sr = 16;
1389
1390 /* Calc sr entries for one plane configs */
1391 crtc = single_enabled_crtc(dev);
1392 if (crtc) {
1393 /* self-refresh has much higher latency */
1394 static const int sr_latency_ns = 12000;
1395 int clock = crtc->mode.clock;
1396 int htotal = crtc->mode.htotal;
1397 int hdisplay = crtc->mode.hdisplay;
1398 int pixel_size = crtc->fb->bits_per_pixel / 8;
1399 unsigned long line_time_us;
1400 int entries;
1401
1402 line_time_us = ((htotal * 1000) / clock);
1403
1404 /* Use ns/us then divide to preserve precision */
1405 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
1406 pixel_size * hdisplay;
1407 entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
1408 srwm = I965_FIFO_SIZE - entries;
1409 if (srwm < 0)
1410 srwm = 1;
1411 srwm &= 0x1ff;
1412 DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
1413 entries, srwm);
1414
1415 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
1416 pixel_size * 64;
1417 entries = DIV_ROUND_UP(entries,
1418 i965_cursor_wm_info.cacheline_size);
1419 cursor_sr = i965_cursor_wm_info.fifo_size -
1420 (entries + i965_cursor_wm_info.guard_size);
1421
1422 if (cursor_sr > i965_cursor_wm_info.max_wm)
1423 cursor_sr = i965_cursor_wm_info.max_wm;
1424
1425 DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
1426 "cursor %d\n", srwm, cursor_sr);
1427
1428 if (IS_CRESTLINE(dev))
1429 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
1430 } else {
1431 /* Turn off self refresh if both pipes are enabled */
1432 if (IS_CRESTLINE(dev))
1433 I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
1434 & ~FW_BLC_SELF_EN);
1435 }
1436
1437 DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
1438 srwm);
1439
1440 /* 965 has limitations... */
1441 I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
1442 (8 << 16) | (8 << 8) | (8 << 0));
1443 I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
1444 /* update cursor SR watermark */
1445 I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1446}
1447
1448static void i9xx_update_wm(struct drm_device *dev)
1449{
1450 struct drm_i915_private *dev_priv = dev->dev_private;
1451 const struct intel_watermark_params *wm_info;
1452 uint32_t fwater_lo;
1453 uint32_t fwater_hi;
1454 int cwm, srwm = 1;
1455 int fifo_size;
1456 int planea_wm, planeb_wm;
1457 struct drm_crtc *crtc, *enabled = NULL;
1458
1459 if (IS_I945GM(dev))
1460 wm_info = &i945_wm_info;
1461 else if (!IS_GEN2(dev))
1462 wm_info = &i915_wm_info;
1463 else
1464 wm_info = &i855_wm_info;
1465
1466 fifo_size = dev_priv->display.get_fifo_size(dev, 0);
1467 crtc = intel_get_crtc_for_plane(dev, 0);
1468 if (crtc->enabled && crtc->fb) {
1469 planea_wm = intel_calculate_wm(crtc->mode.clock,
1470 wm_info, fifo_size,
1471 crtc->fb->bits_per_pixel / 8,
1472 latency_ns);
1473 enabled = crtc;
1474 } else
1475 planea_wm = fifo_size - wm_info->guard_size;
1476
1477 fifo_size = dev_priv->display.get_fifo_size(dev, 1);
1478 crtc = intel_get_crtc_for_plane(dev, 1);
1479 if (crtc->enabled && crtc->fb) {
1480 planeb_wm = intel_calculate_wm(crtc->mode.clock,
1481 wm_info, fifo_size,
1482 crtc->fb->bits_per_pixel / 8,
1483 latency_ns);
1484 if (enabled == NULL)
1485 enabled = crtc;
1486 else
1487 enabled = NULL; /* both planes active: no single-plane SR */
1488 } else
1489 planeb_wm = fifo_size - wm_info->guard_size;
1490
1491 DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
1492
1493 /*
1494 * Overlay gets an aggressive default since video jitter is bad.
1495 */
1496 cwm = 2;
1497
1498 /* Play safe and disable self-refresh before adjusting watermarks. */
1499 if (IS_I945G(dev) || IS_I945GM(dev))
1500 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
1501 else if (IS_I915GM(dev))
1502 I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
1503
1504 /* Calc sr entries for one plane configs */
1505 if (HAS_FW_BLC(dev) && enabled) {
1506 /* self-refresh has much higher latency */
1507 static const int sr_latency_ns = 6000;
1508 int clock = enabled->mode.clock;
1509 int htotal = enabled->mode.htotal;
1510 int hdisplay = enabled->mode.hdisplay;
1511 int pixel_size = enabled->fb->bits_per_pixel / 8;
1512 unsigned long line_time_us;
1513 int entries;
1514
1515 line_time_us = (htotal * 1000) / clock;
1516
1517 /* Use ns/us then divide to preserve precision */
1518 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
1519 pixel_size * hdisplay;
1520 entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
1521 DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
1522 srwm = wm_info->fifo_size - entries;
1523 if (srwm < 0)
1524 srwm = 1;
1525
1526 if (IS_I945G(dev) || IS_I945GM(dev))
1527 I915_WRITE(FW_BLC_SELF,
1528 FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
1529 else if (IS_I915GM(dev))
1530 I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
1531 }
1532
1533 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
1534 planea_wm, planeb_wm, cwm, srwm);
1535
1536 fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
1537 fwater_hi = (cwm & 0x1f);
1538
1539 /* Set request length to 8 cachelines per fetch */
1540 fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
1541 fwater_hi = fwater_hi | (1 << 8);
1542
1543 I915_WRITE(FW_BLC, fwater_lo);
1544 I915_WRITE(FW_BLC2, fwater_hi);
1545
1546 if (HAS_FW_BLC(dev)) {
1547 if (enabled) {
1548 if (IS_I945G(dev) || IS_I945GM(dev))
1549 I915_WRITE(FW_BLC_SELF,
1550 FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
1551 else if (IS_I915GM(dev))
1552 I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
1553 DRM_DEBUG_KMS("memory self refresh enabled\n");
1554 } else
1555 DRM_DEBUG_KMS("memory self refresh disabled\n");
1556 }
1557}
1558
1559static void i830_update_wm(struct drm_device *dev)
1560{
1561 struct drm_i915_private *dev_priv = dev->dev_private;
1562 struct drm_crtc *crtc;
1563 uint32_t fwater_lo;
1564 int planea_wm;
1565
1566 crtc = single_enabled_crtc(dev);
1567 if (crtc == NULL)
1568 return;
1569
1570 planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
1571 dev_priv->display.get_fifo_size(dev, 0),
1572 crtc->fb->bits_per_pixel / 8,
1573 latency_ns);
1574 fwater_lo = I915_READ(FW_BLC) & ~0xfff;
1575 fwater_lo |= (3<<8) | planea_wm;
1576
1577 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
1578
1579 I915_WRITE(FW_BLC, fwater_lo);
1580}
1581
1582#define ILK_LP0_PLANE_LATENCY 700
1583#define ILK_LP0_CURSOR_LATENCY 1300
1584
1585/*
1586 * Check the wm result.
1587 *
1588 * If any calculated watermark value is larger than the maximum value that
1589 * can be programmed into the associated watermark register, that watermark
1590 * must be disabled.
1591 */
1592static bool ironlake_check_srwm(struct drm_device *dev, int level,
1593 int fbc_wm, int display_wm, int cursor_wm,
1594 const struct intel_watermark_params *display,
1595 const struct intel_watermark_params *cursor)
1596{
1597 struct drm_i915_private *dev_priv = dev->dev_private;
1598
1599 DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
1600 " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
1601
1602 if (fbc_wm > SNB_FBC_MAX_SRWM) {
1603 DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
1604 fbc_wm, SNB_FBC_MAX_SRWM, level);
1605
1606 /* FBC has its own way to disable the FBC watermark */
1607 I915_WRITE(DISP_ARB_CTL,
1608 I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
1609 return false;
1610 }
1611
1612 if (display_wm > display->max_wm) {
1613 DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
1614 display_wm, (int)display->max_wm, level);
1615 return false;
1616 }
1617
1618 if (cursor_wm > cursor->max_wm) {
1619 DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
1620 cursor_wm, (int)cursor->max_wm, level);
1621 return false;
1622 }
1623
1624 if (!(fbc_wm || display_wm || cursor_wm)) {
1625 DRM_DEBUG_KMS("latency for wm%d is 0, disabling wm%d+\n", level, level);
1626 return false;
1627 }
1628
1629 return true;
1630}
1631
1632/*
1633 * Compute the watermark values for levels WM1-WM3.
1634 */
1635static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
1636 int latency_ns,
1637 const struct intel_watermark_params *display,
1638 const struct intel_watermark_params *cursor,
1639 int *fbc_wm, int *display_wm, int *cursor_wm)
1640{
1641 struct drm_crtc *crtc;
1642 unsigned long line_time_us;
1643 int hdisplay, htotal, pixel_size, clock;
1644 int line_count, line_size;
1645 int small, large;
1646 int entries;
1647
1648 if (!latency_ns) {
1649 *fbc_wm = *display_wm = *cursor_wm = 0;
1650 return false;
1651 }
1652
1653 crtc = intel_get_crtc_for_plane(dev, plane);
1654 hdisplay = crtc->mode.hdisplay;
1655 htotal = crtc->mode.htotal;
1656 clock = crtc->mode.clock;
1657 pixel_size = crtc->fb->bits_per_pixel / 8;
1658
1659 line_time_us = (htotal * 1000) / clock;
1660 line_count = (latency_ns / line_time_us + 1000) / 1000;
1661 line_size = hdisplay * pixel_size;
1662
1663 /* Use the minimum of the small and large buffer method for primary */
1664 small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
1665 large = line_count * line_size;
1666
1667 entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
1668 *display_wm = entries + display->guard_size;
1669
1670 /*
1671 * Spec says:
1672 * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
1673 */
1674 *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
1675
1676 /* calculate the self-refresh watermark for display cursor */
1677 entries = line_count * pixel_size * 64;
1678 entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
1679 *cursor_wm = entries + cursor->guard_size;
1680
1681 return ironlake_check_srwm(dev, level,
1682 *fbc_wm, *display_wm, *cursor_wm,
1683 display, cursor);
1684}
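/*
 * Editor's worked example of the small/large buffer method above, with
 * assumed numbers (example_srwm_bytes() is hypothetical, not driver
 * code): 1920x1080 at a 148500 kHz dot clock with htotal 2200, 4 bytes
 * per pixel and 500 ns latency gives line_time_us = 2200000 / 148500
 * = 14, line_count = 1, small = 594 * 500 / 1000 = 297 bytes and
 * large = 1 * 1920 * 4 = 7680 bytes, so the small method wins.
 */
static inline int example_srwm_bytes(int clock, int htotal, int hdisplay,
				     int pixel_size, int latency_ns)
{
	int line_time_us = (htotal * 1000) / clock;
	int line_count = (latency_ns / line_time_us + 1000) / 1000;
	int small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	int large = line_count * hdisplay * pixel_size;

	return min(small, large);	/* bytes, before cacheline rounding */
}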
1685
1686static void ironlake_update_wm(struct drm_device *dev)
1687{
1688 struct drm_i915_private *dev_priv = dev->dev_private;
1689 int fbc_wm, plane_wm, cursor_wm;
1690 unsigned int enabled;
1691
1692 enabled = 0;
1693 if (g4x_compute_wm0(dev, 0,
1694 &ironlake_display_wm_info,
1695 ILK_LP0_PLANE_LATENCY,
1696 &ironlake_cursor_wm_info,
1697 ILK_LP0_CURSOR_LATENCY,
1698 &plane_wm, &cursor_wm)) {
1699 I915_WRITE(WM0_PIPEA_ILK,
1700 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
1701 DRM_DEBUG_KMS("FIFO watermarks for pipe A - plane %d, cursor: %d\n",
1702 plane_wm, cursor_wm);
1704 enabled |= 1;
1705 }
1706
1707 if (g4x_compute_wm0(dev, 1,
1708 &ironlake_display_wm_info,
1709 ILK_LP0_PLANE_LATENCY,
1710 &ironlake_cursor_wm_info,
1711 ILK_LP0_CURSOR_LATENCY,
1712 &plane_wm, &cursor_wm)) {
1713 I915_WRITE(WM0_PIPEB_ILK,
1714 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
1715 DRM_DEBUG_KMS("FIFO watermarks for pipe B - plane %d, cursor: %d\n",
1716 plane_wm, cursor_wm);
1718 enabled |= 2;
1719 }
1720
1721 /*
1722 * Calculate and update the self-refresh watermark only when one
1723 * display plane is used.
1724 */
1725 I915_WRITE(WM3_LP_ILK, 0);
1726 I915_WRITE(WM2_LP_ILK, 0);
1727 I915_WRITE(WM1_LP_ILK, 0);
1728
1729 if (!single_plane_enabled(enabled))
1730 return;
1731 enabled = ffs(enabled) - 1;
1732
1733 /* WM1 */
1734 if (!ironlake_compute_srwm(dev, 1, enabled,
1735 ILK_READ_WM1_LATENCY() * 500,
1736 &ironlake_display_srwm_info,
1737 &ironlake_cursor_srwm_info,
1738 &fbc_wm, &plane_wm, &cursor_wm))
1739 return;
1740
1741 I915_WRITE(WM1_LP_ILK,
1742 WM1_LP_SR_EN |
1743 (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1744 (fbc_wm << WM1_LP_FBC_SHIFT) |
1745 (plane_wm << WM1_LP_SR_SHIFT) |
1746 cursor_wm);
1747
1748 /* WM2 */
1749 if (!ironlake_compute_srwm(dev, 2, enabled,
1750 ILK_READ_WM2_LATENCY() * 500,
1751 &ironlake_display_srwm_info,
1752 &ironlake_cursor_srwm_info,
1753 &fbc_wm, &plane_wm, &cursor_wm))
1754 return;
1755
1756 I915_WRITE(WM2_LP_ILK,
1757 WM2_LP_EN |
1758 (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1759 (fbc_wm << WM1_LP_FBC_SHIFT) |
1760 (plane_wm << WM1_LP_SR_SHIFT) |
1761 cursor_wm);
1762
1763 /*
1764 * WM3 is unsupported on ILK, probably because we don't have latency
1765 * data for that power state
1766 */
1767}
1768
1769static void sandybridge_update_wm(struct drm_device *dev)
1770{
1771 struct drm_i915_private *dev_priv = dev->dev_private;
1772 int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
1773 u32 val;
1774 int fbc_wm, plane_wm, cursor_wm;
1775 unsigned int enabled;
1776
1777 enabled = 0;
1778 if (g4x_compute_wm0(dev, 0,
1779 &sandybridge_display_wm_info, latency,
1780 &sandybridge_cursor_wm_info, latency,
1781 &plane_wm, &cursor_wm)) {
1782 val = I915_READ(WM0_PIPEA_ILK);
1783 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
1784 I915_WRITE(WM0_PIPEA_ILK, val |
1785 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
1786 DRM_DEBUG_KMS("FIFO watermarks for pipe A - plane %d, cursor: %d\n",
1787 plane_wm, cursor_wm);
1789 enabled |= 1;
1790 }
1791
1792 if (g4x_compute_wm0(dev, 1,
1793 &sandybridge_display_wm_info, latency,
1794 &sandybridge_cursor_wm_info, latency,
1795 &plane_wm, &cursor_wm)) {
1796 val = I915_READ(WM0_PIPEB_ILK);
1797 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
1798 I915_WRITE(WM0_PIPEB_ILK, val |
1799 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
1800 DRM_DEBUG_KMS("FIFO watermarks for pipe B - plane %d, cursor: %d\n",
1801 plane_wm, cursor_wm);
1803 enabled |= 2;
1804 }
1805
1806 if ((dev_priv->num_pipe == 3) &&
1807 g4x_compute_wm0(dev, 2,
1808 &sandybridge_display_wm_info, latency,
1809 &sandybridge_cursor_wm_info, latency,
1810 &plane_wm, &cursor_wm)) {
1811 val = I915_READ(WM0_PIPEC_IVB);
1812 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
1813 I915_WRITE(WM0_PIPEC_IVB, val |
1814 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
1815 DRM_DEBUG_KMS("FIFO watermarks for pipe C - plane %d, cursor: %d\n",
1816 plane_wm, cursor_wm);
1818 enabled |= 1 << 2; /* bit 2 = pipe C; "|= 3" would corrupt the mask */
1819 }
1820
1821 /*
1822 * Calculate and update the self-refresh watermark only when one
1823 * display plane is used.
1824 *
1825 * SNB supports 3 levels of watermarks.
1826 *
1827 * WM1/WM2/WM3 watermarks have to be enabled in ascending order
1828 * and disabled in descending order.
1829 *
1830 */
1831 I915_WRITE(WM3_LP_ILK, 0);
1832 I915_WRITE(WM2_LP_ILK, 0);
1833 I915_WRITE(WM1_LP_ILK, 0);
1834
1835 if (!single_plane_enabled(enabled) ||
1836 dev_priv->sprite_scaling_enabled)
1837 return;
1838 enabled = ffs(enabled) - 1;
1839
1840 /* WM1 */
1841 if (!ironlake_compute_srwm(dev, 1, enabled,
1842 SNB_READ_WM1_LATENCY() * 500,
1843 &sandybridge_display_srwm_info,
1844 &sandybridge_cursor_srwm_info,
1845 &fbc_wm, &plane_wm, &cursor_wm))
1846 return;
1847
1848 I915_WRITE(WM1_LP_ILK,
1849 WM1_LP_SR_EN |
1850 (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1851 (fbc_wm << WM1_LP_FBC_SHIFT) |
1852 (plane_wm << WM1_LP_SR_SHIFT) |
1853 cursor_wm);
1854
1855 /* WM2 */
1856 if (!ironlake_compute_srwm(dev, 2, enabled,
1857 SNB_READ_WM2_LATENCY() * 500,
1858 &sandybridge_display_srwm_info,
1859 &sandybridge_cursor_srwm_info,
1860 &fbc_wm, &plane_wm, &cursor_wm))
1861 return;
1862
1863 I915_WRITE(WM2_LP_ILK,
1864 WM2_LP_EN |
1865 (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1866 (fbc_wm << WM1_LP_FBC_SHIFT) |
1867 (plane_wm << WM1_LP_SR_SHIFT) |
1868 cursor_wm);
1869
1870 /* WM3 */
1871 if (!ironlake_compute_srwm(dev, 3, enabled,
1872 SNB_READ_WM3_LATENCY() * 500,
1873 &sandybridge_display_srwm_info,
1874 &sandybridge_cursor_srwm_info,
1875 &fbc_wm, &plane_wm, &cursor_wm))
1876 return;
1877
1878 I915_WRITE(WM3_LP_ILK,
1879 WM3_LP_EN |
1880 (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1881 (fbc_wm << WM1_LP_FBC_SHIFT) |
1882 (plane_wm << WM1_LP_SR_SHIFT) |
1883 cursor_wm);
1884}
1885
1886static void
1887haswell_update_linetime_wm(struct drm_device *dev, int pipe,
1888 struct drm_display_mode *mode)
1889{
1890 struct drm_i915_private *dev_priv = dev->dev_private;
1891 u32 temp;
1892
1893 temp = I915_READ(PIPE_WM_LINETIME(pipe));
1894 temp &= ~PIPE_WM_LINETIME_MASK;
1895
1896 /* The watermarks are computed based on how long it takes to fill a
1897 * single row at the given clock rate, multiplied by 8.
1898 */
1899 temp |= PIPE_WM_LINETIME_TIME(
1900 ((mode->crtc_hdisplay * 1000) / mode->clock) * 8);
1901
1902 /* IPS watermarks are only used by pipe A, and are ignored by
1903 * pipes B and C. They are calculated similarly to the common
1904 * linetime values, except that we are using CD clock frequency
1905 * in MHz instead of pixel rate for the division.
1906 *
1907 * This is a placeholder for the IPS watermark calculation code.
1908 */
1909
1910 I915_WRITE(PIPE_WM_LINETIME(pipe), temp);
1911}
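/*
 * Editor's example with assumed numbers (example_linetime() is
 * hypothetical): a 1920-wide mode at a 148500 kHz dot clock gives
 * (1920 * 1000 / 148500) * 8 = 96, i.e. the line time expressed in
 * eighths of a microsecond, as programmed above.
 */
static inline u32 example_linetime(int crtc_hdisplay, int clock_khz)
{
	/* mirrors the PIPE_WM_LINETIME_TIME() argument above */
	return ((crtc_hdisplay * 1000) / clock_khz) * 8;
}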
1912
1913static bool
1914sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
1915 uint32_t sprite_width, int pixel_size,
1916 const struct intel_watermark_params *display,
1917 int display_latency_ns, int *sprite_wm)
1918{
1919 struct drm_crtc *crtc;
1920 int clock;
1921 int entries, tlb_miss;
1922
1923 crtc = intel_get_crtc_for_plane(dev, plane);
1924 if (crtc->fb == NULL || !crtc->enabled) {
1925 *sprite_wm = display->guard_size;
1926 return false;
1927 }
1928
1929 clock = crtc->mode.clock;
1930
1931 /* Use the small buffer method to calculate the sprite watermark */
1932 entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
1933 tlb_miss = display->fifo_size*display->cacheline_size -
1934 sprite_width * 8;
1935 if (tlb_miss > 0)
1936 entries += tlb_miss;
1937 entries = DIV_ROUND_UP(entries, display->cacheline_size);
1938 *sprite_wm = entries + display->guard_size;
1939 if (*sprite_wm > (int)display->max_wm)
1940 *sprite_wm = display->max_wm;
1941
1942 return true;
1943}
1944
1945static bool
1946sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
1947 uint32_t sprite_width, int pixel_size,
1948 const struct intel_watermark_params *display,
1949 int latency_ns, int *sprite_wm)
1950{
1951 struct drm_crtc *crtc;
1952 unsigned long line_time_us;
1953 int clock;
1954 int line_count, line_size;
1955 int small, large;
1956 int entries;
1957
1958 if (!latency_ns) {
1959 *sprite_wm = 0;
1960 return false;
1961 }
1962
1963 crtc = intel_get_crtc_for_plane(dev, plane);
1964 clock = crtc->mode.clock;
1965 if (!clock) {
1966 *sprite_wm = 0;
1967 return false;
1968 }
1969
1970 line_time_us = (sprite_width * 1000) / clock;
1971 if (!line_time_us) {
1972 *sprite_wm = 0;
1973 return false;
1974 }
1975
1976 line_count = (latency_ns / line_time_us + 1000) / 1000;
1977 line_size = sprite_width * pixel_size;
1978
1979 /* Use the minimum of the small and large buffer method for primary */
1980 small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
1981 large = line_count * line_size;
1982
1983 entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
1984 *sprite_wm = entries + display->guard_size;
1985
1986 return *sprite_wm <= 0x3ff;
1987}
1988
1989static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
1990 uint32_t sprite_width, int pixel_size)
1991{
1992 struct drm_i915_private *dev_priv = dev->dev_private;
1993 int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
1994 u32 val;
1995 int sprite_wm, reg;
1996 int ret;
1997
1998 switch (pipe) {
1999 case 0:
2000 reg = WM0_PIPEA_ILK;
2001 break;
2002 case 1:
2003 reg = WM0_PIPEB_ILK;
2004 break;
2005 case 2:
2006 reg = WM0_PIPEC_IVB;
2007 break;
2008 default:
2009 return; /* bad pipe */
2010 }
2011
2012 ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
2013 &sandybridge_display_wm_info,
2014 latency, &sprite_wm);
2015 if (!ret) {
2016 DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
2017 pipe);
2018 return;
2019 }
2020
2021 val = I915_READ(reg);
2022 val &= ~WM0_PIPE_SPRITE_MASK;
2023 I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
2024 DRM_DEBUG_KMS("sprite watermarks for pipe %d - %d\n", pipe, sprite_wm);
2025
2027 ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
2028 pixel_size,
2029 &sandybridge_display_srwm_info,
2030 SNB_READ_WM1_LATENCY() * 500,
2031 &sprite_wm);
2032 if (!ret) {
2033 DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
2034 pipe);
2035 return;
2036 }
2037 I915_WRITE(WM1S_LP_ILK, sprite_wm);
2038
2039 /* Only IVB has two more LP watermarks for sprite */
2040 if (!IS_IVYBRIDGE(dev))
2041 return;
2042
2043 ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
2044 pixel_size,
2045 &sandybridge_display_srwm_info,
2046 SNB_READ_WM2_LATENCY() * 500,
2047 &sprite_wm);
2048 if (!ret) {
2049 DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
2050 pipe);
2051 return;
2052 }
2053 I915_WRITE(WM2S_LP_IVB, sprite_wm);
2054
2055 ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
2056 pixel_size,
2057 &sandybridge_display_srwm_info,
2058 SNB_READ_WM3_LATENCY() * 500,
2059 &sprite_wm);
2060 if (!ret) {
2061 DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
2062 pipe);
2063 return;
2064 }
2065 I915_WRITE(WM3S_LP_IVB, sprite_wm);
2066}
2067
2068/**
2069 * intel_update_watermarks - update FIFO watermark values based on current modes
2070 *
2071 * Calculate watermark values for the various WM regs based on current mode
2072 * and plane configuration.
2073 *
2074 * There are several cases to deal with here:
2075 * - normal (i.e. non-self-refresh)
2076 * - self-refresh (SR) mode
2077 * - lines are large relative to FIFO size (buffer can hold up to 2)
2078 * - lines are small relative to FIFO size (buffer can hold more than 2
2079 * lines), so need to account for TLB latency
2080 *
2081 * The normal calculation is:
2082 * watermark = dotclock * bytes per pixel * latency
2083 * where latency is platform & configuration dependent (we assume pessimal
2084 * values here).
2085 *
2086 * The SR calculation is:
2087 * watermark = (trunc(latency/line time)+1) * surface width *
2088 * bytes per pixel
2089 * where
2090 * line time = htotal / dotclock
2091 * surface width = hdisplay for normal plane and 64 for cursor
2092 * and latency is assumed to be high, as above.
2093 *
2094 * The final value programmed to the register should always be rounded up,
2095 * and include an extra 2 entries to account for clock crossings.
2096 *
2097 * We don't use the sprite, so we can ignore that. And on Crestline we have
2098 * to set the non-SR watermarks to 8.
2099 */
2100void intel_update_watermarks(struct drm_device *dev)
2101{
2102 struct drm_i915_private *dev_priv = dev->dev_private;
2103
2104 if (dev_priv->display.update_wm)
2105 dev_priv->display.update_wm(dev);
2106}
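/*
 * Editor's sketch of the "normal" watermark formula documented above
 * (example_normal_wm_bytes() is hypothetical, numbers assumed): at a
 * 148500 kHz dot clock, 4 bytes per pixel and 2000 ns of latency the
 * FIFO must absorb (148500 / 1000) * 4 * 2000 / 1000 = 1184 bytes
 * before memory starts refilling it.
 */
static inline int example_normal_wm_bytes(int dotclock_khz, int bytes_pp,
					  int latency_ns)
{
	return (dotclock_khz / 1000) * bytes_pp * latency_ns / 1000;
}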
2107
2108void intel_update_linetime_watermarks(struct drm_device *dev,
2109 int pipe, struct drm_display_mode *mode)
2110{
2111 struct drm_i915_private *dev_priv = dev->dev_private;
2112
2113 if (dev_priv->display.update_linetime_wm)
2114 dev_priv->display.update_linetime_wm(dev, pipe, mode);
2115}
2116
2117void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
2118 uint32_t sprite_width, int pixel_size)
2119{
2120 struct drm_i915_private *dev_priv = dev->dev_private;
2121
2122 if (dev_priv->display.update_sprite_wm)
2123 dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
2124 pixel_size);
2125}
2126
2127static struct drm_i915_gem_object *
2128intel_alloc_context_page(struct drm_device *dev)
2129{
2130 struct drm_i915_gem_object *ctx;
2131 int ret;
2132
2133 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
2134
2135 ctx = i915_gem_alloc_object(dev, 4096);
2136 if (!ctx) {
2137 DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
2138 return NULL;
2139 }
2140
2141 ret = i915_gem_object_pin(ctx, 4096, true);
2142 if (ret) {
2143 DRM_ERROR("failed to pin power context: %d\n", ret);
2144 goto err_unref;
2145 }
2146
2147 ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
2148 if (ret) {
2149 DRM_ERROR("failed to set-domain on power context: %d\n", ret);
2150 goto err_unpin;
2151 }
2152
2153 return ctx;
2154
2155err_unpin:
2156 i915_gem_object_unpin(ctx);
2157err_unref:
2158 drm_gem_object_unreference(&ctx->base);
2159 /* struct_mutex stays held; the caller locks/unlocks it (see WARN_ON above) */
2160 return NULL;
2161}
2162
2163bool ironlake_set_drps(struct drm_device *dev, u8 val)
2164{
2165 struct drm_i915_private *dev_priv = dev->dev_private;
2166 u16 rgvswctl;
2167
2168 rgvswctl = I915_READ16(MEMSWCTL);
2169 if (rgvswctl & MEMCTL_CMD_STS) {
2170 DRM_DEBUG("gpu busy, RCS change rejected\n");
2171 return false; /* still busy with another command */
2172 }
2173
2174 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
2175 (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
2176 I915_WRITE16(MEMSWCTL, rgvswctl);
2177 POSTING_READ16(MEMSWCTL);
2178
2179 rgvswctl |= MEMCTL_CMD_STS;
2180 I915_WRITE16(MEMSWCTL, rgvswctl);
2181
2182 return true;
2183}
2184
2185void ironlake_enable_drps(struct drm_device *dev)
2186{
2187 struct drm_i915_private *dev_priv = dev->dev_private;
2188 u32 rgvmodectl = I915_READ(MEMMODECTL);
2189 u8 fmax, fmin, fstart, vstart;
2190
2191 /* Enable temp reporting */
2192 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
2193 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
2194
2195 /* 100ms RC evaluation intervals */
2196 I915_WRITE(RCUPEI, 100000);
2197 I915_WRITE(RCDNEI, 100000);
2198
2199 /* Set max/min thresholds to 90ms and 80ms respectively */
2200 I915_WRITE(RCBMAXAVG, 90000);
2201 I915_WRITE(RCBMINAVG, 80000);
2202
2203 I915_WRITE(MEMIHYST, 1);
2204
2205 /* Set up min, max, and cur for interrupt handling */
2206 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
2207 fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
2208 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
2209 MEMMODE_FSTART_SHIFT;
2210
2211 vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
2212 PXVFREQ_PX_SHIFT;
2213
2214 dev_priv->fmax = fmax; /* IPS callback will increase this */
2215 dev_priv->fstart = fstart;
2216
2217 dev_priv->max_delay = fstart;
2218 dev_priv->min_delay = fmin;
2219 dev_priv->cur_delay = fstart;
2220
2221 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
2222 fmax, fmin, fstart);
2223
2224 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
2225
2226 /*
2227 * Interrupts will be enabled in ironlake_irq_postinstall
2228 */
2229
2230 I915_WRITE(VIDSTART, vstart);
2231 POSTING_READ(VIDSTART);
2232
2233 rgvmodectl |= MEMMODE_SWMODE_EN;
2234 I915_WRITE(MEMMODECTL, rgvmodectl);
2235
2236 if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
2237 DRM_ERROR("stuck trying to change perf mode\n");
2238 msleep(1);
2239
2240 ironlake_set_drps(dev, fstart);
2241
2242 dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
2243 I915_READ(0x112e0);
2244 dev_priv->last_time1 = jiffies_to_msecs(jiffies);
2245 dev_priv->last_count2 = I915_READ(0x112f4);
2246 getrawmonotonic(&dev_priv->last_time2);
2247}
2248
2249void ironlake_disable_drps(struct drm_device *dev)
2250{
2251 struct drm_i915_private *dev_priv = dev->dev_private;
2252 u16 rgvswctl = I915_READ16(MEMSWCTL);
2253
2254 /* Ack interrupts, disable EFC interrupt */
2255 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
2256 I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
2257 I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
2258 I915_WRITE(DEIIR, DE_PCU_EVENT);
2259 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
2260
2261 /* Go back to the starting frequency */
2262 ironlake_set_drps(dev, dev_priv->fstart);
2263 msleep(1);
2264 rgvswctl |= MEMCTL_CMD_STS;
2265 I915_WRITE(MEMSWCTL, rgvswctl);
2266 msleep(1);
2268}
2269
2270void gen6_set_rps(struct drm_device *dev, u8 val)
2271{
2272 struct drm_i915_private *dev_priv = dev->dev_private;
2273 u32 swreq;
2274
2275 swreq = (val & 0x3ff) << 25;
2276 I915_WRITE(GEN6_RPNSWREQ, swreq);
2277}
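/*
 * Editor's sketch of the encoding used by gen6_set_rps() above
 * (example_rpnswreq() is hypothetical): the software request register
 * takes the target ratio in its top bits. Going by the "* 50" MHz
 * conversions in gen6_enable_rps() below, one ratio step appears to be
 * 50 MHz on SNB - an assumption, not stated in this file.
 */
static inline u32 example_rpnswreq(u32 ratio)
{
	return (ratio & 0x3ff) << 25;	/* e.g. ratio 10 ~ 500 MHz */
}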
2278
2279void gen6_disable_rps(struct drm_device *dev)
2280{
2281 struct drm_i915_private *dev_priv = dev->dev_private;
2282
2283 I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
2284 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
2285 I915_WRITE(GEN6_PMIER, 0);
2286 /* Completely masking the PM interrupts here doesn't race with the rps
2287 * work item unmasking them again, because that path uses a different
2288 * register (PMIMR) for the masking. The only risk is leaving stale
2289 * bits in PMIIR and PMIMR, which gen6_enable_rps will clean up. */
2290
2291 spin_lock_irq(&dev_priv->rps_lock);
2292 dev_priv->pm_iir = 0;
2293 spin_unlock_irq(&dev_priv->rps_lock);
2294
2295 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
2296}
2297
2298int intel_enable_rc6(const struct drm_device *dev)
2299{
2300 /*
2301 * Respect the kernel parameter if it is set
2302 */
2303 if (i915_enable_rc6 >= 0)
2304 return i915_enable_rc6;
2305
2306 /*
2307 * Disable RC6 on Ironlake
2308 */
2309 if (INTEL_INFO(dev)->gen == 5)
2310 return 0;
2311
2312 /* Sorry Haswell, no RC6 for you for now. */
2313 if (IS_HASWELL(dev))
2314 return 0;
2315
2316 /*
2317 * Disable deep RC6 on Sandybridge; plain RC6 stays enabled
2318 */
2319 if (INTEL_INFO(dev)->gen == 6) {
2320 DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
2321 return INTEL_RC6_ENABLE;
2322 }
2323 DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
2324 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
2325}
2326
2327void gen6_enable_rps(struct drm_i915_private *dev_priv)
2328{
2329 struct intel_ring_buffer *ring;
2330 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
2331 u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
2332 u32 pcu_mbox, rc6_mask = 0;
2333 u32 gtfifodbg;
2334 int cur_freq, min_freq, max_freq;
2335 int rc6_mode;
2336 int i;
2337
2338 /* Here begins a magic sequence of register writes to enable
2339 * auto-downclocking.
2340 *
2341 * Perhaps there might be some value in exposing these to
2342 * userspace...
2343 */
2344 I915_WRITE(GEN6_RC_STATE, 0);
2345 mutex_lock(&dev_priv->dev->struct_mutex);
2346
2347 /* Clear stale GT FIFO errors now so they don't get confused with new ones */
2348 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
2349 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
2350 I915_WRITE(GTFIFODBG, gtfifodbg);
2351 }
2352
2353 gen6_gt_force_wake_get(dev_priv);
2354
2355 /* disable the counters and set deterministic thresholds */
2356 I915_WRITE(GEN6_RC_CONTROL, 0);
2357
2358 I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
2359 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
2360 I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
2361 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
2362 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
2363
2364 for_each_ring(ring, dev_priv, i)
2365 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
2366
2367 I915_WRITE(GEN6_RC_SLEEP, 0);
2368 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
2369 I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
2370 I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
2371 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
2372
2373 rc6_mode = intel_enable_rc6(dev_priv->dev);
2374 if (rc6_mode & INTEL_RC6_ENABLE)
2375 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
2376
2377 if (rc6_mode & INTEL_RC6p_ENABLE)
2378 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
2379
2380 if (rc6_mode & INTEL_RC6pp_ENABLE)
2381 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
2382
2383 DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
2384 (rc6_mode & INTEL_RC6_ENABLE) ? "on" : "off",
2385 (rc6_mode & INTEL_RC6p_ENABLE) ? "on" : "off",
2386 (rc6_mode & INTEL_RC6pp_ENABLE) ? "on" : "off");
2387
2388 I915_WRITE(GEN6_RC_CONTROL,
2389 rc6_mask |
2390 GEN6_RC_CTL_EI_MODE(1) |
2391 GEN6_RC_CTL_HW_ENABLE);
2392
2393 I915_WRITE(GEN6_RPNSWREQ,
2394 GEN6_FREQUENCY(10) |
2395 GEN6_OFFSET(0) |
2396 GEN6_AGGRESSIVE_TURBO);
2397 I915_WRITE(GEN6_RC_VIDEO_FREQ,
2398 GEN6_FREQUENCY(12));
2399
2400 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
2401 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
2402 18 << 24 |
2403 6 << 16);
2404 I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
2405 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
2406 I915_WRITE(GEN6_RP_UP_EI, 100000);
2407 I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
2408 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
2409 I915_WRITE(GEN6_RP_CONTROL,
2410 GEN6_RP_MEDIA_TURBO |
2411 GEN6_RP_MEDIA_HW_MODE |
2412 GEN6_RP_MEDIA_IS_GFX |
2413 GEN6_RP_ENABLE |
2414 GEN6_RP_UP_BUSY_AVG |
2415 GEN6_RP_DOWN_IDLE_CONT);
2416
2417 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
2418 500))
2419 DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
2420
2421 I915_WRITE(GEN6_PCODE_DATA, 0);
2422 I915_WRITE(GEN6_PCODE_MAILBOX,
2423 GEN6_PCODE_READY |
2424 GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
2425 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
2426 500))
2427 DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
2428
2429 min_freq = (rp_state_cap & 0xff0000) >> 16;
2430 max_freq = rp_state_cap & 0xff;
2431 cur_freq = (gt_perf_status & 0xff00) >> 8;
2432
2433 /* Check for overclock support */
2434 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
2435 500))
2436 DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
2437 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
2438 pcu_mbox = I915_READ(GEN6_PCODE_DATA);
2439 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
2440 500))
2441 DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
2442 if (pcu_mbox & (1<<31)) { /* OC supported */
2443 max_freq = pcu_mbox & 0xff;
2444 DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", max_freq * 50);
2445 }
2446
2447 /* In units of 50MHz, going by the "* 50" MHz conversion above */
2448 dev_priv->max_delay = max_freq;
2449 dev_priv->min_delay = min_freq;
2450 dev_priv->cur_delay = cur_freq;
2451
2452 /* requires MSI enabled */
2453 I915_WRITE(GEN6_PMIER,
2454 GEN6_PM_MBOX_EVENT |
2455 GEN6_PM_THERMAL_EVENT |
2456 GEN6_PM_RP_DOWN_TIMEOUT |
2457 GEN6_PM_RP_UP_THRESHOLD |
2458 GEN6_PM_RP_DOWN_THRESHOLD |
2459 GEN6_PM_RP_UP_EI_EXPIRED |
2460 GEN6_PM_RP_DOWN_EI_EXPIRED);
2461 spin_lock_irq(&dev_priv->rps_lock);
2462 WARN_ON(dev_priv->pm_iir != 0);
2463 I915_WRITE(GEN6_PMIMR, 0);
2464 spin_unlock_irq(&dev_priv->rps_lock);
2465 /* enable all PM interrupts */
2466 I915_WRITE(GEN6_PMINTRMSK, 0);
2467
2468 gen6_gt_force_wake_put(dev_priv);
2469 mutex_unlock(&dev_priv->dev->struct_mutex);
2470}
2471
2472void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
2473{
2474 int min_freq = 15;
2475 int gpu_freq, ia_freq, max_ia_freq;
2476 int scaling_factor = 180;
2477
2478 max_ia_freq = cpufreq_quick_get_max(0);
2479 /*
2480 * Default to measured freq if none found, PCU will ensure we don't go
2481 * over
2482 */
2483 if (!max_ia_freq)
2484 max_ia_freq = tsc_khz;
2485
2486 /* Convert from kHz to MHz */
2487 max_ia_freq /= 1000;
2488
2489 mutex_lock(&dev_priv->dev->struct_mutex);
2490
2491 /*
2492 * For each potential GPU frequency, load a ring frequency we'd like
2493 * to use for memory access. We do this by specifying the IA frequency
2494 * the PCU should use as a reference to determine the ring frequency.
2495 */
2496 for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
2497 gpu_freq--) {
2498 int diff = dev_priv->max_delay - gpu_freq;
2499
2500 /*
2501 * For GPU frequencies less than 750MHz, just use the lowest
2502 * ring freq.
2503 */
2504 if (gpu_freq < min_freq)
2505 ia_freq = 800;
2506 else
2507 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
2508 ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
2509
2510 I915_WRITE(GEN6_PCODE_DATA,
2511 (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
2512 gpu_freq);
2513 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
2514 GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
2515 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
2516 GEN6_PCODE_READY) == 0, 10)) {
2517 DRM_ERROR("pcode write of freq table timed out\n");
2518 continue;
2519 }
2520 }
2521
2522 mutex_unlock(&dev_priv->dev->struct_mutex);
2523}
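/*
 * Editor's worked example of the scaling above, with assumed numbers
 * (example_ring_freq_entry() is hypothetical): each GPU step below the
 * maximum lowers the requested IA frequency by scaling_factor / 2 =
 * 90 MHz. With max_ia_freq = 3400 MHz and a ratio 5 steps below max,
 * ia_freq = 3400 - (5 * 180) / 2 = 2950 MHz, which DIV_ROUND_CLOSEST()
 * turns into 30 (units of 100 MHz) for the pcode table entry.
 */
static inline int example_ring_freq_entry(int max_ia_mhz, int steps_below_max)
{
	return DIV_ROUND_CLOSEST(max_ia_mhz - (steps_below_max * 180) / 2, 100);
}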
2524
2525static void ironlake_teardown_rc6(struct drm_device *dev)
2526{
2527 struct drm_i915_private *dev_priv = dev->dev_private;
2528
2529 if (dev_priv->renderctx) {
2530 i915_gem_object_unpin(dev_priv->renderctx);
2531 drm_gem_object_unreference(&dev_priv->renderctx->base);
2532 dev_priv->renderctx = NULL;
2533 }
2534
2535 if (dev_priv->pwrctx) {
2536 i915_gem_object_unpin(dev_priv->pwrctx);
2537 drm_gem_object_unreference(&dev_priv->pwrctx->base);
2538 dev_priv->pwrctx = NULL;
2539 }
2540}
2541
2542void ironlake_disable_rc6(struct drm_device *dev)
2543{
2544 struct drm_i915_private *dev_priv = dev->dev_private;
2545
2546 if (I915_READ(PWRCTXA)) {
2547 /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
2548 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
2549 wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
2550 50);
2551
2552 I915_WRITE(PWRCTXA, 0);
2553 POSTING_READ(PWRCTXA);
2554
2555 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
2556 POSTING_READ(RSTDBYCTL);
2557 }
2558
2559 ironlake_teardown_rc6(dev);
2560}
2561
2562static int ironlake_setup_rc6(struct drm_device *dev)
2563{
2564 struct drm_i915_private *dev_priv = dev->dev_private;
2565
2566 if (dev_priv->renderctx == NULL)
2567 dev_priv->renderctx = intel_alloc_context_page(dev);
2568 if (!dev_priv->renderctx)
2569 return -ENOMEM;
2570
2571 if (dev_priv->pwrctx == NULL)
2572 dev_priv->pwrctx = intel_alloc_context_page(dev);
2573 if (!dev_priv->pwrctx) {
2574 ironlake_teardown_rc6(dev);
2575 return -ENOMEM;
2576 }
2577
2578 return 0;
2579}
2580
2581void ironlake_enable_rc6(struct drm_device *dev)
2582{
2583 struct drm_i915_private *dev_priv = dev->dev_private;
2584 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
2585 int ret;
2586
2587 /* rc6 disabled by default due to repeated reports of hanging during
2588 * boot and resume.
2589 */
2590 if (!intel_enable_rc6(dev))
2591 return;
2592
2593 mutex_lock(&dev->struct_mutex);
2594 ret = ironlake_setup_rc6(dev);
2595 if (ret) {
2596 mutex_unlock(&dev->struct_mutex);
2597 return;
2598 }
2599
2600 /*
2601 * GPU can automatically power down the render unit if given a page
2602 * to save state.
2603 */
2604 ret = intel_ring_begin(ring, 6);
2605 if (ret) {
2606 ironlake_teardown_rc6(dev);
2607 mutex_unlock(&dev->struct_mutex);
2608 return;
2609 }
2610
2611 intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
2612 intel_ring_emit(ring, MI_SET_CONTEXT);
2613 intel_ring_emit(ring, dev_priv->renderctx->gtt_offset |
2614 MI_MM_SPACE_GTT |
2615 MI_SAVE_EXT_STATE_EN |
2616 MI_RESTORE_EXT_STATE_EN |
2617 MI_RESTORE_INHIBIT);
2618 intel_ring_emit(ring, MI_SUSPEND_FLUSH);
2619 intel_ring_emit(ring, MI_NOOP);
2620 intel_ring_emit(ring, MI_FLUSH);
2621 intel_ring_advance(ring);
2622
2623 /*
2624 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
2625 * does an implicit flush; combined with the MI_FLUSH above, it should
2626 * be safe to assume that renderctx is valid afterwards.
2627 */
2628 ret = intel_wait_ring_idle(ring);
2629 if (ret) {
2630 DRM_ERROR("failed to enable ironlake power savings\n");
2631 ironlake_teardown_rc6(dev);
2632 mutex_unlock(&dev->struct_mutex);
2633 return;
2634 }
2635
2636 I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
2637 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
2638 mutex_unlock(&dev->struct_mutex);
2639}
2640
2641static unsigned long intel_pxfreq(u32 vidfreq)
2642{
2643 unsigned long freq;
2644 int div = (vidfreq & 0x3f0000) >> 16;
2645 int post = (vidfreq & 0x3000) >> 12;
2646 int pre = (vidfreq & 0x7);
2647
2648 if (!pre)
2649 return 0;
2650
2651 freq = ((div * 133333) / ((1<<post) * pre));
2652
2653 return freq;
2654}
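/*
 * Editor's example decode (example_pxfreq_decode() is hypothetical, the
 * encoded value assumed): div = 0x20, post = 1, pre = 2 yields
 * 32 * 133333 / ((1 << 1) * 2) = 1066664, apparently in kHz given the
 * 133333 base.
 */
static inline unsigned long example_pxfreq_decode(void)
{
	u32 vidfreq = (0x20 << 16) | (0x1 << 12) | 0x2;

	return intel_pxfreq(vidfreq);
}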
2655
2656static const struct cparams {
2657 u16 i;
2658 u16 t;
2659 u16 m;
2660 u16 c;
2661} cparams[] = {
2662 { 1, 1333, 301, 28664 },
2663 { 1, 1066, 294, 24460 },
2664 { 1, 800, 294, 25192 },
2665 { 0, 1333, 276, 27605 },
2666 { 0, 1066, 276, 27605 },
2667 { 0, 800, 231, 23784 },
2668};
2669
2670unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
2671{
2672 u64 total_count, diff, ret;
2673 u32 count1, count2, count3, m = 0, c = 0;
2674 unsigned long now = jiffies_to_msecs(jiffies), diff1;
2675 int i;
2676
2677 diff1 = now - dev_priv->last_time1;
2678
2679 /* Prevent division-by-zero if we are asking too fast.
2680 * Also, we don't get interesting results if we are polling
2681 * faster than once in 10ms, so just return the saved value
2682 * in such cases.
2683 */
2684 if (diff1 <= 10)
2685 return dev_priv->chipset_power;
2686
2687 count1 = I915_READ(DMIEC);
2688 count2 = I915_READ(DDREC);
2689 count3 = I915_READ(CSIEC);
2690
2691 total_count = count1 + count2 + count3;
2692
2693 /* FIXME: handle per-counter overflow */
2694 if (total_count < dev_priv->last_count1) {
2695 diff = ~0UL - dev_priv->last_count1;
2696 diff += total_count;
2697 } else {
2698 diff = total_count - dev_priv->last_count1;
2699 }
2700
2701 for (i = 0; i < ARRAY_SIZE(cparams); i++) {
2702 if (cparams[i].i == dev_priv->c_m &&
2703 cparams[i].t == dev_priv->r_t) {
2704 m = cparams[i].m;
2705 c = cparams[i].c;
2706 break;
2707 }
2708 }
2709
2710 diff = div_u64(diff, diff1);
2711 ret = ((m * diff) + c);
2712 ret = div_u64(ret, 10);
2713
2714 dev_priv->last_count1 = total_count;
2715 dev_priv->last_time1 = now;
2716
2717 dev_priv->chipset_power = ret;
2718
2719 return ret;
2720}
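/*
 * Editor's worked example of the estimate above, with assumed numbers
 * (example_chipset_power() is hypothetical): using the first cparams
 * row (m = 301, c = 28664) and 100 energy counts per millisecond, the
 * result is (301 * 100 + 28664) / 10 = 5876 driver-internal units.
 */
static inline u64 example_chipset_power(u32 m, u32 c, u64 counts_per_ms)
{
	return (m * counts_per_ms + c) / 10;
}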
2721
2722unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
2723{
2724 unsigned long m, x, b;
2725 u32 tsfs;
2726
2727 tsfs = I915_READ(TSFS);
2728
2729 m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
2730 x = I915_READ8(TR1);
2731
2732 b = tsfs & TSFS_INTR_MASK;
2733
2734 return ((m * x) / 127) - b;
2735}
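/*
 * Editor's sketch of the formula above (example_mch_val() is
 * hypothetical, inputs assumed): TSFS supplies the slope m and
 * intercept b, TR1 the raw reading x, so with m = 100, x = 64 and
 * b = 10 the result is (100 * 64) / 127 - 10 = 40.
 */
static inline long example_mch_val(unsigned long m, unsigned long x,
				   unsigned long b)
{
	return ((m * x) / 127) - b;
}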
2736
2737static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
2738{
2739 static const struct v_table {
2740 u16 vd; /* in .1 mil */
2741 u16 vm; /* in .1 mil */
2742 } v_table[] = {
2743 { 0, 0, },
2744 { 375, 0, },
2745 { 500, 0, },
2746 { 625, 0, },
2747 { 750, 0, },
2748 { 875, 0, },
2749 { 1000, 0, },
2750 { 1125, 0, },
2751 { 4125, 3000, },
2752 { 4125, 3000, },
2753 { 4125, 3000, },
2754 { 4125, 3000, },
2755 { 4125, 3000, },
2756 { 4125, 3000, },
2757 { 4125, 3000, },
2758 { 4125, 3000, },
2759 { 4125, 3000, },
2760 { 4125, 3000, },
2761 { 4125, 3000, },
2762 { 4125, 3000, },
2763 { 4125, 3000, },
2764 { 4125, 3000, },
2765 { 4125, 3000, },
2766 { 4125, 3000, },
2767 { 4125, 3000, },
2768 { 4125, 3000, },
2769 { 4125, 3000, },
2770 { 4125, 3000, },
2771 { 4125, 3000, },
2772 { 4125, 3000, },
2773 { 4125, 3000, },
2774 { 4125, 3000, },
2775 { 4250, 3125, },
2776 { 4375, 3250, },
2777 { 4500, 3375, },
2778 { 4625, 3500, },
2779 { 4750, 3625, },
2780 { 4875, 3750, },
2781 { 5000, 3875, },
2782 { 5125, 4000, },
2783 { 5250, 4125, },
2784 { 5375, 4250, },
2785 { 5500, 4375, },
2786 { 5625, 4500, },
2787 { 5750, 4625, },
2788 { 5875, 4750, },
2789 { 6000, 4875, },
2790 { 6125, 5000, },
2791 { 6250, 5125, },
2792 { 6375, 5250, },
2793 { 6500, 5375, },
2794 { 6625, 5500, },
2795 { 6750, 5625, },
2796 { 6875, 5750, },
2797 { 7000, 5875, },
2798 { 7125, 6000, },
2799 { 7250, 6125, },
2800 { 7375, 6250, },
2801 { 7500, 6375, },
2802 { 7625, 6500, },
2803 { 7750, 6625, },
2804 { 7875, 6750, },
2805 { 8000, 6875, },
2806 { 8125, 7000, },
2807 { 8250, 7125, },
2808 { 8375, 7250, },
2809 { 8500, 7375, },
2810 { 8625, 7500, },
2811 { 8750, 7625, },
2812 { 8875, 7750, },
2813 { 9000, 7875, },
2814 { 9125, 8000, },
2815 { 9250, 8125, },
2816 { 9375, 8250, },
2817 { 9500, 8375, },
2818 { 9625, 8500, },
2819 { 9750, 8625, },
2820 { 9875, 8750, },
2821 { 10000, 8875, },
2822 { 10125, 9000, },
2823 { 10250, 9125, },
2824 { 10375, 9250, },
2825 { 10500, 9375, },
2826 { 10625, 9500, },
2827 { 10750, 9625, },
2828 { 10875, 9750, },
2829 { 11000, 9875, },
2830 { 11125, 10000, },
2831 { 11250, 10125, },
2832 { 11375, 10250, },
2833 { 11500, 10375, },
2834 { 11625, 10500, },
2835 { 11750, 10625, },
2836 { 11875, 10750, },
2837 { 12000, 10875, },
2838 { 12125, 11000, },
2839 { 12250, 11125, },
2840 { 12375, 11250, },
2841 { 12500, 11375, },
2842 { 12625, 11500, },
2843 { 12750, 11625, },
2844 { 12875, 11750, },
2845 { 13000, 11875, },
2846 { 13125, 12000, },
2847 { 13250, 12125, },
2848 { 13375, 12250, },
2849 { 13500, 12375, },
2850 { 13625, 12500, },
2851 { 13750, 12625, },
2852 { 13875, 12750, },
2853 { 14000, 12875, },
2854 { 14125, 13000, },
2855 { 14250, 13125, },
2856 { 14375, 13250, },
2857 { 14500, 13375, },
2858 { 14625, 13500, },
2859 { 14750, 13625, },
2860 { 14875, 13750, },
2861 { 15000, 13875, },
2862 { 15125, 14000, },
2863 { 15250, 14125, },
2864 { 15375, 14250, },
2865 { 15500, 14375, },
2866 { 15625, 14500, },
2867 { 15750, 14625, },
2868 { 15875, 14750, },
2869 { 16000, 14875, },
2870 { 16125, 15000, },
2871 };
2872 if (dev_priv->info->is_mobile)
2873 return v_table[pxvid].vm;
2874 else
2875 return v_table[pxvid].vd;
2876}
2877
2878void i915_update_gfx_val(struct drm_i915_private *dev_priv)
2879{
2880 struct timespec now, diff1;
2881 u64 diff;
2882 unsigned long diffms;
2883 u32 count;
2884
2885 if (dev_priv->info->gen != 5)
2886 return;
2887
2888 getrawmonotonic(&now);
2889 diff1 = timespec_sub(now, dev_priv->last_time2);
2890
2891 /* Don't divide by 0 */
2892 diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
2893 if (!diffms)
2894 return;
2895
2896 count = I915_READ(GFXEC);
2897
2898 if (count < dev_priv->last_count2) {
2899 diff = ~0UL - dev_priv->last_count2;
2900 diff += count;
2901 } else {
2902 diff = count - dev_priv->last_count2;
2903 }
2904
2905 dev_priv->last_count2 = count;
2906 dev_priv->last_time2 = now;
2907
2908 /* More magic constants... */
2909 diff = diff * 1181;
2910 diff = div_u64(diff, diffms * 10);
2911 dev_priv->gfx_power = diff;
2912}
2913
2914unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
2915{
2916 unsigned long t, corr, state1, corr2, state2;
2917 u32 pxvid, ext_v;
2918
2919 pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
2920 pxvid = (pxvid >> 24) & 0x7f;
2921 ext_v = pvid_to_extvid(dev_priv, pxvid);
2922
2923 state1 = ext_v;
2924
2925 t = i915_mch_val(dev_priv);
2926
2927 /* Revel in the empirically derived constants */
2928
2929 /* Correction factor in 1/100000 units */
2930 if (t > 80)
2931 corr = ((t * 2349) + 135940);
2932 else if (t >= 50)
2933 corr = ((t * 964) + 29317);
2934 else /* < 50 */
2935 corr = ((t * 301) + 1004);
2936
2937 corr = corr * ((150142 * state1) / 10000 - 78642);
2938 corr /= 100000;
2939 corr2 = (corr * dev_priv->corr);
2940
2941 state2 = (corr2 * state1) / 10000;
2942 state2 /= 100; /* convert to mW */
2943
2944 i915_update_gfx_val(dev_priv);
2945
2946 return dev_priv->gfx_power + state2;
2947}
2948
2949/* Global for IPS driver to get at the current i915 device */
2950static struct drm_i915_private *i915_mch_dev;
2951/*
2952 * Lock protecting IPS related data structures
2953 * - i915_mch_dev
2954 * - dev_priv->max_delay
2955 * - dev_priv->min_delay
2956 * - dev_priv->fmax
2957 * - dev_priv->gpu_busy
2958 */
2959static DEFINE_SPINLOCK(mchdev_lock);
2960
2961/**
2962 * i915_read_mch_val - return value for IPS use
2963 *
2964 * Calculate and return a value for the IPS driver to use when deciding whether
2965 * we have thermal and power headroom to increase CPU or GPU power budget.
2966 */
2967unsigned long i915_read_mch_val(void)
2968{
2969 struct drm_i915_private *dev_priv;
2970 unsigned long chipset_val, graphics_val, ret = 0;
2971
2972 spin_lock(&mchdev_lock);
2973 if (!i915_mch_dev)
2974 goto out_unlock;
2975 dev_priv = i915_mch_dev;
2976
2977 chipset_val = i915_chipset_val(dev_priv);
2978 graphics_val = i915_gfx_val(dev_priv);
2979
2980 ret = chipset_val + graphics_val;
2981
2982out_unlock:
2983 spin_unlock(&mchdev_lock);
2984
2985 return ret;
2986}
2987EXPORT_SYMBOL_GPL(i915_read_mch_val);
2988
2989/**
2990 * i915_gpu_raise - raise GPU frequency limit
2991 *
2992 * Raise the limit; IPS indicates we have thermal headroom.
2993 */
2994bool i915_gpu_raise(void)
2995{
2996 struct drm_i915_private *dev_priv;
2997 bool ret = true;
2998
2999 spin_lock(&mchdev_lock);
3000 if (!i915_mch_dev) {
3001 ret = false;
3002 goto out_unlock;
3003 }
3004 dev_priv = i915_mch_dev;
3005
3006 if (dev_priv->max_delay > dev_priv->fmax)
3007 dev_priv->max_delay--;
3008
3009out_unlock:
3010 spin_unlock(&mchdev_lock);
3011
3012 return ret;
3013}
3014EXPORT_SYMBOL_GPL(i915_gpu_raise);
3015
3016/**
3017 * i915_gpu_lower - lower GPU frequency limit
3018 *
3019 * IPS indicates we're close to a thermal limit, so throttle back the GPU
3020 * frequency maximum.
3021 */
3022bool i915_gpu_lower(void)
3023{
3024 struct drm_i915_private *dev_priv;
3025 bool ret = true;
3026
3027 spin_lock(&mchdev_lock);
3028 if (!i915_mch_dev) {
3029 ret = false;
3030 goto out_unlock;
3031 }
3032 dev_priv = i915_mch_dev;
3033
3034 if (dev_priv->max_delay < dev_priv->min_delay)
3035 dev_priv->max_delay++;
3036
3037out_unlock:
3038 spin_unlock(&mchdev_lock);
3039
3040 return ret;
3041}
3042EXPORT_SYMBOL_GPL(i915_gpu_lower);
3043
3044/**
3045 * i915_gpu_busy - indicate GPU business to IPS
3046 *
3047 * Tell the IPS driver whether or not the GPU is busy.
3048 */
3049bool i915_gpu_busy(void)
3050{
3051 struct drm_i915_private *dev_priv;
3052 bool ret = false;
3053
3054 spin_lock(&mchdev_lock);
3055 if (!i915_mch_dev)
3056 goto out_unlock;
3057 dev_priv = i915_mch_dev;
3058
3059 ret = dev_priv->busy;
3060
3061out_unlock:
3062 spin_unlock(&mchdev_lock);
3063
3064 return ret;
3065}
3066EXPORT_SYMBOL_GPL(i915_gpu_busy);
3067
3068/**
3069 * i915_gpu_turbo_disable - disable graphics turbo
3070 *
3071 * Disable graphics turbo by resetting the max frequency and setting the
3072 * current frequency to the default.
3073 */
3074bool i915_gpu_turbo_disable(void)
3075{
3076 struct drm_i915_private *dev_priv;
3077 bool ret = true;
3078
3079 spin_lock(&mchdev_lock);
3080 if (!i915_mch_dev) {
3081 ret = false;
3082 goto out_unlock;
3083 }
3084 dev_priv = i915_mch_dev;
3085
3086 dev_priv->max_delay = dev_priv->fstart;
3087
3088 if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart))
3089 ret = false;
3090
3091out_unlock:
3092 spin_unlock(&mchdev_lock);
3093
3094 return ret;
3095}
3096EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
3097
3098/**
3099 * ips_ping_for_i915_load - tell intel_ips that the i915 driver is now
3100 * loaded, if IPS got loaded first.
3101 *
3102 * This awkward dance is so that neither module has to depend on the
3103 * other in order for IPS to do the appropriate communication of
3104 * GPU turbo limits to i915.
3105 */
3106static void
3107ips_ping_for_i915_load(void)
3108{
3109 void (*link)(void);
3110
3111 link = symbol_get(ips_link_to_i915_driver);
3112 if (link) {
3113 link();
3114 symbol_put(ips_link_to_i915_driver);
3115 }
3116}
3117
3118void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
3119{
3120 spin_lock(&mchdev_lock);
3121 i915_mch_dev = dev_priv;
3122 dev_priv->mchdev_lock = &mchdev_lock;
3123 spin_unlock(&mchdev_lock);
3124
3125 ips_ping_for_i915_load();
3126}
3127
3128void intel_gpu_ips_teardown(void)
3129{
3130 spin_lock(&mchdev_lock);
3131 i915_mch_dev = NULL;
3132 spin_unlock(&mchdev_lock);
3133}
3134
3135void intel_init_emon(struct drm_device *dev)
3136{
3137 struct drm_i915_private *dev_priv = dev->dev_private;
3138 u32 lcfuse;
3139 u8 pxw[16];
3140 int i;
3141
3142 /* Disable PMON while programming the event weights */
3143 I915_WRITE(ECR, 0);
3144 POSTING_READ(ECR);
3145
3146 /* Program energy weights for various events */
3147 I915_WRITE(SDEW, 0x15040d00);
3148 I915_WRITE(CSIEW0, 0x007f0000);
3149 I915_WRITE(CSIEW1, 0x1e220004);
3150 I915_WRITE(CSIEW2, 0x04000004);
3151
3152 for (i = 0; i < 5; i++)
3153 I915_WRITE(PEW + (i * 4), 0);
3154 for (i = 0; i < 3; i++)
3155 I915_WRITE(DEW + (i * 4), 0);
3156
3157 /* Program P-state weights to account for frequency power adjustment */
3158 for (i = 0; i < 16; i++) {
3159 u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
3160 unsigned long freq = intel_pxfreq(pxvidfreq);
3161 unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
3162 PXVFREQ_PX_SHIFT;
3163 unsigned long val;
3164
3165 val = vid * vid;
3166 val *= (freq / 1000);
3167 val *= 255;
3168 val /= (127*127*900);
3169 if (val > 0xff)
3170 DRM_ERROR("bad pxval: %ld\n", val);
3171 pxw[i] = val;
3172 }
3173 /* Render standby states get 0 weight */
3174 pxw[14] = 0;
3175 pxw[15] = 0;
3176
3177 for (i = 0; i < 4; i++) {
3178 u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
3179 (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
3180 I915_WRITE(PXW + (i * 4), val);
3181 }
3182
3183 /* Adjust magic regs to magic values (more experimental results) */
3184 I915_WRITE(OGW0, 0);
3185 I915_WRITE(OGW1, 0);
3186 I915_WRITE(EG0, 0x00007f00);
3187 I915_WRITE(EG1, 0x0000000e);
3188 I915_WRITE(EG2, 0x000e0000);
3189 I915_WRITE(EG3, 0x68000300);
3190 I915_WRITE(EG4, 0x42000000);
3191 I915_WRITE(EG5, 0x00140031);
3192 I915_WRITE(EG6, 0);
3193 I915_WRITE(EG7, 0);
3194
3195 for (i = 0; i < 8; i++)
3196 I915_WRITE(PXWL + (i * 4), 0);
3197
3198 /* Enable PMON + select events */
3199 I915_WRITE(ECR, 0x80000019);
3200
3201 lcfuse = I915_READ(LCFUSE02);
3202
3203 dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
3204}
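/*
 * Editor's worked example of the P-state weight formula above, with
 * assumed inputs (example_pxw_weight() is hypothetical): vid = 100 and
 * freq = 1066664 kHz give 100 * 100 * 1066 * 255 / (127 * 127 * 900)
 * = 187, comfortably under the 0xff limit checked above.
 */
static inline unsigned long example_pxw_weight(unsigned long vid,
					       unsigned long freq_khz)
{
	return vid * vid * (freq_khz / 1000) * 255 / (127 * 127 * 900);
}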
3205
static void ironlake_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	/* Required for FBC */
	dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
		DPFCRUNIT_CLOCK_GATE_DISABLE |
		DPFDUNIT_CLOCK_GATE_DISABLE;
	/* Required for CxSR */
	dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_3DCGDIS0,
		   MARIUNIT_CLOCK_GATE_DISABLE |
		   SVSMUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(PCH_3DCGDIS1,
		   VFMUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	/*
	 * According to the spec, the following bits should be set in
	 * order to enable memory self-refresh:
	 * bits 22/21 of 0x42004
	 * bit 5 of 0x42020
	 * bit 15 of 0x45000
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   (I915_READ(ILK_DISPLAY_CHICKEN2) |
		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
	I915_WRITE(ILK_DSPCLK_GATE,
		   (I915_READ(ILK_DSPCLK_GATE) |
		    ILK_DPARB_CLK_GATE));
	I915_WRITE(DISP_ARB_CTL,
		   (I915_READ(DISP_ARB_CTL) |
		    DISP_FBC_WM_DIS));
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	/*
	 * Based on the document from the hardware guys, the following
	 * bits should be set unconditionally in order to enable FBC:
	 * bit 22 of 0x42000
	 * bit 22 of 0x42004
	 * bits 7, 8 and 9 of 0x42020
	 */
	if (IS_IRONLAKE_M(dev)) {
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
		I915_WRITE(ILK_DISPLAY_CHICKEN2,
			   I915_READ(ILK_DISPLAY_CHICKEN2) |
			   ILK_DPARB_GATE);
		I915_WRITE(ILK_DSPCLK_GATE,
			   I915_READ(ILK_DSPCLK_GATE) |
			   ILK_DPFC_DIS1 |
			   ILK_DPFC_DIS2 |
			   ILK_CLK_FBC);
	}

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);
	I915_WRITE(_3D_CHICKEN2,
		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
		   _3D_CHICKEN2_WM_READ_PIPELINED);
}

static void gen6_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);

	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));

	I915_WRITE(GEN6_UCGCTL1,
		   I915_READ(GEN6_UCGCTL1) |
		   GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
	 * gating disable must be set.  Failure to set it results in
	 * flickering pixels due to Z write ordering failures after
	 * some amount of runtime in the Mesa "fire" demo, and Unigine
	 * Sanctuary and Tropics, and apparently anything else with
	 * alpha test or pixel discard.
	 *
	 * According to the spec, bit 11 (RCCUNIT) must also be set,
	 * but we didn't debug actual testcases to find it out.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);

	/* Bspec says we need to always set all mask bits. */
	I915_WRITE(_3D_CHICKEN, (0xFFFF << 16) |
		   _3D_CHICKEN_SF_DISABLE_FASTCLIP_CULL);

	/*
	 * According to the spec, the following bits should be set in
	 * order to enable memory self-refresh and FBC:
	 * bits 21 and 22 of 0x42000
	 * bits 21 and 22 of 0x42004
	 * bits 5 and 7 of 0x42020
	 * bit 14 of 0x70180
	 * bit 14 of 0x71180
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN1,
		   I915_READ(ILK_DISPLAY_CHICKEN1) |
		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
	I915_WRITE(ILK_DSPCLK_GATE,
		   I915_READ(ILK_DSPCLK_GATE) |
		   ILK_DPARB_CLK_GATE |
		   ILK_DPFD_CLK_GATE);

	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_display_plane(dev_priv, pipe);
	}
}

static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
{
	uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);

	reg &= ~GEN7_FF_SCHED_MASK;
	reg |= GEN7_FF_TS_SCHED_HW;
	reg |= GEN7_FF_VS_SCHED_HW;
	reg |= GEN7_FF_DS_SCHED_HW;

	I915_WRITE(GEN7_FF_THREAD_MODE, reg);
}

static void ivybridge_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	/* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);

	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
	I915_WRITE(GEN7_L3CNTLREG1,
		   GEN7_WA_FOR_GEN7_L3_CONTROL);
	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
		   GEN7_WA_L3_CHICKEN_MODE);

	/* This is required by WaCatErrorRejectionIssue */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_display_plane(dev_priv, pipe);
	}

	gen7_setup_fixed_func_scheduler(dev_priv);

	/* WaDisable4x2SubspanOptimization */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
}

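/*
 * Note: this Valleyview variant mirrors the Ivy Bridge settings above
 * nearly line for line; the comments still referring to IVB were
 * presumably carried over with the copy.
 */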
static void valleyview_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	/* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);

	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
	I915_WRITE(GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL);
	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);

	/* This is required by WaCatErrorRejectionIssue */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_display_plane(dev_priv, pipe);
	}

	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
}

static void g4x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate;

	I915_WRITE(RENCLK_GATE_D1, 0);
	I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
		   GS_UNIT_CLOCK_GATE_DISABLE |
		   CL_UNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(RAMCLK_GATE_D, 0);
	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
		OVRUNIT_CLOCK_GATE_DISABLE |
		OVCUNIT_CLOCK_GATE_DISABLE;
	if (IS_GM45(dev))
		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
}

static void crestline_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(DSPCLK_GATE_D, 0);
	I915_WRITE(RAMCLK_GATE_D, 0);
	I915_WRITE16(DEUC, 0);
}

static void broadwater_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
		   I965_RCC_CLOCK_GATE_DISABLE |
		   I965_RCPB_CLOCK_GATE_DISABLE |
		   I965_ISC_CLOCK_GATE_DISABLE |
		   I965_FBC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
}

static void gen3_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dstate = I915_READ(D_STATE);

	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
		DSTATE_DOT_CLOCK_GATING;
	I915_WRITE(D_STATE, dstate);

	if (IS_PINEVIEW(dev))
		I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
}

static void i85x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
}

static void i830_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
}

static void ibx_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}

static void cpt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
		   DPLS_EDP_PPS_FIX_DIS);
	/* Without this, mode sets may fail silently on FDI */
	for_each_pipe(pipe)
		I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
}

void intel_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->display.init_clock_gating(dev);

	if (dev_priv->display.init_pch_clock_gating)
		dev_priv->display.init_pch_clock_gating(dev);
}

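/*
 * Cross-check the RPS interrupt limits left behind by the BIOS or a
 * previous driver instance against our cached min/max delays, and
 * rewrite the register if they disagree -- hence "sanitize".
 */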
static void gen6_sanitize_pm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 limits, delay, old;

	gen6_gt_force_wake_get(dev_priv);

	old = limits = I915_READ(GEN6_RP_INTERRUPT_LIMITS);
	/* Make sure we continue to get interrupts
	 * until we hit the minimum or maximum frequencies.
	 */
	limits &= ~(0x3f << 16 | 0x3f << 24);
	delay = dev_priv->cur_delay;
	if (delay < dev_priv->max_delay)
		limits |= (dev_priv->max_delay & 0x3f) << 24;
	if (delay > dev_priv->min_delay)
		limits |= (dev_priv->min_delay & 0x3f) << 16;
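	/* For example (hypothetical values): with min_delay 0x0a and
	 * max_delay 0x18 and the current delay strictly between them,
	 * limits becomes (0x18 << 24) | (0x0a << 16) == 0x180a0000. */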

	if (old != limits) {
		DRM_ERROR("Power management discrepancy: GEN6_RP_INTERRUPT_LIMITS expected %08x, was %08x\n",
			  limits, old);
		I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
	}

	gen6_gt_force_wake_put(dev_priv);
}

void intel_sanitize_pm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->display.sanitize_pm)
		dev_priv->display.sanitize_pm(dev);
}

/* Starting with Haswell, we have different power wells for
 * different parts of the GPU. This attempts to enable them all.
 */
void intel_init_power_wells(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long power_wells[] = {
		HSW_PWR_WELL_CTL1,
		HSW_PWR_WELL_CTL2,
		HSW_PWR_WELL_CTL4
	};
	int i;

	if (!IS_HASWELL(dev))
		return;

	mutex_lock(&dev->struct_mutex);

	for (i = 0; i < ARRAY_SIZE(power_wells); i++) {
		int well = I915_READ(power_wells[i]);

		if ((well & HSW_PWR_WELL_STATE) == 0) {
			I915_WRITE(power_wells[i], well | HSW_PWR_WELL_ENABLE);
			if (wait_for((I915_READ(power_wells[i]) & HSW_PWR_WELL_STATE), 20))
				DRM_ERROR("Error enabling power well %lx\n", power_wells[i]);
		}
	}

	mutex_unlock(&dev->struct_mutex);
}

/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (I915_HAS_FBC(dev)) {
		if (HAS_PCH_SPLIT(dev)) {
			dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
			dev_priv->display.enable_fbc = ironlake_enable_fbc;
			dev_priv->display.disable_fbc = ironlake_disable_fbc;
		} else if (IS_GM45(dev)) {
			dev_priv->display.fbc_enabled = g4x_fbc_enabled;
			dev_priv->display.enable_fbc = g4x_enable_fbc;
			dev_priv->display.disable_fbc = g4x_disable_fbc;
		} else if (IS_CRESTLINE(dev)) {
			dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
			dev_priv->display.enable_fbc = i8xx_enable_fbc;
			dev_priv->display.disable_fbc = i8xx_disable_fbc;
		}
		/* 855GM needs testing */
	}

	/* For cxsr */
	if (IS_PINEVIEW(dev))
		i915_pineview_get_mem_freq(dev);
	else if (IS_GEN5(dev))
		i915_ironlake_get_mem_freq(dev);

	/* For FIFO watermark updates */
	if (HAS_PCH_SPLIT(dev)) {
		dev_priv->display.force_wake_get = __gen6_gt_force_wake_get;
		dev_priv->display.force_wake_put = __gen6_gt_force_wake_put;

		/* IVB configs may use multi-threaded forcewake */
		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
			u32 ecobus;

			/* A small trick here: if the BIOS hasn't configured
			 * MT forcewake, and the device is in RC6, then
			 * force_wake_mt_get will not wake the device and the
			 * ECOBUS read will return zero, which is (correctly)
			 * interpreted by the test below as MT forcewake being
			 * disabled.
			 */
			mutex_lock(&dev->struct_mutex);
			__gen6_gt_force_wake_mt_get(dev_priv);
			ecobus = I915_READ_NOTRACE(ECOBUS);
			__gen6_gt_force_wake_mt_put(dev_priv);
			mutex_unlock(&dev->struct_mutex);

			if (ecobus & FORCEWAKE_MT_ENABLE) {
				DRM_DEBUG_KMS("Using MT version of forcewake\n");
				dev_priv->display.force_wake_get =
					__gen6_gt_force_wake_mt_get;
				dev_priv->display.force_wake_put =
					__gen6_gt_force_wake_mt_put;
			}
		}

		if (HAS_PCH_IBX(dev))
			dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
		else if (HAS_PCH_CPT(dev))
			dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating;

		if (IS_GEN5(dev)) {
			if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
				dev_priv->display.update_wm = ironlake_update_wm;
			else {
				DRM_DEBUG_KMS("Failed to get proper latency. "
					      "Disabling CxSR\n");
				dev_priv->display.update_wm = NULL;
			}
			dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
		} else if (IS_GEN6(dev)) {
			if (SNB_READ_WM0_LATENCY()) {
				dev_priv->display.update_wm = sandybridge_update_wm;
				dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
			} else {
				DRM_DEBUG_KMS("Failed to read display plane latency. "
					      "Disabling CxSR\n");
				dev_priv->display.update_wm = NULL;
			}
			dev_priv->display.init_clock_gating = gen6_init_clock_gating;
			dev_priv->display.sanitize_pm = gen6_sanitize_pm;
		} else if (IS_IVYBRIDGE(dev)) {
			/* FIXME: detect B0+ stepping and use auto training */
			if (SNB_READ_WM0_LATENCY()) {
				dev_priv->display.update_wm = sandybridge_update_wm;
				dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
			} else {
				DRM_DEBUG_KMS("Failed to read display plane latency. "
					      "Disabling CxSR\n");
				dev_priv->display.update_wm = NULL;
			}
			dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
			dev_priv->display.sanitize_pm = gen6_sanitize_pm;
		} else if (IS_HASWELL(dev)) {
			if (SNB_READ_WM0_LATENCY()) {
				dev_priv->display.update_wm = sandybridge_update_wm;
				dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
				dev_priv->display.update_linetime_wm = haswell_update_linetime_wm;
			} else {
				DRM_DEBUG_KMS("Failed to read display plane latency. "
					      "Disabling CxSR\n");
				dev_priv->display.update_wm = NULL;
			}
			dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
			dev_priv->display.sanitize_pm = gen6_sanitize_pm;
		} else
			dev_priv->display.update_wm = NULL;
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->display.update_wm = valleyview_update_wm;
		dev_priv->display.init_clock_gating =
			valleyview_init_clock_gating;
		dev_priv->display.force_wake_get = vlv_force_wake_get;
		dev_priv->display.force_wake_put = vlv_force_wake_put;
	} else if (IS_PINEVIEW(dev)) {
		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
					    dev_priv->is_ddr3,
					    dev_priv->fsb_freq,
					    dev_priv->mem_freq)) {
			DRM_INFO("failed to find known CxSR latency "
				 "(found ddr%s fsb freq %d, mem freq %d), "
				 "disabling CxSR\n",
				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
				 dev_priv->fsb_freq, dev_priv->mem_freq);
			/* Disable CxSR and never update its watermark again */
			pineview_disable_cxsr(dev);
			dev_priv->display.update_wm = NULL;
		} else
			dev_priv->display.update_wm = pineview_update_wm;
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	} else if (IS_G4X(dev)) {
		dev_priv->display.update_wm = g4x_update_wm;
		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
	} else if (IS_GEN4(dev)) {
		dev_priv->display.update_wm = i965_update_wm;
		if (IS_CRESTLINE(dev))
			dev_priv->display.init_clock_gating = crestline_init_clock_gating;
		else if (IS_BROADWATER(dev))
			dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
	} else if (IS_GEN3(dev)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	} else if (IS_I865G(dev)) {
		dev_priv->display.update_wm = i830_update_wm;
		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
		dev_priv->display.get_fifo_size = i830_get_fifo_size;
	} else if (IS_I85X(dev)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i85x_get_fifo_size;
		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
	} else {
		dev_priv->display.update_wm = i830_update_wm;
		dev_priv->display.init_clock_gating = i830_init_clock_gating;
		if (IS_845G(dev))
			dev_priv->display.get_fifo_size = i845_get_fifo_size;
		else
			dev_priv->display.get_fifo_size = i830_get_fifo_size;
	}

	/* We attempt to init the necessary power wells early in
	 * initialization, so the subsystems that expect power to be
	 * enabled can work.
	 */
	intel_init_power_wells(dev);
}

diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 62892a826ede..b59b6d5b7583 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -53,9 +53,35 @@ static inline int ring_space(struct intel_ring_buffer *ring)
 }
 
 static int
-render_ring_flush(struct intel_ring_buffer *ring,
-		  u32 invalidate_domains,
-		  u32 flush_domains)
+gen2_render_ring_flush(struct intel_ring_buffer *ring,
+		       u32 invalidate_domains,
+		       u32 flush_domains)
+{
+	u32 cmd;
+	int ret;
+
+	cmd = MI_FLUSH;
+	if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
+		cmd |= MI_NO_WRITE_FLUSH;
+
+	if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
+		cmd |= MI_READ_FLUSH;
+
+	ret = intel_ring_begin(ring, 2);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, cmd);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
+static int
+gen4_render_ring_flush(struct intel_ring_buffer *ring,
+		       u32 invalidate_domains,
+		       u32 flush_domains)
 {
 	struct drm_device *dev = ring->dev;
 	u32 cmd;
@@ -90,17 +116,8 @@ render_ring_flush(struct intel_ring_buffer *ring,
 	 */
 
 	cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
-	if ((invalidate_domains|flush_domains) &
-	    I915_GEM_DOMAIN_RENDER)
+	if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
 		cmd &= ~MI_NO_WRITE_FLUSH;
-	if (INTEL_INFO(dev)->gen < 4) {
-		/*
-		 * On the 965, the sampler cache always gets flushed
-		 * and this bit is reserved.
-		 */
-		if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
-			cmd |= MI_READ_FLUSH;
-	}
 	if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
 		cmd |= MI_EXE_FLUSH;
 
@@ -290,9 +307,9 @@ static int init_ring_common(struct intel_ring_buffer *ring)
 		     | RING_VALID);
 
 	/* If the head is still not zero, the ring is dead */
-	if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
-	    I915_READ_START(ring) != obj->gtt_offset ||
-	    (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
+	if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
+		     I915_READ_START(ring) == obj->gtt_offset &&
+		     (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
 		DRM_ERROR("%s initialization failed "
 			  "ctl %08x head %08x tail %08x start %08x\n",
 			  ring->name,
@@ -384,12 +401,11 @@ static int init_render_ring(struct intel_ring_buffer *ring)
 	int ret = init_ring_common(ring);
 
 	if (INTEL_INFO(dev)->gen > 3) {
-		int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
-		I915_WRITE(MI_MODE, mode);
+		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
 		if (IS_GEN7(dev))
 			I915_WRITE(GFX_MODE_GEN7,
-				   GFX_MODE_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
-				   GFX_MODE_ENABLE(GFX_REPLAY_MODE));
+				   _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
+				   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
 	}
 
 	if (INTEL_INFO(dev)->gen >= 5) {
@@ -398,7 +414,6 @@ static int init_render_ring(struct intel_ring_buffer *ring)
 		return ret;
 	}
 
-
 	if (IS_GEN6(dev)) {
 		/* From the Sandybridge PRM, volume 1 part 3, page 24:
 		 * "If this bit is set, STCunit will have LRA as replacement
@@ -406,13 +421,11 @@ static int init_render_ring(struct intel_ring_buffer *ring)
 		 * policy is not supported."
 		 */
 		I915_WRITE(CACHE_MODE_0,
-			   CM0_STC_EVICT_DISABLE_LRA_SNB << CM0_MASK_SHIFT);
+			   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
 	}
 
-	if (INTEL_INFO(dev)->gen >= 6) {
-		I915_WRITE(INSTPM,
-			   INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING);
-	}
+	if (INTEL_INFO(dev)->gen >= 6)
+		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
 
 	return ret;
 }
@@ -483,21 +496,30 @@ gen6_add_request(struct intel_ring_buffer *ring,
  * @seqno - seqno which the waiter will block on
  */
 static int
-intel_ring_sync(struct intel_ring_buffer *waiter,
-		struct intel_ring_buffer *signaller,
-		int ring,
-		u32 seqno)
+gen6_ring_sync(struct intel_ring_buffer *waiter,
+	       struct intel_ring_buffer *signaller,
+	       u32 seqno)
 {
 	int ret;
 	u32 dw1 = MI_SEMAPHORE_MBOX |
 		  MI_SEMAPHORE_COMPARE |
 		  MI_SEMAPHORE_REGISTER;
 
+	/* Throughout all of the GEM code, seqno passed implies our current
+	 * seqno is >= the last seqno executed. However for hardware the
+	 * comparison is strictly greater than.
+	 */
+	seqno -= 1;
+
+	WARN_ON(signaller->semaphore_register[waiter->id] ==
+		MI_SEMAPHORE_SYNC_INVALID);
+
 	ret = intel_ring_begin(waiter, 4);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(waiter, dw1 | signaller->semaphore_register[ring]);
+	intel_ring_emit(waiter,
+			dw1 | signaller->semaphore_register[waiter->id]);
 	intel_ring_emit(waiter, seqno);
 	intel_ring_emit(waiter, 0);
 	intel_ring_emit(waiter, MI_NOOP);
@@ -506,47 +528,6 @@ intel_ring_sync(struct intel_ring_buffer *waiter,
 	return 0;
 }
 
-/* VCS->RCS (RVSYNC) or BCS->RCS (RBSYNC) */
-int
-render_ring_sync_to(struct intel_ring_buffer *waiter,
-		    struct intel_ring_buffer *signaller,
-		    u32 seqno)
-{
-	WARN_ON(signaller->semaphore_register[RCS] == MI_SEMAPHORE_SYNC_INVALID);
-	return intel_ring_sync(waiter,
-			       signaller,
-			       RCS,
-			       seqno);
-}
-
-/* RCS->VCS (VRSYNC) or BCS->VCS (VBSYNC) */
-int
-gen6_bsd_ring_sync_to(struct intel_ring_buffer *waiter,
-		      struct intel_ring_buffer *signaller,
-		      u32 seqno)
-{
-	WARN_ON(signaller->semaphore_register[VCS] == MI_SEMAPHORE_SYNC_INVALID);
-	return intel_ring_sync(waiter,
-			       signaller,
-			       VCS,
-			       seqno);
-}
-
-/* RCS->BCS (BRSYNC) or VCS->BCS (BVSYNC) */
-int
-gen6_blt_ring_sync_to(struct intel_ring_buffer *waiter,
-		      struct intel_ring_buffer *signaller,
-		      u32 seqno)
-{
-	WARN_ON(signaller->semaphore_register[BCS] == MI_SEMAPHORE_SYNC_INVALID);
-	return intel_ring_sync(waiter,
-			       signaller,
-			       BCS,
-			       seqno);
-}
-
-
-
 #define PIPE_CONTROL_FLUSH(ring__, addr__) \
 do { \
 	intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \
@@ -608,27 +589,6 @@ pc_render_add_request(struct intel_ring_buffer *ring,
 	return 0;
 }
 
-static int
-render_ring_add_request(struct intel_ring_buffer *ring,
-			u32 *result)
-{
-	u32 seqno = i915_gem_next_request_seqno(ring);
-	int ret;
-
-	ret = intel_ring_begin(ring, 4);
-	if (ret)
-		return ret;
-
-	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
-	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	intel_ring_emit(ring, seqno);
-	intel_ring_emit(ring, MI_USER_INTERRUPT);
-	intel_ring_advance(ring);
-
-	*result = seqno;
-	return 0;
-}
-
 static u32
 gen6_ring_get_seqno(struct intel_ring_buffer *ring)
 {
@@ -655,76 +615,115 @@ pc_render_get_seqno(struct intel_ring_buffer *ring)
 	return pc->cpu_page[0];
 }
 
-static void
-ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
+static bool
+gen5_ring_get_irq(struct intel_ring_buffer *ring)
 {
-	dev_priv->gt_irq_mask &= ~mask;
-	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-	POSTING_READ(GTIMR);
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long flags;
+
+	if (!dev->irq_enabled)
+		return false;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	if (ring->irq_refcount++ == 0) {
+		dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
+		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+		POSTING_READ(GTIMR);
+	}
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+	return true;
 }
 
 static void
-ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
+gen5_ring_put_irq(struct intel_ring_buffer *ring)
 {
-	dev_priv->gt_irq_mask |= mask;
-	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-	POSTING_READ(GTIMR);
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	if (--ring->irq_refcount == 0) {
+		dev_priv->gt_irq_mask |= ring->irq_enable_mask;
+		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+		POSTING_READ(GTIMR);
+	}
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
 
-static void
-i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
+static bool
+i9xx_ring_get_irq(struct intel_ring_buffer *ring)
 {
-	dev_priv->irq_mask &= ~mask;
-	I915_WRITE(IMR, dev_priv->irq_mask);
-	POSTING_READ(IMR);
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long flags;
+
+	if (!dev->irq_enabled)
+		return false;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	if (ring->irq_refcount++ == 0) {
+		dev_priv->irq_mask &= ~ring->irq_enable_mask;
+		I915_WRITE(IMR, dev_priv->irq_mask);
+		POSTING_READ(IMR);
+	}
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+	return true;
 }
 
 static void
-i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
+i9xx_ring_put_irq(struct intel_ring_buffer *ring)
 {
-	dev_priv->irq_mask |= mask;
-	I915_WRITE(IMR, dev_priv->irq_mask);
-	POSTING_READ(IMR);
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	if (--ring->irq_refcount == 0) {
+		dev_priv->irq_mask |= ring->irq_enable_mask;
+		I915_WRITE(IMR, dev_priv->irq_mask);
+		POSTING_READ(IMR);
+	}
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
 
 static bool
-render_ring_get_irq(struct intel_ring_buffer *ring)
+i8xx_ring_get_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long flags;
 
 	if (!dev->irq_enabled)
 		return false;
 
-	spin_lock(&ring->irq_lock);
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (ring->irq_refcount++ == 0) {
-		if (HAS_PCH_SPLIT(dev))
-			ironlake_enable_irq(dev_priv,
-					    GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
-		else
-			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
+		dev_priv->irq_mask &= ~ring->irq_enable_mask;
+		I915_WRITE16(IMR, dev_priv->irq_mask);
+		POSTING_READ16(IMR);
 	}
-	spin_unlock(&ring->irq_lock);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
 	return true;
 }
 
 static void
-render_ring_put_irq(struct intel_ring_buffer *ring)
+i8xx_ring_put_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long flags;
 
-	spin_lock(&ring->irq_lock);
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (--ring->irq_refcount == 0) {
-		if (HAS_PCH_SPLIT(dev))
-			ironlake_disable_irq(dev_priv,
-					     GT_USER_INTERRUPT |
-					     GT_PIPE_NOTIFY);
-		else
-			i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
+		dev_priv->irq_mask |= ring->irq_enable_mask;
+		I915_WRITE16(IMR, dev_priv->irq_mask);
+		POSTING_READ16(IMR);
 	}
-	spin_unlock(&ring->irq_lock);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
 
 void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
@@ -776,7 +775,7 @@ bsd_ring_flush(struct intel_ring_buffer *ring,
 }
 
 static int
-ring_add_request(struct intel_ring_buffer *ring,
+i9xx_add_request(struct intel_ring_buffer *ring,
 		 u32 *result)
 {
 	u32 seqno;
@@ -799,10 +798,11 @@ ring_add_request(struct intel_ring_buffer *ring,
 }
 
 static bool
-gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
+gen6_ring_get_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long flags;
 
 	if (!dev->irq_enabled)
 		return false;
@@ -812,120 +812,87 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
 	 * blt/bsd rings on ivb. */
 	gen6_gt_force_wake_get(dev_priv);
 
-	spin_lock(&ring->irq_lock);
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (ring->irq_refcount++ == 0) {
-		ring->irq_mask &= ~rflag;
-		I915_WRITE_IMR(ring, ring->irq_mask);
-		ironlake_enable_irq(dev_priv, gflag);
+		I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
+		dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
+		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+		POSTING_READ(GTIMR);
 	}
-	spin_unlock(&ring->irq_lock);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
 	return true;
 }
 
 static void
-gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
+gen6_ring_put_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long flags;
 
-	spin_lock(&ring->irq_lock);
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (--ring->irq_refcount == 0) {
-		ring->irq_mask |= rflag;
-		I915_WRITE_IMR(ring, ring->irq_mask);
-		ironlake_disable_irq(dev_priv, gflag);
+		I915_WRITE_IMR(ring, ~0);
+		dev_priv->gt_irq_mask |= ring->irq_enable_mask;
+		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+		POSTING_READ(GTIMR);
 	}
-	spin_unlock(&ring->irq_lock);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
 	gen6_gt_force_wake_put(dev_priv);
 }
 
-static bool
-bsd_ring_get_irq(struct intel_ring_buffer *ring)
+static int
+i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
 {
-	struct drm_device *dev = ring->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
-
-	if (!dev->irq_enabled)
-		return false;
+	int ret;
 
-	spin_lock(&ring->irq_lock);
-	if (ring->irq_refcount++ == 0) {
-		if (IS_G4X(dev))
-			i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
-		else
-			ironlake_enable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
-	}
-	spin_unlock(&ring->irq_lock);
+	ret = intel_ring_begin(ring, 2);
+	if (ret)
+		return ret;
 
-	return true;
-}
-static void
-bsd_ring_put_irq(struct intel_ring_buffer *ring)
-{
-	struct drm_device *dev = ring->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	intel_ring_emit(ring,
+			MI_BATCH_BUFFER_START |
+			MI_BATCH_GTT |
+			MI_BATCH_NON_SECURE_I965);
+	intel_ring_emit(ring, offset);
+	intel_ring_advance(ring);
 
-	spin_lock(&ring->irq_lock);
-	if (--ring->irq_refcount == 0) {
-		if (IS_G4X(dev))
-			i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
-		else
-			ironlake_disable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
-	}
-	spin_unlock(&ring->irq_lock);
+	return 0;
 }
 
 static int
-ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
+i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
+			 u32 offset, u32 len)
 {
 	int ret;
 
-	ret = intel_ring_begin(ring, 2);
+	ret = intel_ring_begin(ring, 4);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring,
-			MI_BATCH_BUFFER_START | (2 << 6) |
-			MI_BATCH_NON_SECURE_I965);
-	intel_ring_emit(ring, offset);
+	intel_ring_emit(ring, MI_BATCH_BUFFER);
+	intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
+	intel_ring_emit(ring, offset + len - 8);
+	intel_ring_emit(ring, 0);
 	intel_ring_advance(ring);
 
 	return 0;
 }
 
 static int
-render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
-				u32 offset, u32 len)
+i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
+			 u32 offset, u32 len)
 {
-	struct drm_device *dev = ring->dev;
 	int ret;
 
-	if (IS_I830(dev) || IS_845G(dev)) {
-		ret = intel_ring_begin(ring, 4);
-		if (ret)
-			return ret;
-
-		intel_ring_emit(ring, MI_BATCH_BUFFER);
-		intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
-		intel_ring_emit(ring, offset + len - 8);
-		intel_ring_emit(ring, 0);
-	} else {
-		ret = intel_ring_begin(ring, 2);
-		if (ret)
-			return ret;
+	ret = intel_ring_begin(ring, 2);
+	if (ret)
+		return ret;
 
-		if (INTEL_INFO(dev)->gen >= 4) {
-			intel_ring_emit(ring,
-					MI_BATCH_BUFFER_START | (2 << 6) |
-					MI_BATCH_NON_SECURE_I965);
-			intel_ring_emit(ring, offset);
-		} else {
-			intel_ring_emit(ring,
-					MI_BATCH_BUFFER_START | (2 << 6));
-			intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
-		}
-	}
+	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
+	intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
 	intel_ring_advance(ring);
 
 	return 0;
@@ -933,7 +900,6 @@ render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
 
 static void cleanup_status_page(struct intel_ring_buffer *ring)
 {
-	drm_i915_private_t *dev_priv = ring->dev->dev_private;
 	struct drm_i915_gem_object *obj;
 
 	obj = ring->status_page.obj;
@@ -944,14 +910,11 @@ static void cleanup_status_page(struct intel_ring_buffer *ring)
 	i915_gem_object_unpin(obj);
 	drm_gem_object_unreference(&obj->base);
 	ring->status_page.obj = NULL;
-
-	memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
 }
 
 static int init_status_page(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
 	int ret;
 
@@ -972,7 +935,6 @@ static int init_status_page(struct intel_ring_buffer *ring)
 	ring->status_page.gfx_addr = obj->gtt_offset;
 	ring->status_page.page_addr = kmap(obj->pages[0]);
 	if (ring->status_page.page_addr == NULL) {
-		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
 		goto err_unpin;
 	}
 	ring->status_page.obj = obj;
@@ -992,8 +954,8 @@ err:
 	return ret;
 }
 
-int intel_init_ring_buffer(struct drm_device *dev,
-			   struct intel_ring_buffer *ring)
+static int intel_init_ring_buffer(struct drm_device *dev,
+				  struct intel_ring_buffer *ring)
 {
 	struct drm_i915_gem_object *obj;
 	int ret;
@@ -1002,10 +964,9 @@ int intel_init_ring_buffer(struct drm_device *dev,
 	INIT_LIST_HEAD(&ring->active_list);
 	INIT_LIST_HEAD(&ring->request_list);
 	INIT_LIST_HEAD(&ring->gpu_write_list);
+	ring->size = 32 * PAGE_SIZE;
 
 	init_waitqueue_head(&ring->irq_queue);
-	spin_lock_init(&ring->irq_lock);
-	ring->irq_mask = ~0;
 
 	if (I915_NEED_GFX_HWS(dev)) {
 		ret = init_status_page(ring);
@@ -1026,20 +987,14 @@ int intel_init_ring_buffer(struct drm_device *dev,
 	if (ret)
 		goto err_unref;
 
-	ring->map.size = ring->size;
-	ring->map.offset = dev->agp->base + obj->gtt_offset;
-	ring->map.type = 0;
-	ring->map.flags = 0;
-	ring->map.mtrr = 0;
-
-	drm_core_ioremap_wc(&ring->map, dev);
-	if (ring->map.handle == NULL) {
+	ring->virtual_start = ioremap_wc(dev->agp->base + obj->gtt_offset,
+					 ring->size);
+	if (ring->virtual_start == NULL) {
 		DRM_ERROR("Failed to map ringbuffer.\n");
 		ret = -EINVAL;
 		goto err_unpin;
 	}
 
-	ring->virtual_start = ring->map.handle;
 	ret = ring->init(ring);
 	if (ret)
 		goto err_unmap;
@@ -1055,7 +1010,7 @@ int intel_init_ring_buffer(struct drm_device *dev,
 	return 0;
 
 err_unmap:
-	drm_core_ioremapfree(&ring->map, dev);
+	iounmap(ring->virtual_start);
 err_unpin:
 	i915_gem_object_unpin(obj);
 err_unref:
@@ -1083,7 +1038,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
 
 	I915_WRITE_CTL(ring, 0);
 
-	drm_core_ioremapfree(&ring->map, ring->dev);
+	iounmap(ring->virtual_start);
 
 	i915_gem_object_unpin(ring->obj);
 	drm_gem_object_unreference(&ring->obj->base);
@@ -1097,7 +1052,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
 
 static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
 {
-	unsigned int *virt;
+	uint32_t __iomem *virt;
 	int rem = ring->size - ring->tail;
 
 	if (ring->space < rem) {
@@ -1106,12 +1061,10 @@ static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
 		return ret;
 	}
 
-	virt = (unsigned int *)(ring->virtual_start + ring->tail);
-	rem /= 8;
-	while (rem--) {
-		*virt++ = MI_NOOP;
-		*virt++ = MI_NOOP;
-	}
+	virt = ring->virtual_start + ring->tail;
+	rem /= 4;
+	while (rem--)
+		iowrite32(MI_NOOP, virt++);
 
 	ring->tail = 0;
 	ring->space = ring_space(ring);
@@ -1132,9 +1085,11 @@ static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
 	was_interruptible = dev_priv->mm.interruptible;
 	dev_priv->mm.interruptible = false;
 
-	ret = i915_wait_request(ring, seqno, true);
+	ret = i915_wait_request(ring, seqno);
 
 	dev_priv->mm.interruptible = was_interruptible;
+	if (!ret)
+		i915_gem_retire_requests_ring(ring);
 
 	return ret;
 }
@@ -1208,15 +1163,12 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
 		return ret;
 
 	trace_i915_ring_wait_begin(ring);
-	if (drm_core_check_feature(dev, DRIVER_GEM))
-		/* With GEM the hangcheck timer should kick us out of the loop,
-		 * leaving it early runs the risk of corrupting GEM state (due
-		 * to running on almost untested codepaths). But on resume
-		 * timers don't work yet, so prevent a complete hang in that
-		 * case by choosing an insanely large timeout. */
-		end = jiffies + 60 * HZ;
-	else
-		end = jiffies + 3 * HZ;
+	/* With GEM the hangcheck timer should kick us out of the loop,
+	 * leaving it early runs the risk of corrupting GEM state (due
+	 * to running on almost untested codepaths). But on resume
+	 * timers don't work yet, so prevent a complete hang in that
+	 * case by choosing an insanely large timeout. */
+	end = jiffies + 60 * HZ;
 
 	do {
 		ring->head = I915_READ_HEAD(ring);
@@ -1268,48 +1220,14 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
 
 void intel_ring_advance(struct intel_ring_buffer *ring)
 {
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
 	ring->tail &= ring->size - 1;
+	if (dev_priv->stop_rings & intel_ring_flag(ring))
+		return;
 	ring->write_tail(ring, ring->tail);
 }
 
-static const struct intel_ring_buffer render_ring = {
-	.name = "render ring",
-	.id = RCS,
-	.mmio_base = RENDER_RING_BASE,
-	.size = 32 * PAGE_SIZE,
-	.init = init_render_ring,
-	.write_tail = ring_write_tail,
-	.flush = render_ring_flush,
-	.add_request = render_ring_add_request,
-	.get_seqno = ring_get_seqno,
-	.irq_get = render_ring_get_irq,
-	.irq_put = render_ring_put_irq,
-	.dispatch_execbuffer = render_ring_dispatch_execbuffer,
-	.cleanup = render_ring_cleanup,
-	.sync_to = render_ring_sync_to,
-	.semaphore_register = {MI_SEMAPHORE_SYNC_INVALID,
-			       MI_SEMAPHORE_SYNC_RV,
-			       MI_SEMAPHORE_SYNC_RB},
-	.signal_mbox = {GEN6_VRSYNC, GEN6_BRSYNC},
-};
-
-/* ring buffer for bit-stream decoder */
-
-static const struct intel_ring_buffer bsd_ring = {
-	.name = "bsd ring",
-	.id = VCS,
-	.mmio_base = BSD_RING_BASE,
-	.size = 32 * PAGE_SIZE,
-	.init = init_ring_common,
-	.write_tail = ring_write_tail,
-	.flush = bsd_ring_flush,
-	.add_request = ring_add_request,
-	.get_seqno = ring_get_seqno,
-	.irq_get = bsd_ring_get_irq,
-	.irq_put = bsd_ring_put_irq,
-	.dispatch_execbuffer = ring_dispatch_execbuffer,
-};
-
 
 static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
 				     u32 value)
@@ -1372,77 +1290,8 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
 	return 0;
 }
 
-static bool
-gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
-{
-	return gen6_ring_get_irq(ring,
-				 GT_USER_INTERRUPT,
-				 GEN6_RENDER_USER_INTERRUPT);
-}
-
-static void
-gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
-{
-	return gen6_ring_put_irq(ring,
-				 GT_USER_INTERRUPT,
-				 GEN6_RENDER_USER_INTERRUPT);
-}
-
-static bool
-gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
-{
-	return gen6_ring_get_irq(ring,
-				 GT_GEN6_BSD_USER_INTERRUPT,
-				 GEN6_BSD_USER_INTERRUPT);
-}
-
-static void
-gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
-{
-	return gen6_ring_put_irq(ring,
-				 GT_GEN6_BSD_USER_INTERRUPT,
-				 GEN6_BSD_USER_INTERRUPT);
-}
-
-/* ring buffer for Video Codec for Gen6+ */
-static const struct intel_ring_buffer gen6_bsd_ring = {
-	.name = "gen6 bsd ring",
-	.id = VCS,
-	.mmio_base = GEN6_BSD_RING_BASE,
-	.size = 32 * PAGE_SIZE,
-	.init = init_ring_common,
-	.write_tail = gen6_bsd_ring_write_tail,
-	.flush = gen6_ring_flush,
-	.add_request = gen6_add_request,
-	.get_seqno = gen6_ring_get_seqno,
-	.irq_get = gen6_bsd_ring_get_irq,
-	.irq_put = gen6_bsd_ring_put_irq,
-	.dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
-	.sync_to = gen6_bsd_ring_sync_to,
-	.semaphore_register = {MI_SEMAPHORE_SYNC_VR,
-			       MI_SEMAPHORE_SYNC_INVALID,
-			       MI_SEMAPHORE_SYNC_VB},
-	.signal_mbox = {GEN6_RVSYNC, GEN6_BVSYNC},
-};
-
 /* Blitter support (SandyBridge+) */
 
-static bool
-blt_ring_get_irq(struct intel_ring_buffer *ring)
-{
-	return gen6_ring_get_irq(ring,
-				 GT_BLT_USER_INTERRUPT,
-				 GEN6_BLITTER_USER_INTERRUPT);
-}
-
-static void
-blt_ring_put_irq(struct intel_ring_buffer *ring)
-{
-	gen6_ring_put_irq(ring,
-			  GT_BLT_USER_INTERRUPT,
-			  GEN6_BLITTER_USER_INTERRUPT);
-}
-
 static int blt_ring_flush(struct intel_ring_buffer *ring,
 			  u32 invalidate, u32 flush)
 {
@@ -1464,42 +1313,63 @@ static int blt_ring_flush(struct intel_ring_buffer *ring,
 	return 0;
 }
 
-static const struct intel_ring_buffer gen6_blt_ring = {
-	.name = "blt ring",
-	.id = BCS,
-	.mmio_base = BLT_RING_BASE,
-	.size = 32 * PAGE_SIZE,
-	.init = init_ring_common,
-	.write_tail = ring_write_tail,
-	.flush = blt_ring_flush,
-	.add_request = gen6_add_request,
-	.get_seqno = gen6_ring_get_seqno,
-	.irq_get = blt_ring_get_irq,
-	.irq_put = blt_ring_put_irq,
-	.dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
-	.sync_to = gen6_blt_ring_sync_to,
-	.semaphore_register = {MI_SEMAPHORE_SYNC_BR,
-			       MI_SEMAPHORE_SYNC_BV,
-			       MI_SEMAPHORE_SYNC_INVALID},
-	.signal_mbox = {GEN6_RBSYNC, GEN6_VBSYNC},
-};
-
 int intel_init_render_ring_buffer(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
 
-	*ring = render_ring;
+	ring->name = "render ring";
+	ring->id = RCS;
+	ring->mmio_base = RENDER_RING_BASE;
+
 	if (INTEL_INFO(dev)->gen >= 6) {
 		ring->add_request = gen6_add_request;
 		ring->flush = gen6_render_ring_flush;
-		ring->irq_get = gen6_render_ring_get_irq;
-		ring->irq_put = gen6_render_ring_put_irq;
+		ring->irq_get = gen6_ring_get_irq;
+		ring->irq_put = gen6_ring_put_irq;
+		ring->irq_enable_mask = GT_USER_INTERRUPT;
 		ring->get_seqno = gen6_ring_get_seqno;
+		ring->sync_to = gen6_ring_sync;
+		ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID;
+		ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV;
+		ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_RB;
+		ring->signal_mbox[0] = GEN6_VRSYNC;
+		ring->signal_mbox[1] = GEN6_BRSYNC;
 	} else if (IS_GEN5(dev)) {
 		ring->add_request = pc_render_add_request;
+		ring->flush = gen4_render_ring_flush;
 		ring->get_seqno = pc_render_get_seqno;
+		ring->irq_get = gen5_ring_get_irq;
+		ring->irq_put = gen5_ring_put_irq;
+		ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY;
+	} else {
+		ring->add_request = i9xx_add_request;
+		if (INTEL_INFO(dev)->gen < 4)
+			ring->flush = gen2_render_ring_flush;
+		else
+			ring->flush = gen4_render_ring_flush;
+		ring->get_seqno = ring_get_seqno;
+		if (IS_GEN2(dev)) {
+			ring->irq_get = i8xx_ring_get_irq;
+			ring->irq_put = i8xx_ring_put_irq;
+		} else {
+			ring->irq_get = i9xx_ring_get_irq;
+			ring->irq_put = i9xx_ring_put_irq;
+		}
+		ring->irq_enable_mask = I915_USER_INTERRUPT;
 	}
+	ring->write_tail = ring_write_tail;
+	if (INTEL_INFO(dev)->gen >= 6)
+		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+	else if (INTEL_INFO(dev)->gen >= 4)
+		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+	else if (IS_I830(dev) || IS_845G(dev))
+		ring->dispatch_execbuffer = i830_dispatch_execbuffer;
+	else
+		ring->dispatch_execbuffer = i915_dispatch_execbuffer;
+	ring->init = init_render_ring;
+	ring->cleanup = render_ring_cleanup;
+
 
 	if (!I915_NEED_GFX_HWS(dev)) {
 		ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
@@ -1514,15 +1384,41 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
 
-	*ring = render_ring;
+	ring->name = "render ring";
+	ring->id = RCS;
+	ring->mmio_base = RENDER_RING_BASE;
+
 	if (INTEL_INFO(dev)->gen >= 6) {
-		ring->add_request = gen6_add_request;
-		ring->irq_get = gen6_render_ring_get_irq;
-		ring->irq_put = gen6_render_ring_put_irq;
-	} else if (IS_GEN5(dev)) {
-		ring->add_request = pc_render_add_request;
-		ring->get_seqno = pc_render_get_seqno;
+		/* non-kms not supported on gen6+ */
+		return -ENODEV;
+	}
+
+	/* Note: gem is not supported on gen5/ilk without kms (the corresponding
+	 * gem_init ioctl returns with -ENODEV). Hence we do not need to set up
+	 * the special gen5 functions. */
+	ring->add_request = i9xx_add_request;
+	if (INTEL_INFO(dev)->gen < 4)
+		ring->flush = gen2_render_ring_flush;
+	else
+		ring->flush = gen4_render_ring_flush;
+	ring->get_seqno = ring_get_seqno;
+	if (IS_GEN2(dev)) {
+		ring->irq_get = i8xx_ring_get_irq;
+		ring->irq_put = i8xx_ring_put_irq;
+	} else {
+		ring->irq_get = i9xx_ring_get_irq;
+		ring->irq_put = i9xx_ring_put_irq;
 	}
+	ring->irq_enable_mask = I915_USER_INTERRUPT;
+	ring->write_tail = ring_write_tail;
+	if (INTEL_INFO(dev)->gen >= 4)
+		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+	else if (IS_I830(dev) || IS_845G(dev))
+		ring->dispatch_execbuffer = i830_dispatch_execbuffer;
+	else
+		ring->dispatch_execbuffer = i915_dispatch_execbuffer;
+	ring->init = init_render_ring;
+	ring->cleanup = render_ring_cleanup;
 
 	if (!I915_NEED_GFX_HWS(dev))
 		ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
@@ -1537,20 +1433,13 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
 	if (IS_I830(ring->dev))
 		ring->effective_size -= 128;
 
-	ring->map.offset = start;
-	ring->map.size = size;
-	ring->map.type = 0;
-	ring->map.flags = 0;
-	ring->map.mtrr = 0;
-
-	drm_core_ioremap_wc(&ring->map, dev);
-	if (ring->map.handle == NULL) {
+	ring->virtual_start = ioremap_wc(start, size);
+	if (ring->virtual_start == NULL) {
 		DRM_ERROR("can not ioremap virtual address for"
 			  " ring buffer\n");
 		return -ENOMEM;
 	}
 
-	ring->virtual_start = (void __force __iomem *)ring->map.handle;
 	return 0;
 }
 
@@ -1559,10 +1448,46 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
1559 drm_i915_private_t *dev_priv = dev->dev_private; 1448 drm_i915_private_t *dev_priv = dev->dev_private;
1560 struct intel_ring_buffer *ring = &dev_priv->ring[VCS]; 1449 struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
1561 1450
1562 if (IS_GEN6(dev) || IS_GEN7(dev)) 1451 ring->name = "bsd ring";
1563 *ring = gen6_bsd_ring; 1452 ring->id = VCS;
1564 else 1453
1565 *ring = bsd_ring; 1454 ring->write_tail = ring_write_tail;
1455 if (IS_GEN6(dev) || IS_GEN7(dev)) {
1456 ring->mmio_base = GEN6_BSD_RING_BASE;
1457 /* gen6 bsd needs a special wa for tail updates */
1458 if (IS_GEN6(dev))
1459 ring->write_tail = gen6_bsd_ring_write_tail;
1460 ring->flush = gen6_ring_flush;
1461 ring->add_request = gen6_add_request;
1462 ring->get_seqno = gen6_ring_get_seqno;
1463 ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT;
1464 ring->irq_get = gen6_ring_get_irq;
1465 ring->irq_put = gen6_ring_put_irq;
1466 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
1467 ring->sync_to = gen6_ring_sync;
1468 ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_VR;
1469 ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_INVALID;
1470 ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_VB;
1471 ring->signal_mbox[0] = GEN6_RVSYNC;
1472 ring->signal_mbox[1] = GEN6_BVSYNC;
1473 } else {
1474 ring->mmio_base = BSD_RING_BASE;
1475 ring->flush = bsd_ring_flush;
1476 ring->add_request = i9xx_add_request;
1477 ring->get_seqno = ring_get_seqno;
1478 if (IS_GEN5(dev)) {
1479 ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
1480 ring->irq_get = gen5_ring_get_irq;
1481 ring->irq_put = gen5_ring_put_irq;
1482 } else {
1483 ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
1484 ring->irq_get = i9xx_ring_get_irq;
1485 ring->irq_put = i9xx_ring_put_irq;
1486 }
1487 ring->dispatch_execbuffer = i965_dispatch_execbuffer;
1488 }
1489 ring->init = init_ring_common;
1490
1566 1491
1567 return intel_init_ring_buffer(dev, ring); 1492 return intel_init_ring_buffer(dev, ring);
1568} 1493}
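The gen6/gen7 branch above also wires up inter-ring hardware semaphores: sync_to lets this ring wait on another ring's seqno through the MBOX selector published in semaphore_register[], with MI_SEMAPHORE_SYNC_INVALID on the diagonal (a ring never waits on itself), while signal_mbox[] names the mailboxes poked on signalling. A toy model of that table shape; the selector values are made up, not the hardware encodings:

	#include <stdio.h>

	enum ring_id { RCS, VCS, BCS, NUM_RINGS };
	#define SYNC_INVALID 0

	/* sem[waiter][signaller]: which mailbox the waiter polls */
	static const int sem[NUM_RINGS][NUM_RINGS] = {
		[VCS] = {
			[RCS] = 0x11,	/* stands in for MI_SEMAPHORE_SYNC_VR */
			[VCS] = SYNC_INVALID,
			[BCS] = 0x12,	/* stands in for MI_SEMAPHORE_SYNC_VB */
		},
	};

	static int ring_sync(enum ring_id waiter, enum ring_id signaller)
	{
		int mbox = sem[waiter][signaller];

		if (mbox == SYNC_INVALID)
			return -1;	/* nothing to wait on */
		printf("ring %d waits on mbox %#x of ring %d\n",
		       waiter, mbox, signaller);
		return 0;
	}

	int main(void) { return ring_sync(VCS, RCS); }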
@@ -1572,7 +1497,25 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
1572 drm_i915_private_t *dev_priv = dev->dev_private; 1497 drm_i915_private_t *dev_priv = dev->dev_private;
1573 struct intel_ring_buffer *ring = &dev_priv->ring[BCS]; 1498 struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
1574 1499
1575 *ring = gen6_blt_ring; 1500 ring->name = "blitter ring";
1501 ring->id = BCS;
1502
1503 ring->mmio_base = BLT_RING_BASE;
1504 ring->write_tail = ring_write_tail;
1505 ring->flush = blt_ring_flush;
1506 ring->add_request = gen6_add_request;
1507 ring->get_seqno = gen6_ring_get_seqno;
1508 ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT;
1509 ring->irq_get = gen6_ring_get_irq;
1510 ring->irq_put = gen6_ring_put_irq;
1511 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
1512 ring->sync_to = gen6_ring_sync;
1513 ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_BR;
1514 ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_BV;
1515 ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_INVALID;
1516 ring->signal_mbox[0] = GEN6_RBSYNC;
1517 ring->signal_mbox[1] = GEN6_VBSYNC;
1518 ring->init = init_ring_common;
1576 1519
1577 return intel_init_ring_buffer(dev, ring); 1520 return intel_init_ring_buffer(dev, ring);
1578} 1521}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index bc0365b8fa4d..55d3da26bae7 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -2,7 +2,7 @@
2#define _INTEL_RINGBUFFER_H_ 2#define _INTEL_RINGBUFFER_H_
3 3
4struct intel_hw_status_page { 4struct intel_hw_status_page {
5 u32 __iomem *page_addr; 5 u32 *page_addr;
6 unsigned int gfx_addr; 6 unsigned int gfx_addr;
7 struct drm_i915_gem_object *obj; 7 struct drm_i915_gem_object *obj;
8}; 8};
@@ -56,12 +56,9 @@ struct intel_ring_buffer {
56 */ 56 */
57 u32 last_retired_head; 57 u32 last_retired_head;
58 58
59 spinlock_t irq_lock; 59 u32 irq_refcount; /* protected by dev_priv->irq_lock */
60 u32 irq_refcount; 60 u32 irq_enable_mask; /* bitmask to enable ring interrupt */
61 u32 irq_mask;
62 u32 irq_seqno; /* last seq seem at irq time */
63 u32 trace_irq_seqno; 61 u32 trace_irq_seqno;
64 u32 waiting_seqno;
65 u32 sync_seqno[I915_NUM_RINGS-1]; 62 u32 sync_seqno[I915_NUM_RINGS-1];
66 bool __must_check (*irq_get)(struct intel_ring_buffer *ring); 63 bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
67 void (*irq_put)(struct intel_ring_buffer *ring); 64 void (*irq_put)(struct intel_ring_buffer *ring);
@@ -118,11 +115,16 @@ struct intel_ring_buffer {
118 u32 outstanding_lazy_request; 115 u32 outstanding_lazy_request;
119 116
120 wait_queue_head_t irq_queue; 117 wait_queue_head_t irq_queue;
121 drm_local_map_t map;
122 118
123 void *private; 119 void *private;
124}; 120};
125 121
122static inline bool
123intel_ring_initialized(struct intel_ring_buffer *ring)
124{
125 return ring->obj != NULL;
126}
127
126static inline unsigned 128static inline unsigned
127intel_ring_flag(struct intel_ring_buffer *ring) 129intel_ring_flag(struct intel_ring_buffer *ring)
128{ 130{
@@ -152,7 +154,9 @@ static inline u32
152intel_read_status_page(struct intel_ring_buffer *ring, 154intel_read_status_page(struct intel_ring_buffer *ring,
153 int reg) 155 int reg)
154{ 156{
155 return ioread32(ring->status_page.page_addr + reg); 157 /* Ensure that the compiler doesn't optimize away the load. */
158 barrier();
159 return ring->status_page.page_addr[reg];
156} 160}
157 161
158/** 162/**
@@ -170,10 +174,7 @@ intel_read_status_page(struct intel_ring_buffer *ring,
170 * 174 *
171 * The area from dword 0x20 to 0x3ff is available for driver usage. 175 * The area from dword 0x20 to 0x3ff is available for driver usage.
172 */ 176 */
173#define READ_HWSP(dev_priv, reg) intel_read_status_page(LP_RING(dev_priv), reg)
174#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
175#define I915_GEM_HWS_INDEX 0x20 177#define I915_GEM_HWS_INDEX 0x20
176#define I915_BREADCRUMB_INDEX 0x21
177 178
178void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring); 179void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
179 180
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index ae5e748f39bb..a949b73880c8 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -41,7 +41,7 @@
41#define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1) 41#define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)
42#define SDVO_RGB_MASK (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1) 42#define SDVO_RGB_MASK (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)
43#define SDVO_LVDS_MASK (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1) 43#define SDVO_LVDS_MASK (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1)
44#define SDVO_TV_MASK (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0) 44#define SDVO_TV_MASK (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_YPRPB0)
45 45
46#define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\ 46#define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\
47 SDVO_TV_MASK) 47 SDVO_TV_MASK)
@@ -74,7 +74,7 @@ struct intel_sdvo {
74 struct i2c_adapter ddc; 74 struct i2c_adapter ddc;
75 75
76 /* Register for the SDVO device: SDVOB or SDVOC */ 76 /* Register for the SDVO device: SDVOB or SDVOC */
77 int sdvo_reg; 77 uint32_t sdvo_reg;
78 78
79 /* Active outputs controlled by this SDVO output */ 79 /* Active outputs controlled by this SDVO output */
80 uint16_t controlled_output; 80 uint16_t controlled_output;
@@ -114,6 +114,9 @@ struct intel_sdvo {
114 */ 114 */
115 bool is_tv; 115 bool is_tv;
116 116
117 /* On different gens SDVOB is at different places. */
118 bool is_sdvob;
119
117 /* This is for current tv format name */ 120 /* This is for current tv format name */
118 int tv_format_index; 121 int tv_format_index;
119 122
@@ -403,8 +406,7 @@ static const struct _sdvo_cmd_name {
403 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA), 406 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
404}; 407};
405 408
 406#define IS_SDVOB(reg) (reg == SDVOB || reg == PCH_SDVOB) 409#define SDVO_NAME(sdvo) ((sdvo)->is_sdvob ? "SDVOB" : "SDVOC")
407#define SDVO_NAME(svdo) (IS_SDVOB((svdo)->sdvo_reg) ? "SDVOB" : "SDVOC")
408 410
409static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd, 411static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
410 const void *args, int args_len) 412 const void *args, int args_len)
@@ -441,9 +443,17 @@ static const char *cmd_status_names[] = {
441static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd, 443static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
442 const void *args, int args_len) 444 const void *args, int args_len)
443{ 445{
444 u8 buf[args_len*2 + 2], status; 446 u8 *buf, status;
445 struct i2c_msg msgs[args_len + 3]; 447 struct i2c_msg *msgs;
446 int i, ret; 448 int i, ret = true;
449
450 buf = (u8 *)kzalloc(args_len * 2 + 2, GFP_KERNEL);
451 if (!buf)
452 return false;
453
454 msgs = kcalloc(args_len + 3, sizeof(*msgs), GFP_KERNEL);
 455 if (!msgs) {
 456 kfree(buf);
 457 return false;
 458 }
447 457
448 intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len); 458 intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len);
449 459
@@ -477,15 +487,19 @@ static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
477 ret = i2c_transfer(intel_sdvo->i2c, msgs, i+3); 487 ret = i2c_transfer(intel_sdvo->i2c, msgs, i+3);
478 if (ret < 0) { 488 if (ret < 0) {
479 DRM_DEBUG_KMS("I2c transfer returned %d\n", ret); 489 DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
480 return false; 490 ret = false;
491 goto out;
481 } 492 }
482 if (ret != i+3) { 493 if (ret != i+3) {
483 /* failure in I2C transfer */ 494 /* failure in I2C transfer */
484 DRM_DEBUG_KMS("I2c transfer returned %d/%d\n", ret, i+3); 495 DRM_DEBUG_KMS("I2c transfer returned %d/%d\n", ret, i+3);
485 return false; 496 ret = false;
486 } 497 }
487 498
488 return true; 499out:
500 kfree(msgs);
501 kfree(buf);
502 return ret;
489} 503}
490 504
491static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, 505static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
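The write_cmd hunk above replaces two variable-length stack arrays with heap allocations and funnels every exit through a single cleanup path, so each error branch frees both buffers. The shape of that pattern as a self-contained sketch; names are illustrative, and the real function builds an i2c_msg array:

	#include <stdbool.h>
	#include <stdlib.h>
	#include <string.h>

	static bool send_cmd(const void *args, size_t args_len)
	{
		bool ok = false;
		unsigned char *buf = calloc(args_len * 2 + 2, 1);
		void **msgs = calloc(args_len + 3, sizeof(*msgs));

		if (!buf || !msgs)
			goto out;	/* one exit frees whatever was allocated */

		memcpy(buf, args, args_len);	/* ... build and send messages ... */
		ok = true;
	out:
		free(msgs);
		free(buf);
		return ok;
	}

	int main(void)
	{
		return send_cmd("ab", 2) ? 0 : 1;
	}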
@@ -733,18 +747,18 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
733 uint16_t h_sync_offset, v_sync_offset; 747 uint16_t h_sync_offset, v_sync_offset;
734 int mode_clock; 748 int mode_clock;
735 749
736 width = mode->crtc_hdisplay; 750 width = mode->hdisplay;
737 height = mode->crtc_vdisplay; 751 height = mode->vdisplay;
738 752
739 /* do some mode translations */ 753 /* do some mode translations */
740 h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start; 754 h_blank_len = mode->htotal - mode->hdisplay;
741 h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start; 755 h_sync_len = mode->hsync_end - mode->hsync_start;
742 756
743 v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start; 757 v_blank_len = mode->vtotal - mode->vdisplay;
744 v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start; 758 v_sync_len = mode->vsync_end - mode->vsync_start;
745 759
746 h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start; 760 h_sync_offset = mode->hsync_start - mode->hdisplay;
747 v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start; 761 v_sync_offset = mode->vsync_start - mode->vdisplay;
748 762
749 mode_clock = mode->clock; 763 mode_clock = mode->clock;
750 mode_clock /= intel_mode_get_pixel_multiplier(mode) ?: 1; 764 mode_clock /= intel_mode_get_pixel_multiplier(mode) ?: 1;
@@ -873,17 +887,24 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
873 }; 887 };
874 uint8_t tx_rate = SDVO_HBUF_TX_VSYNC; 888 uint8_t tx_rate = SDVO_HBUF_TX_VSYNC;
875 uint8_t set_buf_index[2] = { 1, 0 }; 889 uint8_t set_buf_index[2] = { 1, 0 };
876 uint64_t *data = (uint64_t *)&avi_if; 890 uint8_t sdvo_data[4 + sizeof(avi_if.body.avi)];
891 uint64_t *data = (uint64_t *)sdvo_data;
877 unsigned i; 892 unsigned i;
878 893
879 intel_dip_infoframe_csum(&avi_if); 894 intel_dip_infoframe_csum(&avi_if);
880 895
896 /* sdvo spec says that the ecc is handled by the hw, and it looks like
897 * we must not send the ecc field, either. */
898 memcpy(sdvo_data, &avi_if, 3);
899 sdvo_data[3] = avi_if.checksum;
900 memcpy(&sdvo_data[4], &avi_if.body, sizeof(avi_if.body.avi));
901
881 if (!intel_sdvo_set_value(intel_sdvo, 902 if (!intel_sdvo_set_value(intel_sdvo,
882 SDVO_CMD_SET_HBUF_INDEX, 903 SDVO_CMD_SET_HBUF_INDEX,
883 set_buf_index, 2)) 904 set_buf_index, 2))
884 return false; 905 return false;
885 906
886 for (i = 0; i < sizeof(avi_if); i += 8) { 907 for (i = 0; i < sizeof(sdvo_data); i += 8) {
887 if (!intel_sdvo_set_value(intel_sdvo, 908 if (!intel_sdvo_set_value(intel_sdvo,
888 SDVO_CMD_SET_HBUF_DATA, 909 SDVO_CMD_SET_HBUF_DATA,
889 data, 8)) 910 data, 8))
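Per the comment above, the SDVO hardware computes the ECC itself, so the driver repacks the infoframe: 3-byte DIP header, then the checksum, then the 13-byte AVI payload, 17 bytes in all, pushed to the HBUF in 8-byte writes. A sketch of that layout, assuming the field sizes shown in the diff:

	#include <stdint.h>
	#include <string.h>

	struct avi_if {
		uint8_t type, version, length;	/* 3-byte DIP header */
		uint8_t ecc;			/* computed by the SDVO hw, never sent */
		uint8_t checksum;
		uint8_t payload[13];		/* AVI body */
	};

	static void pack_sdvo_avi(const struct avi_if *f, uint8_t out[17])
	{
		memcpy(out, f, 3);			/* header only, ecc skipped */
		out[3] = f->checksum;
		memcpy(&out[4], f->payload, 13);	/* body */
	}

	int main(void)
	{
		struct avi_if f = { .type = 0x82, .version = 2, .length = 13,
				    .checksum = 0x55 };
		uint8_t out[17];

		pack_sdvo_avi(&f, out);
		return out[3] == 0x55 ? 0 : 1;	/* checksum follows the header */
	}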
@@ -1260,10 +1281,11 @@ intel_sdvo_get_analog_edid(struct drm_connector *connector)
1260 struct drm_i915_private *dev_priv = connector->dev->dev_private; 1281 struct drm_i915_private *dev_priv = connector->dev->dev_private;
1261 1282
1262 return drm_get_edid(connector, 1283 return drm_get_edid(connector,
1263 &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter); 1284 intel_gmbus_get_adapter(dev_priv,
1285 dev_priv->crt_ddc_pin));
1264} 1286}
1265 1287
1266enum drm_connector_status 1288static enum drm_connector_status
1267intel_sdvo_tmds_sink_detect(struct drm_connector *connector) 1289intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
1268{ 1290{
1269 struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); 1291 struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
@@ -1349,8 +1371,7 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
1349 return connector_status_unknown; 1371 return connector_status_unknown;
1350 1372
1351 /* add 30ms delay when the output type might be TV */ 1373 /* add 30ms delay when the output type might be TV */
1352 if (intel_sdvo->caps.output_flags & 1374 if (intel_sdvo->caps.output_flags & SDVO_TV_MASK)
1353 (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0))
1354 mdelay(30); 1375 mdelay(30);
1355 1376
1356 if (!intel_sdvo_read_response(intel_sdvo, &response, 2)) 1377 if (!intel_sdvo_read_response(intel_sdvo, &response, 2))
@@ -1570,9 +1591,6 @@ end:
1570 intel_sdvo->sdvo_lvds_fixed_mode = 1591 intel_sdvo->sdvo_lvds_fixed_mode =
1571 drm_mode_duplicate(connector->dev, newmode); 1592 drm_mode_duplicate(connector->dev, newmode);
1572 1593
1573 drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode,
1574 0);
1575
1576 intel_sdvo->is_lvds = true; 1594 intel_sdvo->is_lvds = true;
1577 break; 1595 break;
1578 } 1596 }
@@ -1901,7 +1919,7 @@ intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
1901{ 1919{
1902 struct sdvo_device_mapping *mapping; 1920 struct sdvo_device_mapping *mapping;
1903 1921
1904 if (IS_SDVOB(reg)) 1922 if (sdvo->is_sdvob)
1905 mapping = &(dev_priv->sdvo_mappings[0]); 1923 mapping = &(dev_priv->sdvo_mappings[0]);
1906 else 1924 else
1907 mapping = &(dev_priv->sdvo_mappings[1]); 1925 mapping = &(dev_priv->sdvo_mappings[1]);
@@ -1919,7 +1937,7 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
1919 struct sdvo_device_mapping *mapping; 1937 struct sdvo_device_mapping *mapping;
1920 u8 pin; 1938 u8 pin;
1921 1939
1922 if (IS_SDVOB(reg)) 1940 if (sdvo->is_sdvob)
1923 mapping = &dev_priv->sdvo_mappings[0]; 1941 mapping = &dev_priv->sdvo_mappings[0];
1924 else 1942 else
1925 mapping = &dev_priv->sdvo_mappings[1]; 1943 mapping = &dev_priv->sdvo_mappings[1];
@@ -1928,12 +1946,12 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
1928 if (mapping->initialized) 1946 if (mapping->initialized)
1929 pin = mapping->i2c_pin; 1947 pin = mapping->i2c_pin;
1930 1948
1931 if (pin < GMBUS_NUM_PORTS) { 1949 if (intel_gmbus_is_port_valid(pin)) {
1932 sdvo->i2c = &dev_priv->gmbus[pin].adapter; 1950 sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin);
1933 intel_gmbus_set_speed(sdvo->i2c, GMBUS_RATE_1MHZ); 1951 intel_gmbus_set_speed(sdvo->i2c, GMBUS_RATE_1MHZ);
1934 intel_gmbus_force_bit(sdvo->i2c, true); 1952 intel_gmbus_force_bit(sdvo->i2c, true);
1935 } else { 1953 } else {
1936 sdvo->i2c = &dev_priv->gmbus[GMBUS_PORT_DPB].adapter; 1954 sdvo->i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB);
1937 } 1955 }
1938} 1956}
1939 1957
@@ -1944,12 +1962,12 @@ intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device)
1944} 1962}
1945 1963
1946static u8 1964static u8
1947intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg) 1965intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo)
1948{ 1966{
1949 struct drm_i915_private *dev_priv = dev->dev_private; 1967 struct drm_i915_private *dev_priv = dev->dev_private;
1950 struct sdvo_device_mapping *my_mapping, *other_mapping; 1968 struct sdvo_device_mapping *my_mapping, *other_mapping;
1951 1969
1952 if (IS_SDVOB(sdvo_reg)) { 1970 if (sdvo->is_sdvob) {
1953 my_mapping = &dev_priv->sdvo_mappings[0]; 1971 my_mapping = &dev_priv->sdvo_mappings[0];
1954 other_mapping = &dev_priv->sdvo_mappings[1]; 1972 other_mapping = &dev_priv->sdvo_mappings[1];
1955 } else { 1973 } else {
@@ -1974,7 +1992,7 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
1974 /* No SDVO device info is found for another DVO port, 1992 /* No SDVO device info is found for another DVO port,
1975 * so use mapping assumption we had before BIOS parsing. 1993 * so use mapping assumption we had before BIOS parsing.
1976 */ 1994 */
1977 if (IS_SDVOB(sdvo_reg)) 1995 if (sdvo->is_sdvob)
1978 return 0x70; 1996 return 0x70;
1979 else 1997 else
1980 return 0x72; 1998 return 0x72;
@@ -2199,6 +2217,10 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags)
2199 if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_CVBS0)) 2217 if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_CVBS0))
2200 return false; 2218 return false;
2201 2219
2220 if (flags & SDVO_OUTPUT_YPRPB0)
2221 if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_YPRPB0))
2222 return false;
2223
2202 if (flags & SDVO_OUTPUT_RGB0) 2224 if (flags & SDVO_OUTPUT_RGB0)
2203 if (!intel_sdvo_analog_init(intel_sdvo, 0)) 2225 if (!intel_sdvo_analog_init(intel_sdvo, 0))
2204 return false; 2226 return false;
@@ -2490,7 +2512,7 @@ intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo,
2490 return i2c_add_adapter(&sdvo->ddc) == 0; 2512 return i2c_add_adapter(&sdvo->ddc) == 0;
2491} 2513}
2492 2514
2493bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) 2515bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
2494{ 2516{
2495 struct drm_i915_private *dev_priv = dev->dev_private; 2517 struct drm_i915_private *dev_priv = dev->dev_private;
2496 struct intel_encoder *intel_encoder; 2518 struct intel_encoder *intel_encoder;
@@ -2502,7 +2524,8 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
2502 return false; 2524 return false;
2503 2525
2504 intel_sdvo->sdvo_reg = sdvo_reg; 2526 intel_sdvo->sdvo_reg = sdvo_reg;
2505 intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg) >> 1; 2527 intel_sdvo->is_sdvob = is_sdvob;
2528 intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1;
2506 intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg); 2529 intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg);
2507 if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) { 2530 if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) {
2508 kfree(intel_sdvo); 2531 kfree(intel_sdvo);
@@ -2519,13 +2542,13 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
2519 u8 byte; 2542 u8 byte;
2520 2543
2521 if (!intel_sdvo_read_byte(intel_sdvo, i, &byte)) { 2544 if (!intel_sdvo_read_byte(intel_sdvo, i, &byte)) {
2522 DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n", 2545 DRM_DEBUG_KMS("No SDVO device found on %s\n",
2523 IS_SDVOB(sdvo_reg) ? 'B' : 'C'); 2546 SDVO_NAME(intel_sdvo));
2524 goto err; 2547 goto err;
2525 } 2548 }
2526 } 2549 }
2527 2550
2528 if (IS_SDVOB(sdvo_reg)) 2551 if (intel_sdvo->is_sdvob)
2529 dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS; 2552 dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
2530 else 2553 else
2531 dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS; 2554 dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
@@ -2546,8 +2569,8 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
2546 2569
2547 if (intel_sdvo_output_setup(intel_sdvo, 2570 if (intel_sdvo_output_setup(intel_sdvo,
2548 intel_sdvo->caps.output_flags) != true) { 2571 intel_sdvo->caps.output_flags) != true) {
2549 DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n", 2572 DRM_DEBUG_KMS("SDVO output failed to setup on %s\n",
2550 IS_SDVOB(sdvo_reg) ? 'B' : 'C'); 2573 SDVO_NAME(intel_sdvo));
2551 goto err; 2574 goto err;
2552 } 2575 }
2553 2576
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index e90dfb625c42..2a20fb0781d7 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -110,14 +110,18 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
110 * when scaling is disabled. 110 * when scaling is disabled.
111 */ 111 */
112 if (crtc_w != src_w || crtc_h != src_h) { 112 if (crtc_w != src_w || crtc_h != src_h) {
113 dev_priv->sprite_scaling_enabled = true; 113 if (!dev_priv->sprite_scaling_enabled) {
114 sandybridge_update_wm(dev); 114 dev_priv->sprite_scaling_enabled = true;
115 intel_wait_for_vblank(dev, pipe); 115 intel_update_watermarks(dev);
116 intel_wait_for_vblank(dev, pipe);
117 }
116 sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h; 118 sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
117 } else { 119 } else {
118 dev_priv->sprite_scaling_enabled = false; 120 if (dev_priv->sprite_scaling_enabled) {
119 /* potentially re-enable LP watermarks */ 121 dev_priv->sprite_scaling_enabled = false;
120 sandybridge_update_wm(dev); 122 /* potentially re-enable LP watermarks */
123 intel_update_watermarks(dev);
124 }
121 } 125 }
122 126
123 I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]); 127 I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
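The rewrite above adds hysteresis: watermarks are reprogrammed only when the scaling state actually flips, and the vblank wait happens once, on the enabling edge, rather than on every plane update. The gating pattern reduced to a standalone sketch:

	#include <stdbool.h>
	#include <stdio.h>

	static bool scaling_enabled;

	static void set_scaling(bool want)
	{
		if (want == scaling_enabled)
			return;			/* steady state: skip the expensive work */
		scaling_enabled = want;
		printf("update watermarks\n");	/* stands in for intel_update_watermarks() */
		if (want)
			printf("wait for vblank\n");	/* only on the enabling edge */
	}

	int main(void)
	{
		set_scaling(true);	/* transition: does the work */
		set_scaling(true);	/* no-op */
		set_scaling(false);	/* transition back */
		return 0;
	}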
@@ -133,7 +137,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
133 I915_WRITE(SPRSIZE(pipe), (crtc_h << 16) | crtc_w); 137 I915_WRITE(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
134 I915_WRITE(SPRSCALE(pipe), sprscale); 138 I915_WRITE(SPRSCALE(pipe), sprscale);
135 I915_WRITE(SPRCTL(pipe), sprctl); 139 I915_WRITE(SPRCTL(pipe), sprctl);
136 I915_WRITE(SPRSURF(pipe), obj->gtt_offset); 140 I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset);
137 POSTING_READ(SPRSURF(pipe)); 141 POSTING_READ(SPRSURF(pipe));
138} 142}
139 143
@@ -149,8 +153,11 @@ ivb_disable_plane(struct drm_plane *plane)
149 /* Can't leave the scaler enabled... */ 153 /* Can't leave the scaler enabled... */
150 I915_WRITE(SPRSCALE(pipe), 0); 154 I915_WRITE(SPRSCALE(pipe), 0);
151 /* Activate double buffered register update */ 155 /* Activate double buffered register update */
152 I915_WRITE(SPRSURF(pipe), 0); 156 I915_MODIFY_DISPBASE(SPRSURF(pipe), 0);
153 POSTING_READ(SPRSURF(pipe)); 157 POSTING_READ(SPRSURF(pipe));
158
159 dev_priv->sprite_scaling_enabled = false;
160 intel_update_watermarks(dev);
154} 161}
155 162
156static int 163static int
@@ -208,7 +215,7 @@ ivb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
208} 215}
209 216
210static void 217static void
211snb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, 218ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
212 struct drm_i915_gem_object *obj, int crtc_x, int crtc_y, 219 struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
213 unsigned int crtc_w, unsigned int crtc_h, 220 unsigned int crtc_w, unsigned int crtc_h,
214 uint32_t x, uint32_t y, 221 uint32_t x, uint32_t y,
@@ -218,7 +225,7 @@ snb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
218 struct drm_i915_private *dev_priv = dev->dev_private; 225 struct drm_i915_private *dev_priv = dev->dev_private;
219 struct intel_plane *intel_plane = to_intel_plane(plane); 226 struct intel_plane *intel_plane = to_intel_plane(plane);
220 int pipe = intel_plane->pipe, pixel_size; 227 int pipe = intel_plane->pipe, pixel_size;
221 u32 dvscntr, dvsscale = 0; 228 u32 dvscntr, dvsscale;
222 229
223 dvscntr = I915_READ(DVSCNTR(pipe)); 230 dvscntr = I915_READ(DVSCNTR(pipe));
224 231
@@ -262,8 +269,8 @@ snb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
262 if (obj->tiling_mode != I915_TILING_NONE) 269 if (obj->tiling_mode != I915_TILING_NONE)
263 dvscntr |= DVS_TILED; 270 dvscntr |= DVS_TILED;
264 271
265 /* must disable */ 272 if (IS_GEN6(dev))
266 dvscntr |= DVS_TRICKLE_FEED_DISABLE; 273 dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
267 dvscntr |= DVS_ENABLE; 274 dvscntr |= DVS_ENABLE;
268 275
269 /* Sizes are 0 based */ 276 /* Sizes are 0 based */
@@ -274,7 +281,8 @@ snb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
274 281
275 intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size); 282 intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size);
276 283
277 if (crtc_w != src_w || crtc_h != src_h) 284 dvsscale = 0;
285 if (IS_GEN5(dev) || crtc_w != src_w || crtc_h != src_h)
278 dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h; 286 dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
279 287
280 I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]); 288 I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
@@ -290,12 +298,12 @@ snb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
290 I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w); 298 I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
291 I915_WRITE(DVSSCALE(pipe), dvsscale); 299 I915_WRITE(DVSSCALE(pipe), dvsscale);
292 I915_WRITE(DVSCNTR(pipe), dvscntr); 300 I915_WRITE(DVSCNTR(pipe), dvscntr);
293 I915_WRITE(DVSSURF(pipe), obj->gtt_offset); 301 I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset);
294 POSTING_READ(DVSSURF(pipe)); 302 POSTING_READ(DVSSURF(pipe));
295} 303}
296 304
297static void 305static void
298snb_disable_plane(struct drm_plane *plane) 306ilk_disable_plane(struct drm_plane *plane)
299{ 307{
300 struct drm_device *dev = plane->dev; 308 struct drm_device *dev = plane->dev;
301 struct drm_i915_private *dev_priv = dev->dev_private; 309 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -306,7 +314,7 @@ snb_disable_plane(struct drm_plane *plane)
306 /* Disable the scaler */ 314 /* Disable the scaler */
307 I915_WRITE(DVSSCALE(pipe), 0); 315 I915_WRITE(DVSSCALE(pipe), 0);
308 /* Flush double buffered register updates */ 316 /* Flush double buffered register updates */
309 I915_WRITE(DVSSURF(pipe), 0); 317 I915_MODIFY_DISPBASE(DVSSURF(pipe), 0);
310 POSTING_READ(DVSSURF(pipe)); 318 POSTING_READ(DVSSURF(pipe));
311} 319}
312 320
@@ -333,7 +341,7 @@ intel_disable_primary(struct drm_crtc *crtc)
333} 341}
334 342
335static int 343static int
336snb_update_colorkey(struct drm_plane *plane, 344ilk_update_colorkey(struct drm_plane *plane,
337 struct drm_intel_sprite_colorkey *key) 345 struct drm_intel_sprite_colorkey *key)
338{ 346{
339 struct drm_device *dev = plane->dev; 347 struct drm_device *dev = plane->dev;
@@ -362,7 +370,7 @@ snb_update_colorkey(struct drm_plane *plane,
362} 370}
363 371
364static void 372static void
365snb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key) 373ilk_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
366{ 374{
367 struct drm_device *dev = plane->dev; 375 struct drm_device *dev = plane->dev;
368 struct drm_i915_private *dev_priv = dev->dev_private; 376 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -550,14 +558,13 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
550 struct drm_file *file_priv) 558 struct drm_file *file_priv)
551{ 559{
552 struct drm_intel_sprite_colorkey *set = data; 560 struct drm_intel_sprite_colorkey *set = data;
553 struct drm_i915_private *dev_priv = dev->dev_private;
554 struct drm_mode_object *obj; 561 struct drm_mode_object *obj;
555 struct drm_plane *plane; 562 struct drm_plane *plane;
556 struct intel_plane *intel_plane; 563 struct intel_plane *intel_plane;
557 int ret = 0; 564 int ret = 0;
558 565
559 if (!dev_priv) 566 if (!drm_core_check_feature(dev, DRIVER_MODESET))
560 return -EINVAL; 567 return -ENODEV;
561 568
562 /* Make sure we don't try to enable both src & dest simultaneously */ 569 /* Make sure we don't try to enable both src & dest simultaneously */
563 if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) 570 if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
@@ -584,14 +591,13 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
584 struct drm_file *file_priv) 591 struct drm_file *file_priv)
585{ 592{
586 struct drm_intel_sprite_colorkey *get = data; 593 struct drm_intel_sprite_colorkey *get = data;
587 struct drm_i915_private *dev_priv = dev->dev_private;
588 struct drm_mode_object *obj; 594 struct drm_mode_object *obj;
589 struct drm_plane *plane; 595 struct drm_plane *plane;
590 struct intel_plane *intel_plane; 596 struct intel_plane *intel_plane;
591 int ret = 0; 597 int ret = 0;
592 598
593 if (!dev_priv) 599 if (!drm_core_check_feature(dev, DRIVER_MODESET))
594 return -EINVAL; 600 return -ENODEV;
595 601
596 mutex_lock(&dev->mode_config.mutex); 602 mutex_lock(&dev->mode_config.mutex);
597 603
@@ -616,6 +622,14 @@ static const struct drm_plane_funcs intel_plane_funcs = {
616 .destroy = intel_destroy_plane, 622 .destroy = intel_destroy_plane,
617}; 623};
618 624
625static uint32_t ilk_plane_formats[] = {
626 DRM_FORMAT_XRGB8888,
627 DRM_FORMAT_YUYV,
628 DRM_FORMAT_YVYU,
629 DRM_FORMAT_UYVY,
630 DRM_FORMAT_VYUY,
631};
632
619static uint32_t snb_plane_formats[] = { 633static uint32_t snb_plane_formats[] = {
620 DRM_FORMAT_XBGR8888, 634 DRM_FORMAT_XBGR8888,
621 DRM_FORMAT_XRGB8888, 635 DRM_FORMAT_XRGB8888,
@@ -630,34 +644,56 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe)
630{ 644{
631 struct intel_plane *intel_plane; 645 struct intel_plane *intel_plane;
632 unsigned long possible_crtcs; 646 unsigned long possible_crtcs;
647 const uint32_t *plane_formats;
648 int num_plane_formats;
633 int ret; 649 int ret;
634 650
635 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 651 if (INTEL_INFO(dev)->gen < 5)
636 return -ENODEV; 652 return -ENODEV;
637 653
638 intel_plane = kzalloc(sizeof(struct intel_plane), GFP_KERNEL); 654 intel_plane = kzalloc(sizeof(struct intel_plane), GFP_KERNEL);
639 if (!intel_plane) 655 if (!intel_plane)
640 return -ENOMEM; 656 return -ENOMEM;
641 657
642 if (IS_GEN6(dev)) { 658 switch (INTEL_INFO(dev)->gen) {
659 case 5:
660 case 6:
643 intel_plane->max_downscale = 16; 661 intel_plane->max_downscale = 16;
644 intel_plane->update_plane = snb_update_plane; 662 intel_plane->update_plane = ilk_update_plane;
645 intel_plane->disable_plane = snb_disable_plane; 663 intel_plane->disable_plane = ilk_disable_plane;
646 intel_plane->update_colorkey = snb_update_colorkey; 664 intel_plane->update_colorkey = ilk_update_colorkey;
647 intel_plane->get_colorkey = snb_get_colorkey; 665 intel_plane->get_colorkey = ilk_get_colorkey;
648 } else if (IS_GEN7(dev)) { 666
667 if (IS_GEN6(dev)) {
668 plane_formats = snb_plane_formats;
669 num_plane_formats = ARRAY_SIZE(snb_plane_formats);
670 } else {
671 plane_formats = ilk_plane_formats;
672 num_plane_formats = ARRAY_SIZE(ilk_plane_formats);
673 }
674 break;
675
676 case 7:
649 intel_plane->max_downscale = 2; 677 intel_plane->max_downscale = 2;
650 intel_plane->update_plane = ivb_update_plane; 678 intel_plane->update_plane = ivb_update_plane;
651 intel_plane->disable_plane = ivb_disable_plane; 679 intel_plane->disable_plane = ivb_disable_plane;
652 intel_plane->update_colorkey = ivb_update_colorkey; 680 intel_plane->update_colorkey = ivb_update_colorkey;
653 intel_plane->get_colorkey = ivb_get_colorkey; 681 intel_plane->get_colorkey = ivb_get_colorkey;
682
683 plane_formats = snb_plane_formats;
684 num_plane_formats = ARRAY_SIZE(snb_plane_formats);
685 break;
686
687 default:
688 return -ENODEV;
654 } 689 }
655 690
656 intel_plane->pipe = pipe; 691 intel_plane->pipe = pipe;
657 possible_crtcs = (1 << pipe); 692 possible_crtcs = (1 << pipe);
658 ret = drm_plane_init(dev, &intel_plane->base, possible_crtcs, 693 ret = drm_plane_init(dev, &intel_plane->base, possible_crtcs,
659 &intel_plane_funcs, snb_plane_formats, 694 &intel_plane_funcs,
660 ARRAY_SIZE(snb_plane_formats), false); 695 plane_formats, num_plane_formats,
696 false);
661 if (ret) 697 if (ret)
662 kfree(intel_plane); 698 kfree(intel_plane);
663 699
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 05f765ef5464..3346612d2953 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -811,7 +811,7 @@ intel_tv_mode_lookup(const char *tv_format)
811{ 811{
812 int i; 812 int i;
813 813
814 for (i = 0; i < sizeof(tv_modes) / sizeof(tv_modes[0]); i++) { 814 for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
815 const struct tv_mode *tv_mode = &tv_modes[i]; 815 const struct tv_mode *tv_mode = &tv_modes[i];
816 816
817 if (!strcmp(tv_format, tv_mode->name)) 817 if (!strcmp(tv_format, tv_mode->name))
@@ -1153,6 +1153,15 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
1153 DAC_B_0_7_V | 1153 DAC_B_0_7_V |
1154 DAC_C_0_7_V); 1154 DAC_C_0_7_V);
1155 1155
1156
 1157 /*
 1158 * The TV sense state should be cleared to zero on the Cantiga platform;
 1159 * otherwise the TV is misdetected. This is a hardware requirement.
 1160 */
1161 if (IS_GM45(dev))
1162 tv_dac &= ~(TVDAC_STATE_CHG_EN | TVDAC_A_SENSE_CTL |
1163 TVDAC_B_SENSE_CTL | TVDAC_C_SENSE_CTL);
1164
1156 I915_WRITE(TV_CTL, tv_ctl); 1165 I915_WRITE(TV_CTL, tv_ctl);
1157 I915_WRITE(TV_DAC, tv_dac); 1166 I915_WRITE(TV_DAC, tv_dac);
1158 POSTING_READ(TV_DAC); 1167 POSTING_READ(TV_DAC);
@@ -1240,11 +1249,8 @@ intel_tv_detect(struct drm_connector *connector, bool force)
1240 int type; 1249 int type;
1241 1250
1242 mode = reported_modes[0]; 1251 mode = reported_modes[0];
1243 drm_mode_set_crtcinfo(&mode, 0);
1244 1252
1245 if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) { 1253 if (force) {
1246 type = intel_tv_detect_type(intel_tv, connector);
1247 } else if (force) {
1248 struct intel_load_detect_pipe tmp; 1254 struct intel_load_detect_pipe tmp;
1249 1255
1250 if (intel_get_load_detect_pipe(&intel_tv->base, connector, 1256 if (intel_get_load_detect_pipe(&intel_tv->base, connector,
diff --git a/drivers/gpu/drm/mgag200/Kconfig b/drivers/gpu/drm/mgag200/Kconfig
new file mode 100644
index 000000000000..d63013497f66
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/Kconfig
@@ -0,0 +1,15 @@
1config DRM_MGAG200
2 tristate "Kernel modesetting driver for MGA G200 server engines"
3 depends on DRM && PCI && EXPERIMENTAL
4 select FB_SYS_FILLRECT
5 select FB_SYS_COPYAREA
6 select FB_SYS_IMAGEBLIT
7 select DRM_KMS_HELPER
8 select DRM_TTM
9 help
 10 This is a KMS driver for the MGA G200 server chips; it
 11 does not support the original MGA G200 or any of the desktop
 12 chips. It requires version 0.3.0 of the modesetting userspace
 13 driver, and a version of the mga driver that will fail on
 14 KMS-enabled devices.
15
diff --git a/drivers/gpu/drm/mgag200/Makefile b/drivers/gpu/drm/mgag200/Makefile
new file mode 100644
index 000000000000..7db592eedbf1
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/Makefile
@@ -0,0 +1,5 @@
1ccflags-y := -Iinclude/drm
2mgag200-y := mgag200_main.o mgag200_mode.o \
3 mgag200_drv.o mgag200_fb.o mgag200_i2c.o mgag200_ttm.o
4
5obj-$(CONFIG_DRM_MGAG200) += mgag200.o
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
new file mode 100644
index 000000000000..3c8e04f54713
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -0,0 +1,116 @@
1/*
2 * Copyright 2012 Red Hat
3 *
4 * This file is subject to the terms and conditions of the GNU General
5 * Public License version 2. See the file COPYING in the main
6 * directory of this archive for more details.
7 *
8 * Authors: Matthew Garrett
9 * Dave Airlie
10 */
11#include <linux/module.h>
12#include <linux/console.h>
13#include "drmP.h"
14#include "drm.h"
15
16#include "mgag200_drv.h"
17
18#include "drm_pciids.h"
19
20/*
21 * This is the generic driver code. This binds the driver to the drm core,
22 * which then performs further device association and calls our graphics init
23 * functions
24 */
25int mgag200_modeset = -1;
26
27MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
28module_param_named(modeset, mgag200_modeset, int, 0400);
29
30static struct drm_driver driver;
31
32static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
33 { PCI_VENDOR_ID_MATROX, 0x522, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_SE_A },
34 { PCI_VENDOR_ID_MATROX, 0x524, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_SE_B },
35 { PCI_VENDOR_ID_MATROX, 0x530, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EV },
36 { PCI_VENDOR_ID_MATROX, 0x532, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_WB },
37 { PCI_VENDOR_ID_MATROX, 0x533, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EH },
38 { PCI_VENDOR_ID_MATROX, 0x534, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_ER },
39 {0,}
40};
41
42MODULE_DEVICE_TABLE(pci, pciidlist);
43
44static int __devinit
45mga_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
46{
47 return drm_get_pci_dev(pdev, ent, &driver);
48}
49
50static void mga_pci_remove(struct pci_dev *pdev)
51{
52 struct drm_device *dev = pci_get_drvdata(pdev);
53
54 drm_put_dev(dev);
55}
56
57static const struct file_operations mgag200_driver_fops = {
58 .owner = THIS_MODULE,
59 .open = drm_open,
60 .release = drm_release,
61 .unlocked_ioctl = drm_ioctl,
62 .mmap = mgag200_mmap,
63 .poll = drm_poll,
64 .fasync = drm_fasync,
65 .read = drm_read,
66};
67
68static struct drm_driver driver = {
69 .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_USE_MTRR,
70 .load = mgag200_driver_load,
71 .unload = mgag200_driver_unload,
72 .fops = &mgag200_driver_fops,
73 .name = DRIVER_NAME,
74 .desc = DRIVER_DESC,
75 .date = DRIVER_DATE,
76 .major = DRIVER_MAJOR,
77 .minor = DRIVER_MINOR,
78 .patchlevel = DRIVER_PATCHLEVEL,
79
80 .gem_init_object = mgag200_gem_init_object,
81 .gem_free_object = mgag200_gem_free_object,
82 .dumb_create = mgag200_dumb_create,
83 .dumb_map_offset = mgag200_dumb_mmap_offset,
84 .dumb_destroy = mgag200_dumb_destroy,
85};
86
87static struct pci_driver mgag200_pci_driver = {
88 .name = DRIVER_NAME,
89 .id_table = pciidlist,
90 .probe = mga_pci_probe,
91 .remove = mga_pci_remove,
92};
93
94static int __init mgag200_init(void)
95{
96#ifdef CONFIG_VGA_CONSOLE
97 if (vgacon_text_force() && mgag200_modeset == -1)
98 return -EINVAL;
99#endif
100
101 if (mgag200_modeset == 0)
102 return -EINVAL;
103 return drm_pci_init(&driver, &mgag200_pci_driver);
104}
105
106static void __exit mgag200_exit(void)
107{
108 drm_pci_exit(&driver, &mgag200_pci_driver);
109}
110
111module_init(mgag200_init);
112module_exit(mgag200_exit);
113
114MODULE_AUTHOR(DRIVER_AUTHOR);
115MODULE_DESCRIPTION(DRIVER_DESC);
116MODULE_LICENSE("GPL");
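mgag200_init() above implements the usual tri-state modeset gate: -1 defers to vgacon_text_force(), 0 refuses to load, and anything else loads unconditionally. The decision logic as a sketch; the helper name is made up:

	#include <stdbool.h>

	static int modeset = -1;	/* mirrors the module parameter default */

	static bool should_load(bool text_console_forced)
	{
		if (text_console_forced && modeset == -1)
			return false;	/* auto mode defers to the VGA console */
		if (modeset == 0)
			return false;	/* explicitly disabled on the command line */
		return true;
	}

	int main(void)
	{
		return should_load(true) ? 1 : 0;	/* auto + forced text: stay out */
	}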
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
new file mode 100644
index 000000000000..6f13b3563234
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -0,0 +1,276 @@
1/*
2 * Copyright 2010 Matt Turner.
3 * Copyright 2012 Red Hat
4 *
5 * This file is subject to the terms and conditions of the GNU General
6 * Public License version 2. See the file COPYING in the main
7 * directory of this archive for more details.
8 *
9 * Authors: Matthew Garrett
10 * Matt Turner
11 * Dave Airlie
12 */
13#ifndef __MGAG200_DRV_H__
14#define __MGAG200_DRV_H__
15
16#include <video/vga.h>
17
18#include "drm/drm_fb_helper.h"
19#include "ttm/ttm_bo_api.h"
20#include "ttm/ttm_bo_driver.h"
21#include "ttm/ttm_placement.h"
22#include "ttm/ttm_memory.h"
23#include "ttm/ttm_module.h"
24
25#include <linux/i2c.h>
26#include <linux/i2c-algo-bit.h>
27
28#include "mgag200_reg.h"
29
30#define DRIVER_AUTHOR "Matthew Garrett"
31
32#define DRIVER_NAME "mgag200"
33#define DRIVER_DESC "MGA G200 SE"
34#define DRIVER_DATE "20110418"
35
36#define DRIVER_MAJOR 1
37#define DRIVER_MINOR 0
38#define DRIVER_PATCHLEVEL 0
39
40#define MGAG200FB_CONN_LIMIT 1
41
42#define RREG8(reg) ioread8(((void __iomem *)mdev->rmmio) + (reg))
43#define WREG8(reg, v) iowrite8(v, ((void __iomem *)mdev->rmmio) + (reg))
44#define RREG32(reg) ioread32(((void __iomem *)mdev->rmmio) + (reg))
45#define WREG32(reg, v) iowrite32(v, ((void __iomem *)mdev->rmmio) + (reg))
46
47#define ATTR_INDEX 0x1fc0
48#define ATTR_DATA 0x1fc1
49
50#define WREG_ATTR(reg, v) \
51 do { \
52 RREG8(0x1fda); \
53 WREG8(ATTR_INDEX, reg); \
54 WREG8(ATTR_DATA, v); \
55 } while (0) \
56
57#define WREG_SEQ(reg, v) \
58 do { \
59 WREG8(MGAREG_SEQ_INDEX, reg); \
60 WREG8(MGAREG_SEQ_DATA, v); \
61 } while (0) \
62
63#define WREG_CRT(reg, v) \
64 do { \
65 WREG8(MGAREG_CRTC_INDEX, reg); \
66 WREG8(MGAREG_CRTC_DATA, v); \
67 } while (0) \
68
69
70#define WREG_ECRT(reg, v) \
71 do { \
72 WREG8(MGAREG_CRTCEXT_INDEX, reg); \
73 WREG8(MGAREG_CRTCEXT_DATA, v); \
74 } while (0) \
75
76#define GFX_INDEX 0x1fce
77#define GFX_DATA 0x1fcf
78
79#define WREG_GFX(reg, v) \
80 do { \
81 WREG8(GFX_INDEX, reg); \
82 WREG8(GFX_DATA, v); \
83 } while (0) \
84
85#define DAC_INDEX 0x3c00
86#define DAC_DATA 0x3c0a
87
88#define WREG_DAC(reg, v) \
89 do { \
90 WREG8(DAC_INDEX, reg); \
91 WREG8(DAC_DATA, v); \
92 } while (0) \
93
94#define MGA_MISC_OUT 0x1fc2
95#define MGA_MISC_IN 0x1fcc
96
97#define MGAG200_MAX_FB_HEIGHT 4096
98#define MGAG200_MAX_FB_WIDTH 4096
99
100#define MATROX_DPMS_CLEARED (-1)
101
102#define to_mga_crtc(x) container_of(x, struct mga_crtc, base)
103#define to_mga_encoder(x) container_of(x, struct mga_encoder, base)
104#define to_mga_connector(x) container_of(x, struct mga_connector, base)
105#define to_mga_framebuffer(x) container_of(x, struct mga_framebuffer, base)
106
107struct mga_framebuffer {
108 struct drm_framebuffer base;
109 struct drm_gem_object *obj;
110};
111
112struct mga_fbdev {
113 struct drm_fb_helper helper;
114 struct mga_framebuffer mfb;
115 struct list_head fbdev_list;
116 void *sysram;
117 int size;
118 struct ttm_bo_kmap_obj mapping;
119};
120
121struct mga_crtc {
122 struct drm_crtc base;
123 u8 lut_r[256], lut_g[256], lut_b[256];
124 int last_dpms;
125 bool enabled;
126};
127
128struct mga_mode_info {
129 bool mode_config_initialized;
130 struct mga_crtc *crtc;
131};
132
133struct mga_encoder {
134 struct drm_encoder base;
135 int last_dpms;
136};
137
138
139struct mga_i2c_chan {
140 struct i2c_adapter adapter;
141 struct drm_device *dev;
142 struct i2c_algo_bit_data bit;
143 int data, clock;
144};
145
146struct mga_connector {
147 struct drm_connector base;
148 struct mga_i2c_chan *i2c;
149};
150
151
152struct mga_mc {
153 resource_size_t vram_size;
154 resource_size_t vram_base;
155 resource_size_t vram_window;
156};
157
158enum mga_type {
159 G200_SE_A,
160 G200_SE_B,
161 G200_WB,
162 G200_EV,
163 G200_EH,
164 G200_ER,
165};
166
167#define IS_G200_SE(mdev) (mdev->type == G200_SE_A || mdev->type == G200_SE_B)
168
169struct mga_device {
170 struct drm_device *dev;
171 unsigned long flags;
172
173 resource_size_t rmmio_base;
174 resource_size_t rmmio_size;
175 void __iomem *rmmio;
176
177 drm_local_map_t *framebuffer;
178
179 struct mga_mc mc;
180 struct mga_mode_info mode_info;
181
182 struct mga_fbdev *mfbdev;
183
184 bool suspended;
185 int num_crtc;
186 enum mga_type type;
187 int has_sdram;
188 struct drm_display_mode mode;
189
190 int bpp_shifts[4];
191
192 int fb_mtrr;
193
194 struct {
195 struct drm_global_reference mem_global_ref;
196 struct ttm_bo_global_ref bo_global_ref;
197 struct ttm_bo_device bdev;
198 atomic_t validate_sequence;
199 } ttm;
200
201 u32 reg_1e24; /* SE model number */
202};
203
204
205struct mgag200_bo {
206 struct ttm_buffer_object bo;
207 struct ttm_placement placement;
208 struct ttm_bo_kmap_obj kmap;
209 struct drm_gem_object gem;
210 u32 placements[3];
211 int pin_count;
212};
213#define gem_to_mga_bo(gobj) container_of((gobj), struct mgag200_bo, gem)
214
215static inline struct mgag200_bo *
216mgag200_bo(struct ttm_buffer_object *bo)
217{
218 return container_of(bo, struct mgag200_bo, bo);
219}
220 /* mga_crtc.c */
221void mga_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
222 u16 blue, int regno);
223void mga_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
224 u16 *blue, int regno);
225
226 /* mgag200_mode.c */
227int mgag200_modeset_init(struct mga_device *mdev);
228void mgag200_modeset_fini(struct mga_device *mdev);
229
230 /* mga_fbdev.c */
231int mgag200_fbdev_init(struct mga_device *mdev);
232void mgag200_fbdev_fini(struct mga_device *mdev);
233
234 /* mgag200_main.c */
235int mgag200_framebuffer_init(struct drm_device *dev,
236 struct mga_framebuffer *mfb,
237 struct drm_mode_fb_cmd2 *mode_cmd,
238 struct drm_gem_object *obj);
239
240
241int mgag200_driver_load(struct drm_device *dev, unsigned long flags);
242int mgag200_driver_unload(struct drm_device *dev);
243int mgag200_gem_create(struct drm_device *dev,
244 u32 size, bool iskernel,
245 struct drm_gem_object **obj);
246int mgag200_gem_init_object(struct drm_gem_object *obj);
247int mgag200_dumb_create(struct drm_file *file,
248 struct drm_device *dev,
249 struct drm_mode_create_dumb *args);
250int mgag200_dumb_destroy(struct drm_file *file,
251 struct drm_device *dev,
252 uint32_t handle);
253void mgag200_gem_free_object(struct drm_gem_object *obj);
254int
255mgag200_dumb_mmap_offset(struct drm_file *file,
256 struct drm_device *dev,
257 uint32_t handle,
258 uint64_t *offset);
259 /* mga_i2c.c */
260struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev);
261void mgag200_i2c_destroy(struct mga_i2c_chan *i2c);
262
263#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
264void mgag200_ttm_placement(struct mgag200_bo *bo, int domain);
265
266int mgag200_bo_reserve(struct mgag200_bo *bo, bool no_wait);
267void mgag200_bo_unreserve(struct mgag200_bo *bo);
268int mgag200_bo_create(struct drm_device *dev, int size, int align,
269 uint32_t flags, struct mgag200_bo **pastbo);
270int mgag200_mm_init(struct mga_device *mdev);
271void mgag200_mm_fini(struct mga_device *mdev);
272int mgag200_mmap(struct file *filp, struct vm_area_struct *vma);
273int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr);
274int mgag200_bo_unpin(struct mgag200_bo *bo);
275int mgag200_bo_push_sysram(struct mgag200_bo *bo);
276#endif /* __MGAG200_DRV_H__ */
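The WREG_* macros above follow the VGA convention of banked registers behind an index/data port pair, so one logical register write costs two byte-wide MMIO writes. A toy model of that access pattern; the port numbers and register offset are illustrative:

	#include <stdint.h>
	#include <stdio.h>

	static uint8_t regs[256];	/* models the banked register file */
	static uint8_t index_port;

	static void wreg8(int port, uint8_t v)
	{
		if (port == 0)
			index_port = v;		/* DAC_INDEX: select a register */
		else
			regs[index_port] = v;	/* DAC_DATA: write its value */
	}

	#define WREG_DAC(reg, v) do { wreg8(0, (reg)); wreg8(1, (v)); } while (0)

	int main(void)
	{
		WREG_DAC(0x2a, 0xff);	/* one logical write = two port writes */
		printf("reg 0x2a = 0x%02x\n", regs[0x2a]);
		return 0;
	}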
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
new file mode 100644
index 000000000000..880d3369760e
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -0,0 +1,294 @@
1/*
2 * Copyright 2010 Matt Turner.
3 * Copyright 2012 Red Hat
4 *
5 * This file is subject to the terms and conditions of the GNU General
6 * Public License version 2. See the file COPYING in the main
7 * directory of this archive for more details.
8 *
9 * Authors: Matthew Garrett
10 * Matt Turner
11 * Dave Airlie
12 */
13#include <linux/module.h>
14#include "drmP.h"
15#include "drm.h"
16#include "drm_fb_helper.h"
17
18#include <linux/fb.h>
19
20#include "mgag200_drv.h"
21
22static void mga_dirty_update(struct mga_fbdev *mfbdev,
23 int x, int y, int width, int height)
24{
25 int i;
26 struct drm_gem_object *obj;
27 struct mgag200_bo *bo;
28 int src_offset, dst_offset;
29 int bpp = (mfbdev->mfb.base.bits_per_pixel + 7)/8;
30 int ret;
31 bool unmap = false;
32
33 obj = mfbdev->mfb.obj;
34 bo = gem_to_mga_bo(obj);
35
36 ret = mgag200_bo_reserve(bo, true);
37 if (ret) {
38 DRM_ERROR("failed to reserve fb bo\n");
39 return;
40 }
41
42 if (!bo->kmap.virtual) {
43 ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
44 if (ret) {
45 DRM_ERROR("failed to kmap fb updates\n");
46 mgag200_bo_unreserve(bo);
47 return;
48 }
49 unmap = true;
50 }
51 for (i = y; i < y + height; i++) {
52 /* assume equal stride for now */
53 src_offset = dst_offset = i * mfbdev->mfb.base.pitches[0] + (x * bpp);
54 memcpy_toio(bo->kmap.virtual + src_offset, mfbdev->sysram + src_offset, width * bpp);
55
56 }
57 if (unmap)
58 ttm_bo_kunmap(&bo->kmap);
59
60 mgag200_bo_unreserve(bo);
61}
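mga_dirty_update() above is the push half of a shadow framebuffer: fbdev draws into vmalloc'ed system RAM, and only the touched rows get copied into VRAM with memcpy_toio(), because reading VRAM back over the bus would be slow. The copy scheme as a self-contained sketch, with plain memcpy standing in for memcpy_toio():

	#include <string.h>

	#define WIDTH	64
	#define HEIGHT	32
	#define BPP	4

	static unsigned char sysram[HEIGHT][WIDTH * BPP];	/* cheap to read and write */
	static unsigned char vram[HEIGHT][WIDTH * BPP];		/* write-mostly */

	static void dirty_update(int x, int y, int w, int h)
	{
		int i;

		for (i = y; i < y + h; i++)	/* row by row, equal stride assumed */
			memcpy(&vram[i][x * BPP], &sysram[i][x * BPP], w * BPP);
	}

	int main(void)
	{
		sysram[5][0] = 0xaa;		/* draw into the shadow */
		dirty_update(0, 5, 1, 1);	/* push one dirty pixel */
		return vram[5][0] == 0xaa ? 0 : 1;
	}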
62
63static void mga_fillrect(struct fb_info *info,
64 const struct fb_fillrect *rect)
65{
66 struct mga_fbdev *mfbdev = info->par;
67 sys_fillrect(info, rect);
68 mga_dirty_update(mfbdev, rect->dx, rect->dy, rect->width,
69 rect->height);
70}
71
72static void mga_copyarea(struct fb_info *info,
73 const struct fb_copyarea *area)
74{
75 struct mga_fbdev *mfbdev = info->par;
76 sys_copyarea(info, area);
77 mga_dirty_update(mfbdev, area->dx, area->dy, area->width,
78 area->height);
79}
80
81static void mga_imageblit(struct fb_info *info,
82 const struct fb_image *image)
83{
84 struct mga_fbdev *mfbdev = info->par;
85 sys_imageblit(info, image);
86 mga_dirty_update(mfbdev, image->dx, image->dy, image->width,
87 image->height);
88}
89
90
91static struct fb_ops mgag200fb_ops = {
92 .owner = THIS_MODULE,
93 .fb_check_var = drm_fb_helper_check_var,
94 .fb_set_par = drm_fb_helper_set_par,
95 .fb_fillrect = mga_fillrect,
96 .fb_copyarea = mga_copyarea,
97 .fb_imageblit = mga_imageblit,
98 .fb_pan_display = drm_fb_helper_pan_display,
99 .fb_blank = drm_fb_helper_blank,
100 .fb_setcmap = drm_fb_helper_setcmap,
101};
102
103static int mgag200fb_create_object(struct mga_fbdev *afbdev,
104 struct drm_mode_fb_cmd2 *mode_cmd,
105 struct drm_gem_object **gobj_p)
106{
107 struct drm_device *dev = afbdev->helper.dev;
108 u32 bpp, depth;
109 u32 size;
110 struct drm_gem_object *gobj;
111
112 int ret = 0;
113 drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
114
115 size = mode_cmd->pitches[0] * mode_cmd->height;
116 ret = mgag200_gem_create(dev, size, true, &gobj);
117 if (ret)
118 return ret;
119
120 *gobj_p = gobj;
121 return ret;
122}
123
124static int mgag200fb_create(struct mga_fbdev *mfbdev,
125 struct drm_fb_helper_surface_size *sizes)
126{
127 struct drm_device *dev = mfbdev->helper.dev;
128 struct drm_mode_fb_cmd2 mode_cmd;
129 struct mga_device *mdev = dev->dev_private;
130 struct fb_info *info;
131 struct drm_framebuffer *fb;
132 struct drm_gem_object *gobj = NULL;
133 struct device *device = &dev->pdev->dev;
134 struct mgag200_bo *bo;
135 int ret;
136 void *sysram;
137 int size;
138
139 mode_cmd.width = sizes->surface_width;
140 mode_cmd.height = sizes->surface_height;
141 mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
142
143 mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
144 sizes->surface_depth);
145 size = mode_cmd.pitches[0] * mode_cmd.height;
146
147 ret = mgag200fb_create_object(mfbdev, &mode_cmd, &gobj);
148 if (ret) {
149 DRM_ERROR("failed to create fbcon backing object %d\n", ret);
150 return ret;
151 }
152 bo = gem_to_mga_bo(gobj);
153
154 sysram = vmalloc(size);
155 if (!sysram)
156 return -ENOMEM;
157
158 info = framebuffer_alloc(0, device);
159 if (info == NULL)
160 return -ENOMEM;
161
162 info->par = mfbdev;
163
164 ret = mgag200_framebuffer_init(dev, &mfbdev->mfb, &mode_cmd, gobj);
165 if (ret)
166 return ret;
167
168 mfbdev->sysram = sysram;
169 mfbdev->size = size;
170
171 fb = &mfbdev->mfb.base;
172
173 /* setup helper */
174 mfbdev->helper.fb = fb;
175 mfbdev->helper.fbdev = info;
176
177 ret = fb_alloc_cmap(&info->cmap, 256, 0);
178 if (ret) {
179 DRM_ERROR("%s: can't allocate color map\n", info->fix.id);
180 ret = -ENOMEM;
181 goto out;
182 }
183
184 strcpy(info->fix.id, "mgadrmfb");
185
186 info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
187 info->fbops = &mgag200fb_ops;
188
189 /* setup aperture base/size for vesafb takeover */
190 info->apertures = alloc_apertures(1);
191 if (!info->apertures) {
192 ret = -ENOMEM;
193 goto out;
194 }
195 info->apertures->ranges[0].base = mdev->dev->mode_config.fb_base;
196 info->apertures->ranges[0].size = mdev->mc.vram_size;
197
198 drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
199 drm_fb_helper_fill_var(info, &mfbdev->helper, sizes->fb_width,
200 sizes->fb_height);
201
202 info->screen_base = sysram;
203 info->screen_size = size;
204 info->pixmap.flags = FB_PIXMAP_SYSTEM;
205
206 DRM_DEBUG_KMS("allocated %dx%d\n",
207 fb->width, fb->height);
208 return 0;
209out:
210 return ret;
211}
212
213static int mga_fb_find_or_create_single(struct drm_fb_helper *helper,
214 struct drm_fb_helper_surface_size
215 *sizes)
216{
217 struct mga_fbdev *mfbdev = (struct mga_fbdev *)helper;
218 int new_fb = 0;
219 int ret;
220
221 if (!helper->fb) {
222 ret = mgag200fb_create(mfbdev, sizes);
223 if (ret)
224 return ret;
225 new_fb = 1;
226 }
227 return new_fb;
228}
229
230static int mga_fbdev_destroy(struct drm_device *dev,
231 struct mga_fbdev *mfbdev)
232{
233 struct fb_info *info;
234 struct mga_framebuffer *mfb = &mfbdev->mfb;
235
236 if (mfbdev->helper.fbdev) {
237 info = mfbdev->helper.fbdev;
238
239 unregister_framebuffer(info);
240 if (info->cmap.len)
241 fb_dealloc_cmap(&info->cmap);
242 framebuffer_release(info);
243 }
244
245 if (mfb->obj) {
246 drm_gem_object_unreference_unlocked(mfb->obj);
247 mfb->obj = NULL;
248 }
249 drm_fb_helper_fini(&mfbdev->helper);
250 vfree(mfbdev->sysram);
251 drm_framebuffer_cleanup(&mfb->base);
252
253 return 0;
254}
255
256static struct drm_fb_helper_funcs mga_fb_helper_funcs = {
257 .gamma_set = mga_crtc_fb_gamma_set,
258 .gamma_get = mga_crtc_fb_gamma_get,
259 .fb_probe = mga_fb_find_or_create_single,
260};
261
262int mgag200_fbdev_init(struct mga_device *mdev)
263{
264 struct mga_fbdev *mfbdev;
265 int ret;
266
267 mfbdev = kzalloc(sizeof(struct mga_fbdev), GFP_KERNEL);
268 if (!mfbdev)
269 return -ENOMEM;
270
271 mdev->mfbdev = mfbdev;
272 mfbdev->helper.funcs = &mga_fb_helper_funcs;
273
274 ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper,
275 mdev->num_crtc, MGAG200FB_CONN_LIMIT);
276 if (ret) {
277 kfree(mfbdev);
278 return ret;
279 }
280 drm_fb_helper_single_add_all_connectors(&mfbdev->helper);
281 drm_fb_helper_initial_config(&mfbdev->helper, 32);
282
283 return 0;
284}
285
286void mgag200_fbdev_fini(struct mga_device *mdev)
287{
288 if (!mdev->mfbdev)
289 return;
290
291 mga_fbdev_destroy(mdev->dev, mdev->mfbdev);
292 kfree(mdev->mfbdev);
293 mdev->mfbdev = NULL;
294}
diff --git a/drivers/gpu/drm/mgag200/mgag200_i2c.c b/drivers/gpu/drm/mgag200/mgag200_i2c.c
new file mode 100644
index 000000000000..dd3568a1b6b0
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_i2c.c
@@ -0,0 +1,156 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the
6 * "Software"), to deal in the Software without restriction, including
7 * without limitation the rights to use, copy, modify, merge, publish,
8 * distribute, sub license, and/or sell copies of the Software, and to
9 * permit persons to whom the Software is furnished to do so, subject to
10 * the following conditions:
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
13 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
15 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
16 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
17 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
18 * USE OR OTHER DEALINGS IN THE SOFTWARE.
19 *
20 * The above copyright notice and this permission notice (including the
21 * next paragraph) shall be included in all copies or substantial portions
22 * of the Software.
23 *
24 */
25/*
26 * Authors: Dave Airlie <airlied@redhat.com>
27 */
28#include <linux/export.h>
29#include <linux/i2c.h>
30#include <linux/i2c-algo-bit.h>
31#include "drmP.h"
32#include "drm.h"
33
34#include "mgag200_drv.h"
35
36static int mga_i2c_read_gpio(struct mga_device *mdev)
37{
38 WREG8(DAC_INDEX, MGA1064_GEN_IO_DATA);
39 return RREG8(DAC_DATA);
40}
41
42static void mga_i2c_set_gpio(struct mga_device *mdev, int mask, int val)
43{
44 int tmp;
45
46 WREG8(DAC_INDEX, MGA1064_GEN_IO_CTL);
47 tmp = (RREG8(DAC_DATA) & mask) | val;
48 WREG_DAC(MGA1064_GEN_IO_CTL, tmp);
49 WREG_DAC(MGA1064_GEN_IO_DATA, 0);
50}
51
52static inline void mga_i2c_set(struct mga_device *mdev, int mask, int state)
53{
54 if (state)
55 state = 0;
56 else
57 state = mask;
58 mga_i2c_set_gpio(mdev, ~mask, state);
59}
60
61static void mga_gpio_setsda(void *data, int state)
62{
63 struct mga_i2c_chan *i2c = data;
64 struct mga_device *mdev = i2c->dev->dev_private;
65 mga_i2c_set(mdev, i2c->data, state);
66}
67
68static void mga_gpio_setscl(void *data, int state)
69{
70 struct mga_i2c_chan *i2c = data;
71 struct mga_device *mdev = i2c->dev->dev_private;
72 mga_i2c_set(mdev, i2c->clock, state);
73}
74
75static int mga_gpio_getsda(void *data)
76{
77 struct mga_i2c_chan *i2c = data;
78 struct mga_device *mdev = i2c->dev->dev_private;
79 return (mga_i2c_read_gpio(mdev) & i2c->data) ? 1 : 0;
80}
81
82static int mga_gpio_getscl(void *data)
83{
84 struct mga_i2c_chan *i2c = data;
85 struct mga_device *mdev = i2c->dev->dev_private;
86 return (mga_i2c_read_gpio(mdev) & i2c->clock) ? 1 : 0;
87}
88
89struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev)
90{
91 struct mga_device *mdev = dev->dev_private;
92 struct mga_i2c_chan *i2c;
93 int ret;
94 int data, clock;
95
96 WREG_DAC(MGA1064_GEN_IO_DATA, 0xff);
97 WREG_DAC(MGA1064_GEN_IO_CTL, 0);
98
99 switch (mdev->type) {
100 case G200_SE_A:
101 case G200_SE_B:
102 case G200_EV:
103 case G200_WB:
104 data = 1;
105 clock = 2;
106 break;
107 case G200_EH:
108 case G200_ER:
109 data = 2;
110 clock = 1;
111 break;
112 default:
113 data = 2;
114 clock = 8;
115 break;
116 }
117
118 i2c = kzalloc(sizeof(struct mga_i2c_chan), GFP_KERNEL);
119 if (!i2c)
120 return NULL;
121
122 i2c->data = data;
123 i2c->clock = clock;
124 i2c->adapter.owner = THIS_MODULE;
125 i2c->adapter.class = I2C_CLASS_DDC;
126 i2c->adapter.dev.parent = &dev->pdev->dev;
127 i2c->dev = dev;
128 i2c_set_adapdata(&i2c->adapter, i2c);
129 snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), "mga i2c");
130
131 i2c->adapter.algo_data = &i2c->bit;
132
133 i2c->bit.udelay = 10;
134 i2c->bit.timeout = 2;
135 i2c->bit.data = i2c;
136 i2c->bit.setsda = mga_gpio_setsda;
137 i2c->bit.setscl = mga_gpio_setscl;
138 i2c->bit.getsda = mga_gpio_getsda;
139 i2c->bit.getscl = mga_gpio_getscl;
140
141 ret = i2c_bit_add_bus(&i2c->adapter);
142 if (ret) {
143 kfree(i2c);
144 i2c = NULL;
145 }
146 return i2c;
147}
148
149void mgag200_i2c_destroy(struct mga_i2c_chan *i2c)
150{
151 if (!i2c)
152 return;
153 i2c_del_adapter(&i2c->adapter);
154 kfree(i2c);
155}
156
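
A note on the adapter registered above: i2c-algo-bit expects open-drain semantics, so "setting" a line high really means releasing it and letting the bus pull-up win, which is why mga_i2c_set() toggles direction bits in MGA1064_GEN_IO_CTL rather than data bits. A minimal standalone sketch of that masking arithmetic (plain userspace C; the gen_io_ctl variable is a stand-in for the real register, not part of the driver):

#include <stdio.h>

static unsigned char gen_io_ctl;	/* stand-in for MGA1064_GEN_IO_CTL */

/* mirrors mga_i2c_set(): nonzero state releases the line (input, pulled
 * high by the bus), zero drives it low (output) */
static void set_line(int mask, int state)
{
	int val = state ? 0 : mask;

	gen_io_ctl = (gen_io_ctl & ~mask) | val;
}

int main(void)
{
	set_line(0x02, 1);	/* release SCL (bit 1) */
	set_line(0x01, 0);	/* drive SDA (bit 0) low */
	printf("GEN_IO_CTL = 0x%02x\n", gen_io_ctl);	/* prints 0x01 */
	return 0;
}
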
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
new file mode 100644
index 000000000000..636a81cd2f37
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -0,0 +1,388 @@
1/*
2 * Copyright 2010 Matt Turner.
3 * Copyright 2012 Red Hat
4 *
5 * This file is subject to the terms and conditions of the GNU General
6 * Public License version 2. See the file COPYING in the main
7 * directory of this archive for more details.
8 *
9 * Authors: Matthew Garrett
10 * Matt Turner
11 * Dave Airlie
12 */
13#include "drmP.h"
14#include "drm.h"
15#include "drm_crtc_helper.h"
16#include "mgag200_drv.h"
17
18static void mga_user_framebuffer_destroy(struct drm_framebuffer *fb)
19{
20 struct mga_framebuffer *mga_fb = to_mga_framebuffer(fb);
21 if (mga_fb->obj)
22 drm_gem_object_unreference_unlocked(mga_fb->obj);
23 drm_framebuffer_cleanup(fb);
24 kfree(fb);
25}
26
27static int mga_user_framebuffer_create_handle(struct drm_framebuffer *fb,
28 struct drm_file *file_priv,
29 unsigned int *handle)
30{
31 return 0;
32}
33
34static const struct drm_framebuffer_funcs mga_fb_funcs = {
35 .destroy = mga_user_framebuffer_destroy,
36 .create_handle = mga_user_framebuffer_create_handle,
37};
38
39int mgag200_framebuffer_init(struct drm_device *dev,
40 struct mga_framebuffer *gfb,
41 struct drm_mode_fb_cmd2 *mode_cmd,
42 struct drm_gem_object *obj)
43{
44 int ret = drm_framebuffer_init(dev, &gfb->base, &mga_fb_funcs);
45 if (ret) {
46 DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
47 return ret;
48 }
49 drm_helper_mode_fill_fb_struct(&gfb->base, mode_cmd);
50 gfb->obj = obj;
51 return 0;
52}
53
54static struct drm_framebuffer *
55mgag200_user_framebuffer_create(struct drm_device *dev,
56 struct drm_file *filp,
57 struct drm_mode_fb_cmd2 *mode_cmd)
58{
59 struct drm_gem_object *obj;
60 struct mga_framebuffer *mga_fb;
61 int ret;
62
63 obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
64 if (obj == NULL)
65 return ERR_PTR(-ENOENT);
66
67 mga_fb = kzalloc(sizeof(*mga_fb), GFP_KERNEL);
68 if (!mga_fb) {
69 drm_gem_object_unreference_unlocked(obj);
70 return ERR_PTR(-ENOMEM);
71 }
72
73 ret = mgag200_framebuffer_init(dev, mga_fb, mode_cmd, obj);
74 if (ret) {
75 drm_gem_object_unreference_unlocked(obj);
76 kfree(mga_fb);
77 return ERR_PTR(ret);
78 }
79 return &mga_fb->base;
80}
81
82static const struct drm_mode_config_funcs mga_mode_funcs = {
83 .fb_create = mgag200_user_framebuffer_create,
84};
85
86/* Unmap the framebuffer from the core and release the memory */
87static void mga_vram_fini(struct mga_device *mdev)
88{
89 pci_iounmap(mdev->dev->pdev, mdev->rmmio);
90 mdev->rmmio = NULL;
91 if (mdev->mc.vram_base)
92 release_mem_region(mdev->mc.vram_base, mdev->mc.vram_window);
93}
94
95static int mga_probe_vram(struct mga_device *mdev, void __iomem *mem)
96{
97 int offset;
98 int orig;
99 int test1, test2;
100 int orig1, orig2;
101
102 /* Probe */
103 orig = ioread16(mem);
104 iowrite16(0, mem);
105
106 for (offset = 0x100000; offset < mdev->mc.vram_window; offset += 0x4000) {
107 orig1 = ioread8(mem + offset);
108 orig2 = ioread8(mem + offset + 0x100);
109
110 iowrite16(0xaa55, mem + offset);
111 iowrite16(0xaa55, mem + offset + 0x100);
112
113 test1 = ioread16(mem + offset);
114 test2 = ioread16(mem);
115
116 iowrite16(orig1, mem + offset);
117 iowrite16(orig2, mem + offset + 0x100);
118
119 if (test1 != 0xaa55) {
120 break;
121 }
122
123 if (test2) {
124 break;
125 }
126 }
127
128 iowrite16(orig, mem);
129 return offset - 65536;
130}
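
mga_probe_vram() walks the aperture in 16 KiB steps and stops on either of two symptoms of running past the fitted memory: the test pattern fails to read back (test1), or the write wraps around and corrupts offset 0, which was cleared beforehand and is re-checked through test2. A self-contained simulation of that wrap-around check, with an ordinary buffer standing in for the mapped BAR and the sizes chosen purely for illustration:

#include <stdio.h>

/* simulate a PCI window larger than the VRAM behind it; writes past
 * VRAM_SIZE wrap around, as they would on the real chip */
#define WINDOW_SIZE 0x1000000	/* 16 MiB BAR */
#define VRAM_SIZE   0x800000	/*  8 MiB actually fitted */

static unsigned short vram[VRAM_SIZE / 2];

static void write16(unsigned long off, unsigned short v)
{
	vram[(off % VRAM_SIZE) / 2] = v;
}

static unsigned short read16(unsigned long off)
{
	return vram[(off % VRAM_SIZE) / 2];
}

int main(void)
{
	unsigned long offset;

	write16(0, 0);
	for (offset = 0x100000; offset < WINDOW_SIZE; offset += 0x4000) {
		write16(offset, 0xaa55);
		if (read16(offset) != 0xaa55 || read16(0) != 0)
			break;	/* pattern lost, or it aliased offset 0 */
	}
	/* like mga_probe_vram(), back off 64 KiB for safety */
	printf("probed VRAM: %lu KiB\n", (offset - 65536) / 1024);
	return 0;
}
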
131
132/* Map the framebuffer from the card and configure the core */
133static int mga_vram_init(struct mga_device *mdev)
134{
135 void __iomem *mem;
136 struct apertures_struct *aper = alloc_apertures(1);
137
138 /* BAR 0 is VRAM */
139 mdev->mc.vram_base = pci_resource_start(mdev->dev->pdev, 0);
140 mdev->mc.vram_window = pci_resource_len(mdev->dev->pdev, 0);
141
142 aper->ranges[0].base = mdev->mc.vram_base;
143 aper->ranges[0].size = mdev->mc.vram_window;
144 aper->count = 1;
145
146 remove_conflicting_framebuffers(aper, "mgafb", true);
147
148 if (!request_mem_region(mdev->mc.vram_base, mdev->mc.vram_window,
149 "mgadrmfb_vram")) {
150 DRM_ERROR("can't reserve VRAM\n");
151 return -ENXIO;
152 }
153
154 mem = pci_iomap(mdev->dev->pdev, 0, 0);
155
156 mdev->mc.vram_size = mga_probe_vram(mdev, mem);
157
158 pci_iounmap(mdev->dev->pdev, mem);
159
160 return 0;
161}
162
163static int mgag200_device_init(struct drm_device *dev,
164 uint32_t flags)
165{
166 struct mga_device *mdev = dev->dev_private;
167 int ret; u32 option; /* pci_read_config_dword() expects a u32 */
168
169 mdev->type = flags;
170
171 /* Hardcode the number of CRTCs to 1 */
172 mdev->num_crtc = 1;
173
174 pci_read_config_dword(dev->pdev, PCI_MGA_OPTION, &option);
175 mdev->has_sdram = !(option & (1 << 14));
176
177 /* BAR 0 is the framebuffer, BAR 1 contains registers */
178 mdev->rmmio_base = pci_resource_start(mdev->dev->pdev, 1);
179 mdev->rmmio_size = pci_resource_len(mdev->dev->pdev, 1);
180
181 if (!request_mem_region(mdev->rmmio_base, mdev->rmmio_size,
182 "mgadrmfb_mmio")) {
183 DRM_ERROR("can't reserve mmio registers\n");
184 return -ENOMEM;
185 }
186
187 mdev->rmmio = pci_iomap(dev->pdev, 1, 0);
188 if (mdev->rmmio == NULL)
189 return -ENOMEM;
190
191 /* stash G200 SE model number for later use */
192 if (IS_G200_SE(mdev))
193 mdev->reg_1e24 = RREG32(0x1e24);
194
195 ret = mga_vram_init(mdev);
196 if (ret) {
197 release_mem_region(mdev->rmmio_base, mdev->rmmio_size);
198 return ret;
199 }
200
201 mdev->bpp_shifts[0] = 0;
202 mdev->bpp_shifts[1] = 1;
203 mdev->bpp_shifts[2] = 0;
204 mdev->bpp_shifts[3] = 2;
205 return 0;
206}
207
208void mgag200_device_fini(struct mga_device *mdev)
209{
210 release_mem_region(mdev->rmmio_base, mdev->rmmio_size);
211 mga_vram_fini(mdev);
212}
213
214/*
215 * Functions here will be called by the core once it's bound the driver to
216 * a PCI device
217 */
218
219
220int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
221{
222 struct mga_device *mdev;
223 int r;
224
225 mdev = kzalloc(sizeof(struct mga_device), GFP_KERNEL);
226 if (mdev == NULL)
227 return -ENOMEM;
228 dev->dev_private = (void *)mdev;
229 mdev->dev = dev;
230
231 r = mgag200_device_init(dev, flags);
232 if (r) {
233 dev_err(&dev->pdev->dev, "Fatal error during GPU init: %d\n", r);
234 goto out;
235 }
236 r = mgag200_mm_init(mdev);
237 if (r)
238 goto out;
239
240 drm_mode_config_init(dev);
241 dev->mode_config.funcs = (void *)&mga_mode_funcs;
242 dev->mode_config.min_width = 0;
243 dev->mode_config.min_height = 0;
244 dev->mode_config.preferred_depth = 24;
245 dev->mode_config.prefer_shadow = 1;
246
247 r = mgag200_modeset_init(mdev);
248 if (r)
249 dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r);
250out:
251 if (r)
252 mgag200_driver_unload(dev);
253 return r;
254}
255
256int mgag200_driver_unload(struct drm_device *dev)
257{
258 struct mga_device *mdev = dev->dev_private;
259
260 if (mdev == NULL)
261 return 0;
262 mgag200_modeset_fini(mdev);
263 mgag200_fbdev_fini(mdev);
264 drm_mode_config_cleanup(dev);
265 mgag200_mm_fini(mdev);
266 mgag200_device_fini(mdev);
267 kfree(mdev);
268 dev->dev_private = NULL;
269 return 0;
270}
271
272int mgag200_gem_create(struct drm_device *dev,
273 u32 size, bool iskernel,
274 struct drm_gem_object **obj)
275{
276 struct mgag200_bo *mgabo;
277 int ret;
278
279 *obj = NULL;
280
281 size = roundup(size, PAGE_SIZE);
282 if (size == 0)
283 return -EINVAL;
284
285 ret = mgag200_bo_create(dev, size, 0, 0, &mgabo);
286 if (ret) {
287 if (ret != -ERESTARTSYS)
288 DRM_ERROR("failed to allocate GEM object\n");
289 return ret;
290 }
291 *obj = &mgabo->gem;
292 return 0;
293}
294
295int mgag200_dumb_create(struct drm_file *file,
296 struct drm_device *dev,
297 struct drm_mode_create_dumb *args)
298{
299 int ret;
300 struct drm_gem_object *gobj;
301 u32 handle;
302
303 args->pitch = args->width * ((args->bpp + 7) / 8);
304 args->size = args->pitch * args->height;
305
306 ret = mgag200_gem_create(dev, args->size, false,
307 &gobj);
308 if (ret)
309 return ret;
310
311 ret = drm_gem_handle_create(file, gobj, &handle);
312 drm_gem_object_unreference_unlocked(gobj);
313 if (ret)
314 return ret;
315
316 args->handle = handle;
317 return 0;
318}
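
The pitch/size arithmetic above is the stock dumb-buffer calculation: bytes per pixel rounded up via (bpp + 7) / 8, times width for the pitch, times height for the size. For example, a 1024x768 request at 32 bpp gives pitch = 1024 * 4 = 4096 bytes and size = 4096 * 768 = 3 MiB, while a 24 bpp request rounds to 3 bytes per pixel and a 3072-byte pitch.
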
319
320int mgag200_dumb_destroy(struct drm_file *file,
321 struct drm_device *dev,
322 uint32_t handle)
323{
324 return drm_gem_handle_delete(file, handle);
325}
326
327int mgag200_gem_init_object(struct drm_gem_object *obj)
328{
329 BUG();
330 return 0;
331}
332
333void mgag200_bo_unref(struct mgag200_bo **bo)
334{
335 struct ttm_buffer_object *tbo;
336
337 if ((*bo) == NULL)
338 return;
339
340 tbo = &((*bo)->bo);
341 ttm_bo_unref(&tbo);
342 /* ttm_bo_unref() always clears the pointer it is handed */
343 if (tbo == NULL)
344 *bo = NULL;
345}
346
347void mgag200_gem_free_object(struct drm_gem_object *obj)
348{
349 struct mgag200_bo *mgag200_bo = gem_to_mga_bo(obj);
350
351 if (!mgag200_bo)
352 return;
353 mgag200_bo_unref(&mgag200_bo);
354}
355
356
357static inline u64 mgag200_bo_mmap_offset(struct mgag200_bo *bo)
358{
359 return bo->bo.addr_space_offset;
360}
361
362int
363mgag200_dumb_mmap_offset(struct drm_file *file,
364 struct drm_device *dev,
365 uint32_t handle,
366 uint64_t *offset)
367{
368 struct drm_gem_object *obj;
369 int ret;
370 struct mgag200_bo *bo;
371
372 mutex_lock(&dev->struct_mutex);
373 obj = drm_gem_object_lookup(dev, file, handle);
374 if (obj == NULL) {
375 ret = -ENOENT;
376 goto out_unlock;
377 }
378
379 bo = gem_to_mga_bo(obj);
380 *offset = mgag200_bo_mmap_offset(bo);
381
382 drm_gem_object_unreference(obj);
383 ret = 0;
384out_unlock:
385 mutex_unlock(&dev->struct_mutex);
386 return ret;
387
388}
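
For context, dumb_create and dumb_mmap_offset above are the two halves of what a KMS client calls when it wants a CPU-mapped scanout buffer. A rough userspace sketch of that flow using the generic DRM ioctls (error handling omitted; the /dev/dri/card0 path and the mode geometry are assumptions for illustration):

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/mman.h>
#include <xf86drm.h>

int map_dumb_fb(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);
	struct drm_mode_create_dumb creq;
	struct drm_mode_map_dumb mreq;
	uint32_t *pixels;

	memset(&creq, 0, sizeof(creq));
	creq.width = 1024;
	creq.height = 768;
	creq.bpp = 32;
	drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq);
	/* creq.pitch and creq.size hold what mgag200_dumb_create computed */

	memset(&mreq, 0, sizeof(mreq));
	mreq.handle = creq.handle;
	drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq);
	/* mreq.offset is produced by mgag200_dumb_mmap_offset() */

	pixels = mmap(NULL, creq.size, PROT_READ | PROT_WRITE, MAP_SHARED,
		      fd, mreq.offset);
	pixels[0] = 0x00ff0000;	/* one red pixel, top-left */
	return fd;
}
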
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
new file mode 100644
index 000000000000..d303061b251e
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -0,0 +1,1533 @@
1/*
2 * Copyright 2010 Matt Turner.
3 * Copyright 2012 Red Hat
4 *
5 * This file is subject to the terms and conditions of the GNU General
6 * Public License version 2. See the file COPYING in the main
7 * directory of this archive for more details.
8 *
9 * Authors: Matthew Garrett
10 * Matt Turner
11 * Dave Airlie
12 */
13
14#include <linux/delay.h>
15
16#include "drmP.h"
17#include "drm.h"
18#include "drm_crtc_helper.h"
19
20#include "mgag200_drv.h"
21
22#define MGAG200_LUT_SIZE 256
23
24/*
25 * This file contains setup code for the CRTC.
26 */
27
28static void mga_crtc_load_lut(struct drm_crtc *crtc)
29{
30 struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
31 struct drm_device *dev = crtc->dev;
32 struct mga_device *mdev = dev->dev_private;
33 int i;
34
35 if (!crtc->enabled)
36 return;
37
38 WREG8(DAC_INDEX + MGA1064_INDEX, 0);
39
40 for (i = 0; i < MGAG200_LUT_SIZE; i++) {
41 /* VGA registers */
42 WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_r[i]);
43 WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_g[i]);
44 WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_b[i]);
45 }
46}
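
mga_crtc_load_lut() relies on standard RAMDAC behaviour: the palette write index is set once to 0, and the DAC auto-increments its address after every third byte written to the COL_PAL port, so the 768 sequential writes above reload all 256 entries without touching the index register again.
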
47
48static inline void mga_wait_vsync(struct mga_device *mdev)
49{
50 unsigned int count = 0;
51 unsigned int status = 0;
52
53 do {
54 status = RREG32(MGAREG_Status);
55 count++;
56 } while ((status & 0x08) && (count < 250000));
57 count = 0;
58 status = 0;
59 do {
60 status = RREG32(MGAREG_Status);
61 count++;
62 } while (!(status & 0x08) && (count < 250000));
63}
64
65static inline void mga_wait_busy(struct mga_device *mdev)
66{
67 unsigned int count = 0;
68 unsigned int status = 0;
69 do {
70 status = RREG8(MGAREG_Status + 2);
71 count++;
72 } while ((status & 0x01) && (count < 500000));
73}
74
75/*
76 * The core passes the desired mode to the CRTC code to see whether any
77 * CRTC-specific modifications need to be made to it. We're in a position
78 * to just pass that straight through, so this does nothing
79 */
80static bool mga_crtc_mode_fixup(struct drm_crtc *crtc,
81 struct drm_display_mode *mode,
82 struct drm_display_mode *adjusted_mode)
83{
84 return true;
85}
86
87static int mga_g200se_set_plls(struct mga_device *mdev, long clock)
88{
89 unsigned int vcomax, vcomin, pllreffreq;
90 unsigned int delta, tmpdelta, permitteddelta;
91 unsigned int testp, testm, testn;
92 unsigned int p, m, n;
93 unsigned int computed;
94
95 m = n = p = 0;
96 vcomax = 320000;
97 vcomin = 160000;
98 pllreffreq = 25000;
99
100 delta = 0xffffffff;
101 permitteddelta = clock * 5 / 1000;
102
103 for (testp = 8; testp > 0; testp /= 2) {
104 if (clock * testp > vcomax)
105 continue;
106 if (clock * testp < vcomin)
107 continue;
108
109 for (testn = 17; testn < 256; testn++) {
110 for (testm = 1; testm < 32; testm++) {
111 computed = (pllreffreq * testn) /
112 (testm * testp);
113 if (computed > clock)
114 tmpdelta = computed - clock;
115 else
116 tmpdelta = clock - computed;
117 if (tmpdelta < delta) {
118 delta = tmpdelta;
119 m = testm - 1;
120 n = testn - 1;
121 p = testp - 1;
122 }
123 }
124 }
125 }
126
127 if (delta > permitteddelta) {
128 printk(KERN_WARNING "PLL delta too large\n");
129 return 1;
130 }
131
132 WREG_DAC(MGA1064_PIX_PLLC_M, m);
133 WREG_DAC(MGA1064_PIX_PLLC_N, n);
134 WREG_DAC(MGA1064_PIX_PLLC_P, p);
135 return 0;
136}
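
The loop above is a brute-force minimisation of |pllreffreq * N / (M * P) - clock| under the constraint vcomin <= clock * P <= vcomax, with the winning dividers programmed in hardware form (value minus one). Worked through for the common 108,000 kHz 1280x1024@60 pixel clock: P = 2 puts the VCO at 216,000 kHz, inside the 160,000-320,000 window, and N = 216 with M = 25 hits 25,000 * 216 / (25 * 2) = 108,000 kHz exactly, so delta is 0 and the three DAC registers receive 24, 215 and 1.
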
137
138static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
139{
140 unsigned int vcomax, vcomin, pllreffreq;
141 unsigned int delta, tmpdelta, permitteddelta;
142 unsigned int testp, testm, testn;
143 unsigned int p, m, n;
144 unsigned int computed;
145 int i, j, tmpcount, vcount;
146 bool pll_locked = false;
147 u8 tmp;
148
149 m = n = p = 0;
150 vcomax = 550000;
151 vcomin = 150000;
152 pllreffreq = 48000;
153
154 delta = 0xffffffff;
155 permitteddelta = clock * 5 / 1000;
156
157 for (testp = 1; testp < 9; testp++) {
158 if (clock * testp > vcomax)
159 continue;
160 if (clock * testp < vcomin)
161 continue;
162
163 for (testm = 1; testm < 17; testm++) {
164 for (testn = 1; testn < 151; testn++) {
165 computed = (pllreffreq * testn) /
166 (testm * testp);
167 if (computed > clock)
168 tmpdelta = computed - clock;
169 else
170 tmpdelta = clock - computed;
171 if (tmpdelta < delta) {
172 delta = tmpdelta;
173 n = testn - 1;
174 m = (testm - 1) | ((n >> 1) & 0x80);
175 p = testp - 1;
176 }
177 }
178 }
179 }
180
181 for (i = 0; i <= 32 && pll_locked == false; i++) {
182 if (i > 0) {
183 WREG8(MGAREG_CRTC_INDEX, 0x1e);
184 tmp = RREG8(MGAREG_CRTC_DATA);
185 if (tmp < 0xff)
186 WREG8(MGAREG_CRTC_DATA, tmp+1);
187 }
188
189 /* set pixclkdis to 1 */
190 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
191 tmp = RREG8(DAC_DATA);
192 tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
193 WREG8(DAC_DATA, tmp); /* index already points at PIX_CLK_CTL */
194
195 WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
196 tmp = RREG8(DAC_DATA);
197 tmp |= MGA1064_REMHEADCTL_CLKDIS;
198 WREG_DAC(MGA1064_REMHEADCTL, tmp);
199
200 /* select PLL Set C */
201 tmp = RREG8(MGAREG_MEM_MISC_READ);
202 tmp |= 0x3 << 2;
203 WREG8(MGAREG_MEM_MISC_WRITE, tmp);
204
205 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
206 tmp = RREG8(DAC_DATA);
207 tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN | 0x80;
208 WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
209
210 udelay(500);
211
212 /* reset the PLL */
213 WREG8(DAC_INDEX, MGA1064_VREF_CTL);
214 tmp = RREG8(DAC_DATA);
215 tmp &= ~0x04;
216 WREG_DAC(MGA1064_VREF_CTL, tmp);
217
218 udelay(50);
219
220 /* program pixel pll register */
221 WREG_DAC(MGA1064_WB_PIX_PLLC_N, n);
222 WREG_DAC(MGA1064_WB_PIX_PLLC_M, m);
223 WREG_DAC(MGA1064_WB_PIX_PLLC_P, p);
224
225 udelay(50);
226
227 /* turn pll on */
228 WREG8(DAC_INDEX, MGA1064_VREF_CTL);
229 tmp = RREG8(DAC_DATA);
230 tmp |= 0x04;
231 WREG_DAC(MGA1064_VREF_CTL, tmp);
232
233 udelay(500);
234
235 /* select the pixel pll */
236 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
237 tmp = RREG8(DAC_DATA);
238 tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
239 tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
240 WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
241
242 WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
243 tmp = RREG8(DAC_DATA);
244 tmp &= ~MGA1064_REMHEADCTL_CLKSL_MSK;
245 tmp |= MGA1064_REMHEADCTL_CLKSL_PLL;
246 WREG_DAC(MGA1064_REMHEADCTL, tmp);
247
248 /* reset dotclock rate bit */
249 WREG8(MGAREG_SEQ_INDEX, 1);
250 tmp = RREG8(MGAREG_SEQ_DATA);
251 tmp &= ~0x8;
252 WREG8(MGAREG_SEQ_DATA, tmp);
253
254 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
255 tmp = RREG8(DAC_DATA);
256 tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
257 WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
258
259 vcount = RREG8(MGAREG_VCOUNT);
260
261 for (j = 0; j < 30 && pll_locked == false; j++) {
262 tmpcount = RREG8(MGAREG_VCOUNT);
263 if (tmpcount < vcount)
264 vcount = 0;
265 if ((tmpcount - vcount) > 2)
266 pll_locked = true;
267 else
268 udelay(5);
269 }
270 }
271 WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
272 tmp = RREG8(DAC_DATA);
273 tmp &= ~MGA1064_REMHEADCTL_CLKDIS;
274 WREG_DAC(MGA1064_REMHEADCTL, tmp);
275 return 0;
276}
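
The tail of the function above decides whether the PLL locked by watching the beam counter rather than a status bit: if MGAREG_VCOUNT advances by more than two lines within the polling window, the pixel clock is evidently running. The same test reappears in the G200EH path below; factored out as a sketch (not how the driver is actually structured, just the idea):

static bool mga_vcount_advancing(struct mga_device *mdev)
{
	int vcount = RREG8(MGAREG_VCOUNT);
	int j;

	for (j = 0; j < 30; j++) {
		int now = RREG8(MGAREG_VCOUNT);

		if (now < vcount)	/* counter wrapped at end of frame */
			vcount = 0;
		if (now - vcount > 2)	/* beam is moving: clock is alive */
			return true;
		udelay(5);
	}
	return false;
}
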
277
278static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
279{
280 unsigned int vcomax, vcomin, pllreffreq;
281 unsigned int delta, tmpdelta, permitteddelta;
282 unsigned int testp, testm, testn;
283 unsigned int p, m, n;
284 unsigned int computed;
285 u8 tmp;
286
287 m = n = p = 0;
288 vcomax = 550000;
289 vcomin = 150000;
290 pllreffreq = 50000;
291
292 delta = 0xffffffff;
293 permitteddelta = clock * 5 / 1000;
294
295 for (testp = 16; testp > 0; testp--) {
296 if (clock * testp > vcomax)
297 continue;
298 if (clock * testp < vcomin)
299 continue;
300
301 for (testn = 1; testn < 257; testn++) {
302 for (testm = 1; testm < 17; testm++) {
303 computed = (pllreffreq * testn) /
304 (testm * testp);
305 if (computed > clock)
306 tmpdelta = computed - clock;
307 else
308 tmpdelta = clock - computed;
309 if (tmpdelta < delta) {
310 delta = tmpdelta;
311 n = testn - 1;
312 m = testm - 1;
313 p = testp - 1;
314 }
315 }
316 }
317 }
318
319 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
320 tmp = RREG8(DAC_DATA);
321 tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
322 WREG8(DAC_DATA, tmp);
323
324 tmp = RREG8(MGAREG_MEM_MISC_READ);
325 tmp |= 0x3 << 2;
326 WREG8(MGAREG_MEM_MISC_WRITE, tmp);
327
328 WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT);
329 tmp = RREG8(DAC_DATA);
330 WREG_DAC(MGA1064_PIX_PLL_STAT, tmp & ~0x40);
331
332 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
333 tmp = RREG8(DAC_DATA);
334 tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
335 WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
336
337 WREG_DAC(MGA1064_EV_PIX_PLLC_M, m);
338 WREG_DAC(MGA1064_EV_PIX_PLLC_N, n);
339 WREG_DAC(MGA1064_EV_PIX_PLLC_P, p);
340
341 udelay(50);
342
343 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
344 tmp = RREG8(DAC_DATA);
345 tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
346 WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
347
348 udelay(500);
349
350 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
351 tmp = RREG8(DAC_DATA);
352 tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
353 tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
354 WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
355
356 WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT);
357 tmp = RREG8(DAC_DATA);
358 WREG_DAC(MGA1064_PIX_PLL_STAT, tmp | 0x40);
359
360 tmp = RREG8(MGAREG_MEM_MISC_READ);
361 tmp |= (0x3 << 2);
362 WREG8(MGAREG_MEM_MISC_WRITE, tmp);
363
364 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
365 tmp = RREG8(DAC_DATA);
366 tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
367 WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
368
369 return 0;
370}
371
372static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
373{
374 unsigned int vcomax, vcomin, pllreffreq;
375 unsigned int delta, tmpdelta, permitteddelta;
376 unsigned int testp, testm, testn;
377 unsigned int p, m, n;
378 unsigned int computed;
379 int i, j, tmpcount, vcount;
380 u8 tmp;
381 bool pll_locked = false;
382
383 m = n = p = 0;
384 vcomax = 800000;
385 vcomin = 400000;
386 pllreffreq = 3333;
387
388 delta = 0xffffffff;
389 permitteddelta = clock * 5 / 1000;
390
391 for (testp = 16; testp > 0; testp--) {
392 if (clock * testp > vcomax)
393 continue;
394 if (clock * testp < vcomin)
395 continue;
396
397 for (testm = 1; testm < 33; testm++) {
398 for (testn = 1; testn < 257; testn++) {
399 computed = (pllreffreq * testn) /
400 (testm * testp);
401 if (computed > clock)
402 tmpdelta = computed - clock;
403 else
404 tmpdelta = clock - computed;
405 if (tmpdelta < delta) {
406 delta = tmpdelta;
407 n = testn - 1;
408 m = (testm - 1) | ((n >> 1) & 0x80);
409 p = testp - 1;
410 }
411 if ((clock * testp) >= 600000)
412 p |= 0x80;
413 }
414 }
415 }
416 for (i = 0; i <= 32 && pll_locked == false; i++) {
417 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
418 tmp = RREG8(DAC_DATA);
419 tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
420 WREG8(DAC_DATA, tmp);
421
422 tmp = RREG8(MGAREG_MEM_MISC_READ);
423 tmp |= 0x3 << 2;
424 WREG8(MGAREG_MEM_MISC_WRITE, tmp);
425
426 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
427 tmp = RREG8(DAC_DATA);
428 tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
429 WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
430
431 udelay(500);
432
433 WREG_DAC(MGA1064_EH_PIX_PLLC_M, m);
434 WREG_DAC(MGA1064_EH_PIX_PLLC_N, n);
435 WREG_DAC(MGA1064_EH_PIX_PLLC_P, p);
436
437 udelay(500);
438
439 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
440 tmp = RREG8(DAC_DATA);
441 tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
442 tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
443 WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
444
445 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
446 tmp = RREG8(DAC_DATA);
447 tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
448 tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
449 WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
450
451 vcount = RREG8(MGAREG_VCOUNT);
452
453 for (j = 0; j < 30 && pll_locked == false; j++) {
454 tmpcount = RREG8(MGAREG_VCOUNT);
455 if (tmpcount < vcount)
456 vcount = 0;
457 if ((tmpcount - vcount) > 2)
458 pll_locked = true;
459 else
460 udelay(5);
461 }
462 }
463
464 return 0;
465}
466
467static int mga_g200er_set_plls(struct mga_device *mdev, long clock)
468{
469 unsigned int vcomax, vcomin, pllreffreq;
470 unsigned int delta, tmpdelta;
471 unsigned int testr, testn, testm, testo;
472 unsigned int p, m, n;
473 unsigned int computed;
474 int tmp;
475
476 m = n = p = 0;
477 vcomax = 1488000;
478 vcomin = 1056000;
479 pllreffreq = 48000;
480
481 delta = 0xffffffff;
482
483 for (testr = 0; testr < 4; testr++) {
484 if (delta == 0)
485 break;
486 for (testn = 5; testn < 129; testn++) {
487 if (delta == 0)
488 break;
489 for (testm = 3; testm >= 0; testm--) {
490 if (delta == 0)
491 break;
492 for (testo = 5; testo < 33; testo++) {
493 computed = pllreffreq * (testn + 1) /
494 (testr + 1);
495 if (computed < vcomin)
496 continue;
497 if (computed > vcomax)
498 continue;
499 if (computed > clock)
500 tmpdelta = computed - clock;
501 else
502 tmpdelta = clock - computed;
503 if (tmpdelta < delta) {
504 delta = tmpdelta;
505 m = testm | (testo << 3);
506 n = testn;
507 p = testr | (testr << 3);
508 }
509 }
510 }
511 }
512 }
513
514 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
515 tmp = RREG8(DAC_DATA);
516 tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
517 WREG8(DAC_DATA, tmp);
518
519 WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
520 tmp = RREG8(DAC_DATA);
521 tmp |= MGA1064_REMHEADCTL_CLKDIS;
522 WREG_DAC(MGA1064_REMHEADCTL, tmp);
523
524 tmp = RREG8(MGAREG_MEM_MISC_READ);
525 tmp |= (0x3<<2) | 0xc0;
526 WREG8(MGAREG_MEM_MISC_WRITE, tmp);
527
528 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
529 tmp = RREG8(DAC_DATA);
530 tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
531 tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
532 WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
533
534 udelay(500);
535
536 WREG_DAC(MGA1064_ER_PIX_PLLC_N, n);
537 WREG_DAC(MGA1064_ER_PIX_PLLC_M, m);
538 WREG_DAC(MGA1064_ER_PIX_PLLC_P, p);
539
540 udelay(50);
541
542 return 0;
543}
544
545static int mga_crtc_set_plls(struct mga_device *mdev, long clock)
546{
547 switch (mdev->type) {
548 case G200_SE_A:
549 case G200_SE_B:
550 return mga_g200se_set_plls(mdev, clock);
551 break;
552 case G200_WB:
553 return mga_g200wb_set_plls(mdev, clock);
554 break;
555 case G200_EV:
556 return mga_g200ev_set_plls(mdev, clock);
557 break;
558 case G200_EH:
559 return mga_g200eh_set_plls(mdev, clock);
560 break;
561 case G200_ER:
562 return mga_g200er_set_plls(mdev, clock);
563 break;
564 }
565 return 0;
566}
567
568static void mga_g200wb_prepare(struct drm_crtc *crtc)
569{
570 struct mga_device *mdev = crtc->dev->dev_private;
571 u8 tmp;
572 int iter_max;
573
574 /* 1- The first step is to warn the BMC of an upcoming mode change.
575 * We are putting the misc<0> line to output. */
576
577 WREG8(DAC_INDEX, MGA1064_GEN_IO_CTL);
578 tmp = RREG8(DAC_DATA);
579 tmp |= 0x10;
580 WREG_DAC(MGA1064_GEN_IO_CTL, tmp);
581
582 /* we are putting a 1 on the misc<0> line */
583 WREG8(DAC_INDEX, MGA1064_GEN_IO_DATA);
584 tmp = RREG8(DAC_DATA);
585 tmp |= 0x10;
586 WREG_DAC(MGA1064_GEN_IO_DATA, tmp);
587
588 /* 2- The second step is to mask any further scan request.
589 * This is done by asserting the remfreqmsk bit (XSPAREREG<7>).
590 */
591 WREG8(DAC_INDEX, MGA1064_SPAREREG);
592 tmp = RREG8(DAC_DATA);
593 tmp |= 0x80;
594 WREG_DAC(MGA1064_SPAREREG, tmp);
595
596 /* 3a- The third step is to verify whether there is an active scan.
597 * We are searching for a 0 on remhsyncsts (XSPAREREG<0>).
598 */
599 iter_max = 300;
600 while (!(tmp & 0x1) && iter_max) {
601 WREG8(DAC_INDEX, MGA1064_SPAREREG);
602 tmp = RREG8(DAC_DATA);
603 udelay(1000);
604 iter_max--;
605 }
606
607 /* 3b- This step occurs only if the remote head is actually scanning;
608 * we are waiting for the end of the frame, which is a 1 on
609 * remvsyncsts (XSPAREREG<1>).
610 */
611 if (iter_max) {
612 iter_max = 300;
613 while ((tmp & 0x2) && iter_max) {
614 WREG8(DAC_INDEX, MGA1064_SPAREREG);
615 tmp = RREG8(DAC_DATA);
616 udelay(1000);
617 iter_max--;
618 }
619 }
620}
621
622static void mga_g200wb_commit(struct drm_crtc *crtc)
623{
624 u8 tmp;
625 struct mga_device *mdev = crtc->dev->dev_private;
626
627 /* 1- The first step is to ensure that the vrsten and hrsten are set */
628 WREG8(MGAREG_CRTCEXT_INDEX, 1);
629 tmp = RREG8(MGAREG_CRTCEXT_DATA);
630 WREG8(MGAREG_CRTCEXT_DATA, tmp | 0x88);
631
632 /* 2- second step is to assert the rstlvl2 */
633 WREG8(DAC_INDEX, MGA1064_REMHEADCTL2);
634 tmp = RREG8(DAC_DATA);
635 tmp |= 0x8;
636 WREG8(DAC_DATA, tmp);
637
638 /* wait 10 us */
639 udelay(10);
640
641 /* 3- deassert rstlvl2 */
642 tmp &= ~0x08;
643 WREG8(DAC_INDEX, MGA1064_REMHEADCTL2);
644 WREG8(DAC_DATA, tmp);
645
646 /* 4- remove mask of scan request */
647 WREG8(DAC_INDEX, MGA1064_SPAREREG);
648 tmp = RREG8(DAC_DATA);
649 tmp &= ~0x80;
650 WREG8(DAC_DATA, tmp);
651
652 /* 5- put back a 0 on the misc<0> line */
653 WREG8(DAC_INDEX, MGA1064_GEN_IO_DATA);
654 tmp = RREG8(DAC_DATA);
655 tmp &= ~0x10;
656 WREG_DAC(MGA1064_GEN_IO_DATA, tmp);
657}
658
659
660void mga_set_start_address(struct drm_crtc *crtc, unsigned offset)
661{
662 struct mga_device *mdev = crtc->dev->dev_private;
663 u32 addr;
664 int count;
665
666 while (RREG8(0x1fda) & 0x08);
667 while (!(RREG8(0x1fda) & 0x08));
668
669 count = RREG8(MGAREG_VCOUNT) + 2;
670 while (RREG8(MGAREG_VCOUNT) < count);
671
672 addr = offset >> 2;
673 WREG_CRT(0x0d, (u8)(addr & 0xff));
674 WREG_CRT(0x0c, (u8)(addr >> 8) & 0xff);
675 WREG_CRT(0xaf, (u8)(addr >> 16) & 0xf);
676}
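
The scanout start address is held in 32-bit units and scattered across three registers: CRTC 0x0d takes bits 7:0, CRTC 0x0c bits 15:8, and CRTC 0xaf the top nibble, bits 19:16. A quick standalone check of that arithmetic for an arbitrary (aligned) byte offset:

#include <stdio.h>

int main(void)
{
	unsigned offset = 0x234500;	/* byte offset into VRAM */
	unsigned addr = offset >> 2;	/* the CRTC counts in 32-bit units */

	printf("CRT0x0d=0x%02x CRT0x0c=0x%02x CRT0xaf=0x%x\n",
	       addr & 0xff, (addr >> 8) & 0xff, (addr >> 16) & 0xf);
	/* prints CRT0x0d=0x40 CRT0x0c=0xd1 CRT0xaf=0x8 */
	return 0;
}
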
677
678
679/* like the ast driver, we force buffers out of VRAM once they stop being scanned out */
680static int mga_crtc_do_set_base(struct drm_crtc *crtc,
681 struct drm_framebuffer *fb,
682 int x, int y, int atomic)
683{
684 struct mga_device *mdev = crtc->dev->dev_private;
685 struct drm_gem_object *obj;
686 struct mga_framebuffer *mga_fb;
687 struct mgag200_bo *bo;
688 int ret;
689 u64 gpu_addr;
690
691 /* push the previous fb to system ram */
692 if (!atomic && fb) {
693 mga_fb = to_mga_framebuffer(fb);
694 obj = mga_fb->obj;
695 bo = gem_to_mga_bo(obj);
696 ret = mgag200_bo_reserve(bo, false);
697 if (ret)
698 return ret;
699 mgag200_bo_push_sysram(bo);
700 mgag200_bo_unreserve(bo);
701 }
702
703 mga_fb = to_mga_framebuffer(crtc->fb);
704 obj = mga_fb->obj;
705 bo = gem_to_mga_bo(obj);
706
707 ret = mgag200_bo_reserve(bo, false);
708 if (ret)
709 return ret;
710
711 ret = mgag200_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
712 if (ret) {
713 mgag200_bo_unreserve(bo);
714 return ret;
715 }
716
717 if (&mdev->mfbdev->mfb == mga_fb) {
718 /* if pushing console in kmap it */
719 ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
720 if (ret)
721 DRM_ERROR("failed to kmap fbcon\n");
722
723 }
724 mgag200_bo_unreserve(bo);
725
726 DRM_INFO("mga base %llx\n", gpu_addr);
727
728 mga_set_start_address(crtc, (u32)gpu_addr);
729
730 return 0;
731}
732
733static int mga_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
734 struct drm_framebuffer *old_fb)
735{
736 return mga_crtc_do_set_base(crtc, old_fb, x, y, 0);
737}
738
739static int mga_crtc_mode_set(struct drm_crtc *crtc,
740 struct drm_display_mode *mode,
741 struct drm_display_mode *adjusted_mode,
742 int x, int y, struct drm_framebuffer *old_fb)
743{
744 struct drm_device *dev = crtc->dev;
745 struct mga_device *mdev = dev->dev_private;
746 int hdisplay, hsyncstart, hsyncend, htotal;
747 int vdisplay, vsyncstart, vsyncend, vtotal;
748 int pitch;
749 int option = 0, option2 = 0;
750 int i;
751 unsigned char misc = 0;
752 unsigned char ext_vga[6];
753 unsigned char ext_vga_index24;
754 unsigned char dac_index90 = 0;
755 u8 bppshift;
756
757 static unsigned char dacvalue[] = {
758 /* 0x00: */ 0, 0, 0, 0, 0, 0, 0x00, 0,
759 /* 0x08: */ 0, 0, 0, 0, 0, 0, 0, 0,
760 /* 0x10: */ 0, 0, 0, 0, 0, 0, 0, 0,
761 /* 0x18: */ 0x00, 0, 0xC9, 0xFF, 0xBF, 0x20, 0x1F, 0x20,
762 /* 0x20: */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
763 /* 0x28: */ 0x00, 0x00, 0x00, 0x00, 0, 0, 0, 0x40,
764 /* 0x30: */ 0x00, 0xB0, 0x00, 0xC2, 0x34, 0x14, 0x02, 0x83,
765 /* 0x38: */ 0x00, 0x93, 0x00, 0x77, 0x00, 0x00, 0x00, 0x3A,
766 /* 0x40: */ 0, 0, 0, 0, 0, 0, 0, 0,
767 /* 0x48: */ 0, 0, 0, 0, 0, 0, 0, 0
768 };
769
770 bppshift = mdev->bpp_shifts[(crtc->fb->bits_per_pixel >> 3) - 1];
771
772 switch (mdev->type) {
773 case G200_SE_A:
774 case G200_SE_B:
775 dacvalue[MGA1064_VREF_CTL] = 0x03;
776 dacvalue[MGA1064_PIX_CLK_CTL] = MGA1064_PIX_CLK_CTL_SEL_PLL;
777 dacvalue[MGA1064_MISC_CTL] = MGA1064_MISC_CTL_DAC_EN |
778 MGA1064_MISC_CTL_VGA8 |
779 MGA1064_MISC_CTL_DAC_RAM_CS;
780 if (mdev->has_sdram)
781 option = 0x40049120;
782 else
783 option = 0x4004d120;
784 option2 = 0x00008000;
785 break;
786 case G200_WB:
787 dacvalue[MGA1064_VREF_CTL] = 0x07;
788 option = 0x41049120;
789 option2 = 0x0000b000;
790 break;
791 case G200_EV:
792 dacvalue[MGA1064_PIX_CLK_CTL] = MGA1064_PIX_CLK_CTL_SEL_PLL;
793 dacvalue[MGA1064_MISC_CTL] = MGA1064_MISC_CTL_VGA8 |
794 MGA1064_MISC_CTL_DAC_RAM_CS;
795 option = 0x00000120;
796 option2 = 0x0000b000;
797 break;
798 case G200_EH:
799 dacvalue[MGA1064_MISC_CTL] = MGA1064_MISC_CTL_VGA8 |
800 MGA1064_MISC_CTL_DAC_RAM_CS;
801 option = 0x00000120;
802 option2 = 0x0000b000;
803 break;
804 case G200_ER:
805 dac_index90 = 0;
806 break;
807 }
808
809 switch (crtc->fb->bits_per_pixel) {
810 case 8:
811 dacvalue[MGA1064_MUL_CTL] = MGA1064_MUL_CTL_8bits;
812 break;
813 case 16:
814 if (crtc->fb->depth == 15)
815 dacvalue[MGA1064_MUL_CTL] = MGA1064_MUL_CTL_15bits;
816 else
817 dacvalue[MGA1064_MUL_CTL] = MGA1064_MUL_CTL_16bits;
818 break;
819 case 24:
820 dacvalue[MGA1064_MUL_CTL] = MGA1064_MUL_CTL_24bits;
821 break;
822 case 32:
823 dacvalue[MGA1064_MUL_CTL] = MGA1064_MUL_CTL_32_24bits;
824 break;
825 }
826
827 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
828 misc |= 0x40;
829 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
830 misc |= 0x80;
831
832
833 for (i = 0; i < sizeof(dacvalue); i++) {
834 if ((i <= 0x03) ||
835 (i == 0x07) ||
836 (i == 0x0b) ||
837 (i == 0x0f) ||
838 ((i >= 0x13) && (i <= 0x17)) ||
839 (i == 0x1b) ||
840 (i == 0x1c) ||
841 ((i >= 0x1f) && (i <= 0x29)) ||
842 ((i >= 0x30) && (i <= 0x37)))
843 continue;
844 if (IS_G200_SE(mdev) &&
845 ((i == 0x2c) || (i == 0x2d) || (i == 0x2e)))
846 continue;
847 if ((mdev->type == G200_EV || mdev->type == G200_WB || mdev->type == G200_EH) &&
848 (i >= 0x44) && (i <= 0x4e))
849 continue;
850
851 WREG_DAC(i, dacvalue[i]);
852 }
853
854 if (mdev->type == G200_ER) {
855 WREG_DAC(0x90, dac_index90);
856 }
857
858
859 if (option)
860 pci_write_config_dword(dev->pdev, PCI_MGA_OPTION, option);
861 if (option2)
862 pci_write_config_dword(dev->pdev, PCI_MGA_OPTION2, option2);
863
864 WREG_SEQ(2, 0xf);
865 WREG_SEQ(3, 0);
866 WREG_SEQ(4, 0xe);
867
868 pitch = crtc->fb->pitches[0] / (crtc->fb->bits_per_pixel / 8);
869 /* the same shift applies at every depth, including 24bpp */
870 pitch = pitch >> (4 - bppshift);
873
874 hdisplay = mode->hdisplay / 8 - 1;
875 hsyncstart = mode->hsync_start / 8 - 1;
876 hsyncend = mode->hsync_end / 8 - 1;
877 htotal = mode->htotal / 8 - 1;
878
879 /* Work around hardware quirk */
880 if ((htotal & 0x07) == 0x06 || (htotal & 0x07) == 0x04)
881 htotal++;
882
883 vdisplay = mode->vdisplay - 1;
884 vsyncstart = mode->vsync_start - 1;
885 vsyncend = mode->vsync_end - 1;
886 vtotal = mode->vtotal - 2;
887
888 WREG_GFX(0, 0);
889 WREG_GFX(1, 0);
890 WREG_GFX(2, 0);
891 WREG_GFX(3, 0);
892 WREG_GFX(4, 0);
893 WREG_GFX(5, 0x40);
894 WREG_GFX(6, 0x5);
895 WREG_GFX(7, 0xf);
896 WREG_GFX(8, 0xf);
897
898 WREG_CRT(0, htotal - 4);
899 WREG_CRT(1, hdisplay);
900 WREG_CRT(2, hdisplay);
901 WREG_CRT(3, (htotal & 0x1F) | 0x80);
902 WREG_CRT(4, hsyncstart);
903 WREG_CRT(5, ((htotal & 0x20) << 2) | (hsyncend & 0x1F));
904 WREG_CRT(6, vtotal & 0xFF);
905 WREG_CRT(7, ((vtotal & 0x100) >> 8) |
906 ((vdisplay & 0x100) >> 7) |
907 ((vsyncstart & 0x100) >> 6) |
908 ((vdisplay & 0x100) >> 5) |
909 ((vdisplay & 0x100) >> 4) | /* linecomp */
910 ((vtotal & 0x200) >> 4)|
911 ((vdisplay & 0x200) >> 3) |
912 ((vsyncstart & 0x200) >> 2));
913 WREG_CRT(9, ((vdisplay & 0x200) >> 4) |
914 ((vdisplay & 0x200) >> 3));
915 WREG_CRT(10, 0);
916 WREG_CRT(11, 0);
917 WREG_CRT(12, 0);
918 WREG_CRT(13, 0);
919 WREG_CRT(14, 0);
920 WREG_CRT(15, 0);
921 WREG_CRT(16, vsyncstart & 0xFF);
922 WREG_CRT(17, (vsyncend & 0x0F) | 0x20);
923 WREG_CRT(18, vdisplay & 0xFF);
924 WREG_CRT(19, pitch & 0xFF);
925 WREG_CRT(20, 0);
926 WREG_CRT(21, vdisplay & 0xFF);
927 WREG_CRT(22, (vtotal + 1) & 0xFF);
928 WREG_CRT(23, 0xc3);
929 WREG_CRT(24, vdisplay & 0xFF);
930
931 ext_vga[0] = 0;
932 ext_vga[5] = 0;
933
934 /* TODO interlace */
935
936 ext_vga[0] |= (pitch & 0x300) >> 4;
937 ext_vga[1] = (((htotal - 4) & 0x100) >> 8) |
938 ((hdisplay & 0x100) >> 7) |
939 ((hsyncstart & 0x100) >> 6) |
940 (htotal & 0x40);
941 ext_vga[2] = ((vtotal & 0xc00) >> 10) |
942 ((vdisplay & 0x400) >> 8) |
943 ((vdisplay & 0xc00) >> 7) |
944 ((vsyncstart & 0xc00) >> 5) |
945 ((vdisplay & 0x400) >> 3);
946 if (crtc->fb->bits_per_pixel == 24)
947 ext_vga[3] = (((1 << bppshift) * 3) - 1) | 0x80;
948 else
949 ext_vga[3] = ((1 << bppshift) - 1) | 0x80;
950 ext_vga[4] = 0;
951 if (mdev->type == G200_WB)
952 ext_vga[1] |= 0x88;
953
954 ext_vga_index24 = 0x05;
955
956 /* Set pixel clocks; keep the sync polarity bits chosen above */
957 misc |= 0x2d;
958 WREG8(MGA_MISC_OUT, misc);
959
960 mga_crtc_set_plls(mdev, mode->clock);
961
962 for (i = 0; i < 6; i++) {
963 WREG_ECRT(i, ext_vga[i]);
964 }
965
966 if (mdev->type == G200_ER)
967 WREG_ECRT(24, ext_vga_index24);
968
969 if (mdev->type == G200_EV) {
970 WREG_ECRT(6, 0);
971 }
972
973 WREG_ECRT(0, ext_vga[0]);
974 /* Enable mga pixel clock; misc already carries the polarity bits */
975 misc |= 0x2d;
976
977 WREG8(MGA_MISC_OUT, misc);
978
979 if (adjusted_mode)
980 memcpy(&mdev->mode, mode, sizeof(struct drm_display_mode));
981
982 mga_crtc_do_set_base(crtc, old_fb, x, y, 0);
983
984 /* reset tagfifo */
985 if (mdev->type == G200_ER) {
986 u32 mem_ctl = RREG32(MGAREG_MEMCTL);
987 u8 seq1;
988
989 /* screen off */
990 WREG8(MGAREG_SEQ_INDEX, 0x01);
991 seq1 = RREG8(MGAREG_SEQ_DATA) | 0x20;
992 WREG8(MGAREG_SEQ_DATA, seq1);
993
994 WREG32(MGAREG_MEMCTL, mem_ctl | 0x00200000);
995 udelay(1000);
996 WREG32(MGAREG_MEMCTL, mem_ctl & ~0x00200000);
997
998 WREG8(MGAREG_SEQ_DATA, seq1 & ~0x20);
999 }
1000
1001
1002 if (IS_G200_SE(mdev)) {
1003 if (mdev->reg_1e24 >= 0x02) {
1004 u8 hi_pri_lvl;
1005 u32 bpp;
1006 u32 mb;
1007
1008 if (crtc->fb->bits_per_pixel > 16)
1009 bpp = 32;
1010 else if (crtc->fb->bits_per_pixel > 8)
1011 bpp = 16;
1012 else
1013 bpp = 8;
1014
1015 mb = (mode->clock * bpp) / 1000;
1016 if (mb > 3100)
1017 hi_pri_lvl = 0;
1018 else if (mb > 2600)
1019 hi_pri_lvl = 1;
1020 else if (mb > 1900)
1021 hi_pri_lvl = 2;
1022 else if (mb > 1160)
1023 hi_pri_lvl = 3;
1024 else if (mb > 440)
1025 hi_pri_lvl = 4;
1026 else
1027 hi_pri_lvl = 5;
1028
1029 WREG8(0x1fde, 0x06);
1030 WREG8(0x1fdf, hi_pri_lvl);
1031 } else {
1032 if (mdev->reg_1e24 >= 0x01)
1033 WREG8(0x1fdf, 0x03);
1034 else
1035 WREG8(0x1fdf, 0x04);
1036 }
1037 }
1038 return 0;
1039}
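
The timing programming in mga_crtc_mode_set() converts the DRM mode into classic VGA units: horizontal values are divided by 8 (one character clock per 8 pixels), everything is made zero-based, and the bits that no longer fit in 8 are spread across CRTC7, CRTC9 and the extended registers. For the standard 65 MHz 1024x768@60 mode (htotal 1344, vtotal 806) this yields hdisplay 127, hsyncstart 130, hsyncend 147 and htotal 167 (written to CRTC0 as 163 per the htotal - 4 convention), plus vdisplay 767, vsyncstart 770, vsyncend 776 and vtotal 804.
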
1040
1041#if 0 /* code from mjg to attempt D3 on crtc dpms off - revisit later */
1042static int mga_suspend(struct drm_crtc *crtc)
1043{
1044 struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
1045 struct drm_device *dev = crtc->dev;
1046 struct mga_device *mdev = dev->dev_private;
1047 struct pci_dev *pdev = dev->pdev;
1048 u32 option;
1049
1050 if (mdev->suspended)
1051 return 0;
1052
1053 WREG_SEQ(1, 0x20);
1054 WREG_ECRT(1, 0x30);
1055 /* Disable the pixel clock */
1056 WREG_DAC(0x1a, 0x05);
1057 /* Power down the DAC */
1058 WREG_DAC(0x1e, 0x18);
1059 /* Power down the pixel PLL */
1060 WREG_DAC(0x1a, 0x0d);
1061
1062 /* Disable PLLs and clocks */
1063 pci_read_config_dword(pdev, PCI_MGA_OPTION, &option);
1064 option &= ~(0x1F8024);
1065 pci_write_config_dword(pdev, PCI_MGA_OPTION, option);
1066 pci_set_power_state(pdev, PCI_D3hot);
1067 pci_disable_device(pdev);
1068
1069 mdev->suspended = true;
1070
1071 return 0;
1072}
1073
1074static int mga_resume(struct drm_crtc *crtc)
1075{
1076 struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
1077 struct drm_device *dev = crtc->dev;
1078 struct mga_device *mdev = dev->dev_private;
1079 struct pci_dev *pdev = dev->pdev;
1080 u32 option;
1081
1082 if (!mdev->suspended)
1083 return 0;
1084
1085 pci_set_power_state(pdev, PCI_D0);
1086 pci_enable_device(pdev);
1087
1088 /* Disable sysclk */
1089 pci_read_config_dword(pdev, PCI_MGA_OPTION, &option);
1090 option &= ~(0x4);
1091 pci_write_config_dword(pdev, PCI_MGA_OPTION, option);
1092
1093 mdev->suspended = false;
1094
1095 return 0;
1096}
1097
1098#endif
1099
1100static void mga_crtc_dpms(struct drm_crtc *crtc, int mode)
1101{
1102 struct drm_device *dev = crtc->dev;
1103 struct mga_device *mdev = dev->dev_private;
1104 u8 seq1 = 0, crtcext1 = 0;
1105
1106 switch (mode) {
1107 case DRM_MODE_DPMS_ON:
1108 seq1 = 0;
1109 crtcext1 = 0;
1110 mga_crtc_load_lut(crtc);
1111 break;
1112 case DRM_MODE_DPMS_STANDBY:
1113 seq1 = 0x20;
1114 crtcext1 = 0x10;
1115 break;
1116 case DRM_MODE_DPMS_SUSPEND:
1117 seq1 = 0x20;
1118 crtcext1 = 0x20;
1119 break;
1120 case DRM_MODE_DPMS_OFF:
1121 seq1 = 0x20;
1122 crtcext1 = 0x30;
1123 break;
1124 }
1125
1126#if 0
1127 if (mode == DRM_MODE_DPMS_OFF) {
1128 mga_suspend(crtc);
1129 }
1130#endif
1131 WREG8(MGAREG_SEQ_INDEX, 0x01);
1132 seq1 |= RREG8(MGAREG_SEQ_DATA) & ~0x20;
1133 mga_wait_vsync(mdev);
1134 mga_wait_busy(mdev);
1135 WREG8(MGAREG_SEQ_DATA, seq1);
1136 msleep(20);
1137 WREG8(MGAREG_CRTCEXT_INDEX, 0x01);
1138 crtcext1 |= RREG8(MGAREG_CRTCEXT_DATA) & ~0x30;
1139 WREG8(MGAREG_CRTCEXT_DATA, crtcext1);
1140
1141#if 0
1142 if (mode == DRM_MODE_DPMS_ON && mdev->suspended == true) {
1143 mga_resume(crtc);
1144 drm_helper_resume_force_mode(dev);
1145 }
1146#endif
1147}
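
The dpms switch above boils down to two bits per state: SEQ1<5> (0x20) blanks the display, while CRTCEXT1<4> and CRTCEXT1<5> gate hsync and vsync respectively, matching VESA DPMS signalling. The same mapping in a hypothetical table form (just a compact restatement, not how the driver is written; DRM_MODE_DPMS_* are 0..3 in the DRM headers):

struct mga_dpms_bits {
	unsigned char seq1;
	unsigned char crtcext1;
};

static const struct mga_dpms_bits mga_dpms_table[] = {
	[DRM_MODE_DPMS_ON]      = { 0x00, 0x00 },	/* display on, both syncs running */
	[DRM_MODE_DPMS_STANDBY] = { 0x20, 0x10 },	/* blanked, hsync stopped */
	[DRM_MODE_DPMS_SUSPEND] = { 0x20, 0x20 },	/* blanked, vsync stopped */
	[DRM_MODE_DPMS_OFF]     = { 0x20, 0x30 },	/* blanked, both syncs stopped */
};
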
1148
1149/*
1150 * This is called before a mode is programmed. A typical use might be to
1151 * enable DPMS during the programming to avoid seeing intermediate stages,
1152 * but that's not relevant to us
1153 */
1154static void mga_crtc_prepare(struct drm_crtc *crtc)
1155{
1156 struct drm_device *dev = crtc->dev;
1157 struct mga_device *mdev = dev->dev_private;
1158 u8 tmp;
1159
1160 /* mga_resume(crtc);*/
1161
1162 WREG8(MGAREG_CRTC_INDEX, 0x11);
1163 tmp = RREG8(MGAREG_CRTC_DATA);
1164 WREG_CRT(0x11, tmp | 0x80);
1165
1166 if (mdev->type == G200_SE_A || mdev->type == G200_SE_B) {
1167 WREG_SEQ(0, 1);
1168 msleep(50);
1169 WREG_SEQ(1, 0x20);
1170 msleep(20);
1171 } else {
1172 WREG8(MGAREG_SEQ_INDEX, 0x1);
1173 tmp = RREG8(MGAREG_SEQ_DATA);
1174
1175 /* start sync reset */
1176 WREG_SEQ(0, 1);
1177 WREG_SEQ(1, tmp | 0x20);
1178 }
1179
1180 if (mdev->type == G200_WB)
1181 mga_g200wb_prepare(crtc);
1182
1183 WREG_CRT(17, 0);
1184}
1185
1186/*
1187 * This is called after a mode is programmed. It should reverse anything done
1188 * by the prepare function
1189 */
1190static void mga_crtc_commit(struct drm_crtc *crtc)
1191{
1192 struct drm_device *dev = crtc->dev;
1193 struct mga_device *mdev = dev->dev_private;
1194 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
1195 u8 tmp;
1196
1197 if (mdev->type == G200_WB)
1198 mga_g200wb_commit(crtc);
1199
1200 if (mdev->type == G200_SE_A || mdev->type == G200_SE_B) {
1201 msleep(50);
1202 WREG_SEQ(1, 0x0);
1203 msleep(20);
1204 WREG_SEQ(0, 0x3);
1205 } else {
1206 WREG8(MGAREG_SEQ_INDEX, 0x1);
1207 tmp = RREG8(MGAREG_SEQ_DATA);
1208
1209 tmp &= ~0x20;
1210 WREG_SEQ(0x1, tmp);
1211 WREG_SEQ(0, 3);
1212 }
1213 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
1214}
1215
1216/*
1217 * The core can pass us a set of gamma values to program. We actually only
1218 * use this for 8-bit mode so can't perform smooth fades on deeper modes,
1219 * but it's a requirement that we provide the function
1220 */
1221static void mga_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
1222 u16 *blue, uint32_t start, uint32_t size)
1223{
1224 struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
1225 int end = (start + size > MGAG200_LUT_SIZE) ? MGAG200_LUT_SIZE : start + size;
1226 int i;
1227
1228 for (i = start; i < end; i++) {
1229 mga_crtc->lut_r[i] = red[i] >> 8;
1230 mga_crtc->lut_g[i] = green[i] >> 8;
1231 mga_crtc->lut_b[i] = blue[i] >> 8;
1232 }
1233 mga_crtc_load_lut(crtc);
1234}
1235
1236/* Simple cleanup function */
1237static void mga_crtc_destroy(struct drm_crtc *crtc)
1238{
1239 struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
1240
1241 drm_crtc_cleanup(crtc);
1242 kfree(mga_crtc);
1243}
1244
1245/* These provide the minimum set of functions required to handle a CRTC */
1246static const struct drm_crtc_funcs mga_crtc_funcs = {
1247 .gamma_set = mga_crtc_gamma_set,
1248 .set_config = drm_crtc_helper_set_config,
1249 .destroy = mga_crtc_destroy,
1250};
1251
1252static const struct drm_crtc_helper_funcs mga_helper_funcs = {
1253 .dpms = mga_crtc_dpms,
1254 .mode_fixup = mga_crtc_mode_fixup,
1255 .mode_set = mga_crtc_mode_set,
1256 .mode_set_base = mga_crtc_mode_set_base,
1257 .prepare = mga_crtc_prepare,
1258 .commit = mga_crtc_commit,
1259 .load_lut = mga_crtc_load_lut,
1260};
1261
1262/* CRTC setup */
1263static void mga_crtc_init(struct drm_device *dev)
1264{
1265 struct mga_device *mdev = dev->dev_private;
1266 struct mga_crtc *mga_crtc;
1267 int i;
1268
1269 mga_crtc = kzalloc(sizeof(struct mga_crtc) +
1270 (MGAG200FB_CONN_LIMIT * sizeof(struct drm_connector *)),
1271 GFP_KERNEL);
1272
1273 if (mga_crtc == NULL)
1274 return;
1275
1276 drm_crtc_init(dev, &mga_crtc->base, &mga_crtc_funcs);
1277
1278 drm_mode_crtc_set_gamma_size(&mga_crtc->base, MGAG200_LUT_SIZE);
1279 mdev->mode_info.crtc = mga_crtc;
1280
1281 for (i = 0; i < MGAG200_LUT_SIZE; i++) {
1282 mga_crtc->lut_r[i] = i;
1283 mga_crtc->lut_g[i] = i;
1284 mga_crtc->lut_b[i] = i;
1285 }
1286
1287 drm_crtc_helper_add(&mga_crtc->base, &mga_helper_funcs);
1288}
1289
1290/** Sets the color ramps on behalf of fbcon */
1291void mga_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
1292 u16 blue, int regno)
1293{
1294 struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
1295
1296 mga_crtc->lut_r[regno] = red >> 8;
1297 mga_crtc->lut_g[regno] = green >> 8;
1298 mga_crtc->lut_b[regno] = blue >> 8;
1299}
1300
1301/** Gets the color ramps on behalf of fbcon */
1302void mga_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
1303 u16 *blue, int regno)
1304{
1305 struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
1306
1307 *red = (u16)mga_crtc->lut_r[regno] << 8;
1308 *green = (u16)mga_crtc->lut_g[regno] << 8;
1309 *blue = (u16)mga_crtc->lut_b[regno] << 8;
1310}
1311
1312/*
1313 * The encoder comes after the CRTC in the output pipeline, but before
1314 * the connector. It's responsible for ensuring that the digital
1315 * stream is appropriately converted into the output format. Setup is
1316 * very simple in this case - the on-chip DAC handles the analogue
1317 * conversion, so the hooks below have nothing to do
1318 */
1319
1320/*
1321 * These functions are analogous to those in the CRTC code, but are intended
1322 * to handle any encoder-specific limitations
1323 */
1324static bool mga_encoder_mode_fixup(struct drm_encoder *encoder,
1325 struct drm_display_mode *mode,
1326 struct drm_display_mode *adjusted_mode)
1327{
1328 return true;
1329}
1330
1331static void mga_encoder_mode_set(struct drm_encoder *encoder,
1332 struct drm_display_mode *mode,
1333 struct drm_display_mode *adjusted_mode)
1334{
1335
1336}
1337
1338static void mga_encoder_dpms(struct drm_encoder *encoder, int state)
1339{
1340 return;
1341}
1342
1343static void mga_encoder_prepare(struct drm_encoder *encoder)
1344{
1345}
1346
1347static void mga_encoder_commit(struct drm_encoder *encoder)
1348{
1349}
1350
1351void mga_encoder_destroy(struct drm_encoder *encoder)
1352{
1353 struct mga_encoder *mga_encoder = to_mga_encoder(encoder);
1354 drm_encoder_cleanup(encoder);
1355 kfree(mga_encoder);
1356}
1357
1358static const struct drm_encoder_helper_funcs mga_encoder_helper_funcs = {
1359 .dpms = mga_encoder_dpms,
1360 .mode_fixup = mga_encoder_mode_fixup,
1361 .mode_set = mga_encoder_mode_set,
1362 .prepare = mga_encoder_prepare,
1363 .commit = mga_encoder_commit,
1364};
1365
1366static const struct drm_encoder_funcs mga_encoder_encoder_funcs = {
1367 .destroy = mga_encoder_destroy,
1368};
1369
1370static struct drm_encoder *mga_encoder_init(struct drm_device *dev)
1371{
1372 struct drm_encoder *encoder;
1373 struct mga_encoder *mga_encoder;
1374
1375 mga_encoder = kzalloc(sizeof(struct mga_encoder), GFP_KERNEL);
1376 if (!mga_encoder)
1377 return NULL;
1378
1379 encoder = &mga_encoder->base;
1380 encoder->possible_crtcs = 0x1;
1381
1382 drm_encoder_init(dev, encoder, &mga_encoder_encoder_funcs,
1383 DRM_MODE_ENCODER_DAC);
1384 drm_encoder_helper_add(encoder, &mga_encoder_helper_funcs);
1385
1386 return encoder;
1387}
1388
1389
1390static int mga_vga_get_modes(struct drm_connector *connector)
1391{
1392 struct mga_connector *mga_connector = to_mga_connector(connector);
1393 struct edid *edid;
1394 int ret = 0;
1395
1396 edid = drm_get_edid(connector, &mga_connector->i2c->adapter);
1397 if (edid) {
1398 drm_mode_connector_update_edid_property(connector, edid);
1399 ret = drm_add_edid_modes(connector, edid);
1400 connector->display_info.raw_edid = NULL;
1401 kfree(edid);
1402 }
1403 return ret;
1404}
1405
1406static int mga_vga_mode_valid(struct drm_connector *connector,
1407 struct drm_display_mode *mode)
1408{
1409 /* FIXME: Add bandwidth and g200se limitations */
1410
1411 if (mode->crtc_hdisplay > 2048 || mode->crtc_hsync_start > 4096 ||
1412 mode->crtc_hsync_end > 4096 || mode->crtc_htotal > 4096 ||
1413 mode->crtc_vdisplay > 2048 || mode->crtc_vsync_start > 4096 ||
1414 mode->crtc_vsync_end > 4096 || mode->crtc_vtotal > 4096) {
1415 return MODE_BAD;
1416 }
1417
1418 return MODE_OK;
1419}
1420
1421struct drm_encoder *mga_connector_best_encoder(struct drm_connector
1422 *connector)
1423{
1424 int enc_id = connector->encoder_ids[0];
1425 struct drm_mode_object *obj;
1426 struct drm_encoder *encoder;
1427
1428 /* pick the encoder ids */
1429 if (enc_id) {
1430 obj = drm_mode_object_find(connector->dev,
1431 enc_id,
1432 DRM_MODE_OBJECT_ENCODER);
1433 if (!obj)
1434 return NULL;
1435 encoder = obj_to_encoder(obj);
1436 return encoder;
1437 }
1438 return NULL;
1439}
1440
1441static enum drm_connector_status mga_vga_detect(struct drm_connector
1442 *connector, bool force)
1443{
1444 return connector_status_connected;
1445}
1446
1447static void mga_connector_destroy(struct drm_connector *connector)
1448{
1449 struct mga_connector *mga_connector = to_mga_connector(connector);
1450 mgag200_i2c_destroy(mga_connector->i2c);
1451 drm_connector_cleanup(connector);
1452 kfree(connector);
1453}
1454
1455struct drm_connector_helper_funcs mga_vga_connector_helper_funcs = {
1456 .get_modes = mga_vga_get_modes,
1457 .mode_valid = mga_vga_mode_valid,
1458 .best_encoder = mga_connector_best_encoder,
1459};
1460
1461struct drm_connector_funcs mga_vga_connector_funcs = {
1462 .dpms = drm_helper_connector_dpms,
1463 .detect = mga_vga_detect,
1464 .fill_modes = drm_helper_probe_single_connector_modes,
1465 .destroy = mga_connector_destroy,
1466};
1467
1468static struct drm_connector *mga_vga_init(struct drm_device *dev)
1469{
1470 struct drm_connector *connector;
1471 struct mga_connector *mga_connector;
1472
1473 mga_connector = kzalloc(sizeof(struct mga_connector), GFP_KERNEL);
1474 if (!mga_connector)
1475 return NULL;
1476
1477 connector = &mga_connector->base;
1478
1479 drm_connector_init(dev, connector,
1480 &mga_vga_connector_funcs, DRM_MODE_CONNECTOR_VGA);
1481
1482 drm_connector_helper_add(connector, &mga_vga_connector_helper_funcs);
1483
1484 mga_connector->i2c = mgag200_i2c_create(dev);
1485 if (!mga_connector->i2c)
1486 DRM_ERROR("failed to add ddc bus\n");
1487
1488 return connector;
1489}
1490
1491
1492int mgag200_modeset_init(struct mga_device *mdev)
1493{
1494 struct drm_encoder *encoder;
1495 struct drm_connector *connector;
1496 int ret;
1497
1498 mdev->mode_info.mode_config_initialized = true;
1499
1500 mdev->dev->mode_config.max_width = MGAG200_MAX_FB_WIDTH;
1501 mdev->dev->mode_config.max_height = MGAG200_MAX_FB_HEIGHT;
1502
1503 mdev->dev->mode_config.fb_base = mdev->mc.vram_base;
1504
1505 mga_crtc_init(mdev->dev);
1506
1507 encoder = mga_encoder_init(mdev->dev);
1508 if (!encoder) {
1509 DRM_ERROR("mga_encoder_init failed\n");
1510 return -ENOMEM;
1511 }
1512
1513 connector = mga_vga_init(mdev->dev);
1514 if (!connector) {
1515 DRM_ERROR("mga_vga_init failed\n");
1516 return -ENOMEM;
1517 }
1518
1519 drm_mode_connector_attach_encoder(connector, encoder);
1520
1521 ret = mgag200_fbdev_init(mdev);
1522 if (ret) {
1523 DRM_ERROR("mga_fbdev_init failed\n");
1524 return ret;
1525 }
1526
1527 return 0;
1528}
1529
1530void mgag200_modeset_fini(struct mga_device *mdev)
1531{
1532
1533}
diff --git a/drivers/gpu/drm/mgag200/mgag200_reg.h b/drivers/gpu/drm/mgag200/mgag200_reg.h
new file mode 100644
index 000000000000..fb24d8655feb
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_reg.h
@@ -0,0 +1,661 @@
1/*
2 * MGA Millennium (MGA2064W) functions
3 * MGA Mystique (MGA1064SG) functions
4 *
5 * Copyright 1996 The XFree86 Project, Inc.
6 *
7 * Authors
8 * Dirk Hohndel
9 * hohndel@XFree86.Org
10 * David Dawes
11 * dawes@XFree86.Org
12 * Contributors:
13 * Guy DESBIEF, Aix-en-provence, France
14 * g.desbief@aix.pacwan.net
15 * MGA1064SG Mystique register file
16 */
17
18
19#ifndef _MGA_REG_H_
20#define _MGA_REG_H_
21
22#define MGAREG_DWGCTL 0x1c00
23#define MGAREG_MACCESS 0x1c04
24/* the following is a mystique only register */
25#define MGAREG_MCTLWTST 0x1c08
26#define MGAREG_ZORG 0x1c0c
27
28#define MGAREG_PAT0 0x1c10
29#define MGAREG_PAT1 0x1c14
30#define MGAREG_PLNWT 0x1c1c
31
32#define MGAREG_BCOL 0x1c20
33#define MGAREG_FCOL 0x1c24
34
35#define MGAREG_SRC0 0x1c30
36#define MGAREG_SRC1 0x1c34
37#define MGAREG_SRC2 0x1c38
38#define MGAREG_SRC3 0x1c3c
39
40#define MGAREG_XYSTRT 0x1c40
41#define MGAREG_XYEND 0x1c44
42
43#define MGAREG_SHIFT 0x1c50
44/* the following is a mystique only register */
45#define MGAREG_DMAPAD 0x1c54
46#define MGAREG_SGN 0x1c58
47#define MGAREG_LEN 0x1c5c
48
49#define MGAREG_AR0 0x1c60
50#define MGAREG_AR1 0x1c64
51#define MGAREG_AR2 0x1c68
52#define MGAREG_AR3 0x1c6c
53#define MGAREG_AR4 0x1c70
54#define MGAREG_AR5 0x1c74
55#define MGAREG_AR6 0x1c78
56
57#define MGAREG_CXBNDRY 0x1c80
58#define MGAREG_FXBNDRY 0x1c84
59#define MGAREG_YDSTLEN 0x1c88
60#define MGAREG_PITCH 0x1c8c
61
62#define MGAREG_YDST 0x1c90
63#define MGAREG_YDSTORG 0x1c94
64#define MGAREG_YTOP 0x1c98
65#define MGAREG_YBOT 0x1c9c
66
67#define MGAREG_CXLEFT 0x1ca0
68#define MGAREG_CXRIGHT 0x1ca4
69#define MGAREG_FXLEFT 0x1ca8
70#define MGAREG_FXRIGHT 0x1cac
71
72#define MGAREG_XDST 0x1cb0
73
74#define MGAREG_DR0 0x1cc0
75#define MGAREG_DR1 0x1cc4
76#define MGAREG_DR2 0x1cc8
77#define MGAREG_DR3 0x1ccc
78
79#define MGAREG_DR4 0x1cd0
80#define MGAREG_DR5 0x1cd4
81#define MGAREG_DR6 0x1cd8
82#define MGAREG_DR7 0x1cdc
83
84#define MGAREG_DR8 0x1ce0
85#define MGAREG_DR9 0x1ce4
86#define MGAREG_DR10 0x1ce8
87#define MGAREG_DR11 0x1cec
88
89#define MGAREG_DR12 0x1cf0
90#define MGAREG_DR13 0x1cf4
91#define MGAREG_DR14 0x1cf8
92#define MGAREG_DR15 0x1cfc
93
94#define MGAREG_SRCORG 0x2cb4
95#define MGAREG_DSTORG 0x2cb8
96
97/* add or OR this to one of the previous "power registers" to start
98 the drawing engine */
99
100#define MGAREG_EXEC 0x0100
101
102#define MGAREG_FIFOSTATUS 0x1e10
103#define MGAREG_Status 0x1e14
104#define MGAREG_CACHEFLUSH 0x1fff
105#define MGAREG_ICLEAR 0x1e18
106#define MGAREG_IEN 0x1e1c
107
108#define MGAREG_VCOUNT 0x1e20
109
110#define MGAREG_Reset 0x1e40
111
112#define MGAREG_OPMODE 0x1e54
113
114/* Warp Registers */
115#define MGAREG_WIADDR 0x1dc0
116#define MGAREG_WIADDR2 0x1dd8
117#define MGAREG_WGETMSB 0x1dc8
118#define MGAREG_WVRTXSZ 0x1dcc
119#define MGAREG_WACCEPTSEQ 0x1dd4
120#define MGAREG_WMISC 0x1e70
121
122#define MGAREG_MEMCTL 0x2e08
123
124/* OPMODE register additives */
125
126#define MGAOPM_DMA_GENERAL (0x00 << 2)
127#define MGAOPM_DMA_BLIT (0x01 << 2)
128#define MGAOPM_DMA_VECTOR (0x10 << 2)
129
130/* MACCESS register additives */
131#define MGAMAC_PW8 0x00
132#define MGAMAC_PW16 0x01
133#define MGAMAC_PW24 0x03 /* not a typo */
134#define MGAMAC_PW32 0x02 /* not a typo */
135#define MGAMAC_BYPASS332 0x10000000
136#define MGAMAC_NODITHER 0x40000000
137#define MGAMAC_DIT555 0x80000000
138
139/* DWGCTL register additives */
140
141/* Lines */
142
143#define MGADWG_LINE_OPEN 0x00
144#define MGADWG_AUTOLINE_OPEN 0x01
145#define MGADWG_LINE_CLOSE 0x02
146#define MGADWG_AUTOLINE_CLOSE 0x03
147
148/* Trapezoids */
149#define MGADWG_TRAP 0x04
150#define MGADWG_TEXTURE_TRAP 0x06
151
152/* BitBlts */
153
154#define MGADWG_BITBLT 0x08
155#define MGADWG_FBITBLT 0x0c
156#define MGADWG_ILOAD 0x09
157#define MGADWG_ILOAD_SCALE 0x0d
158#define MGADWG_ILOAD_FILTER 0x0f
159#define MGADWG_ILOAD_HIQH 0x07
160#define MGADWG_ILOAD_HIQHV 0x0e
161#define MGADWG_IDUMP 0x0a
162
163/* atype access to WRAM */
164
165#define MGADWG_RPL ( 0x00 << 4 )
166#define MGADWG_RSTR ( 0x01 << 4 )
167#define MGADWG_ZI ( 0x03 << 4 )
168#define MGADWG_BLK ( 0x04 << 4 )
169#define MGADWG_I ( 0x07 << 4 )
170
171/* specifies whether bit blits are linear or xy */
172#define MGADWG_LINEAR ( 0x01 << 7 )
173
174/* z drawing mode. use MGADWG_NOZCMP for always */
175
176#define MGADWG_NOZCMP ( 0x00 << 8 )
177#define MGADWG_ZE ( 0x02 << 8 )
178#define MGADWG_ZNE ( 0x03 << 8 )
179#define MGADWG_ZLT ( 0x04 << 8 )
180#define MGADWG_ZLTE ( 0x05 << 8 )
181#define MGADWG_GT ( 0x06 << 8 )
182#define MGADWG_GTE ( 0x07 << 8 )
183
184/* use this to force colour expansion circuitry to do its stuff */
185
186#define MGADWG_SOLID ( 0x01 << 11 )
187
188/* ar register at zero */
189
190#define MGADWG_ARZERO ( 0x01 << 12 )
191
192#define MGADWG_SGNZERO ( 0x01 << 13 )
193
194#define MGADWG_SHIFTZERO ( 0x01 << 14 )
195
196/* See table on 4-43 for bop ALU operations */
197
198/* See table on 4-44 for translucidity masks */
199
200#define MGADWG_BMONOLEF ( 0x00 << 25 )
201#define MGADWG_BMONOWF ( 0x04 << 25 )
202#define MGADWG_BPLAN ( 0x01 << 25 )
203
204/* note that if bfcol is specified and you're doing a bitblt, it causes
205 an fbitblt to be performed, so check that you obey the fbitblt rules */
206
207#define MGADWG_BFCOL ( 0x02 << 25 )
208#define MGADWG_BUYUV ( 0x0e << 25 )
209#define MGADWG_BU32BGR ( 0x03 << 25 )
210#define MGADWG_BU32RGB ( 0x07 << 25 )
211#define MGADWG_BU24BGR ( 0x0b << 25 )
212#define MGADWG_BU24RGB ( 0x0f << 25 )
213
214#define MGADWG_PATTERN ( 0x01 << 29 )
215#define MGADWG_TRANSC ( 0x01 << 30 )
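
/*
 * Editor's sketch (assumption, not from the original header): a solid
 * rectangle fill typically ORs together an opcode, an atype and the
 * shortcut bits, with the ALU bop field (see the tables referenced
 * above) filled in separately:
 *
 *	u32 dwgctl = MGADWG_TRAP | MGADWG_RPL | MGADWG_SOLID |
 *		     MGADWG_ARZERO | MGADWG_SGNZERO | MGADWG_SHIFTZERO;
 */

/* VGA miscellaneous output register (I/O ports and MMIO aliases) */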
216#define MGAREG_MISC_WRITE 0x3c2
217#define MGAREG_MISC_READ 0x3cc
218#define MGAREG_MEM_MISC_WRITE 0x1fc2
219#define MGAREG_MEM_MISC_READ 0x1fcc
220
221#define MGAREG_MISC_IOADSEL (0x1 << 0)
222#define MGAREG_MISC_RAMMAPEN (0x1 << 1)
223#define MGAREG_MISC_CLK_SEL_VGA25 (0x0 << 2)
224#define MGAREG_MISC_CLK_SEL_VGA28 (0x1 << 2)
225#define MGAREG_MISC_CLK_SEL_MGA_PIX (0x2 << 2)
226#define MGAREG_MISC_CLK_SEL_MGA_MSK (0x3 << 2)
227#define MGAREG_MISC_VIDEO_DIS (0x1 << 4)
228#define MGAREG_MISC_HIGH_PG_SEL (0x1 << 5)
229
230/* MMIO VGA registers */
231#define MGAREG_SEQ_INDEX 0x1fc4
232#define MGAREG_SEQ_DATA 0x1fc5
233#define MGAREG_CRTC_INDEX 0x1fd4
234#define MGAREG_CRTC_DATA 0x1fd5
235#define MGAREG_CRTCEXT_INDEX 0x1fde
236#define MGAREG_CRTCEXT_DATA 0x1fdf
237
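/*
 * Editor's illustration (hypothetical helpers): the SEQ/CRTC/CRTCEXT
 * pairs above are VGA-style index/data registers, so a read-modify-write
 * of an indexed register looks like this, with WREG8()/RREG8() standing
 * in for byte-wide MMIO accessors:
 *
 *	WREG8(MGAREG_CRTCEXT_INDEX, index);
 *	tmp = RREG8(MGAREG_CRTCEXT_DATA);
 *	WREG8(MGAREG_CRTCEXT_DATA, tmp | bits);
 */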
238
239
240/* MGA bits for registers PCI_OPTION_REG */
241#define MGA1064_OPT_SYS_CLK_PCI ( 0x00 << 0 )
242#define MGA1064_OPT_SYS_CLK_PLL ( 0x01 << 0 )
243#define MGA1064_OPT_SYS_CLK_EXT ( 0x02 << 0 )
244#define MGA1064_OPT_SYS_CLK_MSK ( 0x03 << 0 )
245
246#define MGA1064_OPT_SYS_CLK_DIS ( 0x01 << 2 )
247#define MGA1064_OPT_G_CLK_DIV_1 ( 0x01 << 3 )
248#define MGA1064_OPT_M_CLK_DIV_1 ( 0x01 << 4 )
249
250#define MGA1064_OPT_SYS_PLL_PDN ( 0x01 << 5 )
251#define MGA1064_OPT_VGA_ION ( 0x01 << 8 )
252
253/* MGA registers in PCI config space */
254#define PCI_MGA_INDEX 0x44
255#define PCI_MGA_DATA 0x48
256#define PCI_MGA_OPTION 0x40
257#define PCI_MGA_OPTION2 0x50
258#define PCI_MGA_OPTION3 0x54
259
260#define RAMDAC_OFFSET 0x3c00
261
262/* TVP3026 direct registers */
263
264#define TVP3026_INDEX 0x00
265#define TVP3026_WADR_PAL 0x00
266#define TVP3026_COL_PAL 0x01
267#define TVP3026_PIX_RD_MSK 0x02
268#define TVP3026_RADR_PAL 0x03
269#define TVP3026_CUR_COL_ADDR 0x04
270#define TVP3026_CUR_COL_DATA 0x05
271#define TVP3026_DATA 0x0a
272#define TVP3026_CUR_RAM 0x0b
273#define TVP3026_CUR_XLOW 0x0c
274#define TVP3026_CUR_XHI 0x0d
275#define TVP3026_CUR_YLOW 0x0e
276#define TVP3026_CUR_YHI 0x0f
277
278/* TVP3026 indirect registers */
279
280#define TVP3026_SILICON_REV 0x01
281#define TVP3026_CURSOR_CTL 0x06
282#define TVP3026_LATCH_CTL 0x0f
283#define TVP3026_TRUE_COLOR_CTL 0x18
284#define TVP3026_MUX_CTL 0x19
285#define TVP3026_CLK_SEL 0x1a
286#define TVP3026_PAL_PAGE 0x1c
287#define TVP3026_GEN_CTL 0x1d
288#define TVP3026_MISC_CTL 0x1e
289#define TVP3026_GEN_IO_CTL 0x2a
290#define TVP3026_GEN_IO_DATA 0x2b
291#define TVP3026_PLL_ADDR 0x2c
292#define TVP3026_PIX_CLK_DATA 0x2d
293#define TVP3026_MEM_CLK_DATA 0x2e
294#define TVP3026_LOAD_CLK_DATA 0x2f
295#define TVP3026_KEY_RED_LOW 0x32
296#define TVP3026_KEY_RED_HI 0x33
297#define TVP3026_KEY_GREEN_LOW 0x34
298#define TVP3026_KEY_GREEN_HI 0x35
299#define TVP3026_KEY_BLUE_LOW 0x36
300#define TVP3026_KEY_BLUE_HI 0x37
301#define TVP3026_KEY_CTL 0x38
302#define TVP3026_MCLK_CTL 0x39
303#define TVP3026_SENSE_TEST 0x3a
304#define TVP3026_TEST_DATA 0x3b
305#define TVP3026_CRC_LSB 0x3c
306#define TVP3026_CRC_MSB 0x3d
307#define TVP3026_CRC_CTL 0x3e
308#define TVP3026_ID 0x3f
309#define TVP3026_RESET 0xff
310
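/*
 * Editor's sketch: the indirect registers above are reached through the
 * index/data pair of the direct register file, offset from RAMDAC_OFFSET
 * (WREG8() is a hypothetical byte-wide MMIO write helper):
 *
 *	WREG8(RAMDAC_OFFSET + TVP3026_INDEX, TVP3026_GEN_CTL);
 *	WREG8(RAMDAC_OFFSET + TVP3026_DATA, val);
 */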
311
312/* MGA1064 DAC Register file */
313/* MGA1064 direct registers */
314
315#define MGA1064_INDEX 0x00
316#define MGA1064_WADR_PAL 0x00
317#define MGA1064_SPAREREG 0x00
318#define MGA1064_COL_PAL 0x01
319#define MGA1064_PIX_RD_MSK 0x02
320#define MGA1064_RADR_PAL 0x03
321#define MGA1064_DATA 0x0a
322
323#define MGA1064_CUR_XLOW 0x0c
324#define MGA1064_CUR_XHI 0x0d
325#define MGA1064_CUR_YLOW 0x0e
326#define MGA1064_CUR_YHI 0x0f
327
328/* MGA1064 indirect registers */
329#define MGA1064_DVI_PIPE_CTL 0x03
330#define MGA1064_CURSOR_BASE_ADR_LOW 0x04
331#define MGA1064_CURSOR_BASE_ADR_HI 0x05
332#define MGA1064_CURSOR_CTL 0x06
333#define MGA1064_CURSOR_COL0_RED 0x08
334#define MGA1064_CURSOR_COL0_GREEN 0x09
335#define MGA1064_CURSOR_COL0_BLUE 0x0a
336
337#define MGA1064_CURSOR_COL1_RED 0x0c
338#define MGA1064_CURSOR_COL1_GREEN 0x0d
339#define MGA1064_CURSOR_COL1_BLUE 0x0e
340
341#define MGA1064_CURSOR_COL2_RED 0x010
342#define MGA1064_CURSOR_COL2_GREEN 0x011
343#define MGA1064_CURSOR_COL2_BLUE 0x012
344
345#define MGA1064_VREF_CTL 0x018
346
347#define MGA1064_MUL_CTL 0x19
348#define MGA1064_MUL_CTL_8bits 0x0
349#define MGA1064_MUL_CTL_15bits 0x01
350#define MGA1064_MUL_CTL_16bits 0x02
351#define MGA1064_MUL_CTL_24bits 0x03
352#define MGA1064_MUL_CTL_32bits 0x04
353#define MGA1064_MUL_CTL_2G8V16bits 0x05
354#define MGA1064_MUL_CTL_G16V16bits 0x06
355#define MGA1064_MUL_CTL_32_24bits 0x07
356
357#define MGA1064_PIX_CLK_CTL 0x1a
358#define MGA1064_PIX_CLK_CTL_CLK_DIS ( 0x01 << 2 )
359#define MGA1064_PIX_CLK_CTL_CLK_POW_DOWN ( 0x01 << 3 )
360#define MGA1064_PIX_CLK_CTL_SEL_PCI ( 0x00 << 0 )
361#define MGA1064_PIX_CLK_CTL_SEL_PLL ( 0x01 << 0 )
362#define MGA1064_PIX_CLK_CTL_SEL_EXT ( 0x02 << 0 )
363#define MGA1064_PIX_CLK_CTL_SEL_MSK ( 0x03 << 0 )
364
365#define MGA1064_GEN_CTL 0x1d
366#define MGA1064_GEN_CTL_SYNC_ON_GREEN_DIS (0x01 << 5)
367#define MGA1064_MISC_CTL 0x1e
368#define MGA1064_MISC_CTL_DAC_EN ( 0x01 << 0 )
369#define MGA1064_MISC_CTL_VGA ( 0x01 << 1 )
370#define MGA1064_MISC_CTL_DIS_CON ( 0x03 << 1 )
371#define MGA1064_MISC_CTL_MAFC ( 0x02 << 1 )
372#define MGA1064_MISC_CTL_VGA8 ( 0x01 << 3 )
373#define MGA1064_MISC_CTL_DAC_RAM_CS ( 0x01 << 4 )
374
375#define MGA1064_GEN_IO_CTL2 0x29
376#define MGA1064_GEN_IO_CTL 0x2a
377#define MGA1064_GEN_IO_DATA 0x2b
378#define MGA1064_SYS_PLL_M 0x2c
379#define MGA1064_SYS_PLL_N 0x2d
380#define MGA1064_SYS_PLL_P 0x2e
381#define MGA1064_SYS_PLL_STAT 0x2f
382
383#define MGA1064_REMHEADCTL 0x30
384#define MGA1064_REMHEADCTL_CLKDIS ( 0x01 << 0 )
385#define MGA1064_REMHEADCTL_CLKSL_OFF ( 0x00 << 1 )
386#define MGA1064_REMHEADCTL_CLKSL_PLL ( 0x01 << 1 )
387#define MGA1064_REMHEADCTL_CLKSL_PCI ( 0x02 << 1 )
388#define MGA1064_REMHEADCTL_CLKSL_MSK ( 0x03 << 1 )
389
390#define MGA1064_REMHEADCTL2 0x31
391
392#define MGA1064_ZOOM_CTL 0x38
393#define MGA1064_SENSE_TST 0x3a
394
395#define MGA1064_CRC_LSB 0x3c
396#define MGA1064_CRC_MSB 0x3d
397#define MGA1064_CRC_CTL 0x3e
398#define MGA1064_COL_KEY_MSK_LSB 0x40
399#define MGA1064_COL_KEY_MSK_MSB 0x41
400#define MGA1064_COL_KEY_LSB 0x42
401#define MGA1064_COL_KEY_MSB 0x43
402#define MGA1064_PIX_PLLA_M 0x44
403#define MGA1064_PIX_PLLA_N 0x45
404#define MGA1064_PIX_PLLA_P 0x46
405#define MGA1064_PIX_PLLB_M 0x48
406#define MGA1064_PIX_PLLB_N 0x49
407#define MGA1064_PIX_PLLB_P 0x4a
408#define MGA1064_PIX_PLLC_M 0x4c
409#define MGA1064_PIX_PLLC_N 0x4d
410#define MGA1064_PIX_PLLC_P 0x4e
411
412#define MGA1064_PIX_PLL_STAT 0x4f
413
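/*
 * Editor's sketch (assumption about the programming sequence): pixel
 * PLL C is set with three indexed DAC writes, after which the lock bit
 * can be polled through MGA1064_PIX_PLL_STAT:
 *
 *	WREG8(RAMDAC_OFFSET + MGA1064_INDEX, MGA1064_PIX_PLLC_M);
 *	WREG8(RAMDAC_OFFSET + MGA1064_DATA, m);
 *	... repeat for MGA1064_PIX_PLLC_N and MGA1064_PIX_PLLC_P ...
 */
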
414/* Added for G450 dual head */
415
416#define MGA1064_VID_PLL_STAT 0x8c
417#define MGA1064_VID_PLL_P 0x8D
418#define MGA1064_VID_PLL_M 0x8E
419#define MGA1064_VID_PLL_N 0x8F
420
421/* Modified PLL for G200 Winbond (G200WB) */
422#define MGA1064_WB_PIX_PLLC_M 0xb7
423#define MGA1064_WB_PIX_PLLC_N 0xb6
424#define MGA1064_WB_PIX_PLLC_P 0xb8
425
426/* Modified PLL for G200 Maxim (G200EV) */
427#define MGA1064_EV_PIX_PLLC_M 0xb6
428#define MGA1064_EV_PIX_PLLC_N 0xb7
429#define MGA1064_EV_PIX_PLLC_P 0xb8
430
431/* Modified PLL for G200 EH */
432#define MGA1064_EH_PIX_PLLC_M 0xb6
433#define MGA1064_EH_PIX_PLLC_N 0xb7
434#define MGA1064_EH_PIX_PLLC_P 0xb8
435
436/* Modified PLL for G200 Maxim (G200ER) */
437#define MGA1064_ER_PIX_PLLC_M 0xb7
438#define MGA1064_ER_PIX_PLLC_N 0xb6
439#define MGA1064_ER_PIX_PLLC_P 0xb8
440
441#define MGA1064_DISP_CTL 0x8a
442#define MGA1064_DISP_CTL_DAC1OUTSEL_MASK 0x01
443#define MGA1064_DISP_CTL_DAC1OUTSEL_DIS 0x00
444#define MGA1064_DISP_CTL_DAC1OUTSEL_EN 0x01
445#define MGA1064_DISP_CTL_DAC2OUTSEL_MASK (0x03 << 2)
446#define MGA1064_DISP_CTL_DAC2OUTSEL_DIS 0x00
447#define MGA1064_DISP_CTL_DAC2OUTSEL_CRTC1 (0x01 << 2)
448#define MGA1064_DISP_CTL_DAC2OUTSEL_CRTC2 (0x02 << 2)
449#define MGA1064_DISP_CTL_DAC2OUTSEL_TVE (0x03 << 2)
450#define MGA1064_DISP_CTL_PANOUTSEL_MASK (0x03 << 5)
451#define MGA1064_DISP_CTL_PANOUTSEL_DIS 0x00
452#define MGA1064_DISP_CTL_PANOUTSEL_CRTC1 (0x01 << 5)
453#define MGA1064_DISP_CTL_PANOUTSEL_CRTC2RGB (0x02 << 5)
454#define MGA1064_DISP_CTL_PANOUTSEL_CRTC2656 (0x03 << 5)
455
456#define MGA1064_SYNC_CTL 0x8b
457
458#define MGA1064_PWR_CTL 0xa0
459#define MGA1064_PWR_CTL_DAC2_EN (0x01 << 0)
460#define MGA1064_PWR_CTL_VID_PLL_EN (0x01 << 1)
461#define MGA1064_PWR_CTL_PANEL_EN (0x01 << 2)
462#define MGA1064_PWR_CTL_RFIFO_EN (0x01 << 3)
463#define MGA1064_PWR_CTL_CFIFO_EN (0x01 << 4)
464
465#define MGA1064_PAN_CTL 0xa2
466
467/* CRTC2 (second head) registers; MGAREG2_* are the same registers expressed as offsets from the 0x3c00 block */
468#define MGAREG2_C2CTL 0x10
469#define MGAREG2_C2HPARAM 0x14
470#define MGAREG2_C2HSYNC 0x18
471#define MGAREG2_C2VPARAM 0x1c
472#define MGAREG2_C2VSYNC 0x20
473#define MGAREG2_C2STARTADD0 0x28
474
475#define MGAREG2_C2OFFSET 0x40
476#define MGAREG2_C2DATACTL 0x4c
477
478#define MGAREG_C2CTL 0x3c10
479#define MGAREG_C2CTL_C2_EN 0x01
480
481#define MGAREG_C2_HIPRILVL_M (0x07 << 4)
482#define MGAREG_C2_MAXHIPRI_M (0x07 << 8)
483
484#define MGAREG_C2CTL_PIXCLKSEL_MASK (0x03 << 1)
485#define MGAREG_C2CTL_PIXCLKSELH_MASK (0x01 << 14)
486#define MGAREG_C2CTL_PIXCLKSEL_PCICLK 0x00
487#define MGAREG_C2CTL_PIXCLKSEL_VDOCLK (0x01 << 1)
488#define MGAREG_C2CTL_PIXCLKSEL_PIXELPLL (0x02 << 1)
489#define MGAREG_C2CTL_PIXCLKSEL_VIDEOPLL (0x03 << 1)
490#define MGAREG_C2CTL_PIXCLKSEL_VDCLK (0x01 << 14)
491
492#define MGAREG_C2CTL_PIXCLKSEL_CRISTAL ((0x01 << 1) | (0x01 << 14))
493#define MGAREG_C2CTL_PIXCLKSEL_SYSTEMPLL ((0x02 << 1) | (0x01 << 14))
494
495#define MGAREG_C2CTL_PIXCLKDIS_MASK (0x01 << 3)
496#define MGAREG_C2CTL_PIXCLKDIS_DISABLE (0x01 << 3)
497
498#define MGAREG_C2CTL_CRTCDACSEL_MASK (0x01 << 20)
499#define MGAREG_C2CTL_CRTCDACSEL_CRTC1 0x00
500#define MGAREG_C2CTL_CRTCDACSEL_CRTC2 (0x01 << 20)
501
502#define MGAREG_C2HPARAM 0x3c14
503#define MGAREG_C2HSYNC 0x3c18
504#define MGAREG_C2VPARAM 0x3c1c
505#define MGAREG_C2VSYNC 0x3c20
506#define MGAREG_C2STARTADD0 0x3c28
507
508#define MGAREG_C2OFFSET 0x3c40
509#define MGAREG_C2DATACTL 0x3c4c
510
511/* video (BES, backend scaler) registers */
512
513#define MGAREG_BESA1C3ORG 0x3d60
514#define MGAREG_BESA1CORG 0x3d10
515#define MGAREG_BESA1ORG 0x3d00
516#define MGAREG_BESCTL 0x3d20
517#define MGAREG_BESGLOBCTL 0x3dc0
518#define MGAREG_BESHCOORD 0x3d28
519#define MGAREG_BESHISCAL 0x3d30
520#define MGAREG_BESHSRCEND 0x3d3c
521#define MGAREG_BESHSRCLST 0x3d50
522#define MGAREG_BESHSRCST 0x3d38
523#define MGAREG_BESLUMACTL 0x3d40
524#define MGAREG_BESPITCH 0x3d24
525#define MGAREG_BESV1SRCLST 0x3d54
526#define MGAREG_BESV1WGHT 0x3d48
527#define MGAREG_BESVCOORD 0x3d2c
528#define MGAREG_BESVISCAL 0x3d34
529
530/* texture engine registers */
531
532#define MGAREG_TMR0 0x2c00
533#define MGAREG_TMR1 0x2c04
534#define MGAREG_TMR2 0x2c08
535#define MGAREG_TMR3 0x2c0c
536#define MGAREG_TMR4 0x2c10
537#define MGAREG_TMR5 0x2c14
538#define MGAREG_TMR6 0x2c18
539#define MGAREG_TMR7 0x2c1c
540#define MGAREG_TMR8 0x2c20
541#define MGAREG_TEXORG 0x2c24
542#define MGAREG_TEXWIDTH 0x2c28
543#define MGAREG_TEXHEIGHT 0x2c2c
544#define MGAREG_TEXCTL 0x2c30
545# define MGA_TW4 (0x00000000)
546# define MGA_TW8 (0x00000001)
547# define MGA_TW15 (0x00000002)
548# define MGA_TW16 (0x00000003)
549# define MGA_TW12 (0x00000004)
550# define MGA_TW32 (0x00000006)
551# define MGA_TW8A (0x00000007)
552# define MGA_TW8AL (0x00000008)
553# define MGA_TW422 (0x0000000A)
554# define MGA_TW422UYVY (0x0000000B)
555# define MGA_PITCHLIN (0x00000100)
556# define MGA_NOPERSPECTIVE (0x00200000)
557# define MGA_TAKEY (0x02000000)
558# define MGA_TAMASK (0x04000000)
559# define MGA_CLAMPUV (0x18000000)
560# define MGA_TEXMODULATE (0x20000000)
561#define MGAREG_TEXCTL2 0x2c3c
562# define MGA_G400_TC2_MAGIC (0x00008000)
563# define MGA_TC2_DECALBLEND (0x00000001)
564# define MGA_TC2_IDECAL (0x00000002)
565# define MGA_TC2_DECALDIS (0x00000004)
566# define MGA_TC2_CKSTRANSDIS (0x00000010)
567# define MGA_TC2_BORDEREN (0x00000020)
568# define MGA_TC2_SPECEN (0x00000040)
569# define MGA_TC2_DUALTEX (0x00000080)
570# define MGA_TC2_TABLEFOG (0x00000100)
571# define MGA_TC2_BUMPMAP (0x00000200)
572# define MGA_TC2_SELECT_TMU1 (0x80000000)
573#define MGAREG_TEXTRANS 0x2c34
574#define MGAREG_TEXTRANSHIGH 0x2c38
575#define MGAREG_TEXFILTER 0x2c58
576# define MGA_MIN_NRST (0x00000000)
577# define MGA_MIN_BILIN (0x00000002)
578# define MGA_MIN_ANISO (0x0000000D)
579# define MGA_MAG_NRST (0x00000000)
580# define MGA_MAG_BILIN (0x00000020)
581# define MGA_FILTERALPHA (0x00100000)
582#define MGAREG_ALPHASTART 0x2c70
583#define MGAREG_ALPHAXINC 0x2c74
584#define MGAREG_ALPHAYINC 0x2c78
585#define MGAREG_ALPHACTRL 0x2c7c
586# define MGA_SRC_ZERO (0x00000000)
587# define MGA_SRC_ONE (0x00000001)
588# define MGA_SRC_DST_COLOR (0x00000002)
589# define MGA_SRC_ONE_MINUS_DST_COLOR (0x00000003)
590# define MGA_SRC_ALPHA (0x00000004)
591# define MGA_SRC_ONE_MINUS_SRC_ALPHA (0x00000005)
592# define MGA_SRC_DST_ALPHA (0x00000006)
593# define MGA_SRC_ONE_MINUS_DST_ALPHA (0x00000007)
594# define MGA_SRC_SRC_ALPHA_SATURATE (0x00000008)
595# define MGA_SRC_BLEND_MASK (0x0000000f)
596# define MGA_DST_ZERO (0x00000000)
597# define MGA_DST_ONE (0x00000010)
598# define MGA_DST_SRC_COLOR (0x00000020)
599# define MGA_DST_ONE_MINUS_SRC_COLOR (0x00000030)
600# define MGA_DST_SRC_ALPHA (0x00000040)
601# define MGA_DST_ONE_MINUS_SRC_ALPHA (0x00000050)
602# define MGA_DST_DST_ALPHA (0x00000060)
603# define MGA_DST_ONE_MINUS_DST_ALPHA (0x00000070)
604# define MGA_DST_BLEND_MASK (0x00000070)
605# define MGA_ALPHACHANNEL (0x00000100)
606# define MGA_VIDEOALPHA (0x00000200)
607# define MGA_DIFFUSEDALPHA (0x01000000)
608# define MGA_MODULATEDALPHA (0x02000000)
609#define MGAREG_TDUALSTAGE0 (0x2CF8)
610#define MGAREG_TDUALSTAGE1 (0x2CFC)
611# define MGA_TDS_COLOR_ARG2_DIFFUSE (0x00000000)
612# define MGA_TDS_COLOR_ARG2_SPECULAR (0x00000001)
613# define MGA_TDS_COLOR_ARG2_FCOL (0x00000002)
614# define MGA_TDS_COLOR_ARG2_PREVSTAGE (0x00000003)
615# define MGA_TDS_COLOR_ALPHA_DIFFUSE (0x00000000)
616# define MGA_TDS_COLOR_ALPHA_FCOL (0x00000004)
617# define MGA_TDS_COLOR_ALPHA_CURRTEX (0x00000008)
618# define MGA_TDS_COLOR_ALPHA_PREVTEX (0x0000000c)
619# define MGA_TDS_COLOR_ALPHA_PREVSTAGE (0x00000010)
620# define MGA_TDS_COLOR_ARG1_REPLICATEALPHA (0x00000020)
621# define MGA_TDS_COLOR_ARG1_INV (0x00000040)
622# define MGA_TDS_COLOR_ARG2_REPLICATEALPHA (0x00000080)
623# define MGA_TDS_COLOR_ARG2_INV (0x00000100)
624# define MGA_TDS_COLOR_ALPHA1INV (0x00000200)
625# define MGA_TDS_COLOR_ALPHA2INV (0x00000400)
626# define MGA_TDS_COLOR_ARG1MUL_ALPHA1 (0x00000800)
627# define MGA_TDS_COLOR_ARG2MUL_ALPHA2 (0x00001000)
628# define MGA_TDS_COLOR_ARG1ADD_MULOUT (0x00002000)
629# define MGA_TDS_COLOR_ARG2ADD_MULOUT (0x00004000)
630# define MGA_TDS_COLOR_MODBRIGHT_2X (0x00008000)
631# define MGA_TDS_COLOR_MODBRIGHT_4X (0x00010000)
632# define MGA_TDS_COLOR_ADD_SUB (0x00000000)
633# define MGA_TDS_COLOR_ADD_ADD (0x00020000)
634# define MGA_TDS_COLOR_ADD2X (0x00040000)
635# define MGA_TDS_COLOR_ADDBIAS (0x00080000)
636# define MGA_TDS_COLOR_BLEND (0x00100000)
637# define MGA_TDS_COLOR_SEL_ARG1 (0x00000000)
638# define MGA_TDS_COLOR_SEL_ARG2 (0x00200000)
639# define MGA_TDS_COLOR_SEL_ADD (0x00400000)
640# define MGA_TDS_COLOR_SEL_MUL (0x00600000)
641# define MGA_TDS_ALPHA_ARG1_INV (0x00800000)
642# define MGA_TDS_ALPHA_ARG2_DIFFUSE (0x00000000)
643# define MGA_TDS_ALPHA_ARG2_FCOL (0x01000000)
644# define MGA_TDS_ALPHA_ARG2_PREVTEX (0x02000000)
645# define MGA_TDS_ALPHA_ARG2_PREVSTAGE (0x03000000)
646# define MGA_TDS_ALPHA_ARG2_INV (0x04000000)
647# define MGA_TDS_ALPHA_ADD (0x08000000)
648# define MGA_TDS_ALPHA_ADDBIAS (0x10000000)
649# define MGA_TDS_ALPHA_ADD2X (0x20000000)
650# define MGA_TDS_ALPHA_SEL_ARG1 (0x00000000)
651# define MGA_TDS_ALPHA_SEL_ARG2 (0x40000000)
652# define MGA_TDS_ALPHA_SEL_ADD (0x80000000)
653# define MGA_TDS_ALPHA_SEL_MUL (0xc0000000)
654
655#define MGAREG_DWGSYNC 0x2c4c
656
657#define MGAREG_AGP_PLL 0x1e4c
658#define MGA_AGP2XPLL_ENABLE 0x1
659#define MGA_AGP2XPLL_DISABLE 0x0
660
661#endif
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
new file mode 100644
index 000000000000..b223dcb7a710
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -0,0 +1,452 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the
6 * "Software"), to deal in the Software without restriction, including
7 * without limitation the rights to use, copy, modify, merge, publish,
8 * distribute, sub license, and/or sell copies of the Software, and to
9 * permit persons to whom the Software is furnished to do so, subject to
10 * the following conditions:
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
13 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
15 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
16 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
17 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
18 * USE OR OTHER DEALINGS IN THE SOFTWARE.
19 *
20 * The above copyright notice and this permission notice (including the
21 * next paragraph) shall be included in all copies or substantial portions
22 * of the Software.
23 *
24 */
25/*
26 * Authors: Dave Airlie <airlied@redhat.com>
27 */
28#include "drmP.h"
29#include "mgag200_drv.h"
30#include <ttm/ttm_page_alloc.h>
31
32static inline struct mga_device *
33mgag200_bdev(struct ttm_bo_device *bd)
34{
35 return container_of(bd, struct mga_device, ttm.bdev);
36}
37
38static int
39mgag200_ttm_mem_global_init(struct drm_global_reference *ref)
40{
41 return ttm_mem_global_init(ref->object);
42}
43
44static void
45mgag200_ttm_mem_global_release(struct drm_global_reference *ref)
46{
47 ttm_mem_global_release(ref->object);
48}
49
50static int mgag200_ttm_global_init(struct mga_device *mdev)
51{
52 struct drm_global_reference *global_ref;
53 int r;
54
55 global_ref = &mdev->ttm.mem_global_ref;
56 global_ref->global_type = DRM_GLOBAL_TTM_MEM;
57 global_ref->size = sizeof(struct ttm_mem_global);
58 global_ref->init = &mgag200_ttm_mem_global_init;
59 global_ref->release = &mgag200_ttm_mem_global_release;
60 r = drm_global_item_ref(global_ref);
61 if (r != 0) {
62 DRM_ERROR("Failed setting up TTM memory accounting "
63 "subsystem.\n");
64 return r;
65 }
66
67 mdev->ttm.bo_global_ref.mem_glob =
68 mdev->ttm.mem_global_ref.object;
69 global_ref = &mdev->ttm.bo_global_ref.ref;
70 global_ref->global_type = DRM_GLOBAL_TTM_BO;
71 global_ref->size = sizeof(struct ttm_bo_global);
72 global_ref->init = &ttm_bo_global_init;
73 global_ref->release = &ttm_bo_global_release;
74 r = drm_global_item_ref(global_ref);
75 if (r != 0) {
76 DRM_ERROR("Failed setting up TTM BO subsystem.\n");
77 drm_global_item_unref(&mdev->ttm.mem_global_ref);
78 return r;
79 }
80 return 0;
81}
82
83void
84mgag200_ttm_global_release(struct mga_device *mdev)
85{
86 if (mdev->ttm.mem_global_ref.release == NULL)
87 return;
88
89 drm_global_item_unref(&mdev->ttm.bo_global_ref.ref);
90 drm_global_item_unref(&mdev->ttm.mem_global_ref);
91 mdev->ttm.mem_global_ref.release = NULL;
92}
93
94
95static void mgag200_bo_ttm_destroy(struct ttm_buffer_object *tbo)
96{
97 struct mgag200_bo *bo;
98
99 bo = container_of(tbo, struct mgag200_bo, bo);
100
101 drm_gem_object_release(&bo->gem);
102 kfree(bo);
103}
104
105bool mgag200_ttm_bo_is_mgag200_bo(struct ttm_buffer_object *bo)
106{
107 return bo->destroy == &mgag200_bo_ttm_destroy;
110}
111
112static int
113mgag200_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
114 struct ttm_mem_type_manager *man)
115{
116 switch (type) {
117 case TTM_PL_SYSTEM:
118 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
119 man->available_caching = TTM_PL_MASK_CACHING;
120 man->default_caching = TTM_PL_FLAG_CACHED;
121 break;
122 case TTM_PL_VRAM:
123 man->func = &ttm_bo_manager_func;
124 man->flags = TTM_MEMTYPE_FLAG_FIXED |
125 TTM_MEMTYPE_FLAG_MAPPABLE;
126 man->available_caching = TTM_PL_FLAG_UNCACHED |
127 TTM_PL_FLAG_WC;
128 man->default_caching = TTM_PL_FLAG_WC;
129 break;
130 default:
131 DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
132 return -EINVAL;
133 }
134 return 0;
135}
136
137static void
138mgag200_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
139{
140 struct mgag200_bo *mgabo = mgag200_bo(bo);
141
142 if (!mgag200_ttm_bo_is_mgag200_bo(bo))
143 return;
144
145 mgag200_ttm_placement(mgabo, TTM_PL_FLAG_SYSTEM);
146 *pl = mgabo->placement;
147}
148
149static int mgag200_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
150{
151 return 0;
152}
153
154static int mgag200_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
155 struct ttm_mem_reg *mem)
156{
157 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
158 struct mga_device *mdev = mgag200_bdev(bdev);
159
160 mem->bus.addr = NULL;
161 mem->bus.offset = 0;
162 mem->bus.size = mem->num_pages << PAGE_SHIFT;
163 mem->bus.base = 0;
164 mem->bus.is_iomem = false;
165 if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
166 return -EINVAL;
167 switch (mem->mem_type) {
168 case TTM_PL_SYSTEM:
169 /* system memory */
170 return 0;
171 case TTM_PL_VRAM:
172 mem->bus.offset = mem->start << PAGE_SHIFT;
173 mem->bus.base = pci_resource_start(mdev->dev->pdev, 0);
174 mem->bus.is_iomem = true;
175 break;
176 default:
177 return -EINVAL;
179 }
180 return 0;
181}
182
183static void mgag200_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
184{
185}
186
187static int mgag200_bo_move(struct ttm_buffer_object *bo,
188 bool evict, bool interruptible,
189 bool no_wait_reserve, bool no_wait_gpu,
190 struct ttm_mem_reg *new_mem)
191{
192 return ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
193}
196
197
198static void mgag200_ttm_backend_destroy(struct ttm_tt *tt)
199{
200 ttm_tt_fini(tt);
201 kfree(tt);
202}
203
204static struct ttm_backend_func mgag200_tt_backend_func = {
205 .destroy = &mgag200_ttm_backend_destroy,
206};
207
208
209struct ttm_tt *mgag200_ttm_tt_create(struct ttm_bo_device *bdev,
210 unsigned long size, uint32_t page_flags,
211 struct page *dummy_read_page)
212{
213 struct ttm_tt *tt;
214
215 tt = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL);
216 if (tt == NULL)
217 return NULL;
218 tt->func = &mgag200_tt_backend_func;
219 if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
220 kfree(tt);
221 return NULL;
222 }
223 return tt;
224}
225
226static int mgag200_ttm_tt_populate(struct ttm_tt *ttm)
227{
228 return ttm_pool_populate(ttm);
229}
230
231static void mgag200_ttm_tt_unpopulate(struct ttm_tt *ttm)
232{
233 ttm_pool_unpopulate(ttm);
234}
235
236struct ttm_bo_driver mgag200_bo_driver = {
237 .ttm_tt_create = mgag200_ttm_tt_create,
238 .ttm_tt_populate = mgag200_ttm_tt_populate,
239 .ttm_tt_unpopulate = mgag200_ttm_tt_unpopulate,
240 .init_mem_type = mgag200_bo_init_mem_type,
241 .evict_flags = mgag200_bo_evict_flags,
242 .move = mgag200_bo_move,
243 .verify_access = mgag200_bo_verify_access,
244 .io_mem_reserve = &mgag200_ttm_io_mem_reserve,
245 .io_mem_free = &mgag200_ttm_io_mem_free,
246};
247
248int mgag200_mm_init(struct mga_device *mdev)
249{
250 int ret;
251 struct drm_device *dev = mdev->dev;
252 struct ttm_bo_device *bdev = &mdev->ttm.bdev;
253
254 ret = mgag200_ttm_global_init(mdev);
255 if (ret)
256 return ret;
257
258 ret = ttm_bo_device_init(&mdev->ttm.bdev,
259 mdev->ttm.bo_global_ref.ref.object,
260 &mgag200_bo_driver, DRM_FILE_PAGE_OFFSET,
261 true);
262 if (ret) {
263 DRM_ERROR("Error initialising bo driver: %d\n", ret);
264 return ret;
265 }
266
267 ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, mdev->mc.vram_size >> PAGE_SHIFT);
268 if (ret) {
269 DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
270 return ret;
271 }
272
273 mdev->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0),
274 pci_resource_len(dev->pdev, 0),
275 DRM_MTRR_WC);
276
277 return 0;
278}
279
280void mgag200_mm_fini(struct mga_device *mdev)
281{
282 struct drm_device *dev = mdev->dev;
283 ttm_bo_device_release(&mdev->ttm.bdev);
284
285 mgag200_ttm_global_release(mdev);
286
287 if (mdev->fb_mtrr >= 0) {
288 drm_mtrr_del(mdev->fb_mtrr,
289 pci_resource_start(dev->pdev, 0),
290 pci_resource_len(dev->pdev, 0), DRM_MTRR_WC);
291 mdev->fb_mtrr = -1;
292 }
293}
294
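/*
 * Editor's note: mgag200_mm_init() is meant to run once at device init,
 * after mdev->mc.vram_size has been probed, and to be paired with
 * mgag200_mm_fini() on teardown.  A hypothetical load path:
 *
 *	ret = mgag200_mm_init(mdev);
 *	if (ret)
 *		return ret;
 */
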
295void mgag200_ttm_placement(struct mgag200_bo *bo, int domain)
296{
297 u32 c = 0;
298 bo->placement.fpfn = 0;
299 bo->placement.lpfn = 0;
300 bo->placement.placement = bo->placements;
301 bo->placement.busy_placement = bo->placements;
302 if (domain & TTM_PL_FLAG_VRAM)
303 bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
304 if (domain & TTM_PL_FLAG_SYSTEM)
305 bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
306 if (!c)
307 bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
308 bo->placement.num_placement = c;
309 bo->placement.num_busy_placement = c;
310}
311
312int mgag200_bo_reserve(struct mgag200_bo *bo, bool no_wait)
313{
314 int ret;
315
316 ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
317 if (ret) {
318 if (ret != -ERESTARTSYS)
319 DRM_ERROR("reserve failed %p\n", bo);
320 return ret;
321 }
322 return 0;
323}
324
325void mgag200_bo_unreserve(struct mgag200_bo *bo)
326{
327 ttm_bo_unreserve(&bo->bo);
328}
329
330int mgag200_bo_create(struct drm_device *dev, int size, int align,
331 uint32_t flags, struct mgag200_bo **pmgabo)
332{
333 struct mga_device *mdev = dev->dev_private;
334 struct mgag200_bo *mgabo;
335 size_t acc_size;
336 int ret;
337
338 mgabo = kzalloc(sizeof(struct mgag200_bo), GFP_KERNEL);
339 if (!mgabo)
340 return -ENOMEM;
341
342 ret = drm_gem_object_init(dev, &mgabo->gem, size);
343 if (ret) {
344 kfree(mgabo);
345 return ret;
346 }
347
348 mgabo->gem.driver_private = NULL;
349 mgabo->bo.bdev = &mdev->ttm.bdev;
350
351 mgag200_ttm_placement(mgabo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
352
353 acc_size = ttm_bo_dma_acc_size(&mdev->ttm.bdev, size,
354 sizeof(struct mgag200_bo));
355
356 ret = ttm_bo_init(&mdev->ttm.bdev, &mgabo->bo, size,
357 ttm_bo_type_device, &mgabo->placement,
358 align >> PAGE_SHIFT, 0, false, NULL, acc_size,
359 NULL, mgag200_bo_ttm_destroy);
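 /* editor's note: on failure ttm_bo_init() has already called
 mgag200_bo_ttm_destroy(), so mgabo must not be freed again here */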
360 if (ret)
361 return ret;
362
363 *pmgabo = mgabo;
364 return 0;
365}
366
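/*
 * Editor's sketch of a typical (hypothetical) caller: allocate a BO and
 * pin it into VRAM for scanout, using the reserve/pin/unreserve helpers
 * defined below:
 *
 *	struct mgag200_bo *bo;
 *	u64 gpu_addr;
 *
 *	ret = mgag200_bo_create(dev, size, 0, 0, &bo);
 *	if (ret == 0 && mgag200_bo_reserve(bo, false) == 0) {
 *		ret = mgag200_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
 *		mgag200_bo_unreserve(bo);
 *	}
 */
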
367static inline u64 mgag200_bo_gpu_offset(struct mgag200_bo *bo)
368{
369 return bo->bo.offset;
370}
371
372int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr)
373{
374 int i, ret;
375
376 if (bo->pin_count) {
377 bo->pin_count++;
378 if (gpu_addr)
379 *gpu_addr = mgag200_bo_gpu_offset(bo);
	return 0; /* already pinned, nothing further to do */
380 }
381
382 mgag200_ttm_placement(bo, pl_flag);
383 for (i = 0; i < bo->placement.num_placement; i++)
384 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
385 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
386 if (ret)
387 return ret;
388
389 bo->pin_count = 1;
390 if (gpu_addr)
391 *gpu_addr = mgag200_bo_gpu_offset(bo);
392 return 0;
393}
394
395int mgag200_bo_unpin(struct mgag200_bo *bo)
396{
397 int i, ret;
398 if (!bo->pin_count) {
399 DRM_ERROR("unpin called on unpinned bo %p\n", bo);
400 return 0;
401 }
402 bo->pin_count--;
403 if (bo->pin_count)
404 return 0;
405
406 for (i = 0; i < bo->placement.num_placement ; i++)
407 bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
408 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
409 if (ret)
410 return ret;
411
412 return 0;
413}
414
415int mgag200_bo_push_sysram(struct mgag200_bo *bo)
416{
417 int i, ret;
418 if (!bo->pin_count) {
419 DRM_ERROR("push_sysram called on unpinned bo %p\n", bo);
420 return 0;
421 }
422 bo->pin_count--;
423 if (bo->pin_count)
424 return 0;
425
426 if (bo->kmap.virtual)
427 ttm_bo_kunmap(&bo->kmap);
428
429 mgag200_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
430 for (i = 0; i < bo->placement.num_placement ; i++)
431 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
432
433 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
434 if (ret) {
435 DRM_ERROR("pushing to system memory failed\n");
436 return ret;
437 }
438 return 0;
439}
440
441int mgag200_mmap(struct file *filp, struct vm_area_struct *vma)
442{
443 struct drm_file *file_priv;
444 struct mga_device *mdev;
445
446 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
447 return drm_mmap(filp, vma);
448
449 file_priv = filp->private_data;
450 mdev = file_priv->minor->dev->dev_private;
451 return ttm_bo_mmap(filp, vma, &mdev->ttm.bdev);
452}
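
A rough userspace view of the fault path above (editor's addition; this uses
the generic dumb-buffer ioctls, on the assumption that fd is an open DRM node
and handle came from DRM_IOCTL_MODE_CREATE_DUMB): the kernel hands back a fake
offset above DRM_FILE_PAGE_OFFSET, and mmap() on that offset lands in
mgag200_mmap() and from there in ttm_bo_mmap():

	struct drm_mode_map_dumb arg = { .handle = handle };

	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &arg);
	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   fd, arg.offset);
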
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 1a2ad7eb1734..fe5267d06ab5 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -16,10 +16,13 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
16 nv04_mc.o nv40_mc.o nv50_mc.o \ 16 nv04_mc.o nv40_mc.o nv50_mc.o \
17 nv04_fb.o nv10_fb.o nv20_fb.o nv30_fb.o nv40_fb.o \ 17 nv04_fb.o nv10_fb.o nv20_fb.o nv30_fb.o nv40_fb.o \
18 nv50_fb.o nvc0_fb.o \ 18 nv50_fb.o nvc0_fb.o \
19 nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o nvc0_fifo.o \ 19 nv04_fifo.o nv10_fifo.o nv17_fifo.o nv40_fifo.o nv50_fifo.o \
20 nv84_fifo.o nvc0_fifo.o nve0_fifo.o \
21 nv04_fence.o nv10_fence.o nv84_fence.o nvc0_fence.o \
22 nv04_software.o nv50_software.o nvc0_software.o \
20 nv04_graph.o nv10_graph.o nv20_graph.o \ 23 nv04_graph.o nv10_graph.o nv20_graph.o \
21 nv40_graph.o nv50_graph.o nvc0_graph.o \ 24 nv40_graph.o nv50_graph.o nvc0_graph.o nve0_graph.o \
22 nv40_grctx.o nv50_grctx.o nvc0_grctx.o \ 25 nv40_grctx.o nv50_grctx.o nvc0_grctx.o nve0_grctx.o \
23 nv84_crypt.o nv98_crypt.o \ 26 nv84_crypt.o nv98_crypt.o \
24 nva3_copy.o nvc0_copy.o \ 27 nva3_copy.o nvc0_copy.o \
25 nv31_mpeg.o nv50_mpeg.o \ 28 nv31_mpeg.o nv50_mpeg.o \
@@ -37,7 +40,7 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
37 nv50_calc.o \ 40 nv50_calc.o \
38 nv04_pm.o nv40_pm.o nv50_pm.o nva3_pm.o nvc0_pm.o \ 41 nv04_pm.o nv40_pm.o nv50_pm.o nva3_pm.o nvc0_pm.o \
39 nv50_vram.o nvc0_vram.o \ 42 nv50_vram.o nvc0_vram.o \
40 nv50_vm.o nvc0_vm.o 43 nv50_vm.o nvc0_vm.o nouveau_prime.o
41 44
42nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o 45nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o
43nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o 46nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 284bd25d5d21..fc841e87b343 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -338,7 +338,8 @@ void nouveau_switcheroo_optimus_dsm(void)
338 338
339void nouveau_unregister_dsm_handler(void) 339void nouveau_unregister_dsm_handler(void)
340{ 340{
341 vga_switcheroo_unregister_handler(); 341 if (nouveau_dsm_priv.optimus_detected || nouveau_dsm_priv.dsm_detected)
342 vga_switcheroo_unregister_handler();
342} 343}
343 344
344/* retrieve the ROM in 4k blocks */ 345/* retrieve the ROM in 4k blocks */
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 0be4a815e706..2f11e16a81a9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -30,6 +30,7 @@
30#include "nouveau_gpio.h" 30#include "nouveau_gpio.h"
31 31
32#include <linux/io-mapping.h> 32#include <linux/io-mapping.h>
33#include <linux/firmware.h>
33 34
34/* these defines are made up */ 35/* these defines are made up */
35#define NV_CIO_CRE_44_HEADA 0x0 36#define NV_CIO_CRE_44_HEADA 0x0
@@ -195,35 +196,24 @@ static void
195bios_shadow_acpi(struct nvbios *bios) 196bios_shadow_acpi(struct nvbios *bios)
196{ 197{
197 struct pci_dev *pdev = bios->dev->pdev; 198 struct pci_dev *pdev = bios->dev->pdev;
198 int ptr, len, ret; 199 int cnt = 65536 / ROM_BIOS_PAGE;
199 u8 data[3]; 200 int ret;
200 201
201 if (!nouveau_acpi_rom_supported(pdev)) 202 if (!nouveau_acpi_rom_supported(pdev))
202 return; 203 return;
203 204
204 ret = nouveau_acpi_get_bios_chunk(data, 0, sizeof(data)); 205 bios->data = kmalloc(cnt * ROM_BIOS_PAGE, GFP_KERNEL);
205 if (ret != sizeof(data))
206 return;
207
208 bios->length = min(data[2] * 512, 65536);
209 bios->data = kmalloc(bios->length, GFP_KERNEL);
210 if (!bios->data) 206 if (!bios->data)
211 return; 207 return;
212 208
213 len = bios->length; 209 bios->length = 0;
214 ptr = 0; 210 while (cnt--) {
215 while (len) { 211 ret = nouveau_acpi_get_bios_chunk(bios->data, bios->length,
216 int size = (len > ROM_BIOS_PAGE) ? ROM_BIOS_PAGE : len; 212 ROM_BIOS_PAGE);
217 213 if (ret != ROM_BIOS_PAGE)
218 ret = nouveau_acpi_get_bios_chunk(bios->data, ptr, size);
219 if (ret != size) {
220 kfree(bios->data);
221 bios->data = NULL;
222 return; 214 return;
223 }
224 215
225 len -= size; 216 bios->length += ROM_BIOS_PAGE;
226 ptr += size;
227 } 217 }
228} 218}
229 219
@@ -249,8 +239,12 @@ bios_shadow(struct drm_device *dev)
249 struct drm_nouveau_private *dev_priv = dev->dev_private; 239 struct drm_nouveau_private *dev_priv = dev->dev_private;
250 struct nvbios *bios = &dev_priv->vbios; 240 struct nvbios *bios = &dev_priv->vbios;
251 struct methods *mthd, *best; 241 struct methods *mthd, *best;
242 const struct firmware *fw;
243 char fname[32];
244 int ret;
252 245
253 if (nouveau_vbios) { 246 if (nouveau_vbios) {
247 /* try to match one of the built-in methods */
254 mthd = shadow_methods; 248 mthd = shadow_methods;
255 do { 249 do {
256 if (strcasecmp(nouveau_vbios, mthd->desc)) 250 if (strcasecmp(nouveau_vbios, mthd->desc))
@@ -263,6 +257,22 @@ bios_shadow(struct drm_device *dev)
263 return true; 257 return true;
264 } while ((++mthd)->shadow); 258 } while ((++mthd)->shadow);
265 259
260 /* attempt to load firmware image */
261 snprintf(fname, sizeof(fname), "nouveau/%s", nouveau_vbios);
262 ret = request_firmware(&fw, fname, &dev->pdev->dev);
263 if (ret == 0) {
264 bios->length = fw->size;
265 bios->data = kmemdup(fw->data, fw->size, GFP_KERNEL);
266 release_firmware(fw);
267
268 NV_INFO(dev, "VBIOS image: %s\n", nouveau_vbios);
269 if (score_vbios(bios, 1))
270 return true;
271
272 kfree(bios->data);
273 bios->data = NULL;
274 }
275
266 NV_ERROR(dev, "VBIOS source \'%s\' invalid\n", nouveau_vbios); 276 NV_ERROR(dev, "VBIOS source \'%s\' invalid\n", nouveau_vbios);
267 } 277 }
268 278
@@ -273,6 +283,7 @@ bios_shadow(struct drm_device *dev)
273 mthd->score = score_vbios(bios, mthd->rw); 283 mthd->score = score_vbios(bios, mthd->rw);
274 mthd->size = bios->length; 284 mthd->size = bios->length;
275 mthd->data = bios->data; 285 mthd->data = bios->data;
286 bios->data = NULL;
276 } while (mthd->score != 3 && (++mthd)->shadow); 287 } while (mthd->score != 3 && (++mthd)->shadow);
277 288
278 mthd = shadow_methods; 289 mthd = shadow_methods;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 7d15a774f9c9..7f80ed523562 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -35,6 +35,8 @@
35#include "nouveau_dma.h" 35#include "nouveau_dma.h"
36#include "nouveau_mm.h" 36#include "nouveau_mm.h"
37#include "nouveau_vm.h" 37#include "nouveau_vm.h"
38#include "nouveau_fence.h"
39#include "nouveau_ramht.h"
38 40
39#include <linux/log2.h> 41#include <linux/log2.h>
40#include <linux/slab.h> 42#include <linux/slab.h>
@@ -89,12 +91,17 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
89int 91int
90nouveau_bo_new(struct drm_device *dev, int size, int align, 92nouveau_bo_new(struct drm_device *dev, int size, int align,
91 uint32_t flags, uint32_t tile_mode, uint32_t tile_flags, 93 uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
94 struct sg_table *sg,
92 struct nouveau_bo **pnvbo) 95 struct nouveau_bo **pnvbo)
93{ 96{
94 struct drm_nouveau_private *dev_priv = dev->dev_private; 97 struct drm_nouveau_private *dev_priv = dev->dev_private;
95 struct nouveau_bo *nvbo; 98 struct nouveau_bo *nvbo;
96 size_t acc_size; 99 size_t acc_size;
97 int ret; 100 int ret;
101 int type = ttm_bo_type_device;
102
103 if (sg)
104 type = ttm_bo_type_sg;
98 105
99 nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL); 106 nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
100 if (!nvbo) 107 if (!nvbo)
@@ -120,8 +127,8 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
120 sizeof(struct nouveau_bo)); 127 sizeof(struct nouveau_bo));
121 128
122 ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size, 129 ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
123 ttm_bo_type_device, &nvbo->placement, 130 type, &nvbo->placement,
124 align >> PAGE_SHIFT, 0, false, NULL, acc_size, 131 align >> PAGE_SHIFT, 0, false, NULL, acc_size, sg,
125 nouveau_bo_del_ttm); 132 nouveau_bo_del_ttm);
126 if (ret) { 133 if (ret) {
127 /* ttm will call nouveau_bo_del_ttm if it fails.. */ 134 /* ttm will call nouveau_bo_del_ttm if it fails.. */
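
(Editor's note on the signature change above: existing in-kernel callers just
gain a NULL for the new sg argument, e.g.

	ret = nouveau_bo_new(dev, size, align, TTM_PL_FLAG_VRAM,
			     0, 0, NULL, &nvbo);

only the dma-buf/prime import path passes a real sg_table.)
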
@@ -473,7 +480,7 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
473 struct nouveau_fence *fence = NULL; 480 struct nouveau_fence *fence = NULL;
474 int ret; 481 int ret;
475 482
476 ret = nouveau_fence_new(chan, &fence, true); 483 ret = nouveau_fence_new(chan, &fence);
477 if (ret) 484 if (ret)
478 return ret; 485 return ret;
479 486
@@ -484,6 +491,76 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
484} 491}
485 492
486static int 493static int
494nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
495 struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
496{
497 struct nouveau_mem *node = old_mem->mm_node;
498 int ret = RING_SPACE(chan, 10);
499 if (ret == 0) {
500 BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
501 OUT_RING (chan, upper_32_bits(node->vma[0].offset));
502 OUT_RING (chan, lower_32_bits(node->vma[0].offset));
503 OUT_RING (chan, upper_32_bits(node->vma[1].offset));
504 OUT_RING (chan, lower_32_bits(node->vma[1].offset));
505 OUT_RING (chan, PAGE_SIZE);
506 OUT_RING (chan, PAGE_SIZE);
507 OUT_RING (chan, PAGE_SIZE);
508 OUT_RING (chan, new_mem->num_pages);
509 BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
510 }
511 return ret;
512}
513
514static int
515nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
516{
517 int ret = RING_SPACE(chan, 2);
518 if (ret == 0) {
519 BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
520 OUT_RING (chan, handle);
521 }
522 return ret;
523}
524
525static int
526nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
527 struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
528{
529 struct nouveau_mem *node = old_mem->mm_node;
530 u64 src_offset = node->vma[0].offset;
531 u64 dst_offset = node->vma[1].offset;
532 u32 page_count = new_mem->num_pages;
533 int ret;
534
536 while (page_count) {
537 int line_count = (page_count > 8191) ? 8191 : page_count;
538
539 ret = RING_SPACE(chan, 11);
540 if (ret)
541 return ret;
542
543 BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8);
544 OUT_RING (chan, upper_32_bits(src_offset));
545 OUT_RING (chan, lower_32_bits(src_offset));
546 OUT_RING (chan, upper_32_bits(dst_offset));
547 OUT_RING (chan, lower_32_bits(dst_offset));
548 OUT_RING (chan, PAGE_SIZE);
549 OUT_RING (chan, PAGE_SIZE);
550 OUT_RING (chan, PAGE_SIZE);
551 OUT_RING (chan, line_count);
552 BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
553 OUT_RING (chan, 0x00000110);
554
555 page_count -= line_count;
556 src_offset += (PAGE_SIZE * line_count);
557 dst_offset += (PAGE_SIZE * line_count);
558 }
559
560 return 0;
561}
562
563static int
487nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, 564nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
488 struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) 565 struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
489{ 566{
@@ -501,17 +578,17 @@ nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
501 if (ret) 578 if (ret)
502 return ret; 579 return ret;
503 580
504 BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0238, 2); 581 BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2);
505 OUT_RING (chan, upper_32_bits(dst_offset)); 582 OUT_RING (chan, upper_32_bits(dst_offset));
506 OUT_RING (chan, lower_32_bits(dst_offset)); 583 OUT_RING (chan, lower_32_bits(dst_offset));
507 BEGIN_NVC0(chan, 2, NvSubM2MF, 0x030c, 6); 584 BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6);
508 OUT_RING (chan, upper_32_bits(src_offset)); 585 OUT_RING (chan, upper_32_bits(src_offset));
509 OUT_RING (chan, lower_32_bits(src_offset)); 586 OUT_RING (chan, lower_32_bits(src_offset));
510 OUT_RING (chan, PAGE_SIZE); /* src_pitch */ 587 OUT_RING (chan, PAGE_SIZE); /* src_pitch */
511 OUT_RING (chan, PAGE_SIZE); /* dst_pitch */ 588 OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
512 OUT_RING (chan, PAGE_SIZE); /* line_length */ 589 OUT_RING (chan, PAGE_SIZE); /* line_length */
513 OUT_RING (chan, line_count); 590 OUT_RING (chan, line_count);
514 BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0300, 1); 591 BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
515 OUT_RING (chan, 0x00100110); 592 OUT_RING (chan, 0x00100110);
516 593
517 page_count -= line_count; 594 page_count -= line_count;
@@ -523,6 +600,102 @@ nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
523} 600}
524 601
525static int 602static int
603nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
604 struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
605{
606 struct nouveau_mem *node = old_mem->mm_node;
607 u64 src_offset = node->vma[0].offset;
608 u64 dst_offset = node->vma[1].offset;
609 u32 page_count = new_mem->num_pages;
610 int ret;
611
613 while (page_count) {
614 int line_count = (page_count > 8191) ? 8191 : page_count;
615
616 ret = RING_SPACE(chan, 11);
617 if (ret)
618 return ret;
619
620 BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
621 OUT_RING (chan, upper_32_bits(src_offset));
622 OUT_RING (chan, lower_32_bits(src_offset));
623 OUT_RING (chan, upper_32_bits(dst_offset));
624 OUT_RING (chan, lower_32_bits(dst_offset));
625 OUT_RING (chan, PAGE_SIZE);
626 OUT_RING (chan, PAGE_SIZE);
627 OUT_RING (chan, PAGE_SIZE);
628 OUT_RING (chan, line_count);
629 BEGIN_NV04(chan, NvSubCopy, 0x0300, 1);
630 OUT_RING (chan, 0x00000110);
631
632 page_count -= line_count;
633 src_offset += (PAGE_SIZE * line_count);
634 dst_offset += (PAGE_SIZE * line_count);
635 }
636
637 return 0;
638}
639
640static int
641nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
642 struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
643{
644 struct nouveau_mem *node = old_mem->mm_node;
645 int ret = RING_SPACE(chan, 7);
646 if (ret == 0) {
647 BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
648 OUT_RING (chan, upper_32_bits(node->vma[0].offset));
649 OUT_RING (chan, lower_32_bits(node->vma[0].offset));
650 OUT_RING (chan, upper_32_bits(node->vma[1].offset));
651 OUT_RING (chan, lower_32_bits(node->vma[1].offset));
652 OUT_RING (chan, 0x00000000 /* COPY */);
653 OUT_RING (chan, new_mem->num_pages << PAGE_SHIFT);
654 }
655 return ret;
656}
657
658static int
659nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
660 struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
661{
662 struct nouveau_mem *node = old_mem->mm_node;
663 int ret = RING_SPACE(chan, 7);
664 if (ret == 0) {
665 BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
666 OUT_RING (chan, new_mem->num_pages << PAGE_SHIFT);
667 OUT_RING (chan, upper_32_bits(node->vma[0].offset));
668 OUT_RING (chan, lower_32_bits(node->vma[0].offset));
669 OUT_RING (chan, upper_32_bits(node->vma[1].offset));
670 OUT_RING (chan, lower_32_bits(node->vma[1].offset));
671 OUT_RING (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
672 }
673 return ret;
674}
675
676static int
677nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
678{
679 int ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
680 &chan->m2mf_ntfy);
681 if (ret == 0) {
682 ret = RING_SPACE(chan, 6);
683 if (ret == 0) {
684 BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
685 OUT_RING (chan, handle);
686 BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
687 OUT_RING (chan, NvNotify0);
688 OUT_RING (chan, NvDmaFB);
689 OUT_RING (chan, NvDmaFB);
690 } else {
691 nouveau_ramht_remove(chan, NvNotify0);
692 }
693 }
694
695 return ret;
696}
697
698static int
526nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, 699nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
527 struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) 700 struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
528{ 701{
@@ -546,7 +719,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
546 if (ret) 719 if (ret)
547 return ret; 720 return ret;
548 721
549 BEGIN_RING(chan, NvSubM2MF, 0x0200, 7); 722 BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
550 OUT_RING (chan, 0); 723 OUT_RING (chan, 0);
551 OUT_RING (chan, 0); 724 OUT_RING (chan, 0);
552 OUT_RING (chan, stride); 725 OUT_RING (chan, stride);
@@ -559,7 +732,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
559 if (ret) 732 if (ret)
560 return ret; 733 return ret;
561 734
562 BEGIN_RING(chan, NvSubM2MF, 0x0200, 1); 735 BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
563 OUT_RING (chan, 1); 736 OUT_RING (chan, 1);
564 } 737 }
565 if (old_mem->mem_type == TTM_PL_VRAM && 738 if (old_mem->mem_type == TTM_PL_VRAM &&
@@ -568,7 +741,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
568 if (ret) 741 if (ret)
569 return ret; 742 return ret;
570 743
571 BEGIN_RING(chan, NvSubM2MF, 0x021c, 7); 744 BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
572 OUT_RING (chan, 0); 745 OUT_RING (chan, 0);
573 OUT_RING (chan, 0); 746 OUT_RING (chan, 0);
574 OUT_RING (chan, stride); 747 OUT_RING (chan, stride);
@@ -581,7 +754,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
581 if (ret) 754 if (ret)
582 return ret; 755 return ret;
583 756
584 BEGIN_RING(chan, NvSubM2MF, 0x021c, 1); 757 BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
585 OUT_RING (chan, 1); 758 OUT_RING (chan, 1);
586 } 759 }
587 760
@@ -589,10 +762,10 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
589 if (ret) 762 if (ret)
590 return ret; 763 return ret;
591 764
592 BEGIN_RING(chan, NvSubM2MF, 0x0238, 2); 765 BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
593 OUT_RING (chan, upper_32_bits(src_offset)); 766 OUT_RING (chan, upper_32_bits(src_offset));
594 OUT_RING (chan, upper_32_bits(dst_offset)); 767 OUT_RING (chan, upper_32_bits(dst_offset));
595 BEGIN_RING(chan, NvSubM2MF, 0x030c, 8); 768 BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
596 OUT_RING (chan, lower_32_bits(src_offset)); 769 OUT_RING (chan, lower_32_bits(src_offset));
597 OUT_RING (chan, lower_32_bits(dst_offset)); 770 OUT_RING (chan, lower_32_bits(dst_offset));
598 OUT_RING (chan, stride); 771 OUT_RING (chan, stride);
@@ -601,7 +774,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
601 OUT_RING (chan, height); 774 OUT_RING (chan, height);
602 OUT_RING (chan, 0x00000101); 775 OUT_RING (chan, 0x00000101);
603 OUT_RING (chan, 0x00000000); 776 OUT_RING (chan, 0x00000000);
604 BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1); 777 BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
605 OUT_RING (chan, 0); 778 OUT_RING (chan, 0);
606 779
607 length -= amount; 780 length -= amount;
@@ -612,6 +785,24 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
612 return 0; 785 return 0;
613} 786}
614 787
788static int
789nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
790{
791 int ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
792 &chan->m2mf_ntfy);
793 if (ret == 0) {
794 ret = RING_SPACE(chan, 4);
795 if (ret == 0) {
796 BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
797 OUT_RING (chan, handle);
798 BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
799 OUT_RING (chan, NvNotify0);
800 }
801 }
802
803 return ret;
804}
805
615static inline uint32_t 806static inline uint32_t
616nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo, 807nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
617 struct nouveau_channel *chan, struct ttm_mem_reg *mem) 808 struct nouveau_channel *chan, struct ttm_mem_reg *mem)
@@ -634,7 +825,7 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
634 if (ret) 825 if (ret)
635 return ret; 826 return ret;
636 827
637 BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2); 828 BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
638 OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem)); 829 OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
639 OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem)); 830 OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));
640 831
@@ -646,7 +837,7 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
646 if (ret) 837 if (ret)
647 return ret; 838 return ret;
648 839
649 BEGIN_RING(chan, NvSubM2MF, 840 BEGIN_NV04(chan, NvSubCopy,
650 NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8); 841 NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
651 OUT_RING (chan, src_offset); 842 OUT_RING (chan, src_offset);
652 OUT_RING (chan, dst_offset); 843 OUT_RING (chan, dst_offset);
@@ -656,7 +847,7 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
656 OUT_RING (chan, line_count); 847 OUT_RING (chan, line_count);
657 OUT_RING (chan, 0x00000101); 848 OUT_RING (chan, 0x00000101);
658 OUT_RING (chan, 0x00000000); 849 OUT_RING (chan, 0x00000000);
659 BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1); 850 BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
660 OUT_RING (chan, 0); 851 OUT_RING (chan, 0);
661 852
662 page_count -= line_count; 853 page_count -= line_count;
@@ -716,13 +907,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
716 goto out; 907 goto out;
717 } 908 }
718 909
719 if (dev_priv->card_type < NV_50) 910 ret = dev_priv->ttm.move(chan, bo, &bo->mem, new_mem);
720 ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
721 else
722 if (dev_priv->card_type < NV_C0)
723 ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
724 else
725 ret = nvc0_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
726 if (ret == 0) { 911 if (ret == 0) {
727 ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict, 912 ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
728 no_wait_reserve, 913 no_wait_reserve,
@@ -734,6 +919,49 @@ out:
734 return ret; 919 return ret;
735} 920}
736 921
922void
923nouveau_bo_move_init(struct nouveau_channel *chan)
924{
925 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
926 static const struct {
927 const char *name;
928 int engine;
929 u32 oclass;
930 int (*exec)(struct nouveau_channel *,
931 struct ttm_buffer_object *,
932 struct ttm_mem_reg *, struct ttm_mem_reg *);
933 int (*init)(struct nouveau_channel *, u32 handle);
934 } _methods[] = {
935 { "COPY", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
936 { "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
937 { "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
938 { "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
939 { "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
940 { "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
941 { "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
942 { "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
943 {},
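 /* editor's note: the empty entry above terminates the probe loop
 below, leaving the nv98 CRYPT method after it deliberately disabled */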
944 { "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
945 }, *mthd = _methods;
946 const char *name = "CPU";
947 int ret;
948
949 do {
950 u32 handle = (mthd->engine << 16) | mthd->oclass;
951 ret = nouveau_gpuobj_gr_new(chan, handle, mthd->oclass);
952 if (ret == 0) {
953 ret = mthd->init(chan, handle);
954 if (ret == 0) {
955 dev_priv->ttm.move = mthd->exec;
956 name = mthd->name;
957 break;
958 }
959 }
960 } while ((++mthd)->exec);
961
962 NV_INFO(chan->dev, "MM: using %s for buffer copies\n", name);
963}
964
737static int 965static int
738nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, 966nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
739 bool no_wait_reserve, bool no_wait_gpu, 967 bool no_wait_reserve, bool no_wait_gpu,
@@ -817,9 +1045,14 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
817 } else 1045 } else
818 if (new_mem && new_mem->mem_type == TTM_PL_TT && 1046 if (new_mem && new_mem->mem_type == TTM_PL_TT &&
819 nvbo->page_shift == vma->vm->spg_shift) { 1047 nvbo->page_shift == vma->vm->spg_shift) {
820 nouveau_vm_map_sg(vma, 0, new_mem-> 1048 if (((struct nouveau_mem *)new_mem->mm_node)->sg)
821 num_pages << PAGE_SHIFT, 1049 nouveau_vm_map_sg_table(vma, 0, new_mem->
822 new_mem->mm_node); 1050 num_pages << PAGE_SHIFT,
1051 new_mem->mm_node);
1052 else
1053 nouveau_vm_map_sg(vma, 0, new_mem->
1054 num_pages << PAGE_SHIFT,
1055 new_mem->mm_node);
823 } else { 1056 } else {
824 nouveau_vm_unmap(vma); 1057 nouveau_vm_unmap(vma);
825 } 1058 }
@@ -885,8 +1118,8 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
885 goto out; 1118 goto out;
886 } 1119 }
887 1120
888 /* Software copy if the card isn't up and running yet. */ 1121 /* CPU copy if we have no accelerated method available */
889 if (!dev_priv->channel) { 1122 if (!dev_priv->ttm.move) {
890 ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); 1123 ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
891 goto out; 1124 goto out;
892 } 1125 }
@@ -1030,26 +1263,10 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
1030 1263
1031 nvbo->placement.fpfn = 0; 1264 nvbo->placement.fpfn = 0;
1032 nvbo->placement.lpfn = dev_priv->fb_mappable_pages; 1265 nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
1033 nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0); 1266 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
1034 return nouveau_bo_validate(nvbo, false, true, false); 1267 return nouveau_bo_validate(nvbo, false, true, false);
1035} 1268}
1036 1269
1037void
1038nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
1039{
1040 struct nouveau_fence *old_fence;
1041
1042 if (likely(fence))
1043 nouveau_fence_ref(fence);
1044
1045 spin_lock(&nvbo->bo.bdev->fence_lock);
1046 old_fence = nvbo->bo.sync_obj;
1047 nvbo->bo.sync_obj = fence;
1048 spin_unlock(&nvbo->bo.bdev->fence_lock);
1049
1050 nouveau_fence_unref(&old_fence);
1051}
1052
1053static int 1270static int
1054nouveau_ttm_tt_populate(struct ttm_tt *ttm) 1271nouveau_ttm_tt_populate(struct ttm_tt *ttm)
1055{ 1272{
@@ -1058,10 +1275,19 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
1058 struct drm_device *dev; 1275 struct drm_device *dev;
1059 unsigned i; 1276 unsigned i;
1060 int r; 1277 int r;
1278 bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
1061 1279
1062 if (ttm->state != tt_unpopulated) 1280 if (ttm->state != tt_unpopulated)
1063 return 0; 1281 return 0;
1064 1282
1283 if (slave && ttm->sg) {
1284 /* make userspace faulting work */
1285 drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
1286 ttm_dma->dma_address, ttm->num_pages);
1287 ttm->state = tt_unbound;
1288 return 0;
1289 }
1290
1065 dev_priv = nouveau_bdev(ttm->bdev); 1291 dev_priv = nouveau_bdev(ttm->bdev);
1066 dev = dev_priv->dev; 1292 dev = dev_priv->dev;
1067 1293
@@ -1106,6 +1332,10 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
1106 struct drm_nouveau_private *dev_priv; 1332 struct drm_nouveau_private *dev_priv;
1107 struct drm_device *dev; 1333 struct drm_device *dev;
1108 unsigned i; 1334 unsigned i;
1335 bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
1336
1337 if (slave)
1338 return;
1109 1339
1110 dev_priv = nouveau_bdev(ttm->bdev); 1340 dev_priv = nouveau_bdev(ttm->bdev);
1111 dev = dev_priv->dev; 1341 dev = dev_priv->dev;
@@ -1134,6 +1364,52 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
1134 ttm_pool_unpopulate(ttm); 1364 ttm_pool_unpopulate(ttm);
1135} 1365}
1136 1366
1367void
1368nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
1369{
1370 struct nouveau_fence *old_fence = NULL;
1371
1372 if (likely(fence))
1373 nouveau_fence_ref(fence);
1374
1375 spin_lock(&nvbo->bo.bdev->fence_lock);
1376 old_fence = nvbo->bo.sync_obj;
1377 nvbo->bo.sync_obj = fence;
1378 spin_unlock(&nvbo->bo.bdev->fence_lock);
1379
1380 nouveau_fence_unref(&old_fence);
1381}
1382
1383static void
1384nouveau_bo_fence_unref(void **sync_obj)
1385{
1386 nouveau_fence_unref((struct nouveau_fence **)sync_obj);
1387}
1388
1389static void *
1390nouveau_bo_fence_ref(void *sync_obj)
1391{
1392 return nouveau_fence_ref(sync_obj);
1393}
1394
1395static bool
1396nouveau_bo_fence_signalled(void *sync_obj, void *sync_arg)
1397{
1398 return nouveau_fence_done(sync_obj);
1399}
1400
1401static int
1402nouveau_bo_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
1403{
1404 return nouveau_fence_wait(sync_obj, lazy, intr);
1405}
1406
1407static int
1408nouveau_bo_fence_flush(void *sync_obj, void *sync_arg)
1409{
1410 return 0;
1411}
1412
1137struct ttm_bo_driver nouveau_bo_driver = { 1413struct ttm_bo_driver nouveau_bo_driver = {
1138 .ttm_tt_create = &nouveau_ttm_tt_create, 1414 .ttm_tt_create = &nouveau_ttm_tt_create,
1139 .ttm_tt_populate = &nouveau_ttm_tt_populate, 1415 .ttm_tt_populate = &nouveau_ttm_tt_populate,
@@ -1144,11 +1420,11 @@ struct ttm_bo_driver nouveau_bo_driver = {
1144 .move_notify = nouveau_bo_move_ntfy, 1420 .move_notify = nouveau_bo_move_ntfy,
1145 .move = nouveau_bo_move, 1421 .move = nouveau_bo_move,
1146 .verify_access = nouveau_bo_verify_access, 1422 .verify_access = nouveau_bo_verify_access,
1147 .sync_obj_signaled = __nouveau_fence_signalled, 1423 .sync_obj_signaled = nouveau_bo_fence_signalled,
1148 .sync_obj_wait = __nouveau_fence_wait, 1424 .sync_obj_wait = nouveau_bo_fence_wait,
1149 .sync_obj_flush = __nouveau_fence_flush, 1425 .sync_obj_flush = nouveau_bo_fence_flush,
1150 .sync_obj_unref = __nouveau_fence_unref, 1426 .sync_obj_unref = nouveau_bo_fence_unref,
1151 .sync_obj_ref = __nouveau_fence_ref, 1427 .sync_obj_ref = nouveau_bo_fence_ref,
1152 .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify, 1428 .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
1153 .io_mem_reserve = &nouveau_ttm_io_mem_reserve, 1429 .io_mem_reserve = &nouveau_ttm_io_mem_reserve,
1154 .io_mem_free = &nouveau_ttm_io_mem_free, 1430 .io_mem_free = &nouveau_ttm_io_mem_free,
@@ -1181,9 +1457,12 @@ nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
1181 1457
1182 if (nvbo->bo.mem.mem_type == TTM_PL_VRAM) 1458 if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
1183 nouveau_vm_map(vma, nvbo->bo.mem.mm_node); 1459 nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
1184 else 1460 else if (nvbo->bo.mem.mem_type == TTM_PL_TT) {
1185 if (nvbo->bo.mem.mem_type == TTM_PL_TT) 1461 if (node->sg)
1186 nouveau_vm_map_sg(vma, 0, size, node); 1462 nouveau_vm_map_sg_table(vma, 0, size, node);
1463 else
1464 nouveau_vm_map_sg(vma, 0, size, node);
1465 }
1187 1466
1188 list_add_tail(&vma->head, &nvbo->vma_list); 1467 list_add_tail(&vma->head, &nvbo->vma_list);
1189 vma->refcount = 1; 1468 vma->refcount = 1;
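
Note: the nouveau_bo.c hunks above teach the VM mapping paths to distinguish ordinary TT buffers from PRIME (dma-buf) imports, whose backing pages arrive as an sg_table rather than a flat page list, and swap the TTM sync_obj glue over to the new fence API. Below is a minimal standalone sketch of that mapping dispatch; every type and function name is an illustrative stand-in for the real nouveau symbols (nouveau_mem, nouveau_vm_map_sg_table, nouveau_vm_map_sg), not the driver's code.

#include <stdio.h>

struct mem_node { void *sg; /* non-NULL for a dma-buf import */ };

static void map_sg_table(struct mem_node *node) { (void)node; printf("sg-table path\n"); }
static void map_sg(struct mem_node *node)       { (void)node; printf("page-list path\n"); }

static void map_tt(struct mem_node *node)
{
    if (node->sg)           /* PRIME import: pages live in an sg_table */
        map_sg_table(node);
    else                    /* native TT buffer: flat page list */
        map_sg(node);
}

int main(void)
{
    struct mem_node imported = { .sg = (void *)&imported }, native = { .sg = NULL };
    map_tt(&imported);      /* prints "sg-table path" */
    map_tt(&native);        /* prints "page-list path" */
    return 0;
}

Keying the decision off the sg pointer stored in the memory node lets every mapping site stay oblivious to where the buffer's pages came from.
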
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 846afb0bfef4..629d8a2df5bd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -27,7 +27,10 @@
27#include "nouveau_drv.h" 27#include "nouveau_drv.h"
28#include "nouveau_drm.h" 28#include "nouveau_drm.h"
29#include "nouveau_dma.h" 29#include "nouveau_dma.h"
30#include "nouveau_fifo.h"
30#include "nouveau_ramht.h" 31#include "nouveau_ramht.h"
32#include "nouveau_fence.h"
33#include "nouveau_software.h"
31 34
32static int 35static int
33nouveau_channel_pushbuf_init(struct nouveau_channel *chan) 36nouveau_channel_pushbuf_init(struct nouveau_channel *chan)
@@ -38,7 +41,7 @@ nouveau_channel_pushbuf_init(struct nouveau_channel *chan)
38 int ret; 41 int ret;
39 42
40 /* allocate buffer object */ 43 /* allocate buffer object */
41 ret = nouveau_bo_new(dev, 65536, 0, mem, 0, 0, &chan->pushbuf_bo); 44 ret = nouveau_bo_new(dev, 65536, 0, mem, 0, 0, NULL, &chan->pushbuf_bo);
42 if (ret) 45 if (ret)
43 goto out; 46 goto out;
44 47
@@ -117,8 +120,9 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
117 struct drm_file *file_priv, 120 struct drm_file *file_priv,
118 uint32_t vram_handle, uint32_t gart_handle) 121 uint32_t vram_handle, uint32_t gart_handle)
119{ 122{
123 struct nouveau_exec_engine *fence = nv_engine(dev, NVOBJ_ENGINE_FENCE);
124 struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
120 struct drm_nouveau_private *dev_priv = dev->dev_private; 125 struct drm_nouveau_private *dev_priv = dev->dev_private;
121 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
122 struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv); 126 struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
123 struct nouveau_channel *chan; 127 struct nouveau_channel *chan;
124 unsigned long flags; 128 unsigned long flags;
@@ -155,10 +159,6 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
155 } 159 }
156 160
157 NV_DEBUG(dev, "initialising channel %d\n", chan->id); 161 NV_DEBUG(dev, "initialising channel %d\n", chan->id);
158 INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
159 INIT_LIST_HEAD(&chan->nvsw.flip);
160 INIT_LIST_HEAD(&chan->fence.pending);
161 spin_lock_init(&chan->fence.lock);
162 162
163 /* setup channel's memory and vm */ 163 /* setup channel's memory and vm */
164 ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle); 164 ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
@@ -188,20 +188,15 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
188 chan->user_put = 0x40; 188 chan->user_put = 0x40;
189 chan->user_get = 0x44; 189 chan->user_get = 0x44;
190 if (dev_priv->card_type >= NV_50) 190 if (dev_priv->card_type >= NV_50)
191 chan->user_get_hi = 0x60; 191 chan->user_get_hi = 0x60;
192 192
193 /* disable the fifo caches */ 193 /* create fifo context */
194 pfifo->reassign(dev, false); 194 ret = pfifo->base.context_new(chan, NVOBJ_ENGINE_FIFO);
195
196 /* Construct initial RAMFC for new channel */
197 ret = pfifo->create_context(chan);
198 if (ret) { 195 if (ret) {
199 nouveau_channel_put(&chan); 196 nouveau_channel_put(&chan);
200 return ret; 197 return ret;
201 } 198 }
202 199
203 pfifo->reassign(dev, true);
204
205 /* Insert NOPs for NOUVEAU_DMA_SKIPS */ 200 /* Insert NOPs for NOUVEAU_DMA_SKIPS */
206 ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS); 201 ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
207 if (ret) { 202 if (ret) {
@@ -211,9 +206,28 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
211 206
212 for (i = 0; i < NOUVEAU_DMA_SKIPS; i++) 207 for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
213 OUT_RING (chan, 0x00000000); 208 OUT_RING (chan, 0x00000000);
209
210 ret = nouveau_gpuobj_gr_new(chan, NvSw, nouveau_software_class(dev));
211 if (ret) {
212 nouveau_channel_put(&chan);
213 return ret;
214 }
215
216 if (dev_priv->card_type < NV_C0) {
217 ret = RING_SPACE(chan, 2);
218 if (ret) {
219 nouveau_channel_put(&chan);
220 return ret;
221 }
222
223 BEGIN_NV04(chan, NvSubSw, NV01_SUBCHAN_OBJECT, 1);
224 OUT_RING (chan, NvSw);
225 FIRE_RING (chan);
226 }
227
214 FIRE_RING(chan); 228 FIRE_RING(chan);
215 229
216 ret = nouveau_fence_channel_init(chan); 230 ret = fence->context_new(chan, NVOBJ_ENGINE_FENCE);
217 if (ret) { 231 if (ret) {
218 nouveau_channel_put(&chan); 232 nouveau_channel_put(&chan);
219 return ret; 233 return ret;
@@ -268,7 +282,6 @@ nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
268 struct nouveau_channel *chan = *pchan; 282 struct nouveau_channel *chan = *pchan;
269 struct drm_device *dev = chan->dev; 283 struct drm_device *dev = chan->dev;
270 struct drm_nouveau_private *dev_priv = dev->dev_private; 284 struct drm_nouveau_private *dev_priv = dev->dev_private;
271 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
272 unsigned long flags; 285 unsigned long flags;
273 int i; 286 int i;
274 287
@@ -285,24 +298,12 @@ nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
285 /* give it a chance to idle */ 298
286 nouveau_channel_idle(chan); 299 nouveau_channel_idle(chan);
287 300
288 /* ensure all outstanding fences are signaled. they should be if the
289 * above attempts at idling were OK, but if we failed this'll tell TTM
290 * we're done with the buffers.
291 */
292 nouveau_fence_channel_fini(chan);
293
294 /* boot it off the hardware */
295 pfifo->reassign(dev, false);
296
297 /* destroy the engine specific contexts */ 301 /* destroy the engine specific contexts */
298 pfifo->destroy_context(chan); 302 for (i = NVOBJ_ENGINE_NR - 1; i >= 0; i--) {
299 for (i = 0; i < NVOBJ_ENGINE_NR; i++) {
300 if (chan->engctx[i]) 303 if (chan->engctx[i])
301 dev_priv->eng[i]->context_del(chan, i); 304 dev_priv->eng[i]->context_del(chan, i);
302 } 305 }
303 306
304 pfifo->reassign(dev, true);
305
306 /* aside from its resources, the channel should now be dead, 307 /* aside from its resources, the channel should now be dead,
307 * remove it from the channel list 308 * remove it from the channel list
308 */ 309 */
@@ -354,38 +355,37 @@ nouveau_channel_ref(struct nouveau_channel *chan,
354 *pchan = chan; 355 *pchan = chan;
355} 356}
356 357
357void 358int
358nouveau_channel_idle(struct nouveau_channel *chan) 359nouveau_channel_idle(struct nouveau_channel *chan)
359{ 360{
360 struct drm_device *dev = chan->dev; 361 struct drm_device *dev = chan->dev;
361 struct nouveau_fence *fence = NULL; 362 struct nouveau_fence *fence = NULL;
362 int ret; 363 int ret;
363 364
364 nouveau_fence_update(chan); 365 ret = nouveau_fence_new(chan, &fence);
365 366 if (!ret) {
366 if (chan->fence.sequence != chan->fence.sequence_ack) { 367 ret = nouveau_fence_wait(fence, false, false);
367 ret = nouveau_fence_new(chan, &fence, true); 368 nouveau_fence_unref(&fence);
368 if (!ret) {
369 ret = nouveau_fence_wait(fence, false, false);
370 nouveau_fence_unref(&fence);
371 }
372
373 if (ret)
374 NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
375 } 369 }
370
371 if (ret)
372 NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
373 return ret;
376} 374}
377 375
378/* cleans up all the fifos from file_priv */ 376/* cleans up all the fifos from file_priv */
379void 377void
380nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv) 378nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
381{ 379{
382 struct drm_nouveau_private *dev_priv = dev->dev_private; 380 struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
383 struct nouveau_engine *engine = &dev_priv->engine;
384 struct nouveau_channel *chan; 381 struct nouveau_channel *chan;
385 int i; 382 int i;
386 383
384 if (!pfifo)
385 return;
386
387 NV_DEBUG(dev, "clearing FIFO enables from file_priv\n"); 387 NV_DEBUG(dev, "clearing FIFO enables from file_priv\n");
388 for (i = 0; i < engine->fifo.channels; i++) { 388 for (i = 0; i < pfifo->channels; i++) {
389 chan = nouveau_channel_get(file_priv, i); 389 chan = nouveau_channel_get(file_priv, i);
390 if (IS_ERR(chan)) 390 if (IS_ERR(chan))
391 continue; 391 continue;
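
Note: nouveau_channel_alloc/put now route FIFO and fence setup through the generic engine interface (and tear engine contexts down in reverse slot order), while nouveau_channel_idle gains a return value: it idles by emitting one fence and waiting on it, instead of comparing sequence counters. A minimal compile-and-run sketch of that emit-and-wait pattern, with stubs standing in for the nouveau_fence_* API:

#include <stdio.h>

struct fence { int signalled; };

static int fence_new(struct fence **pf)
{
    static struct fence f;
    f.signalled = 1;            /* pretend the GPU signals immediately */
    *pf = &f;
    return 0;
}

static int fence_wait(struct fence *f)
{
    return f->signalled ? 0 : -16;  /* -EBUSY on timeout in the driver */
}

static void fence_unref(struct fence **pf) { *pf = NULL; }

static int channel_idle(void)
{
    struct fence *fence = NULL;
    int ret = fence_new(&fence);
    if (!ret) {
        ret = fence_wait(fence);
        fence_unref(&fence);
    }
    if (ret)
        fprintf(stderr, "failed to idle channel\n");
    return ret;
}

int main(void) { return channel_idle(); }
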
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index fa860358add1..7b11edb077d0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -654,7 +654,13 @@ nouveau_connector_detect_depth(struct drm_connector *connector)
654 if (nv_connector->edid && connector->display_info.bpc) 654 if (nv_connector->edid && connector->display_info.bpc)
655 return; 655 return;
656 656
657 /* if not, we're out of options unless we're LVDS, default to 8bpc */ 657 /* EDID 1.4 is *supposed* to be supported on eDP, but, Apple... */
658 if (nv_connector->type == DCB_CONNECTOR_eDP) {
659 connector->display_info.bpc = 6;
660 return;
661 }
662
663 /* we're out of options unless we're LVDS, default to 8bpc */
658 if (nv_encoder->dcb->type != OUTPUT_LVDS) { 664 if (nv_encoder->dcb->type != OUTPUT_LVDS) {
659 connector->display_info.bpc = 8; 665 connector->display_info.bpc = 8;
660 return; 666 return;
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index fa2ec491f6a7..188c92b327e2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -67,8 +67,6 @@ nouveau_debugfs_channel_info(struct seq_file *m, void *data)
67 nvchan_rd32(chan, 0x8c)); 67 nvchan_rd32(chan, 0x8c));
68 } 68 }
69 69
70 seq_printf(m, "last fence : %d\n", chan->fence.sequence);
71 seq_printf(m, "last signalled: %d\n", chan->fence.sequence_ack);
72 return 0; 70 return 0;
73} 71}
74 72
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index a85e112863d1..69688ef5cf46 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -33,7 +33,9 @@
33#include "nouveau_crtc.h" 33#include "nouveau_crtc.h"
34#include "nouveau_dma.h" 34#include "nouveau_dma.h"
35#include "nouveau_connector.h" 35#include "nouveau_connector.h"
36#include "nouveau_software.h"
36#include "nouveau_gpio.h" 37#include "nouveau_gpio.h"
38#include "nouveau_fence.h"
37#include "nv50_display.h" 39#include "nv50_display.h"
38 40
39static void 41static void
@@ -300,7 +302,7 @@ nouveau_display_create(struct drm_device *dev)
300 disp->color_vibrance_property->values[1] = 200; /* -100..+100 */ 302 disp->color_vibrance_property->values[1] = 200; /* -100..+100 */
301 } 303 }
302 304
303 dev->mode_config.funcs = (void *)&nouveau_mode_config_funcs; 305 dev->mode_config.funcs = &nouveau_mode_config_funcs;
304 dev->mode_config.fb_base = pci_resource_start(dev->pdev, 1); 306 dev->mode_config.fb_base = pci_resource_start(dev->pdev, 1);
305 307
306 dev->mode_config.min_width = 0; 308 dev->mode_config.min_width = 0;
@@ -325,14 +327,21 @@ nouveau_display_create(struct drm_device *dev)
325 327
326 ret = disp->create(dev); 328 ret = disp->create(dev);
327 if (ret) 329 if (ret)
328 return ret; 330 goto disp_create_err;
329 331
330 if (dev->mode_config.num_crtc) { 332 if (dev->mode_config.num_crtc) {
331 ret = drm_vblank_init(dev, dev->mode_config.num_crtc); 333 ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
332 if (ret) 334 if (ret)
333 return ret; 335 goto vblank_err;
334 } 336 }
335 337
338 return 0;
339
340vblank_err:
341 disp->destroy(dev);
342disp_create_err:
343 drm_kms_helper_poll_fini(dev);
344 drm_mode_config_cleanup(dev);
336 return ret; 345 return ret;
337} 346}
338 347
@@ -425,6 +434,7 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
425 struct nouveau_page_flip_state *s, 434 struct nouveau_page_flip_state *s,
426 struct nouveau_fence **pfence) 435 struct nouveau_fence **pfence)
427{ 436{
437 struct nouveau_software_chan *swch = chan->engctx[NVOBJ_ENGINE_SW];
428 struct drm_nouveau_private *dev_priv = chan->dev->dev_private; 438 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
429 struct drm_device *dev = chan->dev; 439 struct drm_device *dev = chan->dev;
430 unsigned long flags; 440 unsigned long flags;
@@ -432,7 +442,7 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
432 442
433 /* Queue it to the pending list */ 443 /* Queue it to the pending list */
434 spin_lock_irqsave(&dev->event_lock, flags); 444 spin_lock_irqsave(&dev->event_lock, flags);
435 list_add_tail(&s->head, &chan->nvsw.flip); 445 list_add_tail(&s->head, &swch->flip);
436 spin_unlock_irqrestore(&dev->event_lock, flags); 446 spin_unlock_irqrestore(&dev->event_lock, flags);
437 447
438 /* Synchronize with the old framebuffer */ 448 /* Synchronize with the old framebuffer */
@@ -446,17 +456,17 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
446 goto fail; 456 goto fail;
447 457
448 if (dev_priv->card_type < NV_C0) { 458 if (dev_priv->card_type < NV_C0) {
449 BEGIN_RING(chan, NvSubSw, NV_SW_PAGE_FLIP, 1); 459 BEGIN_NV04(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
450 OUT_RING (chan, 0x00000000); 460 OUT_RING (chan, 0x00000000);
451 OUT_RING (chan, 0x00000000); 461 OUT_RING (chan, 0x00000000);
452 } else { 462 } else {
453 BEGIN_NVC0(chan, 2, 0, NV10_SUBCHAN_REF_CNT, 1); 463 BEGIN_NVC0(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
454 OUT_RING (chan, ++chan->fence.sequence); 464 OUT_RING (chan, 0);
455 BEGIN_NVC0(chan, 8, 0, NVSW_SUBCHAN_PAGE_FLIP, 0x0000); 465 BEGIN_IMC0(chan, 0, NVSW_SUBCHAN_PAGE_FLIP, 0x0000);
456 } 466 }
457 FIRE_RING (chan); 467 FIRE_RING (chan);
458 468
459 ret = nouveau_fence_new(chan, pfence, true); 469 ret = nouveau_fence_new(chan, pfence);
460 if (ret) 470 if (ret)
461 goto fail; 471 goto fail;
462 472
@@ -477,7 +487,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
477 struct nouveau_bo *old_bo = nouveau_framebuffer(crtc->fb)->nvbo; 487 struct nouveau_bo *old_bo = nouveau_framebuffer(crtc->fb)->nvbo;
478 struct nouveau_bo *new_bo = nouveau_framebuffer(fb)->nvbo; 488 struct nouveau_bo *new_bo = nouveau_framebuffer(fb)->nvbo;
479 struct nouveau_page_flip_state *s; 489 struct nouveau_page_flip_state *s;
480 struct nouveau_channel *chan; 490 struct nouveau_channel *chan = NULL;
481 struct nouveau_fence *fence; 491 struct nouveau_fence *fence;
482 int ret; 492 int ret;
483 493
@@ -500,7 +510,9 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
500 new_bo->bo.offset }; 510 new_bo->bo.offset };
501 511
502 /* Choose the channel the flip will be handled in */ 512 /* Choose the channel the flip will be handled in */
503 chan = nouveau_fence_channel(new_bo->bo.sync_obj); 513 fence = new_bo->bo.sync_obj;
514 if (fence)
515 chan = nouveau_channel_get_unlocked(fence->channel);
504 if (!chan) 516 if (!chan)
505 chan = nouveau_channel_get_unlocked(dev_priv->channel); 517 chan = nouveau_channel_get_unlocked(dev_priv->channel);
506 mutex_lock(&chan->mutex); 518 mutex_lock(&chan->mutex);
@@ -540,20 +552,20 @@ int
540nouveau_finish_page_flip(struct nouveau_channel *chan, 552nouveau_finish_page_flip(struct nouveau_channel *chan,
541 struct nouveau_page_flip_state *ps) 553 struct nouveau_page_flip_state *ps)
542{ 554{
555 struct nouveau_software_chan *swch = chan->engctx[NVOBJ_ENGINE_SW];
543 struct drm_device *dev = chan->dev; 556 struct drm_device *dev = chan->dev;
544 struct nouveau_page_flip_state *s; 557 struct nouveau_page_flip_state *s;
545 unsigned long flags; 558 unsigned long flags;
546 559
547 spin_lock_irqsave(&dev->event_lock, flags); 560 spin_lock_irqsave(&dev->event_lock, flags);
548 561
549 if (list_empty(&chan->nvsw.flip)) { 562 if (list_empty(&swch->flip)) {
550 NV_ERROR(dev, "Unexpected pageflip in channel %d.\n", chan->id); 563 NV_ERROR(dev, "Unexpected pageflip in channel %d.\n", chan->id);
551 spin_unlock_irqrestore(&dev->event_lock, flags); 564 spin_unlock_irqrestore(&dev->event_lock, flags);
552 return -EINVAL; 565 return -EINVAL;
553 } 566 }
554 567
555 s = list_first_entry(&chan->nvsw.flip, 568 s = list_first_entry(&swch->flip, struct nouveau_page_flip_state, head);
556 struct nouveau_page_flip_state, head);
557 if (s->event) { 569 if (s->event) {
558 struct drm_pending_vblank_event *e = s->event; 570 struct drm_pending_vblank_event *e = s->event;
559 struct timeval now; 571 struct timeval now;
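
Note: the display hunks move per-channel page-flip state into the software engine's context and, in nouveau_display_create, replace bare early returns with a goto-based unwind so partially completed setup is torn down in reverse order. A compile-and-run sketch of that unwind shape, with invented step_*/undo_* names standing in for the real calls:

#include <stdio.h>

static int step_disp(void)   { return 0; }
static int step_vblank(void) { return -1; /* force the error path for the demo */ }
static void undo_disp(void)  { puts("disp->destroy()"); }
static void undo_mode(void)  { puts("poll_fini + mode_config_cleanup"); }

static int display_create(void)
{
    int ret = step_disp();
    if (ret)
        goto disp_create_err;

    ret = step_vblank();
    if (ret)
        goto vblank_err;

    return 0;

vblank_err:
    undo_disp();            /* undo only what already succeeded, in reverse */
disp_create_err:
    undo_mode();
    return ret;
}

int main(void) { return display_create() ? 1 : 0; }
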
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index 23d4edf992b7..8db68be9544f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -48,12 +48,12 @@ void nv50_dma_push(struct nouveau_channel *, struct nouveau_bo *,
48 48
49/* Hardcoded object assignments to subchannels (subchannel id). */ 49/* Hardcoded object assignments to subchannels (subchannel id). */
50enum { 50enum {
51 NvSubM2MF = 0, 51 NvSubCtxSurf2D = 0,
52 NvSubSw = 1, 52 NvSubSw = 1,
53 NvSub2D = 2, 53 NvSubImageBlit = 2,
54 NvSubCtxSurf2D = 2, 54 NvSub2D = 3,
55 NvSubGdiRect = 3, 55 NvSubGdiRect = 3,
56 NvSubImageBlit = 4 56 NvSubCopy = 4,
57}; 57};
58 58
59/* Object handles. */ 59/* Object handles. */
@@ -73,6 +73,7 @@ enum {
73 NvSema = 0x8000000f, 73 NvSema = 0x8000000f,
74 NvEvoSema0 = 0x80000010, 74 NvEvoSema0 = 0x80000010,
75 NvEvoSema1 = 0x80000011, 75 NvEvoSema1 = 0x80000011,
76 NvNotify1 = 0x80000012,
76 77
77 /* G80+ display objects */ 78 /* G80+ display objects */
78 NvEvoVRAM = 0x01000000, 79 NvEvoVRAM = 0x01000000,
@@ -127,15 +128,33 @@ extern void
127OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords); 128OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords);
128 129
129static inline void 130static inline void
130BEGIN_NVC0(struct nouveau_channel *chan, int op, int subc, int mthd, int size) 131BEGIN_NV04(struct nouveau_channel *chan, int subc, int mthd, int size)
131{ 132{
132 OUT_RING(chan, (op << 28) | (size << 16) | (subc << 13) | (mthd >> 2)); 133 OUT_RING(chan, 0x00000000 | (subc << 13) | (size << 18) | mthd);
133} 134}
134 135
135static inline void 136static inline void
136BEGIN_RING(struct nouveau_channel *chan, int subc, int mthd, int size) 137BEGIN_NI04(struct nouveau_channel *chan, int subc, int mthd, int size)
137{ 138{
138 OUT_RING(chan, (subc << 13) | (size << 18) | mthd); 139 OUT_RING(chan, 0x40000000 | (subc << 13) | (size << 18) | mthd);
140}
141
142static inline void
143BEGIN_NVC0(struct nouveau_channel *chan, int subc, int mthd, int size)
144{
145 OUT_RING(chan, 0x20000000 | (size << 16) | (subc << 13) | (mthd >> 2));
146}
147
148static inline void
149BEGIN_NIC0(struct nouveau_channel *chan, int subc, int mthd, int size)
150{
151 OUT_RING(chan, 0x60000000 | (size << 16) | (subc << 13) | (mthd >> 2));
152}
153
154static inline void
155BEGIN_IMC0(struct nouveau_channel *chan, int subc, int mthd, u16 data)
156{
157 OUT_RING(chan, 0x80000000 | (data << 16) | (subc << 13) | (mthd >> 2));
139} 158}
140 159
141#define WRITE_PUT(val) do { \ 160#define WRITE_PUT(val) do { \
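
Note: the rewritten nouveau_dma.h helpers hardcode the pushbuf header opcode per format instead of taking it as a parameter. NV04-style headers keep the size in bits 18+ and the method address in bytes; the Fermi-style NVC0/NIC0/IMC0 headers put the size (or immediate data) in bits 16+ and the method in dwords (mthd >> 2), with bits 31:29 selecting increasing (1), non-increasing (3) or inline-immediate (4) mode. A standalone restatement of those encodings, as read from the diff, that can be compiled and sanity-checked:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t nv04(int subc, int mthd, int size)
{
    return 0x00000000u | (subc << 13) | (size << 18) | mthd;
}
static uint32_t ni04(int subc, int mthd, int size)
{
    return 0x40000000u | (subc << 13) | (size << 18) | mthd;
}
static uint32_t nvc0(int subc, int mthd, int size)
{
    return 0x20000000u | (size << 16) | (subc << 13) | (mthd >> 2);
}
static uint32_t nic0(int subc, int mthd, int size)
{
    return 0x60000000u | (size << 16) | (subc << 13) | (mthd >> 2);
}
static uint32_t imc0(int subc, int mthd, uint16_t data)
{
    return 0x80000000u | ((uint32_t)data << 16) | (subc << 13) | (mthd >> 2);
}

int main(void)
{
    /* the page-flip emit above: BEGIN_NVC0(chan, 0, NV10_SUBCHAN_REF_CNT, 1) */
    uint32_t hdr = nvc0(0, 0x0050, 1);
    assert(hdr == 0x20010014 && (hdr >> 29) == 1);  /* opcode 1: increasing */
    assert((nv04(1, 0x0100, 1) >> 29) == 0);        /* plain NV04 header */
    assert((ni04(1, 0x0100, 2) >> 30) == 1);        /* 0x4xxxxxxx: non-inc NV04 */
    assert((nic0(0, 0x0100, 2) >> 29) == 3);        /* opcode 3: non-increasing */
    assert((imc0(0, 0x0080, 7) >> 29) == 4);        /* opcode 4: inline immediate */
    printf("NVC0 header: 0x%08x\n", (unsigned)hdr);
    return 0;
}
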
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index d996134b1b28..7e289d2ad8e4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -510,6 +510,25 @@ nouveau_dp_dpms(struct drm_encoder *encoder, int mode, u32 datarate,
510 nouveau_dp_link_train(encoder, datarate, func); 510 nouveau_dp_link_train(encoder, datarate, func);
511} 511}
512 512
513static void
514nouveau_dp_probe_oui(struct drm_device *dev, struct nouveau_i2c_chan *auxch,
515 u8 *dpcd)
516{
517 u8 buf[3];
518
519 if (!(dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
520 return;
521
522 if (!auxch_tx(dev, auxch->drive, 9, DP_SINK_OUI, buf, 3))
523 NV_DEBUG_KMS(dev, "Sink OUI: %02hx%02hx%02hx\n",
524 buf[0], buf[1], buf[2]);
525
526 if (!auxch_tx(dev, auxch->drive, 9, DP_BRANCH_OUI, buf, 3))
527 NV_DEBUG_KMS(dev, "Branch OUI: %02hx%02hx%02hx\n",
528 buf[0], buf[1], buf[2]);
529
530}
531
513bool 532bool
514nouveau_dp_detect(struct drm_encoder *encoder) 533nouveau_dp_detect(struct drm_encoder *encoder)
515{ 534{
@@ -544,6 +563,8 @@ nouveau_dp_detect(struct drm_encoder *encoder)
544 NV_DEBUG_KMS(dev, "maximum: %dx%d\n", 563 NV_DEBUG_KMS(dev, "maximum: %dx%d\n",
545 nv_encoder->dp.link_nr, nv_encoder->dp.link_bw); 564 nv_encoder->dp.link_nr, nv_encoder->dp.link_bw);
546 565
566 nouveau_dp_probe_oui(dev, auxch, dpcd);
567
547 return true; 568 return true;
548} 569}
549 570
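
Note: nouveau_dp_probe_oui reads the three-byte sink and branch OUIs over the AUX channel and logs them during detection. For reference, a tiny sketch of how such a buffer formats into the logged identifier (the byte values here are made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint8_t buf[3] = { 0x00, 0x10, 0xfa };  /* hypothetical sink OUI bytes */
    printf("Sink OUI: %02x%02x%02x\n", buf[0], buf[1], buf[2]);
    return 0;
}
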
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 4f2030bd5676..cad254c8e387 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -33,6 +33,7 @@
33#include "nouveau_fb.h" 33#include "nouveau_fb.h"
34#include "nouveau_fbcon.h" 34#include "nouveau_fbcon.h"
35#include "nouveau_pm.h" 35#include "nouveau_pm.h"
36#include "nouveau_fifo.h"
36#include "nv50_display.h" 37#include "nv50_display.h"
37 38
38#include "drm_pciids.h" 39#include "drm_pciids.h"
@@ -175,7 +176,7 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
175 struct drm_device *dev = pci_get_drvdata(pdev); 176 struct drm_device *dev = pci_get_drvdata(pdev);
176 struct drm_nouveau_private *dev_priv = dev->dev_private; 177 struct drm_nouveau_private *dev_priv = dev->dev_private;
177 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; 178 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
178 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; 179 struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
179 struct nouveau_channel *chan; 180 struct nouveau_channel *chan;
180 struct drm_crtc *crtc; 181 struct drm_crtc *crtc;
181 int ret, i, e; 182 int ret, i, e;
@@ -214,17 +215,13 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
214 ttm_bo_evict_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM); 215 ttm_bo_evict_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
215 216
216 NV_INFO(dev, "Idling channels...\n"); 217 NV_INFO(dev, "Idling channels...\n");
217 for (i = 0; i < pfifo->channels; i++) { 218 for (i = 0; i < (pfifo ? pfifo->channels : 0); i++) {
218 chan = dev_priv->channels.ptr[i]; 219 chan = dev_priv->channels.ptr[i];
219 220
220 if (chan && chan->pushbuf_bo) 221 if (chan && chan->pushbuf_bo)
221 nouveau_channel_idle(chan); 222 nouveau_channel_idle(chan);
222 } 223 }
223 224
224 pfifo->reassign(dev, false);
225 pfifo->disable(dev);
226 pfifo->unload_context(dev);
227
228 for (e = NVOBJ_ENGINE_NR - 1; e >= 0; e--) { 225 for (e = NVOBJ_ENGINE_NR - 1; e >= 0; e--) {
229 if (!dev_priv->eng[e]) 226 if (!dev_priv->eng[e])
230 continue; 227 continue;
@@ -265,8 +262,6 @@ out_abort:
265 if (dev_priv->eng[e]) 262 if (dev_priv->eng[e])
266 dev_priv->eng[e]->init(dev, e); 263 dev_priv->eng[e]->init(dev, e);
267 } 264 }
268 pfifo->enable(dev);
269 pfifo->reassign(dev, true);
270 return ret; 265 return ret;
271} 266}
272 267
@@ -274,6 +269,7 @@ int
274nouveau_pci_resume(struct pci_dev *pdev) 269nouveau_pci_resume(struct pci_dev *pdev)
275{ 270{
276 struct drm_device *dev = pci_get_drvdata(pdev); 271 struct drm_device *dev = pci_get_drvdata(pdev);
272 struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
277 struct drm_nouveau_private *dev_priv = dev->dev_private; 273 struct drm_nouveau_private *dev_priv = dev->dev_private;
278 struct nouveau_engine *engine = &dev_priv->engine; 274 struct nouveau_engine *engine = &dev_priv->engine;
279 struct drm_crtc *crtc; 275 struct drm_crtc *crtc;
@@ -321,7 +317,6 @@ nouveau_pci_resume(struct pci_dev *pdev)
321 if (dev_priv->eng[i]) 317 if (dev_priv->eng[i])
322 dev_priv->eng[i]->init(dev, i); 318 dev_priv->eng[i]->init(dev, i);
323 } 319 }
324 engine->fifo.init(dev);
325 320
326 nouveau_irq_postinstall(dev); 321 nouveau_irq_postinstall(dev);
327 322
@@ -330,7 +325,7 @@ nouveau_pci_resume(struct pci_dev *pdev)
330 struct nouveau_channel *chan; 325 struct nouveau_channel *chan;
331 int j; 326 int j;
332 327
333 for (i = 0; i < dev_priv->engine.fifo.channels; i++) { 328 for (i = 0; i < (pfifo ? pfifo->channels : 0); i++) {
334 chan = dev_priv->channels.ptr[i]; 329 chan = dev_priv->channels.ptr[i];
335 if (!chan || !chan->pushbuf_bo) 330 if (!chan || !chan->pushbuf_bo)
336 continue; 331 continue;
@@ -408,7 +403,7 @@ static struct drm_driver driver = {
408 .driver_features = 403 .driver_features =
409 DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG | 404 DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
410 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | 405 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
411 DRIVER_MODESET, 406 DRIVER_MODESET | DRIVER_PRIME,
412 .load = nouveau_load, 407 .load = nouveau_load,
413 .firstopen = nouveau_firstopen, 408 .firstopen = nouveau_firstopen,
414 .lastclose = nouveau_lastclose, 409 .lastclose = nouveau_lastclose,
@@ -430,6 +425,12 @@ static struct drm_driver driver = {
430 .reclaim_buffers = drm_core_reclaim_buffers, 425 .reclaim_buffers = drm_core_reclaim_buffers,
431 .ioctls = nouveau_ioctls, 426 .ioctls = nouveau_ioctls,
432 .fops = &nouveau_driver_fops, 427 .fops = &nouveau_driver_fops,
428
429 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
430 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
431 .gem_prime_export = nouveau_gem_prime_export,
432 .gem_prime_import = nouveau_gem_prime_import,
433
433 .gem_init_object = nouveau_gem_object_new, 434 .gem_init_object = nouveau_gem_object_new,
434 .gem_free_object = nouveau_gem_object_del, 435 .gem_free_object = nouveau_gem_object_del,
435 .gem_open_object = nouveau_gem_object_open, 436 .gem_open_object = nouveau_gem_object_open,
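
Note: the driver now advertises DRIVER_PRIME and wires the four prime hooks: the generic DRM fd<->handle helpers plus nouveau-specific export/import; the suspend/resume loops also start guarding against a missing FIFO engine via (pfifo ? pfifo->channels : 0). A compile-only sketch of the wiring pattern, with stub types standing in for struct drm_driver and the DRM helpers:

#include <stddef.h>

#define DRIVER_MODESET (1 << 0)
#define DRIVER_PRIME   (1 << 1)

struct drm_driver_stub {
    unsigned driver_features;
    int   (*prime_handle_to_fd)(void);
    int   (*prime_fd_to_handle)(void);
    void *(*gem_prime_export)(void);
    void *(*gem_prime_import)(void);
};

static int   generic_handle_to_fd(void) { return 0; }
static int   generic_fd_to_handle(void) { return 0; }
static void *driver_export(void)        { return NULL; }
static void *driver_import(void)        { return NULL; }

static struct drm_driver_stub driver = {
    .driver_features    = DRIVER_MODESET | DRIVER_PRIME,
    .prime_handle_to_fd = generic_handle_to_fd, /* drm_gem_prime_handle_to_fd in the real driver */
    .prime_fd_to_handle = generic_fd_to_handle, /* drm_gem_prime_fd_to_handle */
    .gem_prime_export   = driver_export,        /* nouveau_gem_prime_export */
    .gem_prime_import   = driver_import,        /* nouveau_gem_prime_import */
};

int main(void) { return (driver.driver_features & DRIVER_PRIME) ? 0 : 1; }
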
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 3aef353a926c..634d222c93de 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -70,7 +70,7 @@ struct nouveau_mem;
70 70
71#define MAX_NUM_DCB_ENTRIES 16 71#define MAX_NUM_DCB_ENTRIES 16
72 72
73#define NOUVEAU_MAX_CHANNEL_NR 128 73#define NOUVEAU_MAX_CHANNEL_NR 4096
74#define NOUVEAU_MAX_TILE_NR 15 74#define NOUVEAU_MAX_TILE_NR 15
75 75
76struct nouveau_mem { 76struct nouveau_mem {
@@ -86,6 +86,7 @@ struct nouveau_mem {
86 u32 memtype; 86 u32 memtype;
87 u64 offset; 87 u64 offset;
88 u64 size; 88 u64 size;
89 struct sg_table *sg;
89}; 90};
90 91
91struct nouveau_tile_reg { 92struct nouveau_tile_reg {
@@ -164,8 +165,10 @@ enum nouveau_flags {
164#define NVOBJ_ENGINE_PPP NVOBJ_ENGINE_MPEG 165#define NVOBJ_ENGINE_PPP NVOBJ_ENGINE_MPEG
165#define NVOBJ_ENGINE_BSP 6 166#define NVOBJ_ENGINE_BSP 6
166#define NVOBJ_ENGINE_VP 7 167#define NVOBJ_ENGINE_VP 7
167#define NVOBJ_ENGINE_DISPLAY 15 168#define NVOBJ_ENGINE_FIFO 14
169#define NVOBJ_ENGINE_FENCE 15
168#define NVOBJ_ENGINE_NR 16 170#define NVOBJ_ENGINE_NR 16
171#define NVOBJ_ENGINE_DISPLAY (NVOBJ_ENGINE_NR + 0) /*XXX*/
169 172
170#define NVOBJ_FLAG_DONT_MAP (1 << 0) 173#define NVOBJ_FLAG_DONT_MAP (1 << 0)
171#define NVOBJ_FLAG_ZERO_ALLOC (1 << 1) 174#define NVOBJ_FLAG_ZERO_ALLOC (1 << 1)
@@ -233,17 +236,6 @@ struct nouveau_channel {
233 uint32_t user_get_hi; 236 uint32_t user_get_hi;
234 uint32_t user_put; 237 uint32_t user_put;
235 238
236 /* Fencing */
237 struct {
238 /* lock protects the pending list only */
239 spinlock_t lock;
240 struct list_head pending;
241 uint32_t sequence;
242 uint32_t sequence_ack;
243 atomic_t last_sequence_irq;
244 struct nouveau_vma vma;
245 } fence;
246
247 /* DMA push buffer */ 239 /* DMA push buffer */
248 struct nouveau_gpuobj *pushbuf; 240 struct nouveau_gpuobj *pushbuf;
249 struct nouveau_bo *pushbuf_bo; 241 struct nouveau_bo *pushbuf_bo;
@@ -257,8 +249,6 @@ struct nouveau_channel {
257 249
258 /* PFIFO context */ 250 /* PFIFO context */
259 struct nouveau_gpuobj *ramfc; 251 struct nouveau_gpuobj *ramfc;
260 struct nouveau_gpuobj *cache;
261 void *fifo_priv;
262 252
263 /* Execution engine contexts */ 253 /* Execution engine contexts */
264 void *engctx[NVOBJ_ENGINE_NR]; 254 void *engctx[NVOBJ_ENGINE_NR];
@@ -292,18 +282,6 @@ struct nouveau_channel {
292 int ib_put; 282 int ib_put;
293 } dma; 283 } dma;
294 284
295 uint32_t sw_subchannel[8];
296
297 struct nouveau_vma dispc_vma[4];
298 struct {
299 struct nouveau_gpuobj *vblsem;
300 uint32_t vblsem_head;
301 uint32_t vblsem_offset;
302 uint32_t vblsem_rval;
303 struct list_head vbl_wait;
304 struct list_head flip;
305 } nvsw;
306
307 struct { 285 struct {
308 bool active; 286 bool active;
309 char name[32]; 287 char name[32];
@@ -366,30 +344,6 @@ struct nouveau_fb_engine {
366 void (*free_tile_region)(struct drm_device *dev, int i); 344 void (*free_tile_region)(struct drm_device *dev, int i);
367}; 345};
368 346
369struct nouveau_fifo_engine {
370 void *priv;
371 int channels;
372
373 struct nouveau_gpuobj *playlist[2];
374 int cur_playlist;
375
376 int (*init)(struct drm_device *);
377 void (*takedown)(struct drm_device *);
378
379 void (*disable)(struct drm_device *);
380 void (*enable)(struct drm_device *);
381 bool (*reassign)(struct drm_device *, bool enable);
382 bool (*cache_pull)(struct drm_device *dev, bool enable);
383
384 int (*channel_id)(struct drm_device *);
385
386 int (*create_context)(struct nouveau_channel *);
387 void (*destroy_context)(struct nouveau_channel *);
388 int (*load_context)(struct nouveau_channel *);
389 int (*unload_context)(struct drm_device *);
390 void (*tlb_flush)(struct drm_device *dev);
391};
392
393struct nouveau_display_engine { 347struct nouveau_display_engine {
394 void *priv; 348 void *priv;
395 int (*early_init)(struct drm_device *); 349 int (*early_init)(struct drm_device *);
@@ -597,7 +551,6 @@ struct nouveau_engine {
597 struct nouveau_mc_engine mc; 551 struct nouveau_mc_engine mc;
598 struct nouveau_timer_engine timer; 552 struct nouveau_timer_engine timer;
599 struct nouveau_fb_engine fb; 553 struct nouveau_fb_engine fb;
600 struct nouveau_fifo_engine fifo;
601 struct nouveau_display_engine display; 554 struct nouveau_display_engine display;
602 struct nouveau_gpio_engine gpio; 555 struct nouveau_gpio_engine gpio;
603 struct nouveau_pm_engine pm; 556 struct nouveau_pm_engine pm;
@@ -740,6 +693,9 @@ struct drm_nouveau_private {
740 struct ttm_bo_global_ref bo_global_ref; 693 struct ttm_bo_global_ref bo_global_ref;
741 struct ttm_bo_device bdev; 694 struct ttm_bo_device bdev;
742 atomic_t validate_sequence; 695 atomic_t validate_sequence;
696 int (*move)(struct nouveau_channel *,
697 struct ttm_buffer_object *,
698 struct ttm_mem_reg *, struct ttm_mem_reg *);
743 } ttm; 699 } ttm;
744 700
745 struct { 701 struct {
@@ -977,7 +933,7 @@ extern void nouveau_channel_put_unlocked(struct nouveau_channel **);
977extern void nouveau_channel_put(struct nouveau_channel **); 933extern void nouveau_channel_put(struct nouveau_channel **);
978extern void nouveau_channel_ref(struct nouveau_channel *chan, 934extern void nouveau_channel_ref(struct nouveau_channel *chan,
979 struct nouveau_channel **pchan); 935 struct nouveau_channel **pchan);
980extern void nouveau_channel_idle(struct nouveau_channel *chan); 936extern int nouveau_channel_idle(struct nouveau_channel *chan);
981 937
982/* nouveau_object.c */ 938/* nouveau_object.c */
983#define NVOBJ_ENGINE_ADD(d, e, p) do { \ 939#define NVOBJ_ENGINE_ADD(d, e, p) do { \
@@ -1209,56 +1165,6 @@ extern void nv50_fb_vm_trap(struct drm_device *, int display);
1209extern int nvc0_fb_init(struct drm_device *); 1165extern int nvc0_fb_init(struct drm_device *);
1210extern void nvc0_fb_takedown(struct drm_device *); 1166extern void nvc0_fb_takedown(struct drm_device *);
1211 1167
1212/* nv04_fifo.c */
1213extern int nv04_fifo_init(struct drm_device *);
1214extern void nv04_fifo_fini(struct drm_device *);
1215extern void nv04_fifo_disable(struct drm_device *);
1216extern void nv04_fifo_enable(struct drm_device *);
1217extern bool nv04_fifo_reassign(struct drm_device *, bool);
1218extern bool nv04_fifo_cache_pull(struct drm_device *, bool);
1219extern int nv04_fifo_channel_id(struct drm_device *);
1220extern int nv04_fifo_create_context(struct nouveau_channel *);
1221extern void nv04_fifo_destroy_context(struct nouveau_channel *);
1222extern int nv04_fifo_load_context(struct nouveau_channel *);
1223extern int nv04_fifo_unload_context(struct drm_device *);
1224extern void nv04_fifo_isr(struct drm_device *);
1225
1226/* nv10_fifo.c */
1227extern int nv10_fifo_init(struct drm_device *);
1228extern int nv10_fifo_channel_id(struct drm_device *);
1229extern int nv10_fifo_create_context(struct nouveau_channel *);
1230extern int nv10_fifo_load_context(struct nouveau_channel *);
1231extern int nv10_fifo_unload_context(struct drm_device *);
1232
1233/* nv40_fifo.c */
1234extern int nv40_fifo_init(struct drm_device *);
1235extern int nv40_fifo_create_context(struct nouveau_channel *);
1236extern int nv40_fifo_load_context(struct nouveau_channel *);
1237extern int nv40_fifo_unload_context(struct drm_device *);
1238
1239/* nv50_fifo.c */
1240extern int nv50_fifo_init(struct drm_device *);
1241extern void nv50_fifo_takedown(struct drm_device *);
1242extern int nv50_fifo_channel_id(struct drm_device *);
1243extern int nv50_fifo_create_context(struct nouveau_channel *);
1244extern void nv50_fifo_destroy_context(struct nouveau_channel *);
1245extern int nv50_fifo_load_context(struct nouveau_channel *);
1246extern int nv50_fifo_unload_context(struct drm_device *);
1247extern void nv50_fifo_tlb_flush(struct drm_device *dev);
1248
1249/* nvc0_fifo.c */
1250extern int nvc0_fifo_init(struct drm_device *);
1251extern void nvc0_fifo_takedown(struct drm_device *);
1252extern void nvc0_fifo_disable(struct drm_device *);
1253extern void nvc0_fifo_enable(struct drm_device *);
1254extern bool nvc0_fifo_reassign(struct drm_device *, bool);
1255extern bool nvc0_fifo_cache_pull(struct drm_device *, bool);
1256extern int nvc0_fifo_channel_id(struct drm_device *);
1257extern int nvc0_fifo_create_context(struct nouveau_channel *);
1258extern void nvc0_fifo_destroy_context(struct nouveau_channel *);
1259extern int nvc0_fifo_load_context(struct nouveau_channel *);
1260extern int nvc0_fifo_unload_context(struct drm_device *);
1261
1262/* nv04_graph.c */ 1168/* nv04_graph.c */
1263extern int nv04_graph_create(struct drm_device *); 1169extern int nv04_graph_create(struct drm_device *);
1264extern int nv04_graph_object_new(struct nouveau_channel *, int, u32, u16); 1170extern int nv04_graph_object_new(struct nouveau_channel *, int, u32, u16);
@@ -1277,18 +1183,23 @@ extern int nv20_graph_create(struct drm_device *);
1277 1183
1278/* nv40_graph.c */ 1184/* nv40_graph.c */
1279extern int nv40_graph_create(struct drm_device *); 1185extern int nv40_graph_create(struct drm_device *);
1280extern void nv40_grctx_init(struct nouveau_grctx *); 1186extern void nv40_grctx_init(struct drm_device *, u32 *size);
1187extern void nv40_grctx_fill(struct drm_device *, struct nouveau_gpuobj *);
1281 1188
1282/* nv50_graph.c */ 1189/* nv50_graph.c */
1283extern int nv50_graph_create(struct drm_device *); 1190extern int nv50_graph_create(struct drm_device *);
1284extern int nv50_grctx_init(struct nouveau_grctx *);
1285extern struct nouveau_enum nv50_data_error_names[]; 1191extern struct nouveau_enum nv50_data_error_names[];
1286extern int nv50_graph_isr_chid(struct drm_device *dev, u64 inst); 1192extern int nv50_graph_isr_chid(struct drm_device *dev, u64 inst);
1193extern int nv50_grctx_init(struct drm_device *, u32 *, u32, u32 *, u32 *);
1194extern void nv50_grctx_fill(struct drm_device *, struct nouveau_gpuobj *);
1287 1195
1288/* nvc0_graph.c */ 1196/* nvc0_graph.c */
1289extern int nvc0_graph_create(struct drm_device *); 1197extern int nvc0_graph_create(struct drm_device *);
1290extern int nvc0_graph_isr_chid(struct drm_device *dev, u64 inst); 1198extern int nvc0_graph_isr_chid(struct drm_device *dev, u64 inst);
1291 1199
1200/* nve0_graph.c */
1201extern int nve0_graph_create(struct drm_device *);
1202
1292/* nv84_crypt.c */ 1203/* nv84_crypt.c */
1293extern int nv84_crypt_create(struct drm_device *); 1204extern int nv84_crypt_create(struct drm_device *);
1294 1205
@@ -1414,9 +1325,12 @@ extern int nv04_crtc_create(struct drm_device *, int index);
1414 1325
1415/* nouveau_bo.c */ 1326/* nouveau_bo.c */
1416extern struct ttm_bo_driver nouveau_bo_driver; 1327extern struct ttm_bo_driver nouveau_bo_driver;
1328extern void nouveau_bo_move_init(struct nouveau_channel *);
1417extern int nouveau_bo_new(struct drm_device *, int size, int align, 1329extern int nouveau_bo_new(struct drm_device *, int size, int align,
1418 uint32_t flags, uint32_t tile_mode, 1330 uint32_t flags, uint32_t tile_mode,
1419 uint32_t tile_flags, struct nouveau_bo **); 1331 uint32_t tile_flags,
1332 struct sg_table *sg,
1333 struct nouveau_bo **);
1420extern int nouveau_bo_pin(struct nouveau_bo *, uint32_t flags); 1334extern int nouveau_bo_pin(struct nouveau_bo *, uint32_t flags);
1421extern int nouveau_bo_unpin(struct nouveau_bo *); 1335extern int nouveau_bo_unpin(struct nouveau_bo *);
1422extern int nouveau_bo_map(struct nouveau_bo *); 1336extern int nouveau_bo_map(struct nouveau_bo *);
@@ -1437,50 +1351,6 @@ extern int nouveau_bo_vma_add(struct nouveau_bo *, struct nouveau_vm *,
1437 struct nouveau_vma *); 1351 struct nouveau_vma *);
1438extern void nouveau_bo_vma_del(struct nouveau_bo *, struct nouveau_vma *); 1352extern void nouveau_bo_vma_del(struct nouveau_bo *, struct nouveau_vma *);
1439 1353
1440/* nouveau_fence.c */
1441struct nouveau_fence;
1442extern int nouveau_fence_init(struct drm_device *);
1443extern void nouveau_fence_fini(struct drm_device *);
1444extern int nouveau_fence_channel_init(struct nouveau_channel *);
1445extern void nouveau_fence_channel_fini(struct nouveau_channel *);
1446extern void nouveau_fence_update(struct nouveau_channel *);
1447extern int nouveau_fence_new(struct nouveau_channel *, struct nouveau_fence **,
1448 bool emit);
1449extern int nouveau_fence_emit(struct nouveau_fence *);
1450extern void nouveau_fence_work(struct nouveau_fence *fence,
1451 void (*work)(void *priv, bool signalled),
1452 void *priv);
1453struct nouveau_channel *nouveau_fence_channel(struct nouveau_fence *);
1454
1455extern bool __nouveau_fence_signalled(void *obj, void *arg);
1456extern int __nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr);
1457extern int __nouveau_fence_flush(void *obj, void *arg);
1458extern void __nouveau_fence_unref(void **obj);
1459extern void *__nouveau_fence_ref(void *obj);
1460
1461static inline bool nouveau_fence_signalled(struct nouveau_fence *obj)
1462{
1463 return __nouveau_fence_signalled(obj, NULL);
1464}
1465static inline int
1466nouveau_fence_wait(struct nouveau_fence *obj, bool lazy, bool intr)
1467{
1468 return __nouveau_fence_wait(obj, NULL, lazy, intr);
1469}
1470extern int nouveau_fence_sync(struct nouveau_fence *, struct nouveau_channel *);
1471static inline int nouveau_fence_flush(struct nouveau_fence *obj)
1472{
1473 return __nouveau_fence_flush(obj, NULL);
1474}
1475static inline void nouveau_fence_unref(struct nouveau_fence **obj)
1476{
1477 __nouveau_fence_unref((void **)obj);
1478}
1479static inline struct nouveau_fence *nouveau_fence_ref(struct nouveau_fence *obj)
1480{
1481 return __nouveau_fence_ref(obj);
1482}
1483
1484/* nouveau_gem.c */ 1354/* nouveau_gem.c */
1485extern int nouveau_gem_new(struct drm_device *, int size, int align, 1355extern int nouveau_gem_new(struct drm_device *, int size, int align,
1486 uint32_t domain, uint32_t tile_mode, 1356 uint32_t domain, uint32_t tile_mode,
@@ -1501,6 +1371,11 @@ extern int nouveau_gem_ioctl_cpu_fini(struct drm_device *, void *,
1501extern int nouveau_gem_ioctl_info(struct drm_device *, void *, 1371extern int nouveau_gem_ioctl_info(struct drm_device *, void *,
1502 struct drm_file *); 1372 struct drm_file *);
1503 1373
1374extern struct dma_buf *nouveau_gem_prime_export(struct drm_device *dev,
1375 struct drm_gem_object *obj, int flags);
1376extern struct drm_gem_object *nouveau_gem_prime_import(struct drm_device *dev,
1377 struct dma_buf *dma_buf);
1378
1504/* nouveau_display.c */ 1379/* nouveau_display.c */
1505int nouveau_display_create(struct drm_device *dev); 1380int nouveau_display_create(struct drm_device *dev);
1506void nouveau_display_destroy(struct drm_device *dev); 1381void nouveau_display_destroy(struct drm_device *dev);
@@ -1772,6 +1647,7 @@ nv44_graph_class(struct drm_device *dev)
1772#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL 0x00000001 1647#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL 0x00000001
1773#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG 0x00000002 1648#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG 0x00000002
1774#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL 0x00000004 1649#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL 0x00000004
1650#define NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD 0x00001000
1775#define NV84_SUBCHAN_NOTIFY_INTR 0x00000020 1651#define NV84_SUBCHAN_NOTIFY_INTR 0x00000020
1776#define NV84_SUBCHAN_WRCACHE_FLUSH 0x00000024 1652#define NV84_SUBCHAN_WRCACHE_FLUSH 0x00000024
1777#define NV10_SUBCHAN_REF_CNT 0x00000050 1653#define NV10_SUBCHAN_REF_CNT 0x00000050
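
Note: in nouveau_drv.h the FIFO and fence code stop being special-cased structs and become ordinary engine slots (NVOBJ_ENGINE_FIFO = 14, NVOBJ_ENGINE_FENCE = 15) fetched through nv_engine(), which may return NULL before initialisation, hence the new guards elsewhere in this series. A runnable sketch of that slot-array dispatch, with invented stand-ins for dev_priv->eng[]:

#include <stdio.h>
#include <stddef.h>

enum { ENGINE_FIFO = 14, ENGINE_FENCE = 15, ENGINE_NR = 16 };

struct engine_ops { const char *name; };

static struct engine_ops fifo_ops  = { "fifo"  };
static struct engine_ops fence_ops = { "fence" };

static struct engine_ops *eng[ENGINE_NR] = {
    [ENGINE_FIFO]  = &fifo_ops,
    [ENGINE_FENCE] = &fence_ops,
};

static struct engine_ops *nv_engine_stub(int slot) { return eng[slot]; }

int main(void)
{
    struct engine_ops *pfifo = nv_engine_stub(ENGINE_FIFO);
    if (pfifo)  /* may be NULL before init, hence the new guards */
        printf("got %s engine\n", pfifo->name);
    return 0;
}
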
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 8113e9201ed9..153b9a15469b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -153,7 +153,7 @@ nouveau_fbcon_sync(struct fb_info *info)
153 struct drm_device *dev = nfbdev->dev; 153 struct drm_device *dev = nfbdev->dev;
154 struct drm_nouveau_private *dev_priv = dev->dev_private; 154 struct drm_nouveau_private *dev_priv = dev->dev_private;
155 struct nouveau_channel *chan = dev_priv->channel; 155 struct nouveau_channel *chan = dev_priv->channel;
156 int ret, i; 156 int ret;
157 157
158 if (!chan || !chan->accel_done || in_interrupt() || 158 if (!chan || !chan->accel_done || in_interrupt() ||
159 info->state != FBINFO_STATE_RUNNING || 159 info->state != FBINFO_STATE_RUNNING ||
@@ -163,38 +163,8 @@ nouveau_fbcon_sync(struct fb_info *info)
163 if (!mutex_trylock(&chan->mutex)) 163 if (!mutex_trylock(&chan->mutex))
164 return 0; 164 return 0;
165 165
166 ret = RING_SPACE(chan, 4); 166 ret = nouveau_channel_idle(chan);
167 if (ret) {
168 mutex_unlock(&chan->mutex);
169 nouveau_fbcon_gpu_lockup(info);
170 return 0;
171 }
172
173 if (dev_priv->card_type >= NV_C0) {
174 BEGIN_NVC0(chan, 2, NvSub2D, 0x010c, 1);
175 OUT_RING (chan, 0);
176 BEGIN_NVC0(chan, 2, NvSub2D, 0x0100, 1);
177 OUT_RING (chan, 0);
178 } else {
179 BEGIN_RING(chan, 0, 0x0104, 1);
180 OUT_RING (chan, 0);
181 BEGIN_RING(chan, 0, 0x0100, 1);
182 OUT_RING (chan, 0);
183 }
184
185 nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy/4 + 3, 0xffffffff);
186 FIRE_RING(chan);
187 mutex_unlock(&chan->mutex); 167 mutex_unlock(&chan->mutex);
188
189 ret = -EBUSY;
190 for (i = 0; i < 100000; i++) {
191 if (!nouveau_bo_rd32(chan->notifier_bo, chan->m2mf_ntfy/4 + 3)) {
192 ret = 0;
193 break;
194 }
195 DRM_UDELAY(1);
196 }
197
198 if (ret) { 168 if (ret) {
199 nouveau_fbcon_gpu_lockup(info); 169 nouveau_fbcon_gpu_lockup(info);
200 return 0; 170 return 0;
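
Note: nouveau_fbcon_sync drops the hand-rolled notifier poll in favour of nouveau_channel_idle(), but it still refuses to sleep on the channel lock. A sketch of that trylock-or-bail idiom, with a plain pthread mutex standing in for chan->mutex:

#include <pthread.h>

static pthread_mutex_t chan_mutex = PTHREAD_MUTEX_INITIALIZER;

static int fbcon_sync(void)
{
    if (pthread_mutex_trylock(&chan_mutex) != 0)
        return 0;   /* contended: skip this sync, try again later */
    /* ... nouveau_channel_idle(chan) would run here ... */
    pthread_mutex_unlock(&chan_mutex);
    return 0;
}

int main(void) { return fbcon_sync(); }
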
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index c1dc20f6cb85..3c180493dab8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -32,220 +32,100 @@
32 32
33#include "nouveau_drv.h" 33#include "nouveau_drv.h"
34#include "nouveau_ramht.h" 34#include "nouveau_ramht.h"
35#include "nouveau_fence.h"
36#include "nouveau_software.h"
35#include "nouveau_dma.h" 37#include "nouveau_dma.h"
36 38
37#define USE_REFCNT(dev) (nouveau_private(dev)->chipset >= 0x10) 39void
38#define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17) 40nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
39
40struct nouveau_fence {
41 struct nouveau_channel *channel;
42 struct kref refcount;
43 struct list_head entry;
44
45 uint32_t sequence;
46 bool signalled;
47
48 void (*work)(void *priv, bool signalled);
49 void *priv;
50};
51
52struct nouveau_semaphore {
53 struct kref ref;
54 struct drm_device *dev;
55 struct drm_mm_node *mem;
56};
57
58static inline struct nouveau_fence *
59nouveau_fence(void *sync_obj)
60{ 41{
61 return (struct nouveau_fence *)sync_obj; 42 struct nouveau_fence *fence, *fnext;
43 spin_lock(&fctx->lock);
44 list_for_each_entry_safe(fence, fnext, &fctx->pending, head) {
45 if (fence->work)
46 fence->work(fence->priv, false);
47 fence->channel = NULL;
48 list_del(&fence->head);
49 nouveau_fence_unref(&fence);
50 }
51 spin_unlock(&fctx->lock);
62} 52}
63 53
64static void 54void
65nouveau_fence_del(struct kref *ref) 55nouveau_fence_context_new(struct nouveau_fence_chan *fctx)
66{ 56{
67 struct nouveau_fence *fence = 57 INIT_LIST_HEAD(&fctx->pending);
68 container_of(ref, struct nouveau_fence, refcount); 58 spin_lock_init(&fctx->lock);
69
70 nouveau_channel_ref(NULL, &fence->channel);
71 kfree(fence);
72} 59}
73 60
74void 61void
75nouveau_fence_update(struct nouveau_channel *chan) 62nouveau_fence_update(struct nouveau_channel *chan)
76{ 63{
77 struct drm_device *dev = chan->dev; 64 struct drm_device *dev = chan->dev;
78 struct nouveau_fence *tmp, *fence; 65 struct nouveau_fence_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FENCE);
79 uint32_t sequence; 66 struct nouveau_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE];
67 struct nouveau_fence *fence, *fnext;
80 68
81 spin_lock(&chan->fence.lock); 69 spin_lock(&fctx->lock);
82 70 list_for_each_entry_safe(fence, fnext, &fctx->pending, head) {
83 /* Fetch the last sequence if the channel is still up and running */ 71 if (priv->read(chan) < fence->sequence)
84 if (likely(!list_empty(&chan->fence.pending))) {
85 if (USE_REFCNT(dev))
86 sequence = nvchan_rd32(chan, 0x48);
87 else
88 sequence = atomic_read(&chan->fence.last_sequence_irq);
89
90 if (chan->fence.sequence_ack == sequence)
91 goto out;
92 chan->fence.sequence_ack = sequence;
93 }
94
95 list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
96 if (fence->sequence > chan->fence.sequence_ack)
97 break; 72 break;
98 73
99 fence->signalled = true;
100 list_del(&fence->entry);
101 if (fence->work) 74 if (fence->work)
102 fence->work(fence->priv, true); 75 fence->work(fence->priv, true);
103 76 fence->channel = NULL;
104 kref_put(&fence->refcount, nouveau_fence_del); 77 list_del(&fence->head);
105 }
106
107out:
108 spin_unlock(&chan->fence.lock);
109}
110
111int
112nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence,
113 bool emit)
114{
115 struct nouveau_fence *fence;
116 int ret = 0;
117
118 fence = kzalloc(sizeof(*fence), GFP_KERNEL);
119 if (!fence)
120 return -ENOMEM;
121 kref_init(&fence->refcount);
122 nouveau_channel_ref(chan, &fence->channel);
123
124 if (emit)
125 ret = nouveau_fence_emit(fence);
126
127 if (ret)
128 nouveau_fence_unref(&fence); 78 nouveau_fence_unref(&fence);
129 *pfence = fence; 79 }
130 return ret; 80 spin_unlock(&fctx->lock);
131}
132
133struct nouveau_channel *
134nouveau_fence_channel(struct nouveau_fence *fence)
135{
136 return fence ? nouveau_channel_get_unlocked(fence->channel) : NULL;
137} 81}
138 82
139int 83int
140nouveau_fence_emit(struct nouveau_fence *fence) 84nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
141{ 85{
142 struct nouveau_channel *chan = fence->channel;
143 struct drm_device *dev = chan->dev; 86 struct drm_device *dev = chan->dev;
144 struct drm_nouveau_private *dev_priv = dev->dev_private; 87 struct nouveau_fence_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FENCE);
88 struct nouveau_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE];
145 int ret; 89 int ret;
146 90
147 ret = RING_SPACE(chan, 2); 91 fence->channel = chan;
148 if (ret) 92 fence->timeout = jiffies + (3 * DRM_HZ);
149 return ret; 93 fence->sequence = ++fctx->sequence;
150
151 if (unlikely(chan->fence.sequence == chan->fence.sequence_ack - 1)) {
152 nouveau_fence_update(chan);
153 94
154 BUG_ON(chan->fence.sequence == 95 ret = priv->emit(fence);
155 chan->fence.sequence_ack - 1); 96 if (!ret) {
97 kref_get(&fence->kref);
98 spin_lock(&fctx->lock);
99 list_add_tail(&fence->head, &fctx->pending);
100 spin_unlock(&fctx->lock);
156 } 101 }
157 102
158 fence->sequence = ++chan->fence.sequence; 103 return ret;
159
160 kref_get(&fence->refcount);
161 spin_lock(&chan->fence.lock);
162 list_add_tail(&fence->entry, &chan->fence.pending);
163 spin_unlock(&chan->fence.lock);
164
165 if (USE_REFCNT(dev)) {
166 if (dev_priv->card_type < NV_C0)
167 BEGIN_RING(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
168 else
169 BEGIN_NVC0(chan, 2, 0, NV10_SUBCHAN_REF_CNT, 1);
170 } else {
171 BEGIN_RING(chan, NvSubSw, 0x0150, 1);
172 }
173 OUT_RING (chan, fence->sequence);
174 FIRE_RING(chan);
175
176 return 0;
177}
178
179void
180nouveau_fence_work(struct nouveau_fence *fence,
181 void (*work)(void *priv, bool signalled),
182 void *priv)
183{
184 BUG_ON(fence->work);
185
186 spin_lock(&fence->channel->fence.lock);
187
188 if (fence->signalled) {
189 work(priv, true);
190 } else {
191 fence->work = work;
192 fence->priv = priv;
193 }
194
195 spin_unlock(&fence->channel->fence.lock);
196}
197
198void
199__nouveau_fence_unref(void **sync_obj)
200{
201 struct nouveau_fence *fence = nouveau_fence(*sync_obj);
202
203 if (fence)
204 kref_put(&fence->refcount, nouveau_fence_del);
205 *sync_obj = NULL;
206}
207
208void *
209__nouveau_fence_ref(void *sync_obj)
210{
211 struct nouveau_fence *fence = nouveau_fence(sync_obj);
212
213 kref_get(&fence->refcount);
214 return sync_obj;
215} 104}
216 105
217bool 106bool
218__nouveau_fence_signalled(void *sync_obj, void *sync_arg) 107nouveau_fence_done(struct nouveau_fence *fence)
219{ 108{
220 struct nouveau_fence *fence = nouveau_fence(sync_obj); 109 if (fence->channel)
221 struct nouveau_channel *chan = fence->channel; 110 nouveau_fence_update(fence->channel);
222 111 return !fence->channel;
223 if (fence->signalled)
224 return true;
225
226 nouveau_fence_update(chan);
227 return fence->signalled;
228} 112}
229 113
230int 114int
231__nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr) 115nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
232{ 116{
233 unsigned long timeout = jiffies + (3 * DRM_HZ);
234 unsigned long sleep_time = NSEC_PER_MSEC / 1000; 117 unsigned long sleep_time = NSEC_PER_MSEC / 1000;
235 ktime_t t; 118 ktime_t t;
236 int ret = 0; 119 int ret = 0;
237 120
238 while (1) { 121 while (!nouveau_fence_done(fence)) {
239 if (__nouveau_fence_signalled(sync_obj, sync_arg)) 122 if (fence->timeout && time_after_eq(jiffies, fence->timeout)) {
240 break;
241
242 if (time_after_eq(jiffies, timeout)) {
243 ret = -EBUSY; 123 ret = -EBUSY;
244 break; 124 break;
245 } 125 }
246 126
247 __set_current_state(intr ? TASK_INTERRUPTIBLE 127 __set_current_state(intr ? TASK_INTERRUPTIBLE :
248 : TASK_UNINTERRUPTIBLE); 128 TASK_UNINTERRUPTIBLE);
249 if (lazy) { 129 if (lazy) {
250 t = ktime_set(0, sleep_time); 130 t = ktime_set(0, sleep_time);
251 schedule_hrtimeout(&t, HRTIMER_MODE_REL); 131 schedule_hrtimeout(&t, HRTIMER_MODE_REL);
@@ -261,354 +141,72 @@ __nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
 	}
 
 	__set_current_state(TASK_RUNNING);
-
 	return ret;
 }
 
-static struct nouveau_semaphore *
-semaphore_alloc(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_semaphore *sema;
-	int size = (dev_priv->chipset < 0x84) ? 4 : 16;
-	int ret, i;
-
-	if (!USE_SEMA(dev))
-		return NULL;
-
-	sema = kmalloc(sizeof(*sema), GFP_KERNEL);
-	if (!sema)
-		goto fail;
-
-	ret = drm_mm_pre_get(&dev_priv->fence.heap);
-	if (ret)
-		goto fail;
-
-	spin_lock(&dev_priv->fence.lock);
-	sema->mem = drm_mm_search_free(&dev_priv->fence.heap, size, 0, 0);
-	if (sema->mem)
-		sema->mem = drm_mm_get_block_atomic(sema->mem, size, 0);
-	spin_unlock(&dev_priv->fence.lock);
-
-	if (!sema->mem)
-		goto fail;
-
-	kref_init(&sema->ref);
-	sema->dev = dev;
-	for (i = sema->mem->start; i < sema->mem->start + size; i += 4)
-		nouveau_bo_wr32(dev_priv->fence.bo, i / 4, 0);
-
-	return sema;
-fail:
-	kfree(sema);
-	return NULL;
-}
-
-static void
-semaphore_free(struct kref *ref)
-{
-	struct nouveau_semaphore *sema =
-		container_of(ref, struct nouveau_semaphore, ref);
-	struct drm_nouveau_private *dev_priv = sema->dev->dev_private;
-
-	spin_lock(&dev_priv->fence.lock);
-	drm_mm_put_block(sema->mem);
-	spin_unlock(&dev_priv->fence.lock);
-
-	kfree(sema);
-}
-
-static void
-semaphore_work(void *priv, bool signalled)
-{
-	struct nouveau_semaphore *sema = priv;
-	struct drm_nouveau_private *dev_priv = sema->dev->dev_private;
-
-	if (unlikely(!signalled))
-		nouveau_bo_wr32(dev_priv->fence.bo, sema->mem->start / 4, 1);
-
-	kref_put(&sema->ref, semaphore_free);
-}
-
-static int
-semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
-{
-	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
-	struct nouveau_fence *fence = NULL;
-	u64 offset = chan->fence.vma.offset + sema->mem->start;
-	int ret;
-
-	if (dev_priv->chipset < 0x84) {
-		ret = RING_SPACE(chan, 4);
-		if (ret)
-			return ret;
-
-		BEGIN_RING(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 3);
-		OUT_RING (chan, NvSema);
-		OUT_RING (chan, offset);
-		OUT_RING (chan, 1);
-	} else
-	if (dev_priv->chipset < 0xc0) {
-		ret = RING_SPACE(chan, 7);
-		if (ret)
-			return ret;
-
-		BEGIN_RING(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
-		OUT_RING (chan, chan->vram_handle);
-		BEGIN_RING(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
-		OUT_RING (chan, upper_32_bits(offset));
-		OUT_RING (chan, lower_32_bits(offset));
-		OUT_RING (chan, 1);
-		OUT_RING (chan, 1); /* ACQUIRE_EQ */
-	} else {
-		ret = RING_SPACE(chan, 5);
-		if (ret)
-			return ret;
-
-		BEGIN_NVC0(chan, 2, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
-		OUT_RING (chan, upper_32_bits(offset));
-		OUT_RING (chan, lower_32_bits(offset));
-		OUT_RING (chan, 1);
-		OUT_RING (chan, 0x1001); /* ACQUIRE_EQ */
-	}
-
-	/* Delay semaphore destruction until its work is done */
-	ret = nouveau_fence_new(chan, &fence, true);
-	if (ret)
-		return ret;
-
-	kref_get(&sema->ref);
-	nouveau_fence_work(fence, semaphore_work, sema);
-	nouveau_fence_unref(&fence);
-	return 0;
-}
-
-static int
-semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
-{
-	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
-	struct nouveau_fence *fence = NULL;
-	u64 offset = chan->fence.vma.offset + sema->mem->start;
-	int ret;
-
-	if (dev_priv->chipset < 0x84) {
-		ret = RING_SPACE(chan, 5);
-		if (ret)
-			return ret;
-
-		BEGIN_RING(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
-		OUT_RING (chan, NvSema);
-		OUT_RING (chan, offset);
-		BEGIN_RING(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1);
-		OUT_RING (chan, 1);
-	} else
-	if (dev_priv->chipset < 0xc0) {
-		ret = RING_SPACE(chan, 7);
-		if (ret)
-			return ret;
-
-		BEGIN_RING(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
-		OUT_RING (chan, chan->vram_handle);
-		BEGIN_RING(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
-		OUT_RING (chan, upper_32_bits(offset));
-		OUT_RING (chan, lower_32_bits(offset));
-		OUT_RING (chan, 1);
-		OUT_RING (chan, 2); /* RELEASE */
-	} else {
-		ret = RING_SPACE(chan, 5);
-		if (ret)
-			return ret;
-
-		BEGIN_NVC0(chan, 2, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
-		OUT_RING (chan, upper_32_bits(offset));
-		OUT_RING (chan, lower_32_bits(offset));
-		OUT_RING (chan, 1);
-		OUT_RING (chan, 0x1002); /* RELEASE */
-	}
-
-	/* Delay semaphore destruction until its work is done */
-	ret = nouveau_fence_new(chan, &fence, true);
-	if (ret)
-		return ret;
-
-	kref_get(&sema->ref);
-	nouveau_fence_work(fence, semaphore_work, sema);
-	nouveau_fence_unref(&fence);
-	return 0;
-}
-
 int
-nouveau_fence_sync(struct nouveau_fence *fence,
-		   struct nouveau_channel *wchan)
+nouveau_fence_sync(struct nouveau_fence *fence, struct nouveau_channel *chan)
 {
-	struct nouveau_channel *chan = nouveau_fence_channel(fence);
-	struct drm_device *dev = wchan->dev;
-	struct nouveau_semaphore *sema;
+	struct drm_device *dev = chan->dev;
+	struct nouveau_fence_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FENCE);
+	struct nouveau_channel *prev;
 	int ret = 0;
 
-	if (likely(!chan || chan == wchan ||
-		   nouveau_fence_signalled(fence)))
-		goto out;
-
-	sema = semaphore_alloc(dev);
-	if (!sema) {
-		/* Early card or broken userspace, fall back to
-		 * software sync. */
-		ret = nouveau_fence_wait(fence, true, false);
-		goto out;
-	}
-
-	/* try to take chan's mutex, if we can't take it right away
-	 * we have to fallback to software sync to prevent locking
-	 * order issues
-	 */
-	if (!mutex_trylock(&chan->mutex)) {
-		ret = nouveau_fence_wait(fence, true, false);
-		goto out_unref;
+	prev = fence ? nouveau_channel_get_unlocked(fence->channel) : NULL;
+	if (prev) {
+		if (unlikely(prev != chan && !nouveau_fence_done(fence))) {
+			ret = priv->sync(fence, prev, chan);
+			if (unlikely(ret))
+				ret = nouveau_fence_wait(fence, true, false);
+		}
+		nouveau_channel_put_unlocked(&prev);
 	}
 
-	/* Make wchan wait until it gets signalled */
-	ret = semaphore_acquire(wchan, sema);
-	if (ret)
-		goto out_unlock;
-
-	/* Signal the semaphore from chan */
-	ret = semaphore_release(chan, sema);
-
-out_unlock:
-	mutex_unlock(&chan->mutex);
-out_unref:
-	kref_put(&sema->ref, semaphore_free);
-out:
-	if (chan)
-		nouveau_channel_put_unlocked(&chan);
 	return ret;
 }
 
-int
-__nouveau_fence_flush(void *sync_obj, void *sync_arg)
+static void
+nouveau_fence_del(struct kref *kref)
 {
-	return 0;
+	struct nouveau_fence *fence = container_of(kref, typeof(*fence), kref);
+	kfree(fence);
 }
 
-int
-nouveau_fence_channel_init(struct nouveau_channel *chan)
+void
+nouveau_fence_unref(struct nouveau_fence **pfence)
 {
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *obj = NULL;
-	int ret;
-
-	if (dev_priv->card_type < NV_C0) {
-		/* Create an NV_SW object for various sync purposes */
-		ret = nouveau_gpuobj_gr_new(chan, NvSw, NV_SW);
-		if (ret)
-			return ret;
-
-		ret = RING_SPACE(chan, 2);
-		if (ret)
-			return ret;
-
-		BEGIN_RING(chan, NvSubSw, NV01_SUBCHAN_OBJECT, 1);
-		OUT_RING (chan, NvSw);
-		FIRE_RING (chan);
-	}
-
-	/* Setup area of memory shared between all channels for x-chan sync */
-	if (USE_SEMA(dev) && dev_priv->chipset < 0x84) {
-		struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem;
-
-		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY,
-					     mem->start << PAGE_SHIFT,
-					     mem->size, NV_MEM_ACCESS_RW,
-					     NV_MEM_TARGET_VRAM, &obj);
-		if (ret)
-			return ret;
-
-		ret = nouveau_ramht_insert(chan, NvSema, obj);
-		nouveau_gpuobj_ref(NULL, &obj);
-		if (ret)
-			return ret;
-	} else
-	if (USE_SEMA(dev)) {
-		/* map fence bo into channel's vm */
-		ret = nouveau_bo_vma_add(dev_priv->fence.bo, chan->vm,
-					 &chan->fence.vma);
-		if (ret)
-			return ret;
-	}
-
-	atomic_set(&chan->fence.last_sequence_irq, 0);
-	return 0;
+	if (*pfence)
+		kref_put(&(*pfence)->kref, nouveau_fence_del);
+	*pfence = NULL;
 }
 
-void
-nouveau_fence_channel_fini(struct nouveau_channel *chan)
+struct nouveau_fence *
+nouveau_fence_ref(struct nouveau_fence *fence)
 {
-	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
-	struct nouveau_fence *tmp, *fence;
-
-	spin_lock(&chan->fence.lock);
-	list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
-		fence->signalled = true;
-		list_del(&fence->entry);
-
-		if (unlikely(fence->work))
-			fence->work(fence->priv, false);
-
-		kref_put(&fence->refcount, nouveau_fence_del);
-	}
-	spin_unlock(&chan->fence.lock);
-
-	nouveau_bo_vma_del(dev_priv->fence.bo, &chan->fence.vma);
+	kref_get(&fence->kref);
+	return fence;
 }
 
 int
-nouveau_fence_init(struct drm_device *dev)
+nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	int size = (dev_priv->chipset < 0x84) ? 4096 : 16384;
-	int ret;
-
-	/* Create a shared VRAM heap for cross-channel sync. */
-	if (USE_SEMA(dev)) {
-		ret = nouveau_bo_new(dev, size, 0, TTM_PL_FLAG_VRAM,
-				     0, 0, &dev_priv->fence.bo);
-		if (ret)
-			return ret;
+	struct nouveau_fence *fence;
+	int ret = 0;
 
-		ret = nouveau_bo_pin(dev_priv->fence.bo, TTM_PL_FLAG_VRAM);
-		if (ret)
-			goto fail;
+	if (unlikely(!chan->engctx[NVOBJ_ENGINE_FENCE]))
+		return -ENODEV;
 
-		ret = nouveau_bo_map(dev_priv->fence.bo);
-		if (ret)
-			goto fail;
+	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+	if (!fence)
+		return -ENOMEM;
+	kref_init(&fence->kref);
 
-		ret = drm_mm_init(&dev_priv->fence.heap, 0,
-				  dev_priv->fence.bo->bo.mem.size);
+	if (chan) {
+		ret = nouveau_fence_emit(fence, chan);
 		if (ret)
-			goto fail;
-
-		spin_lock_init(&dev_priv->fence.lock);
+			nouveau_fence_unref(&fence);
 	}
 
-	return 0;
-fail:
-	nouveau_bo_unmap(dev_priv->fence.bo);
-	nouveau_bo_ref(NULL, &dev_priv->fence.bo);
+	*pfence = fence;
 	return ret;
 }
-
-void
-nouveau_fence_fini(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	if (USE_SEMA(dev)) {
-		drm_mm_takedown(&dev_priv->fence.heap);
-		nouveau_bo_unmap(dev_priv->fence.bo);
-		nouveau_bo_unpin(dev_priv->fence.bo);
-		nouveau_bo_ref(NULL, &dev_priv->fence.bo);
-	}
-}
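
The rewrite above drops the VRAM semaphore heap and the per-channel NvSw setup in favour of a per-chipset fence engine selected through NVOBJ_ENGINE_FENCE, and the wait loop now polls nouveau_fence_done() against a per-fence deadline (presumably armed by the emit path, given the fence->timeout check). A minimal sketch of driving the reworked API, assuming an already-initialised channel; "my_chan" is a hypothetical name and error handling is trimmed:

	/* Sketch only: allocate-and-emit, then CPU-wait.  The two-argument
	 * nouveau_fence_new() introduced above emits immediately when given
	 * a channel. */
	struct nouveau_fence *fence = NULL;
	int ret;

	ret = nouveau_fence_new(my_chan, &fence);
	if (ret == 0) {
		ret = nouveau_fence_wait(fence, true, false); /* lazy, uninterruptible */
		nouveau_fence_unref(&fence);
	}
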
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
new file mode 100644
index 000000000000..82ba733393ae
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -0,0 +1,52 @@
+#ifndef __NOUVEAU_FENCE_H__
+#define __NOUVEAU_FENCE_H__
+
+struct nouveau_fence {
+	struct list_head head;
+	struct kref kref;
+
+	struct nouveau_channel *channel;
+	unsigned long timeout;
+	u32 sequence;
+
+	void (*work)(void *priv, bool signalled);
+	void *priv;
+};
+
+int nouveau_fence_new(struct nouveau_channel *, struct nouveau_fence **);
+struct nouveau_fence *
+nouveau_fence_ref(struct nouveau_fence *);
+void nouveau_fence_unref(struct nouveau_fence **);
+
+int nouveau_fence_emit(struct nouveau_fence *, struct nouveau_channel *);
+bool nouveau_fence_done(struct nouveau_fence *);
+int nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr);
+int nouveau_fence_sync(struct nouveau_fence *, struct nouveau_channel *);
+void nouveau_fence_idle(struct nouveau_channel *);
+void nouveau_fence_update(struct nouveau_channel *);
+
+struct nouveau_fence_chan {
+	struct list_head pending;
+	spinlock_t lock;
+	u32 sequence;
+};
+
+struct nouveau_fence_priv {
+	struct nouveau_exec_engine engine;
+	int (*emit)(struct nouveau_fence *);
+	int (*sync)(struct nouveau_fence *, struct nouveau_channel *,
+		    struct nouveau_channel *);
+	u32 (*read)(struct nouveau_channel *);
+};
+
+void nouveau_fence_context_new(struct nouveau_fence_chan *);
+void nouveau_fence_context_del(struct nouveau_fence_chan *);
+
+int nv04_fence_create(struct drm_device *dev);
+int nv04_fence_mthd(struct nouveau_channel *, u32, u32, u32);
+
+int nv10_fence_create(struct drm_device *dev);
+int nv84_fence_create(struct drm_device *dev);
+int nvc0_fence_create(struct drm_device *dev);
+
+#endif
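
nouveau_fence_priv is the hook table each chipset backend fills in: emit writes a new sequence number from the channel's ring, read reports the last sequence the GPU retired, and sync makes one channel wait on another. A rough sketch of what a backend might look like; the nvxx_* names are hypothetical stand-ins for the real nv04/nv10/nv84/nvc0 implementations elsewhere in this series, and registration boilerplate is omitted:

	/* Sketch of a hypothetical fence backend behind nouveau_fence_priv. */
	static int
	nvxx_fence_emit(struct nouveau_fence *fence)
	{
		/* would write fence->sequence to the channel's fence buffer */
		return 0;
	}

	static u32
	nvxx_fence_read(struct nouveau_channel *chan)
	{
		/* would read back the last sequence the GPU completed */
		return 0;
	}

	static int
	nvxx_fence_create(struct drm_device *dev)
	{
		struct nouveau_fence_priv *priv;

		priv = kzalloc(sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return -ENOMEM;
		priv->emit = nvxx_fence_emit;
		priv->read = nvxx_fence_read;
		/* priv->sync would emit a cross-channel semaphore acquire;
		 * registration as NVOBJ_ENGINE_FENCE is omitted here. */
		return 0;
	}
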
diff --git a/drivers/gpu/drm/nouveau/nouveau_fifo.h b/drivers/gpu/drm/nouveau/nouveau_fifo.h
new file mode 100644
index 000000000000..ce99cab2f257
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_fifo.h
@@ -0,0 +1,32 @@
+#ifndef __NOUVEAU_FIFO_H__
+#define __NOUVEAU_FIFO_H__
+
+struct nouveau_fifo_priv {
+	struct nouveau_exec_engine base;
+	u32 channels;
+};
+
+struct nouveau_fifo_chan {
+};
+
+bool nv04_fifo_cache_pull(struct drm_device *, bool);
+void nv04_fifo_context_del(struct nouveau_channel *, int);
+int nv04_fifo_fini(struct drm_device *, int, bool);
+int nv04_fifo_init(struct drm_device *, int);
+void nv04_fifo_isr(struct drm_device *);
+void nv04_fifo_destroy(struct drm_device *, int);
+
+void nv50_fifo_playlist_update(struct drm_device *);
+void nv50_fifo_destroy(struct drm_device *, int);
+void nv50_fifo_tlb_flush(struct drm_device *, int);
+
+int nv04_fifo_create(struct drm_device *);
+int nv10_fifo_create(struct drm_device *);
+int nv17_fifo_create(struct drm_device *);
+int nv40_fifo_create(struct drm_device *);
+int nv50_fifo_create(struct drm_device *);
+int nv84_fifo_create(struct drm_device *);
+int nvc0_fifo_create(struct drm_device *);
+int nve0_fifo_create(struct drm_device *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index ed52a6f41613..30f542316944 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -23,12 +23,14 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  */
+#include <linux/dma-buf.h>
 #include "drmP.h"
 #include "drm.h"
 
 #include "nouveau_drv.h"
 #include "nouveau_drm.h"
 #include "nouveau_dma.h"
+#include "nouveau_fence.h"
 
 #define nouveau_gem_pushbuf_sync(chan) 0
 
@@ -53,6 +55,9 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
 		nouveau_bo_unpin(nvbo);
 	}
 
+	if (gem->import_attach)
+		drm_prime_gem_destroy(gem, nvbo->bo.sg);
+
 	ttm_bo_unref(&bo);
 
 	drm_gem_object_release(gem);
@@ -139,7 +144,7 @@ nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
 		flags |= TTM_PL_FLAG_SYSTEM;
 
 	ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
-			     tile_flags, pnvbo);
+			     tile_flags, NULL, pnvbo);
 	if (ret)
 		return ret;
 	nvbo = *pnvbo;
@@ -704,7 +709,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 	}
 
 	if (chan->dma.ib_max) {
-		ret = nouveau_dma_wait(chan, req->nr_push + 1, 6);
+		ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
 		if (ret) {
 			NV_INFO(dev, "nv50cal_space: %d\n", ret);
 			goto out;
@@ -774,7 +779,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 		}
 	}
 
-	ret = nouveau_fence_new(chan, &fence, true);
+	ret = nouveau_fence_new(chan, &fence);
 	if (ret) {
 		NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
 		WIND_RING(chan);
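
Two API shifts are visible in this file: nouveau_fence_new() loses its emit flag, and nouveau_bo_new() gains a struct sg_table * argument (NULL above for ordinary allocations). The extra argument is what lets the PRIME import path wrap a foreign scatterlist in a GART-backed buffer; roughly, per nouveau_prime.c later in this diff ("attach" being a dma_buf_attachment):

	/* Sketch of the import-side call. */
	struct sg_table *sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (!IS_ERR(sg))
		ret = nouveau_bo_new(dev, size, 0, TTM_PL_FLAG_TT, 0, 0, sg, &nvbo);
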
diff --git a/drivers/gpu/drm/nouveau/nouveau_gpio.c b/drivers/gpu/drm/nouveau/nouveau_gpio.c
index a580cc62337a..82c19e82ff02 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gpio.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gpio.c
@@ -387,7 +387,7 @@ nouveau_gpio_reset(struct drm_device *dev)
 	if (dev_priv->card_type >= NV_D0) {
 		nv_mask(dev, 0x00d610 + (line * 4), 0xff, unk0);
 		if (unk1--)
-			nv_mask(dev, 0x00d640 + (unk1 * 4), 0xff, line);
+			nv_mask(dev, 0x00d740 + (unk1 * 4), 0xff, line);
 	} else
 	if (dev_priv->card_type >= NV_50) {
 		static const u32 regs[] = { 0xe100, 0xe28c };
diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.h b/drivers/gpu/drm/nouveau/nouveau_grctx.h
index 86c2e374e938..b0795ececbda 100644
--- a/drivers/gpu/drm/nouveau/nouveau_grctx.h
+++ b/drivers/gpu/drm/nouveau/nouveau_grctx.h
@@ -18,7 +18,6 @@ struct nouveau_grctx {
 	uint32_t ctxvals_base;
 };
 
-#ifdef CP_CTX
 static inline void
 cp_out(struct nouveau_grctx *ctx, uint32_t inst)
 {
@@ -88,10 +87,8 @@ _cp_bra(struct nouveau_grctx *ctx, u32 mod, int flag, int state, int name)
 		(state ? 0 : CP_BRA_IF_CLEAR));
 }
 #define cp_bra(c, f, s, n) _cp_bra((c), 0, CP_FLAG_##f, CP_FLAG_##f##_##s, n)
-#ifdef CP_BRA_MOD
 #define cp_cal(c, f, s, n) _cp_bra((c), 1, CP_FLAG_##f, CP_FLAG_##f##_##s, n)
 #define cp_ret(c, f, s) _cp_bra((c), 2, CP_FLAG_##f, CP_FLAG_##f##_##s, 0)
-#endif
 
 static inline void
 _cp_wait(struct nouveau_grctx *ctx, int flag, int state)
@@ -128,6 +125,5 @@ gr_def(struct nouveau_grctx *ctx, uint32_t reg, uint32_t val)
 
 	nv_wo32(ctx->data, reg * 4, val);
 }
-#endif
 
 #endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/nouveau_hw.c
index ba896e54b799..b87ad3bd7739 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hw.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hw.c
@@ -1018,11 +1018,6 @@ nv_load_state_ext(struct drm_device *dev, int head,
 	}
 
 	NVWriteCRTC(dev, head, NV_PCRTC_START, regp->fb_start);
-
-	/* Enable vblank interrupts. */
-	NVWriteCRTC(dev, head, NV_PCRTC_INTR_EN_0,
-		    (dev->vblank_enabled[head] ? 1 : 0));
-	NVWriteCRTC(dev, head, NV_PCRTC_INTR_0, NV_PCRTC_INTR_0_VBLANK);
 }
 
 static void
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index b08065f981df..5b498ea32e14 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -39,6 +39,8 @@
 #include "nouveau_pm.h"
 #include "nouveau_mm.h"
 #include "nouveau_vm.h"
+#include "nouveau_fifo.h"
+#include "nouveau_fence.h"
 
 /*
  * NV10-NV40 tiling helpers
@@ -50,7 +52,6 @@ nv10_mem_update_tile_region(struct drm_device *dev,
 			   uint32_t size, uint32_t pitch, uint32_t flags)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
 	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
 	int i = tile - dev_priv->tile.reg, j;
 	unsigned long save;
@@ -64,8 +65,8 @@ nv10_mem_update_tile_region(struct drm_device *dev,
 	pfb->init_tile_region(dev, i, addr, size, pitch, flags);
 
 	spin_lock_irqsave(&dev_priv->context_switch_lock, save);
-	pfifo->reassign(dev, false);
-	pfifo->cache_pull(dev, false);
+	nv_wr32(dev, NV03_PFIFO_CACHES, 0);
+	nv04_fifo_cache_pull(dev, false);
 
 	nouveau_wait_for_idle(dev);
 
@@ -75,8 +76,8 @@ nv10_mem_update_tile_region(struct drm_device *dev,
 			dev_priv->eng[j]->set_tile_region(dev, i);
 	}
 
-	pfifo->cache_pull(dev, true);
-	pfifo->reassign(dev, true);
+	nv04_fifo_cache_pull(dev, true);
+	nv_wr32(dev, NV03_PFIFO_CACHES, 1);
 	spin_unlock_irqrestore(&dev_priv->context_switch_lock, save);
 }
 
@@ -89,7 +90,7 @@ nv10_mem_get_tile_region(struct drm_device *dev, int i)
 	spin_lock(&dev_priv->tile.lock);
 
 	if (!tile->used &&
-	    (!tile->fence || nouveau_fence_signalled(tile->fence)))
+	    (!tile->fence || nouveau_fence_done(tile->fence)))
 		tile->used = true;
 	else
 		tile = NULL;
@@ -416,7 +417,7 @@ nouveau_mem_vram_init(struct drm_device *dev)
 
 	if (dev_priv->card_type < NV_50) {
 		ret = nouveau_bo_new(dev, 256*1024, 0, TTM_PL_FLAG_VRAM,
-				     0, 0, &dev_priv->vga_ram);
+				     0, 0, NULL, &dev_priv->vga_ram);
 		if (ret == 0)
 			ret = nouveau_bo_pin(dev_priv->vga_ram,
 					     TTM_PL_FLAG_VRAM);
@@ -843,6 +844,7 @@ nouveau_mem_timing_calc(struct drm_device *dev, u32 freq,
 		ret = nv50_mem_timing_calc(dev, freq, e, len, boot, t);
 		break;
 	case NV_C0:
+	case NV_D0:
 		ret = nvc0_mem_timing_calc(dev, freq, e, len, boot, t);
 		break;
 	default:
@@ -977,6 +979,8 @@ nouveau_mem_exec(struct nouveau_mem_exec_func *exec,
 		break;
 	case NV_MEM_TYPE_DDR3:
 		tDLLK = 12000;
+		tCKSRE = 2000;
+		tXS = 1000;
 		mr1_dlloff = 0x00000001;
 		break;
 	case NV_MEM_TYPE_GDDR3:
@@ -1023,6 +1027,7 @@ nouveau_mem_exec(struct nouveau_mem_exec_func *exec,
 	exec->refresh_self(exec, false);
 	exec->refresh_auto(exec, true);
 	exec->wait(exec, tXS);
+	exec->wait(exec, tXS);
 
 	/* update MRs */
 	if (mr[2] != info->mr[2]) {
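
The nouveau_mem_exec() hunk above sits in the DRAM reclocking sequence: the device is taken out of self-refresh, auto-refresh is re-enabled, and the engine then waits out the DDR3 exit delays added here (tCKSRE and tXS are in the driver's fixed-point time units). A trimmed sketch of that step, with the surrounding precharge and mode-register writes elided:

	/* Self-refresh exit, per the hunk above.  The tXS wait is issued
	 * twice; the diff does not say why, presumably for added margin. */
	exec->refresh_self(exec, false);	/* leave self-refresh */
	exec->refresh_auto(exec, true);		/* resume auto-refresh */
	exec->wait(exec, tXS);
	exec->wait(exec, tXS);
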
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index cc419fae794b..b190cc01c820 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -34,9 +34,10 @@
 #include "drm.h"
 #include "nouveau_drv.h"
 #include "nouveau_drm.h"
+#include "nouveau_fifo.h"
 #include "nouveau_ramht.h"
+#include "nouveau_software.h"
 #include "nouveau_vm.h"
-#include "nv50_display.h"
 
 struct nouveau_gpuobj_method {
 	struct list_head head;
@@ -120,12 +121,13 @@ nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid,
 			  u32 class, u32 mthd, u32 data)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
 	struct nouveau_channel *chan = NULL;
 	unsigned long flags;
 	int ret = -EINVAL;
 
 	spin_lock_irqsave(&dev_priv->channels.lock, flags);
-	if (chid >= 0 && chid < dev_priv->engine.fifo.channels)
+	if (chid >= 0 && chid < pfifo->channels)
 		chan = dev_priv->channels.ptr[chid];
 	if (chan)
 		ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data);
@@ -133,37 +135,6 @@ nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid,
 	return ret;
 }
 
-/* NVidia uses context objects to drive drawing operations.
-
-   Context objects can be selected into 8 subchannels in the FIFO,
-   and then used via DMA command buffers.
-
-   A context object is referenced by a user defined handle (CARD32). The HW
-   looks up graphics objects in a hash table in the instance RAM.
-
-   An entry in the hash table consists of 2 CARD32. The first CARD32 contains
-   the handle, the second one a bitfield, that contains the address of the
-   object in instance RAM.
-
-   The format of the second CARD32 seems to be:
-
-   NV4 to NV30:
-
-   15: 0 instance_addr >> 4
-   17:16 engine (here uses 1 = graphics)
-   28:24 channel id (here uses 0)
-   31    valid (use 1)
-
-   NV40:
-
-   15: 0 instance_addr >> 4   (maybe 19-0)
-   21:20 engine (here uses 1 = graphics)
-   I'm unsure about the other bits, but using 0 seems to work.
-
-   The key into the hash table depends on the object handle and channel id and
-   is given as:
-*/
-
 int
 nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
 		   uint32_t size, int align, uint32_t flags,
@@ -267,7 +238,7 @@ nouveau_gpuobj_takedown(struct drm_device *dev)
 		kfree(oc);
 	}
 
-	BUG_ON(!list_empty(&dev_priv->gpuobj_list));
+	WARN_ON(!list_empty(&dev_priv->gpuobj_list));
 }
 
 
@@ -361,34 +332,6 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, u32 pinst, u64 vinst,
 	return 0;
 }
 
-/*
-   DMA objects are used to reference a piece of memory in the
-   framebuffer, PCI or AGP address space. Each object is 16 bytes big
-   and looks as follows:
-
-   entry[0]
-   11:0  class (seems like I can always use 0 here)
-   12    page table present?
-   13    page entry linear?
-   15:14 access: 0 rw, 1 ro, 2 wo
-   17:16 target: 0 NV memory, 1 NV memory tiled, 2 PCI, 3 AGP
-   31:20 dma adjust (bits 0-11 of the address)
-   entry[1]
-   dma limit (size of transfer)
-   entry[X]
-   1     0 readonly, 1 readwrite
-   31:12 dma frame address of the page (bits 12-31 of the address)
-   entry[N]
-   page table terminator, same value as the first pte, as does nvidia
-   rivatv uses 0xffffffff
-
-   Non linear page tables need a list of frame addresses afterwards,
-   the rivatv project has some info on this.
-
-   The method below creates a DMA object in instance RAM and returns a handle
-   to it that can be used to set up context objects.
-*/
-
 void
 nv50_gpuobj_dma_init(struct nouveau_gpuobj *obj, u32 offset, int class,
 		     u64 base, u64 size, int target, int access,
@@ -540,82 +483,6 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base,
 	return 0;
 }
 
-/* Context objects in the instance RAM have the following structure.
- * On NV40 they are 32 byte long, on NV30 and smaller 16 bytes.
-
-   NV4 - NV30:
-
-   entry[0]
-   11:0 class
-   12   chroma key enable
-   13   user clip enable
-   14   swizzle enable
-   17:15 patch config:
-       scrcopy_and, rop_and, blend_and, scrcopy, srccopy_pre, blend_pre
-   18   synchronize enable
-   19   endian: 1 big, 0 little
-   21:20 dither mode
-   23    single step enable
-   24    patch status: 0 invalid, 1 valid
-   25    context_surface 0: 1 valid
-   26    context surface 1: 1 valid
-   27    context pattern: 1 valid
-   28    context rop: 1 valid
-   29,30 context beta, beta4
-   entry[1]
-   7:0   mono format
-   15:8  color format
-   31:16 notify instance address
-   entry[2]
-   15:0  dma 0 instance address
-   31:16 dma 1 instance address
-   entry[3]
-   dma method traps
-
-   NV40:
-   No idea what the exact format is. Here's what can be deducted:
-
-   entry[0]:
-   11:0  class  (maybe uses more bits here?)
-   17    user clip enable
-   21:19 patch config
-   25    patch status valid ?
-   entry[1]:
-   15:0  DMA notifier  (maybe 20:0)
-   entry[2]:
-   15:0  DMA 0 instance  (maybe 20:0)
-   24    big endian
-   entry[3]:
-   15:0  DMA 1 instance  (maybe 20:0)
-   entry[4]:
-   entry[5]:
-   set to 0?
-*/
-static int
-nouveau_gpuobj_sw_new(struct nouveau_channel *chan, u32 handle, u16 class)
-{
-	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
-	struct nouveau_gpuobj *gpuobj;
-	int ret;
-
-	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
-	if (!gpuobj)
-		return -ENOMEM;
-	gpuobj->dev = chan->dev;
-	gpuobj->engine = NVOBJ_ENGINE_SW;
-	gpuobj->class = class;
-	kref_init(&gpuobj->refcount);
-	gpuobj->cinst = 0x40;
-
-	spin_lock(&dev_priv->ramin_lock);
-	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
-	spin_unlock(&dev_priv->ramin_lock);
-
-	ret = nouveau_ramht_insert(chan, handle, gpuobj);
-	nouveau_gpuobj_ref(NULL, &gpuobj);
-	return ret;
-}
-
 int
 nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class)
 {
@@ -632,9 +499,6 @@ nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class)
 		if (oc->id != class)
 			continue;
 
-		if (oc->engine == NVOBJ_ENGINE_SW)
-			return nouveau_gpuobj_sw_new(chan, handle, class);
-
 		if (!chan->engctx[oc->engine]) {
 			ret = eng->context_new(chan, oc->engine);
 			if (ret)
@@ -644,7 +508,6 @@ nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class)
 		return eng->object_new(chan, oc->engine, handle, class);
 	}
 
-	NV_ERROR(dev, "illegal object class: 0x%x\n", class);
 	return -EINVAL;
 }
 
@@ -693,11 +556,10 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
 static int
 nvc0_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_vm *vm)
 {
-	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
 	struct drm_device *dev = chan->dev;
 	struct nouveau_gpuobj *pgd = NULL;
 	struct nouveau_vm_pgd *vpgd;
-	int ret, i;
+	int ret;
 
 	ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0, &chan->ramin);
 	if (ret)
@@ -722,19 +584,6 @@ nvc0_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_vm *vm)
 	nv_wo32(chan->ramin, 0x0208, 0xffffffff);
 	nv_wo32(chan->ramin, 0x020c, 0x000000ff);
 
-	/* map display semaphore buffers into channel's vm */
-	for (i = 0; i < dev->mode_config.num_crtc; i++) {
-		struct nouveau_bo *bo;
-		if (dev_priv->card_type >= NV_D0)
-			bo = nvd0_display_crtc_sema(dev, i);
-		else
-			bo = nv50_display(dev)->crtc[i].sem.bo;
-
-		ret = nouveau_bo_vma_add(bo, chan->vm, &chan->dispc_vma[i]);
-		if (ret)
-			return ret;
-	}
-
 	return 0;
 }
 
740 589
@@ -747,7 +596,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
747 struct nouveau_fpriv *fpriv = nouveau_fpriv(chan->file_priv); 596 struct nouveau_fpriv *fpriv = nouveau_fpriv(chan->file_priv);
748 struct nouveau_vm *vm = fpriv ? fpriv->vm : dev_priv->chan_vm; 597 struct nouveau_vm *vm = fpriv ? fpriv->vm : dev_priv->chan_vm;
749 struct nouveau_gpuobj *vram = NULL, *tt = NULL; 598 struct nouveau_gpuobj *vram = NULL, *tt = NULL;
750 int ret, i; 599 int ret;
751 600
752 NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h); 601 NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
753 if (dev_priv->card_type >= NV_C0) 602 if (dev_priv->card_type >= NV_C0)
@@ -795,25 +644,6 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
 		nouveau_gpuobj_ref(NULL, &ramht);
 		if (ret)
 			return ret;
-
-		/* dma objects for display sync channel semaphore blocks */
-		for (i = 0; i < dev->mode_config.num_crtc; i++) {
-			struct nouveau_gpuobj *sem = NULL;
-			struct nv50_display_crtc *dispc =
-				&nv50_display(dev)->crtc[i];
-			u64 offset = dispc->sem.bo->bo.offset;
-
-			ret = nouveau_gpuobj_dma_new(chan, 0x3d, offset, 0xfff,
-						     NV_MEM_ACCESS_RW,
-						     NV_MEM_TARGET_VRAM, &sem);
-			if (ret)
-				return ret;
-
-			ret = nouveau_ramht_insert(chan, NvEvoSema0 + i, sem);
-			nouveau_gpuobj_ref(NULL, &sem);
-			if (ret)
-				return ret;
-		}
 	}
 
 	/* VRAM ctxdma */
@@ -873,25 +703,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
 void
 nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
 {
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	int i;
-
-	NV_DEBUG(dev, "ch%d\n", chan->id);
-
-	if (dev_priv->card_type >= NV_D0) {
-		for (i = 0; i < dev->mode_config.num_crtc; i++) {
-			struct nouveau_bo *bo = nvd0_display_crtc_sema(dev, i);
-			nouveau_bo_vma_del(bo, &chan->dispc_vma[i]);
-		}
-	} else
-	if (dev_priv->card_type >= NV_50) {
-		struct nv50_display *disp = nv50_display(dev);
-		for (i = 0; i < dev->mode_config.num_crtc; i++) {
-			struct nv50_display_crtc *dispc = &disp->crtc[i];
-			nouveau_bo_vma_del(dispc->sem.bo, &chan->dispc_vma[i]);
-		}
-	}
+	NV_DEBUG(chan->dev, "ch%d\n", chan->id);
 
 	nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
 	nouveau_gpuobj_ref(NULL, &chan->vm_pd);
@@ -956,6 +768,17 @@ int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
 	if (init->handle == ~0)
 		return -EINVAL;
 
+	/* compatibility with userspace that assumes 506e for all chipsets */
+	if (init->class == 0x506e) {
+		init->class = nouveau_software_class(dev);
+		if (init->class == 0x906e)
+			return 0;
+	} else
+	if (init->class == 0x906e) {
+		NV_ERROR(dev, "906e not supported yet\n");
+		return -EINVAL;
+	}
+
 	chan = nouveau_channel_get(file_priv, init->channel);
 	if (IS_ERR(chan))
 		return PTR_ERR(chan);
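
The compatibility shim above keys off nouveau_software_class() (defined in nouveau_software.h later in this diff), which picks the software object class per card generation; the ioctl returns early for 0x906e, presumably because fermi-class software objects need no object created here. The mapping, for reference:

	/* card_type <= NV_04  ->  0x006e
	 * card_type <= NV_40  ->  0x016e
	 * card_type <= NV_50  ->  0x506e
	 * card_type <= NV_E0  ->  0x906e  (handled by the early return above)
	 */
	if (init->class == 0x506e)
		init->class = nouveau_software_class(dev);
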
diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c
index 69a528d106e6..ea6acf1c4a78 100644
--- a/drivers/gpu/drm/nouveau/nouveau_perf.c
+++ b/drivers/gpu/drm/nouveau/nouveau_perf.c
@@ -83,7 +83,7 @@ nouveau_perf_entry(struct drm_device *dev, int idx,
 	return NULL;
 }
 
-static u8 *
+u8 *
 nouveau_perf_rammap(struct drm_device *dev, u32 freq,
 		    u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
 {
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.h b/drivers/gpu/drm/nouveau/nouveau_pm.h
index 3f82dfea61dd..07cac72c72b4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.h
@@ -61,8 +61,10 @@ int nouveau_voltage_gpio_set(struct drm_device *, int voltage);
 /* nouveau_perf.c */
 void nouveau_perf_init(struct drm_device *);
 void nouveau_perf_fini(struct drm_device *);
-u8 *nouveau_perf_timing(struct drm_device *, u32 freq, u8 *ver, u8 *len);
+u8 *nouveau_perf_rammap(struct drm_device *, u32 freq, u8 *ver,
+			u8 *hdr, u8 *cnt, u8 *len);
 u8 *nouveau_perf_ramcfg(struct drm_device *, u32 freq, u8 *ver, u8 *len);
+u8 *nouveau_perf_timing(struct drm_device *, u32 freq, u8 *ver, u8 *len);
 
 /* nouveau_mem.c */
 void nouveau_mem_timing_init(struct drm_device *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
new file mode 100644
index 000000000000..c58aab7370c5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -0,0 +1,163 @@
+
+#include "drmP.h"
+#include "drm.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+#include "nouveau_dma.h"
+
+#include <linux/dma-buf.h>
+
+static struct sg_table *nouveau_gem_map_dma_buf(struct dma_buf_attachment *attachment,
+						enum dma_data_direction dir)
+{
+	struct nouveau_bo *nvbo = attachment->dmabuf->priv;
+	struct drm_device *dev = nvbo->gem->dev;
+	int npages = nvbo->bo.num_pages;
+	struct sg_table *sg;
+	int nents;
+
+	mutex_lock(&dev->struct_mutex);
+	sg = drm_prime_pages_to_sg(nvbo->bo.ttm->pages, npages);
+	nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
+	mutex_unlock(&dev->struct_mutex);
+	return sg;
+}
+
+static void nouveau_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
+				      struct sg_table *sg, enum dma_data_direction dir)
+{
+	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
+	sg_free_table(sg);
+	kfree(sg);
+}
+
+static void nouveau_gem_dmabuf_release(struct dma_buf *dma_buf)
+{
+	struct nouveau_bo *nvbo = dma_buf->priv;
+
+	if (nvbo->gem->export_dma_buf == dma_buf) {
+		nvbo->gem->export_dma_buf = NULL;
+		drm_gem_object_unreference_unlocked(nvbo->gem);
+	}
+}
+
+static void *nouveau_gem_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
+{
+	return NULL;
+}
+
+static void nouveau_gem_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
+{
+
+}
+static void *nouveau_gem_kmap(struct dma_buf *dma_buf, unsigned long page_num)
+{
+	return NULL;
+}
+
+static void nouveau_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
+{
+
+}
+
+static const struct dma_buf_ops nouveau_dmabuf_ops = {
+	.map_dma_buf = nouveau_gem_map_dma_buf,
+	.unmap_dma_buf = nouveau_gem_unmap_dma_buf,
+	.release = nouveau_gem_dmabuf_release,
+	.kmap = nouveau_gem_kmap,
+	.kmap_atomic = nouveau_gem_kmap_atomic,
+	.kunmap = nouveau_gem_kunmap,
+	.kunmap_atomic = nouveau_gem_kunmap_atomic,
+};
+
+static int
+nouveau_prime_new(struct drm_device *dev,
+		  size_t size,
+		  struct sg_table *sg,
+		  struct nouveau_bo **pnvbo)
+{
+	struct nouveau_bo *nvbo;
+	u32 flags = 0;
+	int ret;
+
+	flags = TTM_PL_FLAG_TT;
+
+	ret = nouveau_bo_new(dev, size, 0, flags, 0, 0,
+			     sg, pnvbo);
+	if (ret)
+		return ret;
+	nvbo = *pnvbo;
+
+	/* we restrict allowed domains on nv50+ to only the types
+	 * that were requested at creation time. not possibly on
+	 * earlier chips without busting the ABI.
+	 */
+	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;
+	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
+	if (!nvbo->gem) {
+		nouveau_bo_ref(NULL, pnvbo);
+		return -ENOMEM;
+	}
+
+	nvbo->gem->driver_private = nvbo;
+	return 0;
+}
+
+struct dma_buf *nouveau_gem_prime_export(struct drm_device *dev,
+				struct drm_gem_object *obj, int flags)
+{
+	struct nouveau_bo *nvbo = nouveau_gem_object(obj);
+	int ret = 0;
+
+	/* pin buffer into GTT */
+	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_TT);
+	if (ret)
+		return ERR_PTR(-EINVAL);
+
+	return dma_buf_export(nvbo, &nouveau_dmabuf_ops, obj->size, flags);
+}
+
+struct drm_gem_object *nouveau_gem_prime_import(struct drm_device *dev,
+				struct dma_buf *dma_buf)
+{
+	struct dma_buf_attachment *attach;
+	struct sg_table *sg;
+	struct nouveau_bo *nvbo;
+	int ret;
+
+	if (dma_buf->ops == &nouveau_dmabuf_ops) {
+		nvbo = dma_buf->priv;
+		if (nvbo->gem) {
+			if (nvbo->gem->dev == dev) {
+				drm_gem_object_reference(nvbo->gem);
+				return nvbo->gem;
+			}
+		}
+	}
+	/* need to attach */
+	attach = dma_buf_attach(dma_buf, dev->dev);
+	if (IS_ERR(attach))
+		return ERR_PTR(PTR_ERR(attach));
+
+	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+	if (IS_ERR(sg)) {
+		ret = PTR_ERR(sg);
+		goto fail_detach;
+	}
+
+	ret = nouveau_prime_new(dev, dma_buf->size, sg, &nvbo);
+	if (ret)
+		goto fail_unmap;
+
+	nvbo->gem->import_attach = attach;
+
+	return nvbo->gem;
+
+fail_unmap:
+	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
+fail_detach:
+	dma_buf_detach(dma_buf, attach);
+	return ERR_PTR(ret);
+}
+
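
These export/import hooks only become reachable once they are wired into the driver structure; the hunk doing that is outside this diffstat, but with the 3.4-era PRIME core it would look roughly like the following (field placement assumed, the prime_*_to_* helpers are the DRM core's generic implementations):

	static struct drm_driver driver = {
		.driver_features = DRIVER_GEM | DRIVER_PRIME /* | ... */,
		.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
		.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
		.gem_prime_export = nouveau_gem_prime_export,
		.gem_prime_import = nouveau_gem_prime_import,
		/* ... */
	};
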
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 47f245edf538..38483a042bc2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -290,7 +290,10 @@ nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 	struct nouveau_mem *node = mem->mm_node;
 
 	/* noop: bound in move_notify() */
-	node->pages = nvbe->ttm.dma_address;
+	if (ttm->sg) {
+		node->sg = ttm->sg;
+	} else
+		node->pages = nvbe->ttm.dma_address;
 	return 0;
 }
 
@@ -338,10 +341,10 @@ nouveau_sgdma_init(struct drm_device *dev)
 	u32 aper_size, align;
 	int ret;
 
-	if (dev_priv->card_type >= NV_40 && pci_is_pcie(dev->pdev))
+	if (dev_priv->card_type >= NV_40)
 		aper_size = 512 * 1024 * 1024;
 	else
-		aper_size = 64 * 1024 * 1024;
+		aper_size = 128 * 1024 * 1024;
 
 	/* Dear NVIDIA, NV44+ would like proper present bits in PTEs for
 	 * christmas.  The cards before it have them, the cards after
diff --git a/drivers/gpu/drm/nouveau/nouveau_software.h b/drivers/gpu/drm/nouveau/nouveau_software.h
new file mode 100644
index 000000000000..e60bc6ce9003
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_software.h
@@ -0,0 +1,69 @@
+#ifndef __NOUVEAU_SOFTWARE_H__
+#define __NOUVEAU_SOFTWARE_H__
+
+struct nouveau_software_priv {
+	struct nouveau_exec_engine base;
+	struct list_head vblank;
+};
+
+struct nouveau_software_chan {
+	struct list_head flip;
+	struct {
+		struct list_head list;
+		struct nouveau_bo *bo;
+		u32 offset;
+		u32 value;
+		u32 head;
+	} vblank;
+};
+
+static inline void
+nouveau_software_vblank(struct drm_device *dev, int crtc)
+{
+	struct nouveau_software_priv *psw = nv_engine(dev, NVOBJ_ENGINE_SW);
+	struct nouveau_software_chan *pch, *tmp;
+
+	list_for_each_entry_safe(pch, tmp, &psw->vblank, vblank.list) {
+		if (pch->vblank.head != crtc)
+			continue;
+
+		nouveau_bo_wr32(pch->vblank.bo, pch->vblank.offset,
+				pch->vblank.value);
+		list_del(&pch->vblank.list);
+		drm_vblank_put(dev, crtc);
+	}
+}
+
+static inline void
+nouveau_software_context_new(struct nouveau_software_chan *pch)
+{
+	INIT_LIST_HEAD(&pch->flip);
+}
+
+static inline void
+nouveau_software_create(struct nouveau_software_priv *psw)
+{
+	INIT_LIST_HEAD(&psw->vblank);
+}
+
+static inline u16
+nouveau_software_class(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	if (dev_priv->card_type <= NV_04)
+		return 0x006e;
+	if (dev_priv->card_type <= NV_40)
+		return 0x016e;
+	if (dev_priv->card_type <= NV_50)
+		return 0x506e;
+	if (dev_priv->card_type <= NV_E0)
+		return 0x906e;
+	return 0x0000;
+}
+
+int nv04_software_create(struct drm_device *);
+int nv50_software_create(struct drm_device *);
+int nvc0_software_create(struct drm_device *);
+u64 nvc0_software_crtc(struct nouveau_channel *, int crtc);
+
+#endif
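
nouveau_software_vblank() above is the consumer side: on each vblank interrupt it walks psw->vblank, performs the queued semaphore write and drops the vblank reference. The producer is a channel's page-flip path; a rough sketch of how an entry would be queued (the function name is hypothetical and locking is elided):

	static void example_queue_vblank_sem(struct drm_device *dev,
					     struct nouveau_software_chan *pch,
					     struct nouveau_bo *bo,
					     u32 offset, u32 value, int head)
	{
		struct nouveau_software_priv *psw = nv_engine(dev, NVOBJ_ENGINE_SW);

		pch->vblank.bo     = bo;
		pch->vblank.offset = offset;
		pch->vblank.value  = value;
		pch->vblank.head   = head;

		drm_vblank_get(dev, head);	/* balanced by _put in the ISR path */
		list_add(&pch->vblank.list, &psw->vblank);
	}
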
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index c2a8511e855a..19706f0532ea 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -39,6 +39,9 @@
 #include "nouveau_gpio.h"
 #include "nouveau_pm.h"
 #include "nv50_display.h"
+#include "nouveau_fifo.h"
+#include "nouveau_fence.h"
+#include "nouveau_software.h"
 
 static void nouveau_stub_takedown(struct drm_device *dev) {}
 static int nouveau_stub_init(struct drm_device *dev) { return 0; }
@@ -66,18 +69,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->timer.takedown = nv04_timer_takedown;
 		engine->fb.init = nv04_fb_init;
 		engine->fb.takedown = nv04_fb_takedown;
-		engine->fifo.channels = 16;
-		engine->fifo.init = nv04_fifo_init;
-		engine->fifo.takedown = nv04_fifo_fini;
-		engine->fifo.disable = nv04_fifo_disable;
-		engine->fifo.enable = nv04_fifo_enable;
-		engine->fifo.reassign = nv04_fifo_reassign;
-		engine->fifo.cache_pull = nv04_fifo_cache_pull;
-		engine->fifo.channel_id = nv04_fifo_channel_id;
-		engine->fifo.create_context = nv04_fifo_create_context;
-		engine->fifo.destroy_context = nv04_fifo_destroy_context;
-		engine->fifo.load_context = nv04_fifo_load_context;
-		engine->fifo.unload_context = nv04_fifo_unload_context;
 		engine->display.early_init = nv04_display_early_init;
 		engine->display.late_takedown = nv04_display_late_takedown;
 		engine->display.create = nv04_display_create;
@@ -111,18 +102,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->fb.init_tile_region = nv10_fb_init_tile_region;
 		engine->fb.set_tile_region = nv10_fb_set_tile_region;
 		engine->fb.free_tile_region = nv10_fb_free_tile_region;
-		engine->fifo.channels = 32;
-		engine->fifo.init = nv10_fifo_init;
-		engine->fifo.takedown = nv04_fifo_fini;
-		engine->fifo.disable = nv04_fifo_disable;
-		engine->fifo.enable = nv04_fifo_enable;
-		engine->fifo.reassign = nv04_fifo_reassign;
-		engine->fifo.cache_pull = nv04_fifo_cache_pull;
-		engine->fifo.channel_id = nv10_fifo_channel_id;
-		engine->fifo.create_context = nv10_fifo_create_context;
-		engine->fifo.destroy_context = nv04_fifo_destroy_context;
-		engine->fifo.load_context = nv10_fifo_load_context;
-		engine->fifo.unload_context = nv10_fifo_unload_context;
 		engine->display.early_init = nv04_display_early_init;
 		engine->display.late_takedown = nv04_display_late_takedown;
 		engine->display.create = nv04_display_create;
@@ -162,18 +141,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->fb.init_tile_region = nv20_fb_init_tile_region;
 		engine->fb.set_tile_region = nv20_fb_set_tile_region;
 		engine->fb.free_tile_region = nv20_fb_free_tile_region;
-		engine->fifo.channels = 32;
-		engine->fifo.init = nv10_fifo_init;
-		engine->fifo.takedown = nv04_fifo_fini;
-		engine->fifo.disable = nv04_fifo_disable;
-		engine->fifo.enable = nv04_fifo_enable;
-		engine->fifo.reassign = nv04_fifo_reassign;
-		engine->fifo.cache_pull = nv04_fifo_cache_pull;
-		engine->fifo.channel_id = nv10_fifo_channel_id;
-		engine->fifo.create_context = nv10_fifo_create_context;
-		engine->fifo.destroy_context = nv04_fifo_destroy_context;
-		engine->fifo.load_context = nv10_fifo_load_context;
-		engine->fifo.unload_context = nv10_fifo_unload_context;
 		engine->display.early_init = nv04_display_early_init;
 		engine->display.late_takedown = nv04_display_late_takedown;
 		engine->display.create = nv04_display_create;
@@ -209,18 +176,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->fb.init_tile_region = nv30_fb_init_tile_region;
 		engine->fb.set_tile_region = nv10_fb_set_tile_region;
 		engine->fb.free_tile_region = nv30_fb_free_tile_region;
-		engine->fifo.channels = 32;
-		engine->fifo.init = nv10_fifo_init;
-		engine->fifo.takedown = nv04_fifo_fini;
-		engine->fifo.disable = nv04_fifo_disable;
-		engine->fifo.enable = nv04_fifo_enable;
-		engine->fifo.reassign = nv04_fifo_reassign;
-		engine->fifo.cache_pull = nv04_fifo_cache_pull;
-		engine->fifo.channel_id = nv10_fifo_channel_id;
-		engine->fifo.create_context = nv10_fifo_create_context;
-		engine->fifo.destroy_context = nv04_fifo_destroy_context;
-		engine->fifo.load_context = nv10_fifo_load_context;
-		engine->fifo.unload_context = nv10_fifo_unload_context;
 		engine->display.early_init = nv04_display_early_init;
 		engine->display.late_takedown = nv04_display_late_takedown;
 		engine->display.create = nv04_display_create;
@@ -259,18 +214,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->fb.init_tile_region = nv30_fb_init_tile_region;
 		engine->fb.set_tile_region = nv40_fb_set_tile_region;
 		engine->fb.free_tile_region = nv30_fb_free_tile_region;
-		engine->fifo.channels = 32;
-		engine->fifo.init = nv40_fifo_init;
-		engine->fifo.takedown = nv04_fifo_fini;
-		engine->fifo.disable = nv04_fifo_disable;
-		engine->fifo.enable = nv04_fifo_enable;
-		engine->fifo.reassign = nv04_fifo_reassign;
-		engine->fifo.cache_pull = nv04_fifo_cache_pull;
-		engine->fifo.channel_id = nv10_fifo_channel_id;
-		engine->fifo.create_context = nv40_fifo_create_context;
-		engine->fifo.destroy_context = nv04_fifo_destroy_context;
-		engine->fifo.load_context = nv40_fifo_load_context;
-		engine->fifo.unload_context = nv40_fifo_unload_context;
 		engine->display.early_init = nv04_display_early_init;
 		engine->display.late_takedown = nv04_display_late_takedown;
 		engine->display.create = nv04_display_create;
@@ -317,18 +260,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->timer.takedown = nv04_timer_takedown;
 		engine->fb.init = nv50_fb_init;
 		engine->fb.takedown = nv50_fb_takedown;
-		engine->fifo.channels = 128;
-		engine->fifo.init = nv50_fifo_init;
-		engine->fifo.takedown = nv50_fifo_takedown;
-		engine->fifo.disable = nv04_fifo_disable;
-		engine->fifo.enable = nv04_fifo_enable;
-		engine->fifo.reassign = nv04_fifo_reassign;
-		engine->fifo.channel_id = nv50_fifo_channel_id;
-		engine->fifo.create_context = nv50_fifo_create_context;
-		engine->fifo.destroy_context = nv50_fifo_destroy_context;
-		engine->fifo.load_context = nv50_fifo_load_context;
-		engine->fifo.unload_context = nv50_fifo_unload_context;
-		engine->fifo.tlb_flush = nv50_fifo_tlb_flush;
 		engine->display.early_init = nv50_display_early_init;
 		engine->display.late_takedown = nv50_display_late_takedown;
 		engine->display.create = nv50_display_create;
@@ -392,17 +323,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->timer.takedown = nv04_timer_takedown;
 		engine->fb.init = nvc0_fb_init;
 		engine->fb.takedown = nvc0_fb_takedown;
-		engine->fifo.channels = 128;
-		engine->fifo.init = nvc0_fifo_init;
-		engine->fifo.takedown = nvc0_fifo_takedown;
-		engine->fifo.disable = nvc0_fifo_disable;
-		engine->fifo.enable = nvc0_fifo_enable;
-		engine->fifo.reassign = nvc0_fifo_reassign;
-		engine->fifo.channel_id = nvc0_fifo_channel_id;
-		engine->fifo.create_context = nvc0_fifo_create_context;
-		engine->fifo.destroy_context = nvc0_fifo_destroy_context;
-		engine->fifo.load_context = nvc0_fifo_load_context;
-		engine->fifo.unload_context = nvc0_fifo_unload_context;
 		engine->display.early_init = nv50_display_early_init;
 		engine->display.late_takedown = nv50_display_late_takedown;
 		engine->display.create = nv50_display_create;
@@ -445,17 +365,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->timer.takedown = nv04_timer_takedown;
 		engine->fb.init = nvc0_fb_init;
 		engine->fb.takedown = nvc0_fb_takedown;
-		engine->fifo.channels = 128;
-		engine->fifo.init = nvc0_fifo_init;
-		engine->fifo.takedown = nvc0_fifo_takedown;
-		engine->fifo.disable = nvc0_fifo_disable;
-		engine->fifo.enable = nvc0_fifo_enable;
-		engine->fifo.reassign = nvc0_fifo_reassign;
-		engine->fifo.channel_id = nvc0_fifo_channel_id;
-		engine->fifo.create_context = nvc0_fifo_create_context;
-		engine->fifo.destroy_context = nvc0_fifo_destroy_context;
-		engine->fifo.load_context = nvc0_fifo_load_context;
-		engine->fifo.unload_context = nvc0_fifo_unload_context;
 		engine->display.early_init = nouveau_stub_init;
 		engine->display.late_takedown = nouveau_stub_takedown;
 		engine->display.create = nvd0_display_create;
@@ -496,13 +405,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->timer.takedown = nv04_timer_takedown;
 		engine->fb.init = nvc0_fb_init;
 		engine->fb.takedown = nvc0_fb_takedown;
-		engine->fifo.channels = 0;
-		engine->fifo.init = nouveau_stub_init;
-		engine->fifo.takedown = nouveau_stub_takedown;
-		engine->fifo.disable = nvc0_fifo_disable;
-		engine->fifo.enable = nvc0_fifo_enable;
-		engine->fifo.reassign = nvc0_fifo_reassign;
-		engine->fifo.unload_context = nouveau_stub_init;
 		engine->display.early_init = nouveau_stub_init;
 		engine->display.late_takedown = nouveau_stub_takedown;
 		engine->display.create = nvd0_display_create;
@@ -607,61 +509,24 @@ nouveau_card_channel_init(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_channel *chan;
-	int ret, oclass;
+	int ret;
 
 	ret = nouveau_channel_alloc(dev, &chan, NULL, NvDmaFB, NvDmaTT);
 	dev_priv->channel = chan;
 	if (ret)
 		return ret;
-
 	mutex_unlock(&dev_priv->channel->mutex);
 
-	if (dev_priv->card_type <= NV_50) {
-		if (dev_priv->card_type < NV_50)
-			oclass = 0x0039;
-		else
-			oclass = 0x5039;
-
-		ret = nouveau_gpuobj_gr_new(chan, NvM2MF, oclass);
-		if (ret)
-			goto error;
-
-		ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
-					     &chan->m2mf_ntfy);
-		if (ret)
-			goto error;
-
-		ret = RING_SPACE(chan, 6);
-		if (ret)
-			goto error;
-
-		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NAME, 1);
-		OUT_RING (chan, NvM2MF);
-		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 3);
-		OUT_RING (chan, NvNotify0);
-		OUT_RING (chan, chan->vram_handle);
-		OUT_RING (chan, chan->gart_handle);
-	} else
-	if (dev_priv->card_type <= NV_D0) {
-		ret = nouveau_gpuobj_gr_new(chan, 0x9039, 0x9039);
-		if (ret)
-			goto error;
-
-		ret = RING_SPACE(chan, 2);
-		if (ret)
-			goto error;
-
-		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0000, 1);
-		OUT_RING (chan, 0x00009039);
-	}
-
-	FIRE_RING (chan);
-error:
-	if (ret)
-		nouveau_card_channel_fini(dev);
-	return ret;
+	nouveau_bo_move_init(chan);
+	return 0;
 }
 
+static const struct vga_switcheroo_client_ops nouveau_switcheroo_ops = {
+	.set_gpu_state = nouveau_switcheroo_set_state,
+	.reprobe = nouveau_switcheroo_reprobe,
+	.can_switch = nouveau_switcheroo_can_switch,
+};
+
 int
 nouveau_card_init(struct drm_device *dev)
 {
@@ -670,9 +535,7 @@ nouveau_card_init(struct drm_device *dev)
670 int ret, e = 0; 535 int ret, e = 0;
671 536
672 vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode); 537 vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);
673 vga_switcheroo_register_client(dev->pdev, nouveau_switcheroo_set_state, 538 vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops);
674 nouveau_switcheroo_reprobe,
675 nouveau_switcheroo_can_switch);
676 539
677 /* Initialise internal driver API hooks */ 540 /* Initialise internal driver API hooks */
678 ret = nouveau_init_engine_ptrs(dev); 541 ret = nouveau_init_engine_ptrs(dev);
@@ -745,6 +608,81 @@ nouveau_card_init(struct drm_device *dev)
745 if (!dev_priv->noaccel) { 608 if (!dev_priv->noaccel) {
746 switch (dev_priv->card_type) { 609 switch (dev_priv->card_type) {
747 case NV_04: 610 case NV_04:
611 nv04_fifo_create(dev);
612 break;
613 case NV_10:
614 case NV_20:
615 case NV_30:
616 if (dev_priv->chipset < 0x17)
617 nv10_fifo_create(dev);
618 else
619 nv17_fifo_create(dev);
620 break;
621 case NV_40:
622 nv40_fifo_create(dev);
623 break;
624 case NV_50:
625 if (dev_priv->chipset == 0x50)
626 nv50_fifo_create(dev);
627 else
628 nv84_fifo_create(dev);
629 break;
630 case NV_C0:
631 case NV_D0:
632 nvc0_fifo_create(dev);
633 break;
634 case NV_E0:
635 nve0_fifo_create(dev);
636 break;
637 default:
638 break;
639 }
640
641 switch (dev_priv->card_type) {
642 case NV_04:
643 nv04_fence_create(dev);
644 break;
645 case NV_10:
646 case NV_20:
647 case NV_30:
648 case NV_40:
649 case NV_50:
650 if (dev_priv->chipset < 0x84)
651 nv10_fence_create(dev);
652 else
653 nv84_fence_create(dev);
654 break;
655 case NV_C0:
656 case NV_D0:
657 case NV_E0:
658 nvc0_fence_create(dev);
659 break;
660 default:
661 break;
662 }
663
664 switch (dev_priv->card_type) {
665 case NV_04:
666 case NV_10:
667 case NV_20:
668 case NV_30:
669 case NV_40:
670 nv04_software_create(dev);
671 break;
672 case NV_50:
673 nv50_software_create(dev);
674 break;
675 case NV_C0:
676 case NV_D0:
677 case NV_E0:
678 nvc0_software_create(dev);
679 break;
680 default:
681 break;
682 }
683
684 switch (dev_priv->card_type) {
685 case NV_04:
748 nv04_graph_create(dev); 686 nv04_graph_create(dev);
749 break; 687 break;
750 case NV_10: 688 case NV_10:
@@ -764,6 +702,9 @@ nouveau_card_init(struct drm_device *dev)
764 case NV_D0: 702 case NV_D0:
765 nvc0_graph_create(dev); 703 nvc0_graph_create(dev);
766 break; 704 break;
705 case NV_E0:
706 nve0_graph_create(dev);
707 break;
767 default: 708 default:
768 break; 709 break;
769 } 710 }
@@ -796,8 +737,9 @@ nouveau_card_init(struct drm_device *dev)
796 } 737 }
797 break; 738 break;
798 case NV_C0: 739 case NV_C0:
799 nvc0_copy_create(dev, 0);
800 nvc0_copy_create(dev, 1); 740 nvc0_copy_create(dev, 1);
741 case NV_D0:
742 nvc0_copy_create(dev, 0);
801 break; 743 break;
802 default: 744 default:
803 break; 745 break;
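
A note on the hunk above: the missing break after the new NV_C0 case is deliberate fallthrough, not a dropped line. Spelled out, the intent appears to be (a sketch using the same helper; NVC0 ends up with both copy engines, NVD0 with PCE0 only):

    if (dev_priv->card_type == NV_C0)
            nvc0_copy_create(dev, 1);       /* PCE1: Fermi only */
    if (dev_priv->card_type == NV_C0 ||
        dev_priv->card_type == NV_D0)
            nvc0_copy_create(dev, 0);       /* PCE0: both */
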
@@ -830,16 +772,11 @@ nouveau_card_init(struct drm_device *dev)
830 goto out_engine; 772 goto out_engine;
831 } 773 }
832 } 774 }
833
834 /* PFIFO */
835 ret = engine->fifo.init(dev);
836 if (ret)
837 goto out_engine;
838 } 775 }
839 776
840 ret = nouveau_irq_init(dev); 777 ret = nouveau_irq_init(dev);
841 if (ret) 778 if (ret)
842 goto out_fifo; 779 goto out_engine;
843 780
844 ret = nouveau_display_create(dev); 781 ret = nouveau_display_create(dev);
845 if (ret) 782 if (ret)
@@ -848,14 +785,10 @@ nouveau_card_init(struct drm_device *dev)
848 nouveau_backlight_init(dev); 785 nouveau_backlight_init(dev);
849 nouveau_pm_init(dev); 786 nouveau_pm_init(dev);
850 787
851 ret = nouveau_fence_init(dev);
852 if (ret)
853 goto out_pm;
854
855 if (dev_priv->eng[NVOBJ_ENGINE_GR]) { 788 if (dev_priv->eng[NVOBJ_ENGINE_GR]) {
856 ret = nouveau_card_channel_init(dev); 789 ret = nouveau_card_channel_init(dev);
857 if (ret) 790 if (ret)
858 goto out_fence; 791 goto out_pm;
859 } 792 }
860 793
861 if (dev->mode_config.num_crtc) { 794 if (dev->mode_config.num_crtc) {
@@ -870,17 +803,12 @@ nouveau_card_init(struct drm_device *dev)
870 803
871out_chan: 804out_chan:
872 nouveau_card_channel_fini(dev); 805 nouveau_card_channel_fini(dev);
873out_fence:
874 nouveau_fence_fini(dev);
875out_pm: 806out_pm:
876 nouveau_pm_fini(dev); 807 nouveau_pm_fini(dev);
877 nouveau_backlight_exit(dev); 808 nouveau_backlight_exit(dev);
878 nouveau_display_destroy(dev); 809 nouveau_display_destroy(dev);
879out_irq: 810out_irq:
880 nouveau_irq_fini(dev); 811 nouveau_irq_fini(dev);
881out_fifo:
882 if (!dev_priv->noaccel)
883 engine->fifo.takedown(dev);
884out_engine: 812out_engine:
885 if (!dev_priv->noaccel) { 813 if (!dev_priv->noaccel) {
886 for (e = e - 1; e >= 0; e--) { 814 for (e = e - 1; e >= 0; e--) {
@@ -912,6 +840,7 @@ out_bios:
912out_display_early: 840out_display_early:
913 engine->display.late_takedown(dev); 841 engine->display.late_takedown(dev);
914out: 842out:
843 vga_switcheroo_unregister_client(dev->pdev);
915 vga_client_register(dev->pdev, NULL, NULL, NULL); 844 vga_client_register(dev->pdev, NULL, NULL, NULL);
916 return ret; 845 return ret;
917} 846}
@@ -928,13 +857,11 @@ static void nouveau_card_takedown(struct drm_device *dev)
928 } 857 }
929 858
930 nouveau_card_channel_fini(dev); 859 nouveau_card_channel_fini(dev);
931 nouveau_fence_fini(dev);
932 nouveau_pm_fini(dev); 860 nouveau_pm_fini(dev);
933 nouveau_backlight_exit(dev); 861 nouveau_backlight_exit(dev);
934 nouveau_display_destroy(dev); 862 nouveau_display_destroy(dev);
935 863
936 if (!dev_priv->noaccel) { 864 if (!dev_priv->noaccel) {
937 engine->fifo.takedown(dev);
938 for (e = NVOBJ_ENGINE_NR - 1; e >= 0; e--) { 865 for (e = NVOBJ_ENGINE_NR - 1; e >= 0; e--) {
939 if (dev_priv->eng[e]) { 866 if (dev_priv->eng[e]) {
940 dev_priv->eng[e]->fini(dev, e, false); 867 dev_priv->eng[e]->fini(dev, e, false);
@@ -969,6 +896,7 @@ static void nouveau_card_takedown(struct drm_device *dev)
969 896
970 nouveau_irq_fini(dev); 897 nouveau_irq_fini(dev);
971 898
899 vga_switcheroo_unregister_client(dev->pdev);
972 vga_client_register(dev->pdev, NULL, NULL, NULL); 900 vga_client_register(dev->pdev, NULL, NULL, NULL);
973} 901}
974 902
@@ -1176,7 +1104,7 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
1176 goto err_priv; 1104 goto err_priv;
1177 } 1105 }
1178 1106
1179 NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n", 1107 NV_INFO(dev, "Detected an NV%02x generation card (0x%08x)\n",
1180 dev_priv->card_type, reg0); 1108 dev_priv->card_type, reg0);
1181 1109
1182 /* map the mmio regs, limiting the amount to preserve vmap space */ 1110 /* map the mmio regs, limiting the amount to preserve vmap space */
@@ -1219,6 +1147,8 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
1219 if (nouveau_noaccel == -1) { 1147 if (nouveau_noaccel == -1) {
1220 switch (dev_priv->chipset) { 1148 switch (dev_priv->chipset) {
1221 case 0xd9: /* known broken */ 1149 case 0xd9: /* known broken */
1150 case 0xe4: /* needs binary driver firmware */
1151 case 0xe7: /* needs binary driver firmware */
1222 NV_INFO(dev, "acceleration disabled by default, pass " 1152 NV_INFO(dev, "acceleration disabled by default, pass "
1223 "noaccel=0 to force enable\n"); 1153 "noaccel=0 to force enable\n");
1224 dev_priv->noaccel = true; 1154 dev_priv->noaccel = true;
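
The vga_switcheroo changes in this file track an API change elsewhere in the merge: the three separate callbacks previously passed to vga_switcheroo_register_client() are replaced by a single const ops table (nouveau_switcheroo_ops above). The shape of the new contract, as a minimal sketch:

    static const struct vga_switcheroo_client_ops nouveau_switcheroo_ops = {
            .set_gpu_state = nouveau_switcheroo_set_state,
            .reprobe       = nouveau_switcheroo_reprobe,
            .can_switch    = nouveau_switcheroo_can_switch,
    };

    /* register once during card init ... */
    vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops);

    /* ... and unregister on both teardown and the init error path,
     * which is why the 'out:' label above gains an unregister call. */
    vga_switcheroo_unregister_client(dev->pdev);
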
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.c b/drivers/gpu/drm/nouveau/nouveau_vm.c
index 2bf6c0350b4b..11edd5e91a0a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.c
@@ -77,6 +77,63 @@ nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
77} 77}
78 78
79void 79void
80nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
81 struct nouveau_mem *mem)
82{
83 struct nouveau_vm *vm = vma->vm;
84 int big = vma->node->type != vm->spg_shift;
85 u32 offset = vma->node->offset + (delta >> 12);
86 u32 bits = vma->node->type - 12;
87 u32 num = length >> vma->node->type;
88 u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
89 u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
90 u32 max = 1 << (vm->pgt_bits - bits);
91 unsigned m, sglen;
92 u32 end, len;
93 int i;
94 struct scatterlist *sg;
95
96 for_each_sg(mem->sg->sgl, sg, mem->sg->nents, i) {
97 struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];
98 sglen = sg_dma_len(sg) >> PAGE_SHIFT;
99
100 end = pte + sglen;
101 if (unlikely(end >= max))
102 end = max;
103 len = end - pte;
104
105 for (m = 0; m < len; m++) {
106 dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
107
108 vm->map_sg(vma, pgt, mem, pte, 1, &addr);
109 num--;
110 pte++;
111
112 if (num == 0)
113 goto finish;
114 }
115 if (unlikely(end >= max)) {
116 pde++;
117 pte = 0;
118 }
119 if (m < sglen) {
120 for (; m < sglen; m++) {
121 dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
122
123 vm->map_sg(vma, pgt, mem, pte, 1, &addr);
124 num--;
125 pte++;
126 if (num == 0)
127 goto finish;
128 }
129 }
130
131 }
132finish:
133 vm->flush(vm);
134}
135
136void
80nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length, 137nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
81 struct nouveau_mem *mem) 138 struct nouveau_mem *mem)
82{ 139{
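
The index arithmetic at the top of the new nouveau_vm_map_sg_table() is dense; restated with the role of each field (all taken from the function above, 12 being the small-page shift):

    u32 offset = vma->node->offset + (delta >> 12);     /* first PTE, VM-global */
    u32 bits   = vma->node->type - 12;                  /* 0 for small pages */
    u32 pde    = (offset >> vm->pgt_bits) - vm->fpde;   /* which page table */
    u32 pte    = (offset & ((1 << vm->pgt_bits) - 1)) >> bits; /* slot within it */
    u32 max    = 1 << (vm->pgt_bits - bits);            /* PTE slots per table */

Each DMA segment is then written one 4KiB page at a time through vm->map_sg(..., pte, 1, &addr); when pte reaches max, the walker bumps pde and restarts at pte 0. One caveat for readers: the tail loop after the pde++/pte=0 step keeps using the pgt fetched for the previous table, so a segment straddling a page-table boundary would seem to need pgt refetched from vm->pgt[pde].obj[big] first, the way the existing nouveau_vm_map_sg() does per chunk.
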
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.h b/drivers/gpu/drm/nouveau/nouveau_vm.h
index 4fb6e728734d..a8246e7e4a89 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.h
@@ -72,6 +72,9 @@ struct nouveau_vm {
72 u64 phys, u64 delta); 72 u64 phys, u64 delta);
73 void (*map_sg)(struct nouveau_vma *, struct nouveau_gpuobj *, 73 void (*map_sg)(struct nouveau_vma *, struct nouveau_gpuobj *,
74 struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *); 74 struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
75
76 void (*map_sg_table)(struct nouveau_vma *, struct nouveau_gpuobj *,
77 struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
75 void (*unmap)(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt); 78 void (*unmap)(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt);
76 void (*flush)(struct nouveau_vm *); 79 void (*flush)(struct nouveau_vm *);
77}; 80};
@@ -90,7 +93,8 @@ void nouveau_vm_unmap(struct nouveau_vma *);
90void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length); 93void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length);
91void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length, 94void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
92 struct nouveau_mem *); 95 struct nouveau_mem *);
93 96void nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
97 struct nouveau_mem *mem);
94/* nv50_vm.c */ 98/* nv50_vm.c */
95void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde, 99void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
96 struct nouveau_gpuobj *pgt[2]); 100 struct nouveau_gpuobj *pgt[2]);
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index 728d07584d39..4c31c63e5528 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -1047,7 +1047,7 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
1047 drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256); 1047 drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
1048 1048
1049 ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM, 1049 ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
1050 0, 0x0000, &nv_crtc->cursor.nvbo); 1050 0, 0x0000, NULL, &nv_crtc->cursor.nvbo);
1051 if (!ret) { 1051 if (!ret) {
1052 ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM); 1052 ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
1053 if (!ret) 1053 if (!ret)
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c
index 7047d37e8dab..44488e3a257d 100644
--- a/drivers/gpu/drm/nouveau/nv04_display.c
+++ b/drivers/gpu/drm/nouveau/nv04_display.c
@@ -98,6 +98,13 @@ nv04_display_early_init(struct drm_device *dev)
98 NVSetOwner(dev, 0); 98 NVSetOwner(dev, 0);
99 } 99 }
100 100
 101 /* ensure vblank interrupts are off; they can't be enabled until
102 * drm_vblank has been initialised
103 */
104 NVWriteCRTC(dev, 0, NV_PCRTC_INTR_EN_0, 0);
105 if (nv_two_heads(dev))
106 NVWriteCRTC(dev, 1, NV_PCRTC_INTR_EN_0, 0);
107
101 return 0; 108 return 0;
102} 109}
103 110
@@ -246,6 +253,10 @@ nv04_display_init(struct drm_device *dev)
246void 253void
247nv04_display_fini(struct drm_device *dev) 254nv04_display_fini(struct drm_device *dev)
248{ 255{
256 /* disable vblank interrupts */
257 NVWriteCRTC(dev, 0, NV_PCRTC_INTR_EN_0, 0);
258 if (nv_two_heads(dev))
259 NVWriteCRTC(dev, 1, NV_PCRTC_INTR_EN_0, 0);
249} 260}
250 261
251static void 262static void
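
Both hunks here bracket the window in which drm_vblank's per-CRTC state actually exists: early_init now parks NV_PCRTC_INTR_EN_0 before drm_vblank_init() has run, and fini parks it again ahead of teardown, so a stray vblank IRQ is never serviced without state behind it. For contrast, the enable side on these pre-NV50 heads amounts to roughly the following (a sketch, not the verbatim driver code; NV_PCRTC_INTR_0_VBLANK is assumed to be the vblank bit of that register):

    static void crtc_vblank_set(struct drm_device *dev, int head, bool on)
    {
            NVWriteCRTC(dev, head, NV_PCRTC_INTR_EN_0,
                        on ? NV_PCRTC_INTR_0_VBLANK : 0);
    }
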
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 7a1189371096..7cd7857347ef 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -41,7 +41,7 @@ nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
41 if (ret) 41 if (ret)
42 return ret; 42 return ret;
43 43
44 BEGIN_RING(chan, NvSubImageBlit, 0x0300, 3); 44 BEGIN_NV04(chan, NvSubImageBlit, 0x0300, 3);
45 OUT_RING(chan, (region->sy << 16) | region->sx); 45 OUT_RING(chan, (region->sy << 16) | region->sx);
46 OUT_RING(chan, (region->dy << 16) | region->dx); 46 OUT_RING(chan, (region->dy << 16) | region->dx);
47 OUT_RING(chan, (region->height << 16) | region->width); 47 OUT_RING(chan, (region->height << 16) | region->width);
@@ -62,15 +62,15 @@ nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
62 if (ret) 62 if (ret)
63 return ret; 63 return ret;
64 64
65 BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1); 65 BEGIN_NV04(chan, NvSubGdiRect, 0x02fc, 1);
66 OUT_RING(chan, (rect->rop != ROP_COPY) ? 1 : 3); 66 OUT_RING(chan, (rect->rop != ROP_COPY) ? 1 : 3);
67 BEGIN_RING(chan, NvSubGdiRect, 0x03fc, 1); 67 BEGIN_NV04(chan, NvSubGdiRect, 0x03fc, 1);
68 if (info->fix.visual == FB_VISUAL_TRUECOLOR || 68 if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
69 info->fix.visual == FB_VISUAL_DIRECTCOLOR) 69 info->fix.visual == FB_VISUAL_DIRECTCOLOR)
70 OUT_RING(chan, ((uint32_t *)info->pseudo_palette)[rect->color]); 70 OUT_RING(chan, ((uint32_t *)info->pseudo_palette)[rect->color]);
71 else 71 else
72 OUT_RING(chan, rect->color); 72 OUT_RING(chan, rect->color);
73 BEGIN_RING(chan, NvSubGdiRect, 0x0400, 2); 73 BEGIN_NV04(chan, NvSubGdiRect, 0x0400, 2);
74 OUT_RING(chan, (rect->dx << 16) | rect->dy); 74 OUT_RING(chan, (rect->dx << 16) | rect->dy);
75 OUT_RING(chan, (rect->width << 16) | rect->height); 75 OUT_RING(chan, (rect->width << 16) | rect->height);
76 FIRE_RING(chan); 76 FIRE_RING(chan);
@@ -110,7 +110,7 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
110 bg = image->bg_color; 110 bg = image->bg_color;
111 } 111 }
112 112
113 BEGIN_RING(chan, NvSubGdiRect, 0x0be4, 7); 113 BEGIN_NV04(chan, NvSubGdiRect, 0x0be4, 7);
114 OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff)); 114 OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));
115 OUT_RING(chan, ((image->dy + image->height) << 16) | 115 OUT_RING(chan, ((image->dy + image->height) << 16) |
116 ((image->dx + image->width) & 0xffff)); 116 ((image->dx + image->width) & 0xffff));
@@ -127,7 +127,7 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
127 if (ret) 127 if (ret)
128 return ret; 128 return ret;
129 129
130 BEGIN_RING(chan, NvSubGdiRect, 0x0c00, iter_len); 130 BEGIN_NV04(chan, NvSubGdiRect, 0x0c00, iter_len);
131 OUT_RINGp(chan, data, iter_len); 131 OUT_RINGp(chan, data, iter_len);
132 data += iter_len; 132 data += iter_len;
133 dsize -= iter_len; 133 dsize -= iter_len;
@@ -209,25 +209,25 @@ nv04_fbcon_accel_init(struct fb_info *info)
209 return 0; 209 return 0;
210 } 210 }
211 211
212 BEGIN_RING(chan, sub, 0x0000, 1); 212 BEGIN_NV04(chan, sub, 0x0000, 1);
213 OUT_RING(chan, NvCtxSurf2D); 213 OUT_RING(chan, NvCtxSurf2D);
214 BEGIN_RING(chan, sub, 0x0184, 2); 214 BEGIN_NV04(chan, sub, 0x0184, 2);
215 OUT_RING(chan, NvDmaFB); 215 OUT_RING(chan, NvDmaFB);
216 OUT_RING(chan, NvDmaFB); 216 OUT_RING(chan, NvDmaFB);
217 BEGIN_RING(chan, sub, 0x0300, 4); 217 BEGIN_NV04(chan, sub, 0x0300, 4);
218 OUT_RING(chan, surface_fmt); 218 OUT_RING(chan, surface_fmt);
219 OUT_RING(chan, info->fix.line_length | (info->fix.line_length << 16)); 219 OUT_RING(chan, info->fix.line_length | (info->fix.line_length << 16));
220 OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base); 220 OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base);
221 OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base); 221 OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base);
222 222
223 BEGIN_RING(chan, sub, 0x0000, 1); 223 BEGIN_NV04(chan, sub, 0x0000, 1);
224 OUT_RING(chan, NvRop); 224 OUT_RING(chan, NvRop);
225 BEGIN_RING(chan, sub, 0x0300, 1); 225 BEGIN_NV04(chan, sub, 0x0300, 1);
226 OUT_RING(chan, 0x55); 226 OUT_RING(chan, 0x55);
227 227
228 BEGIN_RING(chan, sub, 0x0000, 1); 228 BEGIN_NV04(chan, sub, 0x0000, 1);
229 OUT_RING(chan, NvImagePatt); 229 OUT_RING(chan, NvImagePatt);
230 BEGIN_RING(chan, sub, 0x0300, 8); 230 BEGIN_NV04(chan, sub, 0x0300, 8);
231 OUT_RING(chan, pattern_fmt); 231 OUT_RING(chan, pattern_fmt);
232#ifdef __BIG_ENDIAN 232#ifdef __BIG_ENDIAN
233 OUT_RING(chan, 2); 233 OUT_RING(chan, 2);
@@ -241,31 +241,31 @@ nv04_fbcon_accel_init(struct fb_info *info)
241 OUT_RING(chan, ~0); 241 OUT_RING(chan, ~0);
242 OUT_RING(chan, ~0); 242 OUT_RING(chan, ~0);
243 243
244 BEGIN_RING(chan, sub, 0x0000, 1); 244 BEGIN_NV04(chan, sub, 0x0000, 1);
245 OUT_RING(chan, NvClipRect); 245 OUT_RING(chan, NvClipRect);
246 BEGIN_RING(chan, sub, 0x0300, 2); 246 BEGIN_NV04(chan, sub, 0x0300, 2);
247 OUT_RING(chan, 0); 247 OUT_RING(chan, 0);
248 OUT_RING(chan, (info->var.yres_virtual << 16) | info->var.xres_virtual); 248 OUT_RING(chan, (info->var.yres_virtual << 16) | info->var.xres_virtual);
249 249
250 BEGIN_RING(chan, NvSubImageBlit, 0x0000, 1); 250 BEGIN_NV04(chan, NvSubImageBlit, 0x0000, 1);
251 OUT_RING(chan, NvImageBlit); 251 OUT_RING(chan, NvImageBlit);
252 BEGIN_RING(chan, NvSubImageBlit, 0x019c, 1); 252 BEGIN_NV04(chan, NvSubImageBlit, 0x019c, 1);
253 OUT_RING(chan, NvCtxSurf2D); 253 OUT_RING(chan, NvCtxSurf2D);
254 BEGIN_RING(chan, NvSubImageBlit, 0x02fc, 1); 254 BEGIN_NV04(chan, NvSubImageBlit, 0x02fc, 1);
255 OUT_RING(chan, 3); 255 OUT_RING(chan, 3);
256 256
257 BEGIN_RING(chan, NvSubGdiRect, 0x0000, 1); 257 BEGIN_NV04(chan, NvSubGdiRect, 0x0000, 1);
258 OUT_RING(chan, NvGdiRect); 258 OUT_RING(chan, NvGdiRect);
259 BEGIN_RING(chan, NvSubGdiRect, 0x0198, 1); 259 BEGIN_NV04(chan, NvSubGdiRect, 0x0198, 1);
260 OUT_RING(chan, NvCtxSurf2D); 260 OUT_RING(chan, NvCtxSurf2D);
261 BEGIN_RING(chan, NvSubGdiRect, 0x0188, 2); 261 BEGIN_NV04(chan, NvSubGdiRect, 0x0188, 2);
262 OUT_RING(chan, NvImagePatt); 262 OUT_RING(chan, NvImagePatt);
263 OUT_RING(chan, NvRop); 263 OUT_RING(chan, NvRop);
264 BEGIN_RING(chan, NvSubGdiRect, 0x0304, 1); 264 BEGIN_NV04(chan, NvSubGdiRect, 0x0304, 1);
265 OUT_RING(chan, 1); 265 OUT_RING(chan, 1);
266 BEGIN_RING(chan, NvSubGdiRect, 0x0300, 1); 266 BEGIN_NV04(chan, NvSubGdiRect, 0x0300, 1);
267 OUT_RING(chan, rect_fmt); 267 OUT_RING(chan, rect_fmt);
268 BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1); 268 BEGIN_NV04(chan, NvSubGdiRect, 0x02fc, 1);
269 OUT_RING(chan, 3); 269 OUT_RING(chan, 3);
270 270
271 FIRE_RING(chan); 271 FIRE_RING(chan);
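
The mechanical BEGIN_RING to BEGIN_NV04 rename in this file is part of splitting the submission macros per hardware generation (BEGIN_NVC0 appears for Fermi elsewhere in this series). On NV04-class channels the macro is assumed to just emit a method header ahead of the data words, roughly:

    static inline void
    BEGIN_NV04(struct nouveau_channel *chan, int subc, int mthd, int size)
    {
            /* header word: data-word count in bits 28:18, subchannel in
             * 15:13, 4-byte-aligned method address in 12:0; the payload
             * follows via OUT_RING() */
            OUT_RING(chan, (size << 18) | (subc << 13) | mthd);
    }
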
diff --git a/drivers/gpu/drm/nouveau/nv04_fence.c b/drivers/gpu/drm/nouveau/nv04_fence.c
new file mode 100644
index 000000000000..abe89db6de24
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_fence.c
@@ -0,0 +1,140 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_dma.h"
28#include "nouveau_ramht.h"
29#include "nouveau_fence.h"
30
31struct nv04_fence_chan {
32 struct nouveau_fence_chan base;
33 atomic_t sequence;
34};
35
36struct nv04_fence_priv {
37 struct nouveau_fence_priv base;
38};
39
40static int
41nv04_fence_emit(struct nouveau_fence *fence)
42{
43 struct nouveau_channel *chan = fence->channel;
44 int ret = RING_SPACE(chan, 2);
45 if (ret == 0) {
46 BEGIN_NV04(chan, NvSubSw, 0x0150, 1);
47 OUT_RING (chan, fence->sequence);
48 FIRE_RING (chan);
49 }
50 return ret;
51}
52
53static int
54nv04_fence_sync(struct nouveau_fence *fence,
55 struct nouveau_channel *prev, struct nouveau_channel *chan)
56{
57 return -ENODEV;
58}
59
60int
61nv04_fence_mthd(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
62{
63 struct nv04_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE];
64 atomic_set(&fctx->sequence, data);
65 return 0;
66}
67
68static u32
69nv04_fence_read(struct nouveau_channel *chan)
70{
71 struct nv04_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE];
72 return atomic_read(&fctx->sequence);
73}
74
75static void
76nv04_fence_context_del(struct nouveau_channel *chan, int engine)
77{
78 struct nv04_fence_chan *fctx = chan->engctx[engine];
79 nouveau_fence_context_del(&fctx->base);
80 chan->engctx[engine] = NULL;
81 kfree(fctx);
82}
83
84static int
85nv04_fence_context_new(struct nouveau_channel *chan, int engine)
86{
87 struct nv04_fence_chan *fctx = kzalloc(sizeof(*fctx), GFP_KERNEL);
88 if (fctx) {
89 nouveau_fence_context_new(&fctx->base);
90 atomic_set(&fctx->sequence, 0);
91 chan->engctx[engine] = fctx;
92 return 0;
93 }
94 return -ENOMEM;
95}
96
97static int
98nv04_fence_fini(struct drm_device *dev, int engine, bool suspend)
99{
100 return 0;
101}
102
103static int
104nv04_fence_init(struct drm_device *dev, int engine)
105{
106 return 0;
107}
108
109static void
110nv04_fence_destroy(struct drm_device *dev, int engine)
111{
112 struct drm_nouveau_private *dev_priv = dev->dev_private;
113 struct nv04_fence_priv *priv = nv_engine(dev, engine);
114
115 dev_priv->eng[engine] = NULL;
116 kfree(priv);
117}
118
119int
120nv04_fence_create(struct drm_device *dev)
121{
122 struct drm_nouveau_private *dev_priv = dev->dev_private;
123 struct nv04_fence_priv *priv;
 124 int ret = 0;
125
126 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
127 if (!priv)
128 return -ENOMEM;
129
130 priv->base.engine.destroy = nv04_fence_destroy;
131 priv->base.engine.init = nv04_fence_init;
132 priv->base.engine.fini = nv04_fence_fini;
133 priv->base.engine.context_new = nv04_fence_context_new;
134 priv->base.engine.context_del = nv04_fence_context_del;
135 priv->base.emit = nv04_fence_emit;
136 priv->base.sync = nv04_fence_sync;
137 priv->base.read = nv04_fence_read;
138 dev_priv->eng[NVOBJ_ENGINE_FENCE] = &priv->base.engine;
139 return ret;
140}
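
nv04 hardware has no per-channel sequence register the host can read back, so this engine closes the loop through a software method: emit writes the sequence as method 0x0150 on the software subchannel, PFIFO traps it, and the handler parks the value in an atomic for read() to return. The round trip, as a fragment sketch built from the functions above:

    /* channel side (nv04_fence_emit) */
    BEGIN_NV04(chan, NvSubSw, 0x0150, 1);
    OUT_RING (chan, fence->sequence);
    FIRE_RING(chan);

    /* host side: the PFIFO ISR routes the trapped method here
     * (wired up as class 0x006e method 0x0150 in nv04_software.c) */
    nv04_fence_mthd(chan, 0x006e, 0x0150, data); /* atomic_set(&fctx->sequence, data) */

    /* so, ignoring wraparound, a fence counts as signalled once */
    bool done = nv04_fence_read(chan) >= fence->sequence;
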
diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c
index db465a3ee1b2..a6295cd00ec7 100644
--- a/drivers/gpu/drm/nouveau/nv04_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv04_fifo.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007 Ben Skeggs. 2 * Copyright (C) 2012 Ben Skeggs.
3 * All Rights Reserved. 3 * All Rights Reserved.
4 * 4 *
5 * Permission is hereby granted, free of charge, to any person obtaining 5 * Permission is hereby granted, free of charge, to any person obtaining
@@ -27,49 +27,38 @@
27#include "drmP.h" 27#include "drmP.h"
28#include "drm.h" 28#include "drm.h"
29#include "nouveau_drv.h" 29#include "nouveau_drv.h"
30#include "nouveau_ramht.h" 30#include "nouveau_fifo.h"
31#include "nouveau_util.h" 31#include "nouveau_util.h"
32 32#include "nouveau_ramht.h"
33#define NV04_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV04_RAMFC__SIZE)) 33#include "nouveau_software.h"
34#define NV04_RAMFC__SIZE 32 34
35#define NV04_RAMFC_DMA_PUT 0x00 35static struct ramfc_desc {
36#define NV04_RAMFC_DMA_GET 0x04 36 unsigned bits:6;
37#define NV04_RAMFC_DMA_INSTANCE 0x08 37 unsigned ctxs:5;
38#define NV04_RAMFC_DMA_STATE 0x0C 38 unsigned ctxp:8;
39#define NV04_RAMFC_DMA_FETCH 0x10 39 unsigned regs:5;
40#define NV04_RAMFC_ENGINE 0x14 40 unsigned regp;
41#define NV04_RAMFC_PULL1_ENGINE 0x18 41} nv04_ramfc[] = {
42 42 { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
43#define RAMFC_WR(offset, val) nv_wo32(chan->ramfc, NV04_RAMFC_##offset, (val)) 43 { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
44#define RAMFC_RD(offset) nv_ro32(chan->ramfc, NV04_RAMFC_##offset) 44 { 16, 0, 0x08, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
45 45 { 16, 16, 0x08, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
46void 46 { 32, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_STATE },
47nv04_fifo_disable(struct drm_device *dev) 47 { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
48{ 48 { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_ENGINE },
49 uint32_t tmp; 49 { 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_PULL1 },
50 50 {}
51 tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH); 51};
52 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, tmp & ~1); 52
53 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 0); 53struct nv04_fifo_priv {
54 tmp = nv_rd32(dev, NV03_PFIFO_CACHE1_PULL1); 54 struct nouveau_fifo_priv base;
55 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, tmp & ~1); 55 struct ramfc_desc *ramfc_desc;
56} 56};
57 57
58void 58struct nv04_fifo_chan {
59nv04_fifo_enable(struct drm_device *dev) 59 struct nouveau_fifo_chan base;
60{ 60 struct nouveau_gpuobj *ramfc;
61 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1); 61};
62 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
63}
64
65bool
66nv04_fifo_reassign(struct drm_device *dev, bool enable)
67{
68 uint32_t reassign = nv_rd32(dev, NV03_PFIFO_CACHES);
69
70 nv_wr32(dev, NV03_PFIFO_CACHES, enable ? 1 : 0);
71 return (reassign == 1);
72}
73 62
74bool 63bool
75nv04_fifo_cache_pull(struct drm_device *dev, bool enable) 64nv04_fifo_cache_pull(struct drm_device *dev, bool enable)
@@ -86,13 +75,13 @@ nv04_fifo_cache_pull(struct drm_device *dev, bool enable)
86 * invalidate the most recently calculated instance. 75 * invalidate the most recently calculated instance.
87 */ 76 */
88 if (!nv_wait(dev, NV04_PFIFO_CACHE1_PULL0, 77 if (!nv_wait(dev, NV04_PFIFO_CACHE1_PULL0,
89 NV04_PFIFO_CACHE1_PULL0_HASH_BUSY, 0)) 78 NV04_PFIFO_CACHE1_PULL0_HASH_BUSY, 0))
90 NV_ERROR(dev, "Timeout idling the PFIFO puller.\n"); 79 NV_ERROR(dev, "Timeout idling the PFIFO puller.\n");
91 80
92 if (nv_rd32(dev, NV04_PFIFO_CACHE1_PULL0) & 81 if (nv_rd32(dev, NV04_PFIFO_CACHE1_PULL0) &
93 NV04_PFIFO_CACHE1_PULL0_HASH_FAILED) 82 NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
94 nv_wr32(dev, NV03_PFIFO_INTR_0, 83 nv_wr32(dev, NV03_PFIFO_INTR_0,
95 NV_PFIFO_INTR_CACHE_ERROR); 84 NV_PFIFO_INTR_CACHE_ERROR);
96 85
97 nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0); 86 nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
98 } 87 }
@@ -100,242 +89,182 @@ nv04_fifo_cache_pull(struct drm_device *dev, bool enable)
100 return pull & 1; 89 return pull & 1;
101} 90}
102 91
103int 92static int
104nv04_fifo_channel_id(struct drm_device *dev) 93nv04_fifo_context_new(struct nouveau_channel *chan, int engine)
105{
106 return nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) &
107 NV03_PFIFO_CACHE1_PUSH1_CHID_MASK;
108}
109
110#ifdef __BIG_ENDIAN
111#define DMA_FETCH_ENDIANNESS NV_PFIFO_CACHE1_BIG_ENDIAN
112#else
113#define DMA_FETCH_ENDIANNESS 0
114#endif
115
116int
117nv04_fifo_create_context(struct nouveau_channel *chan)
118{ 94{
119 struct drm_device *dev = chan->dev; 95 struct drm_device *dev = chan->dev;
120 struct drm_nouveau_private *dev_priv = dev->dev_private; 96 struct drm_nouveau_private *dev_priv = dev->dev_private;
97 struct nv04_fifo_priv *priv = nv_engine(dev, engine);
98 struct nv04_fifo_chan *fctx;
121 unsigned long flags; 99 unsigned long flags;
122 int ret; 100 int ret;
123 101
124 ret = nouveau_gpuobj_new_fake(dev, NV04_RAMFC(chan->id), ~0, 102 fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
125 NV04_RAMFC__SIZE, 103 if (!fctx)
126 NVOBJ_FLAG_ZERO_ALLOC | 104 return -ENOMEM;
127 NVOBJ_FLAG_ZERO_FREE,
128 &chan->ramfc);
129 if (ret)
130 return ret;
131 105
106 /* map channel control registers */
132 chan->user = ioremap(pci_resource_start(dev->pdev, 0) + 107 chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
133 NV03_USER(chan->id), PAGE_SIZE); 108 NV03_USER(chan->id), PAGE_SIZE);
134 if (!chan->user) 109 if (!chan->user) {
135 return -ENOMEM; 110 ret = -ENOMEM;
136 111 goto error;
137 spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 112 }
138
139 /* Setup initial state */
140 RAMFC_WR(DMA_PUT, chan->pushbuf_base);
141 RAMFC_WR(DMA_GET, chan->pushbuf_base);
142 RAMFC_WR(DMA_INSTANCE, chan->pushbuf->pinst >> 4);
143 RAMFC_WR(DMA_FETCH, (NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
144 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
145 NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
146 DMA_FETCH_ENDIANNESS));
147 113
148 /* enable the fifo dma operation */ 114 /* initialise default fifo context */
149 nv_wr32(dev, NV04_PFIFO_MODE, 115 ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramfc->pinst +
150 nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id)); 116 chan->id * 32, ~0, 32,
117 NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc);
118 if (ret)
119 goto error;
120
121 nv_wo32(fctx->ramfc, 0x00, chan->pushbuf_base);
122 nv_wo32(fctx->ramfc, 0x04, chan->pushbuf_base);
123 nv_wo32(fctx->ramfc, 0x08, chan->pushbuf->pinst >> 4);
124 nv_wo32(fctx->ramfc, 0x0c, 0x00000000);
125 nv_wo32(fctx->ramfc, 0x10, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
126 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
127#ifdef __BIG_ENDIAN
128 NV_PFIFO_CACHE1_BIG_ENDIAN |
129#endif
130 NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
131 nv_wo32(fctx->ramfc, 0x14, 0x00000000);
132 nv_wo32(fctx->ramfc, 0x18, 0x00000000);
133 nv_wo32(fctx->ramfc, 0x1c, 0x00000000);
151 134
135 /* enable dma mode on the channel */
136 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
137 nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id));
152 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); 138 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
153 return 0; 139
140error:
141 if (ret)
142 priv->base.base.context_del(chan, engine);
143 return ret;
154} 144}
155 145
156void 146void
157nv04_fifo_destroy_context(struct nouveau_channel *chan) 147nv04_fifo_context_del(struct nouveau_channel *chan, int engine)
158{ 148{
159 struct drm_device *dev = chan->dev; 149 struct drm_device *dev = chan->dev;
160 struct drm_nouveau_private *dev_priv = dev->dev_private; 150 struct drm_nouveau_private *dev_priv = dev->dev_private;
161 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; 151 struct nv04_fifo_priv *priv = nv_engine(chan->dev, engine);
152 struct nv04_fifo_chan *fctx = chan->engctx[engine];
153 struct ramfc_desc *c = priv->ramfc_desc;
162 unsigned long flags; 154 unsigned long flags;
155 int chid;
163 156
157 /* prevent fifo context switches */
164 spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 158 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
165 pfifo->reassign(dev, false); 159 nv_wr32(dev, NV03_PFIFO_CACHES, 0);
166 160
167 /* Unload the context if it's the currently active one */ 161 /* if this channel is active, replace it with a null context */
168 if (pfifo->channel_id(dev) == chan->id) { 162 chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & priv->base.channels;
169 pfifo->disable(dev); 163 if (chid == chan->id) {
170 pfifo->unload_context(dev); 164 nv_mask(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
171 pfifo->enable(dev); 165 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 0);
166 nv_mask(dev, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);
167
168 do {
169 u32 mask = ((1ULL << c->bits) - 1) << c->regs;
170 nv_mask(dev, c->regp, mask, 0x00000000);
171 } while ((++c)->bits);
172
173 nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
174 nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
175 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
176 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
177 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
172 } 178 }
173 179
174 /* Keep it from being rescheduled */ 180 /* restore normal operation, after disabling dma mode */
175 nv_mask(dev, NV04_PFIFO_MODE, 1 << chan->id, 0); 181 nv_mask(dev, NV04_PFIFO_MODE, 1 << chan->id, 0);
176 182 nv_wr32(dev, NV03_PFIFO_CACHES, 1);
177 pfifo->reassign(dev, true);
178 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); 183 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
179 184
180 /* Free the channel resources */ 185 /* clean up */
186 nouveau_gpuobj_ref(NULL, &fctx->ramfc);
187 nouveau_gpuobj_ref(NULL, &chan->ramfc); /*XXX: nv40 */
181 if (chan->user) { 188 if (chan->user) {
182 iounmap(chan->user); 189 iounmap(chan->user);
183 chan->user = NULL; 190 chan->user = NULL;
184 } 191 }
185 nouveau_gpuobj_ref(NULL, &chan->ramfc);
186}
187
188static void
189nv04_fifo_do_load_context(struct drm_device *dev, int chid)
190{
191 struct drm_nouveau_private *dev_priv = dev->dev_private;
192 uint32_t fc = NV04_RAMFC(chid), tmp;
193
194 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0));
195 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4));
196 tmp = nv_ri32(dev, fc + 8);
197 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE, tmp & 0xFFFF);
198 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT, tmp >> 16);
199 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, nv_ri32(dev, fc + 12));
200 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_FETCH, nv_ri32(dev, fc + 16));
201 nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 20));
202 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 24));
203
204 nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
205 nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
206}
207
208int
209nv04_fifo_load_context(struct nouveau_channel *chan)
210{
211 uint32_t tmp;
212
213 nv_wr32(chan->dev, NV03_PFIFO_CACHE1_PUSH1,
214 NV03_PFIFO_CACHE1_PUSH1_DMA | chan->id);
215 nv04_fifo_do_load_context(chan->dev, chan->id);
216 nv_wr32(chan->dev, NV04_PFIFO_CACHE1_DMA_PUSH, 1);
217
218 /* Reset NV04_PFIFO_CACHE1_DMA_CTL_AT_INFO to INVALID */
219 tmp = nv_rd32(chan->dev, NV04_PFIFO_CACHE1_DMA_CTL) & ~(1 << 31);
220 nv_wr32(chan->dev, NV04_PFIFO_CACHE1_DMA_CTL, tmp);
221
222 return 0;
223} 192}
224 193
225int 194int
226nv04_fifo_unload_context(struct drm_device *dev) 195nv04_fifo_init(struct drm_device *dev, int engine)
227{ 196{
228 struct drm_nouveau_private *dev_priv = dev->dev_private; 197 struct drm_nouveau_private *dev_priv = dev->dev_private;
229 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; 198 struct nv04_fifo_priv *priv = nv_engine(dev, engine);
230 struct nouveau_channel *chan = NULL; 199 int i;
231 uint32_t tmp;
232 int chid;
233
234 chid = pfifo->channel_id(dev);
235 if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
236 return 0;
237
238 chan = dev_priv->channels.ptr[chid];
239 if (!chan) {
240 NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
241 return -EINVAL;
242 }
243
244 RAMFC_WR(DMA_PUT, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
245 RAMFC_WR(DMA_GET, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
246 tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16;
247 tmp |= nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE);
248 RAMFC_WR(DMA_INSTANCE, tmp);
249 RAMFC_WR(DMA_STATE, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_STATE));
250 RAMFC_WR(DMA_FETCH, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH));
251 RAMFC_WR(ENGINE, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE));
252 RAMFC_WR(PULL1_ENGINE, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1));
253
254 nv04_fifo_do_load_context(dev, pfifo->channels - 1);
255 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
256 return 0;
257}
258 200
259static void 201 nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, 0);
260nv04_fifo_init_reset(struct drm_device *dev) 202 nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, NV_PMC_ENABLE_PFIFO);
261{
262 nv_wr32(dev, NV03_PMC_ENABLE,
263 nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PFIFO);
264 nv_wr32(dev, NV03_PMC_ENABLE,
265 nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PFIFO);
266
267 nv_wr32(dev, 0x003224, 0x000f0078);
268 nv_wr32(dev, 0x002044, 0x0101ffff);
269 nv_wr32(dev, 0x002040, 0x000000ff);
270 nv_wr32(dev, 0x002500, 0x00000000);
271 nv_wr32(dev, 0x003000, 0x00000000);
272 nv_wr32(dev, 0x003050, 0x00000000);
273 nv_wr32(dev, 0x003200, 0x00000000);
274 nv_wr32(dev, 0x003250, 0x00000000);
275 nv_wr32(dev, 0x003220, 0x00000000);
276
277 nv_wr32(dev, 0x003250, 0x00000000);
278 nv_wr32(dev, 0x003270, 0x00000000);
279 nv_wr32(dev, 0x003210, 0x00000000);
280}
281 203
282static void 204 nv_wr32(dev, NV04_PFIFO_DELAY_0, 0x000000ff);
283nv04_fifo_init_ramxx(struct drm_device *dev) 205 nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
284{
285 struct drm_nouveau_private *dev_priv = dev->dev_private;
286 206
287 nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ | 207 nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
288 ((dev_priv->ramht->bits - 9) << 16) | 208 ((dev_priv->ramht->bits - 9) << 16) |
289 (dev_priv->ramht->gpuobj->pinst >> 8)); 209 (dev_priv->ramht->gpuobj->pinst >> 8));
290 nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8); 210 nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8);
291 nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc->pinst >> 8); 211 nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc->pinst >> 8);
292}
293 212
294static void 213 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
295nv04_fifo_init_intr(struct drm_device *dev) 214
296{ 215 nv_wr32(dev, NV03_PFIFO_INTR_0, 0xffffffff);
297 nouveau_irq_register(dev, 8, nv04_fifo_isr); 216 nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xffffffff);
298 nv_wr32(dev, 0x002100, 0xffffffff); 217
299 nv_wr32(dev, 0x002140, 0xffffffff); 218 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
219 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
220 nv_wr32(dev, NV03_PFIFO_CACHES, 1);
221
222 for (i = 0; i < priv->base.channels; i++) {
223 if (dev_priv->channels.ptr[i])
224 nv_mask(dev, NV04_PFIFO_MODE, (1 << i), (1 << i));
225 }
226
227 return 0;
300} 228}
301 229
302int 230int
303nv04_fifo_init(struct drm_device *dev) 231nv04_fifo_fini(struct drm_device *dev, int engine, bool suspend)
304{ 232{
305 struct drm_nouveau_private *dev_priv = dev->dev_private; 233 struct drm_nouveau_private *dev_priv = dev->dev_private;
306 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; 234 struct nv04_fifo_priv *priv = nv_engine(dev, engine);
307 int i; 235 struct nouveau_channel *chan;
308 236 int chid;
309 nv04_fifo_init_reset(dev);
310 nv04_fifo_init_ramxx(dev);
311
312 nv04_fifo_do_load_context(dev, pfifo->channels - 1);
313 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
314 237
315 nv04_fifo_init_intr(dev); 238 /* prevent context switches and halt fifo operation */
316 pfifo->enable(dev); 239 nv_wr32(dev, NV03_PFIFO_CACHES, 0);
317 pfifo->reassign(dev, true); 240 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
241 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 0);
242 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 0);
318 243
319 for (i = 0; i < dev_priv->engine.fifo.channels; i++) { 244 /* store current fifo context in ramfc */
320 if (dev_priv->channels.ptr[i]) { 245 chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & priv->base.channels;
321 uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE); 246 chan = dev_priv->channels.ptr[chid];
322 nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i)); 247 if (suspend && chid != priv->base.channels && chan) {
323 } 248 struct nv04_fifo_chan *fctx = chan->engctx[engine];
249 struct nouveau_gpuobj *ctx = fctx->ramfc;
250 struct ramfc_desc *c = priv->ramfc_desc;
251 do {
252 u32 rm = ((1ULL << c->bits) - 1) << c->regs;
253 u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
254 u32 rv = (nv_rd32(dev, c->regp) & rm) >> c->regs;
255 u32 cv = (nv_ro32(ctx, c->ctxp) & ~cm);
256 nv_wo32(ctx, c->ctxp, cv | (rv << c->ctxs));
257 } while ((++c)->bits);
324 } 258 }
325 259
260 nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0x00000000);
326 return 0; 261 return 0;
327} 262}
328 263
329void
330nv04_fifo_fini(struct drm_device *dev)
331{
332 nv_wr32(dev, 0x2140, 0x00000000);
333 nouveau_irq_unregister(dev, 8);
334}
335
336static bool 264static bool
337nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data) 265nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data)
338{ 266{
267 struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
339 struct drm_nouveau_private *dev_priv = dev->dev_private; 268 struct drm_nouveau_private *dev_priv = dev->dev_private;
340 struct nouveau_channel *chan = NULL; 269 struct nouveau_channel *chan = NULL;
341 struct nouveau_gpuobj *obj; 270 struct nouveau_gpuobj *obj;
@@ -346,7 +275,7 @@ nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data)
346 u32 engine; 275 u32 engine;
347 276
348 spin_lock_irqsave(&dev_priv->channels.lock, flags); 277 spin_lock_irqsave(&dev_priv->channels.lock, flags);
349 if (likely(chid >= 0 && chid < dev_priv->engine.fifo.channels)) 278 if (likely(chid >= 0 && chid < pfifo->channels))
350 chan = dev_priv->channels.ptr[chid]; 279 chan = dev_priv->channels.ptr[chid];
351 if (unlikely(!chan)) 280 if (unlikely(!chan))
352 goto out; 281 goto out;
@@ -357,7 +286,6 @@ nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data)
357 if (unlikely(!obj || obj->engine != NVOBJ_ENGINE_SW)) 286 if (unlikely(!obj || obj->engine != NVOBJ_ENGINE_SW))
358 break; 287 break;
359 288
360 chan->sw_subchannel[subc] = obj->class;
361 engine = 0x0000000f << (subc * 4); 289 engine = 0x0000000f << (subc * 4);
362 290
363 nv_mask(dev, NV04_PFIFO_CACHE1_ENGINE, engine, 0x00000000); 291 nv_mask(dev, NV04_PFIFO_CACHE1_ENGINE, engine, 0x00000000);
@@ -368,7 +296,7 @@ nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data)
368 if (unlikely(((engine >> (subc * 4)) & 0xf) != 0)) 296 if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
369 break; 297 break;
370 298
371 if (!nouveau_gpuobj_mthd_call(chan, chan->sw_subchannel[subc], 299 if (!nouveau_gpuobj_mthd_call(chan, nouveau_software_class(dev),
372 mthd, data)) 300 mthd, data))
373 handled = true; 301 handled = true;
374 break; 302 break;
@@ -391,8 +319,8 @@ static const char *nv_dma_state_err(u32 state)
391void 319void
392nv04_fifo_isr(struct drm_device *dev) 320nv04_fifo_isr(struct drm_device *dev)
393{ 321{
322 struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
394 struct drm_nouveau_private *dev_priv = dev->dev_private; 323 struct drm_nouveau_private *dev_priv = dev->dev_private;
395 struct nouveau_engine *engine = &dev_priv->engine;
396 uint32_t status, reassign; 324 uint32_t status, reassign;
397 int cnt = 0; 325 int cnt = 0;
398 326
@@ -402,7 +330,7 @@ nv04_fifo_isr(struct drm_device *dev)
402 330
403 nv_wr32(dev, NV03_PFIFO_CACHES, 0); 331 nv_wr32(dev, NV03_PFIFO_CACHES, 0);
404 332
405 chid = engine->fifo.channel_id(dev); 333 chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & pfifo->channels;
406 get = nv_rd32(dev, NV03_PFIFO_CACHE1_GET); 334 get = nv_rd32(dev, NV03_PFIFO_CACHE1_GET);
407 335
408 if (status & NV_PFIFO_INTR_CACHE_ERROR) { 336 if (status & NV_PFIFO_INTR_CACHE_ERROR) {
@@ -541,3 +469,38 @@ nv04_fifo_isr(struct drm_device *dev)
541 469
542 nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING); 470 nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
543} 471}
472
473void
474nv04_fifo_destroy(struct drm_device *dev, int engine)
475{
476 struct drm_nouveau_private *dev_priv = dev->dev_private;
477 struct nv04_fifo_priv *priv = nv_engine(dev, engine);
478
479 nouveau_irq_unregister(dev, 8);
480
481 dev_priv->eng[engine] = NULL;
482 kfree(priv);
483}
484
485int
486nv04_fifo_create(struct drm_device *dev)
487{
488 struct drm_nouveau_private *dev_priv = dev->dev_private;
489 struct nv04_fifo_priv *priv;
490
491 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
492 if (!priv)
493 return -ENOMEM;
494
495 priv->base.base.destroy = nv04_fifo_destroy;
496 priv->base.base.init = nv04_fifo_init;
497 priv->base.base.fini = nv04_fifo_fini;
498 priv->base.base.context_new = nv04_fifo_context_new;
499 priv->base.base.context_del = nv04_fifo_context_del;
500 priv->base.channels = 15;
501 priv->ramfc_desc = nv04_ramfc;
502 dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
503
504 nouveau_irq_register(dev, 8, nv04_fifo_isr);
505 return 0;
506}
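
The bulk of the old file (the RAMFC_WR/RAMFC_RD macros plus hand-written load, unload and takedown paths) collapses into the ramfc_desc[] table: each entry describes one saved field as a bitfield of width 'bits' at position 'ctxs' inside RAMFC word 'ctxp', mirrored at position 'regs' inside PFIFO register 'regp'. Suspend saves registers into RAMFC, and context teardown zeroes the live registers, with the same generic walk; annotated from nv04_fifo_fini() above:

    struct ramfc_desc *c = priv->ramfc_desc;
    do {
            u32 rm = ((1ULL << c->bits) - 1) << c->regs;  /* mask in register */
            u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;  /* mask in ramfc */
            u32 rv = (nv_rd32(dev, c->regp) & rm) >> c->regs;
            u32 cv = nv_ro32(ctx, c->ctxp) & ~cm;
            nv_wo32(ctx, c->ctxp, cv | (rv << c->ctxs));  /* register -> ramfc */
    } while ((++c)->bits);  /* the zeroed terminator entry ends the walk */
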
diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
index dbdea8ed3925..72f1a62903b3 100644
--- a/drivers/gpu/drm/nouveau/nv04_graph.c
+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
@@ -356,12 +356,12 @@ static struct nouveau_channel *
356nv04_graph_channel(struct drm_device *dev) 356nv04_graph_channel(struct drm_device *dev)
357{ 357{
358 struct drm_nouveau_private *dev_priv = dev->dev_private; 358 struct drm_nouveau_private *dev_priv = dev->dev_private;
359 int chid = dev_priv->engine.fifo.channels; 359 int chid = 15;
360 360
361 if (nv_rd32(dev, NV04_PGRAPH_CTX_CONTROL) & 0x00010000) 361 if (nv_rd32(dev, NV04_PGRAPH_CTX_CONTROL) & 0x00010000)
362 chid = nv_rd32(dev, NV04_PGRAPH_CTX_USER) >> 24; 362 chid = nv_rd32(dev, NV04_PGRAPH_CTX_USER) >> 24;
363 363
364 if (chid >= dev_priv->engine.fifo.channels) 364 if (chid > 15)
365 return NULL; 365 return NULL;
366 366
367 return dev_priv->channels.ptr[chid]; 367 return dev_priv->channels.ptr[chid];
@@ -404,7 +404,6 @@ nv04_graph_load_context(struct nouveau_channel *chan)
404static int 404static int
405nv04_graph_unload_context(struct drm_device *dev) 405nv04_graph_unload_context(struct drm_device *dev)
406{ 406{
407 struct drm_nouveau_private *dev_priv = dev->dev_private;
408 struct nouveau_channel *chan = NULL; 407 struct nouveau_channel *chan = NULL;
409 struct graph_state *ctx; 408 struct graph_state *ctx;
410 uint32_t tmp; 409 uint32_t tmp;
@@ -420,7 +419,7 @@ nv04_graph_unload_context(struct drm_device *dev)
420 419
421 nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10000000); 420 nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10000000);
422 tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff; 421 tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
423 tmp |= (dev_priv->engine.fifo.channels - 1) << 24; 422 tmp |= 15 << 24;
424 nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp); 423 nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp);
425 return 0; 424 return 0;
426} 425}
@@ -495,7 +494,6 @@ nv04_graph_object_new(struct nouveau_channel *chan, int engine,
495static int 494static int
496nv04_graph_init(struct drm_device *dev, int engine) 495nv04_graph_init(struct drm_device *dev, int engine)
497{ 496{
498 struct drm_nouveau_private *dev_priv = dev->dev_private;
499 uint32_t tmp; 497 uint32_t tmp;
500 498
501 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & 499 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
@@ -527,7 +525,7 @@ nv04_graph_init(struct drm_device *dev, int engine)
527 nv_wr32(dev, NV04_PGRAPH_STATE , 0xFFFFFFFF); 525 nv_wr32(dev, NV04_PGRAPH_STATE , 0xFFFFFFFF);
528 nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL , 0x10000100); 526 nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL , 0x10000100);
529 tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff; 527 tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
530 tmp |= (dev_priv->engine.fifo.channels - 1) << 24; 528 tmp |= 15 << 24;
531 nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp); 529 nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp);
532 530
533 /* These don't belong here, they're part of a per-channel context */ 531 /* These don't belong here, they're part of a per-channel context */
@@ -550,28 +548,6 @@ nv04_graph_fini(struct drm_device *dev, int engine, bool suspend)
550 return 0; 548 return 0;
551} 549}
552 550
553static int
554nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
555 u32 class, u32 mthd, u32 data)
556{
557 atomic_set(&chan->fence.last_sequence_irq, data);
558 return 0;
559}
560
561int
562nv04_graph_mthd_page_flip(struct nouveau_channel *chan,
563 u32 class, u32 mthd, u32 data)
564{
565 struct drm_device *dev = chan->dev;
566 struct nouveau_page_flip_state s;
567
568 if (!nouveau_finish_page_flip(chan, &s))
569 nv_set_crtc_base(dev, s.crtc,
570 s.offset + s.y * s.pitch + s.x * s.bpp / 8);
571
572 return 0;
573}
574
575/* 551/*
576 * Software methods, why they are needed, and how they all work: 552 * Software methods, why they are needed, and how they all work:
577 * 553 *
@@ -1020,7 +996,8 @@ nv04_graph_context_switch(struct drm_device *dev)
1020 nv04_graph_unload_context(dev); 996 nv04_graph_unload_context(dev);
1021 997
1022 /* Load context for next channel */ 998 /* Load context for next channel */
1023 chid = dev_priv->engine.fifo.channel_id(dev); 999 chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) &
1000 NV03_PFIFO_CACHE1_PUSH1_CHID_MASK;
1024 chan = dev_priv->channels.ptr[chid]; 1001 chan = dev_priv->channels.ptr[chid];
1025 if (chan) 1002 if (chan)
1026 nv04_graph_load_context(chan); 1003 nv04_graph_load_context(chan);
@@ -1345,9 +1322,5 @@ nv04_graph_create(struct drm_device *dev)
1345 NVOBJ_MTHD (dev, 0x005e, 0x0198, nv04_graph_mthd_bind_surf2d); 1322 NVOBJ_MTHD (dev, 0x005e, 0x0198, nv04_graph_mthd_bind_surf2d);
1346 NVOBJ_MTHD (dev, 0x005e, 0x02fc, nv04_graph_mthd_set_operation); 1323 NVOBJ_MTHD (dev, 0x005e, 0x02fc, nv04_graph_mthd_set_operation);
1347 1324
1348 /* nvsw */
1349 NVOBJ_CLASS(dev, 0x506e, SW);
1350 NVOBJ_MTHD (dev, 0x506e, 0x0150, nv04_graph_mthd_set_ref);
1351 NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
1352 return 0; 1325 return 0;
1353} 1326}
diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c
index c1248e0740a3..ef7a934a499a 100644
--- a/drivers/gpu/drm/nouveau/nv04_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv04_instmem.c
@@ -1,6 +1,8 @@
1#include "drmP.h" 1#include "drmP.h"
2#include "drm.h" 2#include "drm.h"
3
3#include "nouveau_drv.h" 4#include "nouveau_drv.h"
5#include "nouveau_fifo.h"
4#include "nouveau_ramht.h" 6#include "nouveau_ramht.h"
5 7
6/* returns the size of fifo context */ 8/* returns the size of fifo context */
@@ -10,12 +12,15 @@ nouveau_fifo_ctx_size(struct drm_device *dev)
10 struct drm_nouveau_private *dev_priv = dev->dev_private; 12 struct drm_nouveau_private *dev_priv = dev->dev_private;
11 13
12 if (dev_priv->chipset >= 0x40) 14 if (dev_priv->chipset >= 0x40)
13 return 128; 15 return 128 * 32;
14 else 16 else
15 if (dev_priv->chipset >= 0x17) 17 if (dev_priv->chipset >= 0x17)
16 return 64; 18 return 64 * 32;
19 else
20 if (dev_priv->chipset >= 0x10)
21 return 32 * 32;
17 22
18 return 32; 23 return 32 * 16;
19} 24}
20 25
21int nv04_instmem_init(struct drm_device *dev) 26int nv04_instmem_init(struct drm_device *dev)
@@ -39,14 +44,10 @@ int nv04_instmem_init(struct drm_device *dev)
39 else if (nv44_graph_class(dev)) rsvd = 0x4980 * vs; 44 else if (nv44_graph_class(dev)) rsvd = 0x4980 * vs;
40 else rsvd = 0x4a40 * vs; 45 else rsvd = 0x4a40 * vs;
41 rsvd += 16 * 1024; 46 rsvd += 16 * 1024;
42 rsvd *= dev_priv->engine.fifo.channels; 47 rsvd *= 32; /* per-channel */
43
44 /* pciegart table */
45 if (pci_is_pcie(dev->pdev))
46 rsvd += 512 * 1024;
47 48
48 /* object storage */ 49 rsvd += 512 * 1024; /* pci(e)gart table */
49 rsvd += 512 * 1024; 50 rsvd += 512 * 1024; /* object storage */
50 51
51 dev_priv->ramin_rsvd_vram = round_up(rsvd, 4096); 52 dev_priv->ramin_rsvd_vram = round_up(rsvd, 4096);
52 } else { 53 } else {
@@ -71,7 +72,7 @@ int nv04_instmem_init(struct drm_device *dev)
71 return ret; 72 return ret;
72 73
73 /* And RAMFC */ 74 /* And RAMFC */
74 length = dev_priv->engine.fifo.channels * nouveau_fifo_ctx_size(dev); 75 length = nouveau_fifo_ctx_size(dev);
75 switch (dev_priv->card_type) { 76 switch (dev_priv->card_type) {
76 case NV_40: 77 case NV_40:
77 offset = 0x20000; 78 offset = 0x20000;
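
nouveau_fifo_ctx_size() now returns the size of the whole RAMFC block rather than a per-channel size, with the channel count folded into the constants. Worked out against the old per-channel values:

    /* per-channel size x channel count = value returned above */
    chipset >= 0x40: 128 * 32 = 4096   /* 0x1000 */
    chipset >= 0x17:  64 * 32 = 2048   /* 0x0800 */
    chipset >= 0x10:  32 * 32 = 1024   /* 0x0400 */
    older (nv04):     32 * 16 =  512   /* 0x0200 */
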
diff --git a/drivers/gpu/drm/nouveau/nv04_software.c b/drivers/gpu/drm/nouveau/nv04_software.c
new file mode 100644
index 000000000000..0c41abf48774
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_software.c
@@ -0,0 +1,147 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26
27#include "nouveau_drv.h"
28#include "nouveau_ramht.h"
29#include "nouveau_fence.h"
30#include "nouveau_software.h"
31#include "nouveau_hw.h"
32
33struct nv04_software_priv {
34 struct nouveau_software_priv base;
35};
36
37struct nv04_software_chan {
38 struct nouveau_software_chan base;
39};
40
41static int
42mthd_flip(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
43{
44
45 struct nouveau_page_flip_state state;
46
47 if (!nouveau_finish_page_flip(chan, &state)) {
48 nv_set_crtc_base(chan->dev, state.crtc, state.offset +
49 state.y * state.pitch +
50 state.x * state.bpp / 8);
51 }
52
53 return 0;
54}
55
56static int
57nv04_software_context_new(struct nouveau_channel *chan, int engine)
58{
59 struct nv04_software_chan *pch;
60
61 pch = kzalloc(sizeof(*pch), GFP_KERNEL);
62 if (!pch)
63 return -ENOMEM;
64
65 nouveau_software_context_new(&pch->base);
66 chan->engctx[engine] = pch;
67 return 0;
68}
69
70static void
71nv04_software_context_del(struct nouveau_channel *chan, int engine)
72{
73 struct nv04_software_chan *pch = chan->engctx[engine];
74 chan->engctx[engine] = NULL;
75 kfree(pch);
76}
77
78static int
79nv04_software_object_new(struct nouveau_channel *chan, int engine,
80 u32 handle, u16 class)
81{
82 struct drm_device *dev = chan->dev;
83 struct nouveau_gpuobj *obj = NULL;
84 int ret;
85
86 ret = nouveau_gpuobj_new(dev, chan, 16, 16, 0, &obj);
87 if (ret)
88 return ret;
89 obj->engine = 0;
90 obj->class = class;
91
92 ret = nouveau_ramht_insert(chan, handle, obj);
93 nouveau_gpuobj_ref(NULL, &obj);
94 return ret;
95}
96
97static int
98nv04_software_init(struct drm_device *dev, int engine)
99{
100 return 0;
101}
102
103static int
104nv04_software_fini(struct drm_device *dev, int engine, bool suspend)
105{
106 return 0;
107}
108
109static void
110nv04_software_destroy(struct drm_device *dev, int engine)
111{
112 struct nv04_software_priv *psw = nv_engine(dev, engine);
113
114 NVOBJ_ENGINE_DEL(dev, SW);
115 kfree(psw);
116}
117
118int
119nv04_software_create(struct drm_device *dev)
120{
121 struct drm_nouveau_private *dev_priv = dev->dev_private;
122 struct nv04_software_priv *psw;
123
124 psw = kzalloc(sizeof(*psw), GFP_KERNEL);
125 if (!psw)
126 return -ENOMEM;
127
128 psw->base.base.destroy = nv04_software_destroy;
129 psw->base.base.init = nv04_software_init;
130 psw->base.base.fini = nv04_software_fini;
131 psw->base.base.context_new = nv04_software_context_new;
132 psw->base.base.context_del = nv04_software_context_del;
133 psw->base.base.object_new = nv04_software_object_new;
134 nouveau_software_create(&psw->base);
135
136 NVOBJ_ENGINE_ADD(dev, SW, &psw->base.base);
137 if (dev_priv->card_type <= NV_04) {
138 NVOBJ_CLASS(dev, 0x006e, SW);
139 NVOBJ_MTHD (dev, 0x006e, 0x0150, nv04_fence_mthd);
140 NVOBJ_MTHD (dev, 0x006e, 0x0500, mthd_flip);
141 } else {
142 NVOBJ_CLASS(dev, 0x016e, SW);
143 NVOBJ_MTHD (dev, 0x016e, 0x0500, mthd_flip);
144 }
145
146 return 0;
147}
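
The new nv04_software.c follows nouveau's engine template: nv04_software_create() fills an engine vtable, registers it with NVOBJ_ENGINE_ADD, then binds software classes (0x006e on NV04, 0x016e on NV10 and later) whose methods trap to the CPU instead of running on the GPU. Page flips land at method 0x0500 on both classes; pre-NV10 chips also get a fence method at 0x0150, since they appear to lack the REF_CNT register the nv10 fence code relies on. A minimal userspace sketch of that (class, method) to handler dispatch, with every name below invented for illustration rather than taken from the nouveau API:

#include <stdio.h>
#include <stdint.h>

/* Sketch only: a (class, method) -> handler table in the spirit of the
 * NVOBJ_CLASS/NVOBJ_MTHD registrations above.  sw_mthd, sw_dispatch and
 * the dummy handlers are invented names, not nouveau API. */
struct sw_mthd {
	uint16_t class;
	uint32_t mthd;
	int (*handler)(uint32_t data);
};

static int handle_flip(uint32_t data)  { printf("flip:  %08x\n", data); return 0; }
static int handle_fence(uint32_t data) { printf("fence: %08x\n", data); return 0; }

static const struct sw_mthd sw_mthds[] = {
	{ 0x006e, 0x0150, handle_fence },	/* NV04: fence by software method */
	{ 0x006e, 0x0500, handle_flip },
	{ 0x016e, 0x0500, handle_flip },	/* NV10+: flip only */
	{ 0 }
};

static int sw_dispatch(uint16_t class, uint32_t mthd, uint32_t data)
{
	const struct sw_mthd *m;

	for (m = sw_mthds; m->handler; m++) {
		if (m->class == class && m->mthd == mthd)
			return m->handler(data);
	}
	return -1;	/* unhandled: would show up as a PFIFO error on real hw */
}

int main(void)
{
	return sw_dispatch(0x016e, 0x0500, 0x1234);
}
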
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
new file mode 100644
index 000000000000..8a1b75009185
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -0,0 +1,214 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_dma.h"
28#include "nouveau_ramht.h"
29#include "nouveau_fence.h"
30
31struct nv10_fence_chan {
32 struct nouveau_fence_chan base;
33};
34
35struct nv10_fence_priv {
36 struct nouveau_fence_priv base;
37 struct nouveau_bo *bo;
38 spinlock_t lock;
39 u32 sequence;
40};
41
42static int
43nv10_fence_emit(struct nouveau_fence *fence)
44{
45 struct nouveau_channel *chan = fence->channel;
46 int ret = RING_SPACE(chan, 2);
47 if (ret == 0) {
48 BEGIN_NV04(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
49 OUT_RING (chan, fence->sequence);
50 FIRE_RING (chan);
51 }
52 return ret;
53}
54
55
56static int
57nv10_fence_sync(struct nouveau_fence *fence,
58 struct nouveau_channel *prev, struct nouveau_channel *chan)
59{
60 return -ENODEV;
61}
62
63static int
64nv17_fence_sync(struct nouveau_fence *fence,
65 struct nouveau_channel *prev, struct nouveau_channel *chan)
66{
67 struct nv10_fence_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_FENCE);
68 u32 value;
69 int ret;
70
71 if (!mutex_trylock(&prev->mutex))
72 return -EBUSY;
73
74 spin_lock(&priv->lock);
75 value = priv->sequence;
76 priv->sequence += 2;
77 spin_unlock(&priv->lock);
78
79 ret = RING_SPACE(prev, 5);
80 if (!ret) {
81 BEGIN_NV04(prev, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
82 OUT_RING (prev, NvSema);
83 OUT_RING (prev, 0);
84 OUT_RING (prev, value + 0);
85 OUT_RING (prev, value + 1);
86 FIRE_RING (prev);
87 }
88
89 if (!ret && !(ret = RING_SPACE(chan, 5))) {
90 BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
91 OUT_RING (chan, NvSema);
92 OUT_RING (chan, 0);
93 OUT_RING (chan, value + 1);
94 OUT_RING (chan, value + 2);
95 FIRE_RING (chan);
96 }
97
98 mutex_unlock(&prev->mutex);
99 return 0;
100}
101
102static u32
103nv10_fence_read(struct nouveau_channel *chan)
104{
105 return nvchan_rd32(chan, 0x0048);
106}
107
108static void
109nv10_fence_context_del(struct nouveau_channel *chan, int engine)
110{
111 struct nv10_fence_chan *fctx = chan->engctx[engine];
112 nouveau_fence_context_del(&fctx->base);
113 chan->engctx[engine] = NULL;
114 kfree(fctx);
115}
116
117static int
118nv10_fence_context_new(struct nouveau_channel *chan, int engine)
119{
120 struct nv10_fence_priv *priv = nv_engine(chan->dev, engine);
121 struct nv10_fence_chan *fctx;
122 struct nouveau_gpuobj *obj;
123 int ret = 0;
124
125 fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
126 if (!fctx)
127 return -ENOMEM;
128
129 nouveau_fence_context_new(&fctx->base);
130
131 if (priv->bo) {
132 struct ttm_mem_reg *mem = &priv->bo->bo.mem;
133
134 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY,
135 mem->start * PAGE_SIZE, mem->size,
136 NV_MEM_ACCESS_RW,
137 NV_MEM_TARGET_VRAM, &obj);
138 if (!ret) {
139 ret = nouveau_ramht_insert(chan, NvSema, obj);
140 nouveau_gpuobj_ref(NULL, &obj);
141 }
142 }
143
144 if (ret)
145 nv10_fence_context_del(chan, engine);
146 return ret;
147}
148
149static int
150nv10_fence_fini(struct drm_device *dev, int engine, bool suspend)
151{
152 return 0;
153}
154
155static int
156nv10_fence_init(struct drm_device *dev, int engine)
157{
158 return 0;
159}
160
161static void
162nv10_fence_destroy(struct drm_device *dev, int engine)
163{
164 struct drm_nouveau_private *dev_priv = dev->dev_private;
165 struct nv10_fence_priv *priv = nv_engine(dev, engine);
166
167 nouveau_bo_ref(NULL, &priv->bo);
168 dev_priv->eng[engine] = NULL;
169 kfree(priv);
170}
171
172int
173nv10_fence_create(struct drm_device *dev)
174{
175 struct drm_nouveau_private *dev_priv = dev->dev_private;
176 struct nv10_fence_priv *priv;
177 int ret = 0;
178
179 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
180 if (!priv)
181 return -ENOMEM;
182
183 priv->base.engine.destroy = nv10_fence_destroy;
184 priv->base.engine.init = nv10_fence_init;
185 priv->base.engine.fini = nv10_fence_fini;
186 priv->base.engine.context_new = nv10_fence_context_new;
187 priv->base.engine.context_del = nv10_fence_context_del;
188 priv->base.emit = nv10_fence_emit;
189 priv->base.read = nv10_fence_read;
190 priv->base.sync = nv10_fence_sync;
191 dev_priv->eng[NVOBJ_ENGINE_FENCE] = &priv->base.engine;
192 spin_lock_init(&priv->lock);
193
194 if (dev_priv->chipset >= 0x17) {
195 ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
196 0, 0x0000, NULL, &priv->bo);
197 if (!ret) {
198 ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
199 if (!ret)
200 ret = nouveau_bo_map(priv->bo);
201 if (ret)
202 nouveau_bo_ref(NULL, &priv->bo);
203 }
204
205 if (ret == 0) {
206 nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
207 priv->base.sync = nv17_fence_sync;
208 }
209 }
210
211 if (ret)
212 nv10_fence_destroy(dev, NVOBJ_ENGINE_FENCE);
213 return ret;
214}
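
nv17_fence_sync() orders work between two channels through the shared semaphore buffer set up in nv10_fence_create() (NvSema, word 0x000 of the 4KiB BO): each sync consumes a pair of sequence values, the waited-on channel acquires value and releases value+1, the waiting channel acquires value+1 and releases value+2, and the counter is left at the next pair's starting value, which is why priv->sequence advances by 2. A compact pthread model of that handshake follows; thread and helper names are invented, and on real hardware the acquire/release is executed by PFIFO, not the CPU:

#include <pthread.h>
#include <stdio.h>

/* Sketch only: the NvSema handshake with two "channels" as threads and
 * the semaphore BO reduced to one counter.  acquire() spins until the
 * counter equals v, release() stores v. */
static unsigned sem;	/* models nouveau_bo_wr32(priv->bo, 0x000, 0) */

static void acquire(unsigned v)
{
	while (__atomic_load_n(&sem, __ATOMIC_ACQUIRE) != v)
		;
}

static void release(unsigned v)
{
	__atomic_store_n(&sem, v, __ATOMIC_RELEASE);
}

static void *prev_channel(void *arg)
{
	unsigned v = *(unsigned *)arg;

	printf("prev: work before the sync point\n");
	acquire(v + 0);		/* passes once any earlier sync completed */
	release(v + 1);		/* hand off to the waiting channel */
	return NULL;
}

static void *wait_channel(void *arg)
{
	unsigned v = *(unsigned *)arg;

	acquire(v + 1);		/* blocks until prev released */
	release(v + 2);		/* leaves the counter ready for the next pair */
	printf("chan: work after the sync point\n");
	return NULL;
}

int main(void)
{
	unsigned v = 0;		/* priv->sequence would hand this out, then += 2 */
	pthread_t a, b;

	pthread_create(&b, NULL, wait_channel, &v);
	pthread_create(&a, NULL, prev_channel, &v);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

Build with cc -pthread. The empty spin loop stands in for the hardware semaphore-acquire stall; the kernel never busy-waits like this.
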
diff --git a/drivers/gpu/drm/nouveau/nv10_fifo.c b/drivers/gpu/drm/nouveau/nv10_fifo.c
index d2ecbff4bee1..f1fe7d758241 100644
--- a/drivers/gpu/drm/nouveau/nv10_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv10_fifo.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007 Ben Skeggs. 2 * Copyright (C) 2012 Ben Skeggs.
3 * All Rights Reserved. 3 * All Rights Reserved.
4 * 4 *
5 * Permission is hereby granted, free of charge, to any person obtaining 5 * Permission is hereby granted, free of charge, to any person obtaining
@@ -27,220 +27,112 @@
27#include "drmP.h" 27#include "drmP.h"
28#include "drm.h" 28#include "drm.h"
29#include "nouveau_drv.h" 29#include "nouveau_drv.h"
30#include "nouveau_fifo.h"
31#include "nouveau_util.h"
30#include "nouveau_ramht.h" 32#include "nouveau_ramht.h"
31 33
32#define NV10_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV10_RAMFC__SIZE)) 34static struct ramfc_desc {
33#define NV10_RAMFC__SIZE ((dev_priv->chipset) >= 0x17 ? 64 : 32) 35 unsigned bits:6;
34 36 unsigned ctxs:5;
35int 37 unsigned ctxp:8;
36nv10_fifo_channel_id(struct drm_device *dev) 38 unsigned regs:5;
37{ 39 unsigned regp;
38 return nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & 40} nv10_ramfc[] = {
39 NV10_PFIFO_CACHE1_PUSH1_CHID_MASK; 41 { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
40} 42 { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
41 43 { 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
42int 44 { 16, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
43nv10_fifo_create_context(struct nouveau_channel *chan) 45 { 16, 16, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
46 { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_STATE },
47 { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
48 { 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_ENGINE },
49 { 32, 0, 0x1c, 0, NV04_PFIFO_CACHE1_PULL1 },
50 {}
51};
52
53struct nv10_fifo_priv {
54 struct nouveau_fifo_priv base;
55 struct ramfc_desc *ramfc_desc;
56};
57
58struct nv10_fifo_chan {
59 struct nouveau_fifo_chan base;
60 struct nouveau_gpuobj *ramfc;
61};
62
63static int
64nv10_fifo_context_new(struct nouveau_channel *chan, int engine)
44{ 65{
45 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
46 struct drm_device *dev = chan->dev; 66 struct drm_device *dev = chan->dev;
47 uint32_t fc = NV10_RAMFC(chan->id); 67 struct drm_nouveau_private *dev_priv = dev->dev_private;
68 struct nv10_fifo_priv *priv = nv_engine(dev, engine);
69 struct nv10_fifo_chan *fctx;
70 unsigned long flags;
48 int ret; 71 int ret;
49 72
50 ret = nouveau_gpuobj_new_fake(dev, NV10_RAMFC(chan->id), ~0, 73 fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
51 NV10_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC | 74 if (!fctx)
52 NVOBJ_FLAG_ZERO_FREE, &chan->ramfc); 75 return -ENOMEM;
53 if (ret)
54 return ret;
55 76
77 /* map channel control registers */
56 chan->user = ioremap(pci_resource_start(dev->pdev, 0) + 78 chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
57 NV03_USER(chan->id), PAGE_SIZE); 79 NV03_USER(chan->id), PAGE_SIZE);
58 if (!chan->user) 80 if (!chan->user) {
59 return -ENOMEM; 81 ret = -ENOMEM;
82 goto error;
83 }
60 84
61 /* Fill entries that are seen filled in dumps of nvidia driver just 85 /* initialise default fifo context */
62 * after channel's is put into DMA mode 86 ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramfc->pinst +
63 */ 87 chan->id * 32, ~0, 32,
64 nv_wi32(dev, fc + 0, chan->pushbuf_base); 88 NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc);
65 nv_wi32(dev, fc + 4, chan->pushbuf_base); 89 if (ret)
66 nv_wi32(dev, fc + 12, chan->pushbuf->pinst >> 4); 90 goto error;
67 nv_wi32(dev, fc + 20, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | 91
68 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | 92 nv_wo32(fctx->ramfc, 0x00, chan->pushbuf_base);
69 NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 | 93 nv_wo32(fctx->ramfc, 0x04, chan->pushbuf_base);
94 nv_wo32(fctx->ramfc, 0x08, 0x00000000);
95 nv_wo32(fctx->ramfc, 0x0c, chan->pushbuf->pinst >> 4);
96 nv_wo32(fctx->ramfc, 0x10, 0x00000000);
97 nv_wo32(fctx->ramfc, 0x14, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
98 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
70#ifdef __BIG_ENDIAN 99#ifdef __BIG_ENDIAN
71 NV_PFIFO_CACHE1_BIG_ENDIAN | 100 NV_PFIFO_CACHE1_BIG_ENDIAN |
72#endif 101#endif
73 0); 102 NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
74 103 nv_wo32(fctx->ramfc, 0x18, 0x00000000);
75 /* enable the fifo dma operation */ 104 nv_wo32(fctx->ramfc, 0x1c, 0x00000000);
76 nv_wr32(dev, NV04_PFIFO_MODE,
77 nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));
78 return 0;
79}
80
81static void
82nv10_fifo_do_load_context(struct drm_device *dev, int chid)
83{
84 struct drm_nouveau_private *dev_priv = dev->dev_private;
85 uint32_t fc = NV10_RAMFC(chid), tmp;
86
87 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0));
88 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4));
89 nv_wr32(dev, NV10_PFIFO_CACHE1_REF_CNT, nv_ri32(dev, fc + 8));
90
91 tmp = nv_ri32(dev, fc + 12);
92 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE, tmp & 0xFFFF);
93 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT, tmp >> 16);
94
95 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, nv_ri32(dev, fc + 16));
96 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_FETCH, nv_ri32(dev, fc + 20));
97 nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 24));
98 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 28));
99
100 if (dev_priv->chipset < 0x17)
101 goto out;
102
103 nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE, nv_ri32(dev, fc + 32));
104 tmp = nv_ri32(dev, fc + 36);
105 nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP, tmp);
106 nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT, nv_ri32(dev, fc + 40));
107 nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, nv_ri32(dev, fc + 44));
108 nv_wr32(dev, NV10_PFIFO_CACHE1_DMA_SUBROUTINE, nv_ri32(dev, fc + 48));
109
110out:
111 nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
112 nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
113}
114
115int
116nv10_fifo_load_context(struct nouveau_channel *chan)
117{
118 struct drm_device *dev = chan->dev;
119 uint32_t tmp;
120
121 nv10_fifo_do_load_context(dev, chan->id);
122 105
123 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, 106 /* enable dma mode on the channel */
124 NV03_PFIFO_CACHE1_PUSH1_DMA | chan->id); 107 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
125 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 1); 108 nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id));
109 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
126 110
127 /* Reset NV04_PFIFO_CACHE1_DMA_CTL_AT_INFO to INVALID */ 111error:
128 tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_CTL) & ~(1 << 31); 112 if (ret)
129 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_CTL, tmp); 113 priv->base.base.context_del(chan, engine);
130 114 return ret;
131 return 0;
132} 115}
133 116
134int 117int
135nv10_fifo_unload_context(struct drm_device *dev) 118nv10_fifo_create(struct drm_device *dev)
136{
137 struct drm_nouveau_private *dev_priv = dev->dev_private;
138 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
139 uint32_t fc, tmp;
140 int chid;
141
142 chid = pfifo->channel_id(dev);
143 if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
144 return 0;
145 fc = NV10_RAMFC(chid);
146
147 nv_wi32(dev, fc + 0, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
148 nv_wi32(dev, fc + 4, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
149 nv_wi32(dev, fc + 8, nv_rd32(dev, NV10_PFIFO_CACHE1_REF_CNT));
150 tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE) & 0xFFFF;
151 tmp |= (nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16);
152 nv_wi32(dev, fc + 12, tmp);
153 nv_wi32(dev, fc + 16, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_STATE));
154 nv_wi32(dev, fc + 20, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH));
155 nv_wi32(dev, fc + 24, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE));
156 nv_wi32(dev, fc + 28, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1));
157
158 if (dev_priv->chipset < 0x17)
159 goto out;
160
161 nv_wi32(dev, fc + 32, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE));
162 tmp = nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP);
163 nv_wi32(dev, fc + 36, tmp);
164 nv_wi32(dev, fc + 40, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT));
165 nv_wi32(dev, fc + 44, nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE));
166 nv_wi32(dev, fc + 48, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
167
168out:
169 nv10_fifo_do_load_context(dev, pfifo->channels - 1);
170 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
171 return 0;
172}
173
174static void
175nv10_fifo_init_reset(struct drm_device *dev)
176{
177 nv_wr32(dev, NV03_PMC_ENABLE,
178 nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PFIFO);
179 nv_wr32(dev, NV03_PMC_ENABLE,
180 nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PFIFO);
181
182 nv_wr32(dev, 0x003224, 0x000f0078);
183 nv_wr32(dev, 0x002044, 0x0101ffff);
184 nv_wr32(dev, 0x002040, 0x000000ff);
185 nv_wr32(dev, 0x002500, 0x00000000);
186 nv_wr32(dev, 0x003000, 0x00000000);
187 nv_wr32(dev, 0x003050, 0x00000000);
188
189 nv_wr32(dev, 0x003258, 0x00000000);
190 nv_wr32(dev, 0x003210, 0x00000000);
191 nv_wr32(dev, 0x003270, 0x00000000);
192}
193
194static void
195nv10_fifo_init_ramxx(struct drm_device *dev)
196{ 119{
197 struct drm_nouveau_private *dev_priv = dev->dev_private; 120 struct drm_nouveau_private *dev_priv = dev->dev_private;
121 struct nv10_fifo_priv *priv;
198 122
199 nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ | 123 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
200 ((dev_priv->ramht->bits - 9) << 16) | 124 if (!priv)
201 (dev_priv->ramht->gpuobj->pinst >> 8)); 125 return -ENOMEM;
202 nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8);
203 126
204 if (dev_priv->chipset < 0x17) { 127 priv->base.base.destroy = nv04_fifo_destroy;
205 nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc->pinst >> 8); 128 priv->base.base.init = nv04_fifo_init;
206 } else { 129 priv->base.base.fini = nv04_fifo_fini;
207 nv_wr32(dev, NV03_PFIFO_RAMFC, (dev_priv->ramfc->pinst >> 8) | 130 priv->base.base.context_new = nv10_fifo_context_new;
208 (1 << 16) /* 64 Bytes entry*/); 131 priv->base.base.context_del = nv04_fifo_context_del;
209 /* XXX nvidia blob set bit 18, 21,23 for nv20 & nv30 */ 132 priv->base.channels = 31;
210 } 133 priv->ramfc_desc = nv10_ramfc;
211} 134 dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
212 135
213static void
214nv10_fifo_init_intr(struct drm_device *dev)
215{
216 nouveau_irq_register(dev, 8, nv04_fifo_isr); 136 nouveau_irq_register(dev, 8, nv04_fifo_isr);
217 nv_wr32(dev, 0x002100, 0xffffffff);
218 nv_wr32(dev, 0x002140, 0xffffffff);
219}
220
221int
222nv10_fifo_init(struct drm_device *dev)
223{
224 struct drm_nouveau_private *dev_priv = dev->dev_private;
225 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
226 int i;
227
228 nv10_fifo_init_reset(dev);
229 nv10_fifo_init_ramxx(dev);
230
231 nv10_fifo_do_load_context(dev, pfifo->channels - 1);
232 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
233
234 nv10_fifo_init_intr(dev);
235 pfifo->enable(dev);
236 pfifo->reassign(dev, true);
237
238 for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
239 if (dev_priv->channels.ptr[i]) {
240 uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
241 nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
242 }
243 }
244
245 return 0; 137 return 0;
246} 138}
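
The rewritten nv10_fifo.c replaces the hand-coded load/unload functions with a ramfc_desc[] table. Its consumer lives in the shared nv04 FIFO code (nouveau_fifo.h / nv04_fifo.c), which is not part of this hunk, so the reading below is an assumption: bits is the field width, ctxs/ctxp the shift and byte offset inside the per-channel RAMFC image, regs/regp the shift and address of the PFIFO register it mirrors. Under that reading a single table drives both directions of a channel switch; a self-contained model with a fake register file and arbitrary addresses:

#include <stdint.h>
#include <stdio.h>

/* Sketch only: generic save/restore driven by a descriptor table.
 * Field meanings are assumed as described above. */
struct desc {
	unsigned bits:6;	/* field width */
	unsigned ctxs:5;	/* shift within the RAMFC word */
	unsigned ctxp:8;	/* byte offset within the RAMFC image */
	unsigned regs:5;	/* shift within the register */
	unsigned regp;		/* register address */
};

static uint32_t mmio[0x4000];	/* fake register file */

static uint32_t rd32(unsigned r)             { return mmio[r >> 2]; }
static void     wr32(unsigned r, uint32_t v) { mmio[r >> 2] = v; }

static void ctx_save(const struct desc *d, uint32_t *ramfc)
{
	for (; d->bits; d++) {
		uint32_t mask = 0xffffffffu >> (32 - d->bits);
		uint32_t v = (rd32(d->regp) >> d->regs) & mask;

		ramfc[d->ctxp / 4] &= ~(mask << d->ctxs);
		ramfc[d->ctxp / 4] |= v << d->ctxs;
	}
}

static void ctx_load(const struct desc *d, const uint32_t *ramfc)
{
	for (; d->bits; d++) {
		uint32_t mask = 0xffffffffu >> (32 - d->bits);
		uint32_t v = (ramfc[d->ctxp / 4] >> d->ctxs) & mask;

		wr32(d->regp, (rd32(d->regp) & ~(mask << d->regs)) | (v << d->regs));
	}
}

int main(void)
{
	static const struct desc ramfc_desc[] = {
		{ 32,  0, 0x00, 0, 0x3210 },	/* one full word */
		{ 16,  0, 0x0c, 0, 0x3224 },	/* two halves packed ... */
		{ 16, 16, 0x0c, 0, 0x3228 },	/* ... into one RAMFC word */
		{ 0 }
	};
	uint32_t image[8] = { 0 };

	wr32(0x3210, 0xdeadbeef);
	wr32(0x3224, 0x1234);
	wr32(0x3228, 0x5678);
	ctx_save(ramfc_desc, image);	/* registers -> image */
	printf("image[0x0c/4] = %08x\n", image[0x0c / 4]);	/* 56781234 */
	ctx_load(ramfc_desc, image);	/* image -> registers */
	return 0;
}

This is the same table-driven idea that lets nv10, nv17 and nv40 differ only in their descriptor arrays and RAMFC entry sizes (32, 64 and 128 bytes, matching the respective nouveau_gpuobj_new_fake() calls).
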
diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c
index 7255e4a4d3f3..fb1d88a951de 100644
--- a/drivers/gpu/drm/nouveau/nv10_graph.c
+++ b/drivers/gpu/drm/nouveau/nv10_graph.c
@@ -759,7 +759,6 @@ static int
759nv10_graph_unload_context(struct drm_device *dev) 759nv10_graph_unload_context(struct drm_device *dev)
760{ 760{
761 struct drm_nouveau_private *dev_priv = dev->dev_private; 761 struct drm_nouveau_private *dev_priv = dev->dev_private;
762 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
763 struct nouveau_channel *chan; 762 struct nouveau_channel *chan;
764 struct graph_state *ctx; 763 struct graph_state *ctx;
765 uint32_t tmp; 764 uint32_t tmp;
@@ -782,7 +781,7 @@ nv10_graph_unload_context(struct drm_device *dev)
782 781
783 nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000000); 782 nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
784 tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff; 783 tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
785 tmp |= (pfifo->channels - 1) << 24; 784 tmp |= 31 << 24;
786 nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp); 785 nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
787 return 0; 786 return 0;
788} 787}
@@ -822,12 +821,12 @@ struct nouveau_channel *
822nv10_graph_channel(struct drm_device *dev) 821nv10_graph_channel(struct drm_device *dev)
823{ 822{
824 struct drm_nouveau_private *dev_priv = dev->dev_private; 823 struct drm_nouveau_private *dev_priv = dev->dev_private;
825 int chid = dev_priv->engine.fifo.channels; 824 int chid = 31;
826 825
827 if (nv_rd32(dev, NV10_PGRAPH_CTX_CONTROL) & 0x00010000) 826 if (nv_rd32(dev, NV10_PGRAPH_CTX_CONTROL) & 0x00010000)
828 chid = nv_rd32(dev, NV10_PGRAPH_CTX_USER) >> 24; 827 chid = nv_rd32(dev, NV10_PGRAPH_CTX_USER) >> 24;
829 828
830 if (chid >= dev_priv->engine.fifo.channels) 829 if (chid >= 31)
831 return NULL; 830 return NULL;
832 831
833 return dev_priv->channels.ptr[chid]; 832 return dev_priv->channels.ptr[chid];
@@ -948,7 +947,7 @@ nv10_graph_init(struct drm_device *dev, int engine)
948 nv_wr32(dev, NV10_PGRAPH_STATE, 0xFFFFFFFF); 947 nv_wr32(dev, NV10_PGRAPH_STATE, 0xFFFFFFFF);
949 948
950 tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff; 949 tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
951 tmp |= (dev_priv->engine.fifo.channels - 1) << 24; 950 tmp |= 31 << 24;
952 nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp); 951 nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
953 nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100); 952 nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
954 nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2, 0x08000000); 953 nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2, 0x08000000);
@@ -1153,10 +1152,6 @@ nv10_graph_create(struct drm_device *dev)
1153 NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base); 1152 NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
1154 nouveau_irq_register(dev, 12, nv10_graph_isr); 1153 nouveau_irq_register(dev, 12, nv10_graph_isr);
1155 1154
1156 /* nvsw */
1157 NVOBJ_CLASS(dev, 0x506e, SW);
1158 NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
1159
1160 NVOBJ_CLASS(dev, 0x0030, GR); /* null */ 1155 NVOBJ_CLASS(dev, 0x0030, GR); /* null */
1161 NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */ 1156 NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
1162 NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */ 1157 NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
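
Across these graph hunks, `pfifo->channels - 1` becomes the literal 31: the reworked FIFO code fixes these chipsets at 31 usable channels (ids 0 to 30) and parks PGRAPH on id 31 when no context is active. The active id sits in bits 24-31 of NV10_PGRAPH_CTX_USER, which is what the `tmp |= 31 << 24` read-modify-write sequences maintain. A tiny model of the pack/extract, with invented helper names:

#include <stdint.h>
#include <stdio.h>

/* Sketch only: channel id kept in the top byte of a context register. */
static uint32_t ctx_user;	/* stands in for NV10_PGRAPH_CTX_USER */

static unsigned chid_get(void)
{
	return ctx_user >> 24;
}

static void chid_set(unsigned chid)
{
	/* preserve the low 24 bits, as nv10_graph_unload_context() does */
	ctx_user = (ctx_user & 0x00ffffff) | ((uint32_t)chid << 24);
}

int main(void)
{
	chid_set(31);	/* park on the reserved channel: "no context" */
	printf("chid = %u (%s)\n", chid_get(),
	       chid_get() >= 31 ? "idle" : "active");
	return 0;
}
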
diff --git a/drivers/gpu/drm/nouveau/nv17_fifo.c b/drivers/gpu/drm/nouveau/nv17_fifo.c
new file mode 100644
index 000000000000..d9e482e4abee
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv17_fifo.c
@@ -0,0 +1,177 @@
1/*
2 * Copyright (C) 2012 Ben Skeggs.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm.h"
29#include "nouveau_drv.h"
30#include "nouveau_fifo.h"
31#include "nouveau_util.h"
32#include "nouveau_ramht.h"
33
34static struct ramfc_desc {
35 unsigned bits:6;
36 unsigned ctxs:5;
37 unsigned ctxp:8;
38 unsigned regs:5;
39 unsigned regp;
40} nv17_ramfc[] = {
41 { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
42 { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
43 { 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
44 { 16, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
45 { 16, 16, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
46 { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_STATE },
47 { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
48 { 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_ENGINE },
49 { 32, 0, 0x1c, 0, NV04_PFIFO_CACHE1_PULL1 },
50 { 32, 0, 0x20, 0, NV10_PFIFO_CACHE1_ACQUIRE_VALUE },
51 { 32, 0, 0x24, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP },
52 { 32, 0, 0x28, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT },
53 { 32, 0, 0x2c, 0, NV10_PFIFO_CACHE1_SEMAPHORE },
54 { 32, 0, 0x30, 0, NV10_PFIFO_CACHE1_DMA_SUBROUTINE },
55 {}
56};
57
58struct nv17_fifo_priv {
59 struct nouveau_fifo_priv base;
60 struct ramfc_desc *ramfc_desc;
61};
62
63struct nv17_fifo_chan {
64 struct nouveau_fifo_chan base;
65 struct nouveau_gpuobj *ramfc;
66};
67
68static int
69nv17_fifo_context_new(struct nouveau_channel *chan, int engine)
70{
71 struct drm_device *dev = chan->dev;
72 struct drm_nouveau_private *dev_priv = dev->dev_private;
73 struct nv17_fifo_priv *priv = nv_engine(dev, engine);
74 struct nv17_fifo_chan *fctx;
75 unsigned long flags;
76 int ret;
77
78 fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
79 if (!fctx)
80 return -ENOMEM;
81
82 /* map channel control registers */
83 chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
84 NV03_USER(chan->id), PAGE_SIZE);
85 if (!chan->user) {
86 ret = -ENOMEM;
87 goto error;
88 }
89
90 /* initialise default fifo context */
91 ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramfc->pinst +
92 chan->id * 64, ~0, 64,
93 NVOBJ_FLAG_ZERO_ALLOC |
94 NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc);
95 if (ret)
96 goto error;
97
98 nv_wo32(fctx->ramfc, 0x00, chan->pushbuf_base);
99 nv_wo32(fctx->ramfc, 0x04, chan->pushbuf_base);
100 nv_wo32(fctx->ramfc, 0x0c, chan->pushbuf->pinst >> 4);
101 nv_wo32(fctx->ramfc, 0x14, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
102 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
103#ifdef __BIG_ENDIAN
104 NV_PFIFO_CACHE1_BIG_ENDIAN |
105#endif
106 NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
107
108 /* enable dma mode on the channel */
109 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
110 nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id));
111 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
112
113error:
114 if (ret)
115 priv->base.base.context_del(chan, engine);
116 return ret;
117}
118
119static int
120nv17_fifo_init(struct drm_device *dev, int engine)
121{
122 struct drm_nouveau_private *dev_priv = dev->dev_private;
123 struct nv17_fifo_priv *priv = nv_engine(dev, engine);
124 int i;
125
126 nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, 0);
127 nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, NV_PMC_ENABLE_PFIFO);
128
129 nv_wr32(dev, NV04_PFIFO_DELAY_0, 0x000000ff);
130 nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
131
132 nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
133 ((dev_priv->ramht->bits - 9) << 16) |
134 (dev_priv->ramht->gpuobj->pinst >> 8));
135 nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8);
136 nv_wr32(dev, NV03_PFIFO_RAMFC, 0x00010000 |
137 dev_priv->ramfc->pinst >> 8);
138
139 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
140
141 nv_wr32(dev, NV03_PFIFO_INTR_0, 0xffffffff);
142 nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xffffffff);
143
144 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
145 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
146 nv_wr32(dev, NV03_PFIFO_CACHES, 1);
147
148 for (i = 0; i < priv->base.channels; i++) {
149 if (dev_priv->channels.ptr[i])
150 nv_mask(dev, NV04_PFIFO_MODE, (1 << i), (1 << i));
151 }
152
153 return 0;
154}
155
156int
157nv17_fifo_create(struct drm_device *dev)
158{
159 struct drm_nouveau_private *dev_priv = dev->dev_private;
160 struct nv17_fifo_priv *priv;
161
162 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
163 if (!priv)
164 return -ENOMEM;
165
166 priv->base.base.destroy = nv04_fifo_destroy;
167 priv->base.base.init = nv17_fifo_init;
168 priv->base.base.fini = nv04_fifo_fini;
169 priv->base.base.context_new = nv17_fifo_context_new;
170 priv->base.base.context_del = nv04_fifo_context_del;
171 priv->base.channels = 31;
172 priv->ramfc_desc = nv17_ramfc;
173 dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
174
175 nouveau_irq_register(dev, 8, nv04_fifo_isr);
176 return 0;
177}
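
The new init paths replace read-then-write pairs with nv_mask(dev, reg, mask, val), a read-modify-write helper: read the register, clear the bits in mask, OR in val, write back, return the old value. That is what turns the old PMC engine reset into a two-line pulse and the per-channel DMA-mode enable into a single call. A mock over an array instead of mmio space; the register addresses in main() are assumptions, only the symbolic names appear in the diff:

#include <stdint.h>
#include <stdio.h>

/* Sketch only: nv_mask() semantics against a fake register file. */
static uint32_t regs[0x4000];

static uint32_t nv_mask_mock(unsigned reg, uint32_t mask, uint32_t val)
{
	uint32_t tmp = regs[reg >> 2];

	regs[reg >> 2] = (tmp & ~mask) | val;
	return tmp;
}

int main(void)
{
	unsigned chan_id = 3;

	/* enable dma mode for one channel, as nv17_fifo_context_new() does */
	nv_mask_mock(0x002504 /* NV04_PFIFO_MODE, assumed */,
		     1 << chan_id, 1 << chan_id);

	/* pulse an enable bit low then high, as nv17_fifo_init() does */
	nv_mask_mock(0x000200 /* NV03_PMC_ENABLE, assumed */, 0x100, 0);
	nv_mask_mock(0x000200, 0x100, 0x100);

	printf("PFIFO_MODE = %08x\n", regs[0x002504 >> 2]);
	return 0;
}

In the kernel the same operation must also be atomic against the interrupt path, which is why the context_new() callers wrap it in context_switch_lock.
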
diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c
index 183e37512ef9..e34ea30758f6 100644
--- a/drivers/gpu/drm/nouveau/nv20_graph.c
+++ b/drivers/gpu/drm/nouveau/nv20_graph.c
@@ -43,8 +43,6 @@ struct nv20_graph_engine {
43int 43int
44nv20_graph_unload_context(struct drm_device *dev) 44nv20_graph_unload_context(struct drm_device *dev)
45{ 45{
46 struct drm_nouveau_private *dev_priv = dev->dev_private;
47 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
48 struct nouveau_channel *chan; 46 struct nouveau_channel *chan;
49 struct nouveau_gpuobj *grctx; 47 struct nouveau_gpuobj *grctx;
50 u32 tmp; 48 u32 tmp;
@@ -62,7 +60,7 @@ nv20_graph_unload_context(struct drm_device *dev)
62 60
63 nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000000); 61 nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
64 tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff; 62 tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
65 tmp |= (pfifo->channels - 1) << 24; 63 tmp |= 31 << 24;
66 nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp); 64 nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
67 return 0; 65 return 0;
68} 66}
@@ -796,10 +794,6 @@ nv20_graph_create(struct drm_device *dev)
796 NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base); 794 NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
797 nouveau_irq_register(dev, 12, nv20_graph_isr); 795 nouveau_irq_register(dev, 12, nv20_graph_isr);
798 796
799 /* nvsw */
800 NVOBJ_CLASS(dev, 0x506e, SW);
801 NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
802
803 NVOBJ_CLASS(dev, 0x0030, GR); /* null */ 797 NVOBJ_CLASS(dev, 0x0030, GR); /* null */
804 NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */ 798 NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
805 NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */ 799 NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
diff --git a/drivers/gpu/drm/nouveau/nv31_mpeg.c b/drivers/gpu/drm/nouveau/nv31_mpeg.c
index 6f06a0713f00..5f239bf658c4 100644
--- a/drivers/gpu/drm/nouveau/nv31_mpeg.c
+++ b/drivers/gpu/drm/nouveau/nv31_mpeg.c
@@ -24,6 +24,7 @@
24 24
25#include "drmP.h" 25#include "drmP.h"
26#include "nouveau_drv.h" 26#include "nouveau_drv.h"
27#include "nouveau_fifo.h"
27#include "nouveau_ramht.h" 28#include "nouveau_ramht.h"
28 29
29struct nv31_mpeg_engine { 30struct nv31_mpeg_engine {
@@ -208,6 +209,7 @@ nv31_mpeg_mthd_dma(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
208static int 209static int
209nv31_mpeg_isr_chid(struct drm_device *dev, u32 inst) 210nv31_mpeg_isr_chid(struct drm_device *dev, u32 inst)
210{ 211{
212 struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
211 struct drm_nouveau_private *dev_priv = dev->dev_private; 213 struct drm_nouveau_private *dev_priv = dev->dev_private;
212 struct nouveau_gpuobj *ctx; 214 struct nouveau_gpuobj *ctx;
213 unsigned long flags; 215 unsigned long flags;
@@ -218,7 +220,7 @@ nv31_mpeg_isr_chid(struct drm_device *dev, u32 inst)
218 return 0; 220 return 0;
219 221
220 spin_lock_irqsave(&dev_priv->channels.lock, flags); 222 spin_lock_irqsave(&dev_priv->channels.lock, flags);
221 for (i = 0; i < dev_priv->engine.fifo.channels; i++) { 223 for (i = 0; i < pfifo->channels; i++) {
222 if (!dev_priv->channels.ptr[i]) 224 if (!dev_priv->channels.ptr[i])
223 continue; 225 continue;
224 226
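
The nv31_mpeg_isr_chid() hunk shows the lookup every engine ISR now needs: the hardware reports a context instance address, and the driver maps it back to a channel id by scanning the channel table under the channels lock and comparing each channel's engine context. A minimal version of that reverse lookup, names invented:

#include <stdint.h>
#include <stdio.h>

/* Sketch only: instance address -> channel id.  The real code takes
 * dev_priv->channels.lock and compares ctx->pinst. */
#define CHANNELS 31

struct channel {
	uint32_t ctx_inst;	/* instance address of this engine's context */
};

static struct channel *channels[CHANNELS];

static int isr_chid(uint32_t inst)
{
	int i;

	for (i = 0; i < CHANNELS; i++) {
		if (channels[i] && channels[i]->ctx_inst == inst)
			return i;
	}
	return -1;	/* unknown context: nothing to blame the fault on */
}

int main(void)
{
	struct channel ch = { .ctx_inst = 0x1e000 };

	channels[5] = &ch;
	printf("chid = %d\n", isr_chid(0x1e000));
	return 0;
}

The switch from dev_priv->engine.fifo.channels to pfifo->channels in the loop bound is the point of the hunk: the channel count now comes from the FIFO object registered in dev_priv->eng[], not from the old static engine struct.
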
diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c
index 68cb2d991c88..cdc818479b0a 100644
--- a/drivers/gpu/drm/nouveau/nv40_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv40_fifo.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007 Ben Skeggs. 2 * Copyright (C) 2012 Ben Skeggs.
3 * All Rights Reserved. 3 * All Rights Reserved.
4 * 4 *
5 * Permission is hereby granted, free of charge, to any person obtaining 5 * Permission is hereby granted, free of charge, to any person obtaining
@@ -25,215 +25,123 @@
25 */ 25 */
26 26
27#include "drmP.h" 27#include "drmP.h"
28#include "drm.h"
28#include "nouveau_drv.h" 29#include "nouveau_drv.h"
29#include "nouveau_drm.h" 30#include "nouveau_fifo.h"
31#include "nouveau_util.h"
30#include "nouveau_ramht.h" 32#include "nouveau_ramht.h"
31 33
32#define NV40_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV40_RAMFC__SIZE)) 34static struct ramfc_desc {
33#define NV40_RAMFC__SIZE 128 35 unsigned bits:6;
34 36 unsigned ctxs:5;
35int 37 unsigned ctxp:8;
36nv40_fifo_create_context(struct nouveau_channel *chan) 38 unsigned regs:5;
39 unsigned regp;
40} nv40_ramfc[] = {
41 { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
42 { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
43 { 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
44 { 32, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
45 { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
46 { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_DMA_STATE },
47 { 28, 0, 0x18, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
48 { 2, 28, 0x18, 28, 0x002058 },
49 { 32, 0, 0x1c, 0, NV04_PFIFO_CACHE1_ENGINE },
50 { 32, 0, 0x20, 0, NV04_PFIFO_CACHE1_PULL1 },
51 { 32, 0, 0x24, 0, NV10_PFIFO_CACHE1_ACQUIRE_VALUE },
52 { 32, 0, 0x28, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP },
53 { 32, 0, 0x2c, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT },
54 { 32, 0, 0x30, 0, NV10_PFIFO_CACHE1_SEMAPHORE },
55 { 32, 0, 0x34, 0, NV10_PFIFO_CACHE1_DMA_SUBROUTINE },
56 { 32, 0, 0x38, 0, NV40_PFIFO_GRCTX_INSTANCE },
57 { 17, 0, 0x3c, 0, NV04_PFIFO_DMA_TIMESLICE },
58 { 32, 0, 0x40, 0, 0x0032e4 },
59 { 32, 0, 0x44, 0, 0x0032e8 },
60 { 32, 0, 0x4c, 0, 0x002088 },
61 { 32, 0, 0x50, 0, 0x003300 },
62 { 32, 0, 0x54, 0, 0x00330c },
63 {}
64};
65
66struct nv40_fifo_priv {
67 struct nouveau_fifo_priv base;
68 struct ramfc_desc *ramfc_desc;
69};
70
71struct nv40_fifo_chan {
72 struct nouveau_fifo_chan base;
73 struct nouveau_gpuobj *ramfc;
74};
75
76static int
77nv40_fifo_context_new(struct nouveau_channel *chan, int engine)
37{ 78{
38 struct drm_device *dev = chan->dev; 79 struct drm_device *dev = chan->dev;
39 struct drm_nouveau_private *dev_priv = dev->dev_private; 80 struct drm_nouveau_private *dev_priv = dev->dev_private;
40 uint32_t fc = NV40_RAMFC(chan->id); 81 struct nv40_fifo_priv *priv = nv_engine(dev, engine);
82 struct nv40_fifo_chan *fctx;
41 unsigned long flags; 83 unsigned long flags;
42 int ret; 84 int ret;
43 85
44 ret = nouveau_gpuobj_new_fake(dev, NV40_RAMFC(chan->id), ~0, 86 fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
45 NV40_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC | 87 if (!fctx)
46 NVOBJ_FLAG_ZERO_FREE, &chan->ramfc);
47 if (ret)
48 return ret;
49
50 chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
51 NV40_USER(chan->id), PAGE_SIZE);
52 if (!chan->user)
53 return -ENOMEM; 88 return -ENOMEM;
54 89
55 spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 90 /* map channel control registers */
91 chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
92 NV03_USER(chan->id), PAGE_SIZE);
93 if (!chan->user) {
94 ret = -ENOMEM;
95 goto error;
96 }
56 97
57 nv_wi32(dev, fc + 0, chan->pushbuf_base); 98 /* initialise default fifo context */
58 nv_wi32(dev, fc + 4, chan->pushbuf_base); 99 ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramfc->pinst +
59 nv_wi32(dev, fc + 12, chan->pushbuf->pinst >> 4); 100 chan->id * 128, ~0, 128,
60 nv_wi32(dev, fc + 24, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | 101 NVOBJ_FLAG_ZERO_ALLOC |
61 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | 102 NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc);
62 NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 | 103 if (ret)
104 goto error;
105
106 nv_wo32(fctx->ramfc, 0x00, chan->pushbuf_base);
107 nv_wo32(fctx->ramfc, 0x04, chan->pushbuf_base);
108 nv_wo32(fctx->ramfc, 0x0c, chan->pushbuf->pinst >> 4);
109 nv_wo32(fctx->ramfc, 0x18, 0x30000000 |
110 NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
111 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
63#ifdef __BIG_ENDIAN 112#ifdef __BIG_ENDIAN
64 NV_PFIFO_CACHE1_BIG_ENDIAN | 113 NV_PFIFO_CACHE1_BIG_ENDIAN |
65#endif 114#endif
66 0x30000000 /* no idea.. */); 115 NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
67 nv_wi32(dev, fc + 60, 0x0001FFFF); 116 nv_wo32(fctx->ramfc, 0x3c, 0x0001ffff);
68
69 /* enable the fifo dma operation */
70 nv_wr32(dev, NV04_PFIFO_MODE,
71 nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));
72 117
118 /* enable dma mode on the channel */
119 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
120 nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id));
73 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); 121 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
74 return 0;
75}
76
77static void
78nv40_fifo_do_load_context(struct drm_device *dev, int chid)
79{
80 struct drm_nouveau_private *dev_priv = dev->dev_private;
81 uint32_t fc = NV40_RAMFC(chid), tmp, tmp2;
82
83 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0));
84 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4));
85 nv_wr32(dev, NV10_PFIFO_CACHE1_REF_CNT, nv_ri32(dev, fc + 8));
86 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE, nv_ri32(dev, fc + 12));
87 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT, nv_ri32(dev, fc + 16));
88 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, nv_ri32(dev, fc + 20));
89
90 /* No idea what 0x2058 is.. */
91 tmp = nv_ri32(dev, fc + 24);
92 tmp2 = nv_rd32(dev, 0x2058) & 0xFFF;
93 tmp2 |= (tmp & 0x30000000);
94 nv_wr32(dev, 0x2058, tmp2);
95 tmp &= ~0x30000000;
96 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_FETCH, tmp);
97 122
98 nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 28)); 123 /*XXX: remove this later, need fifo engine context commit hook */
99 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 32)); 124 nouveau_gpuobj_ref(fctx->ramfc, &chan->ramfc);
100 nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE, nv_ri32(dev, fc + 36));
101 tmp = nv_ri32(dev, fc + 40);
102 nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP, tmp);
103 nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT, nv_ri32(dev, fc + 44));
104 nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, nv_ri32(dev, fc + 48));
105 nv_wr32(dev, NV10_PFIFO_CACHE1_DMA_SUBROUTINE, nv_ri32(dev, fc + 52));
106 nv_wr32(dev, NV40_PFIFO_GRCTX_INSTANCE, nv_ri32(dev, fc + 56));
107 125
108 /* Don't clobber the TIMEOUT_ENABLED flag when restoring from RAMFC */ 126error:
109 tmp = nv_rd32(dev, NV04_PFIFO_DMA_TIMESLICE) & ~0x1FFFF; 127 if (ret)
110 tmp |= nv_ri32(dev, fc + 60) & 0x1FFFF; 128 priv->base.base.context_del(chan, engine);
111 nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, tmp); 129 return ret;
112
113 nv_wr32(dev, 0x32e4, nv_ri32(dev, fc + 64));
114 /* NVIDIA does this next line twice... */
115 nv_wr32(dev, 0x32e8, nv_ri32(dev, fc + 68));
116 nv_wr32(dev, 0x2088, nv_ri32(dev, fc + 76));
117 nv_wr32(dev, 0x3300, nv_ri32(dev, fc + 80));
118 nv_wr32(dev, 0x330c, nv_ri32(dev, fc + 84));
119
120 nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
121 nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
122}
123
124int
125nv40_fifo_load_context(struct nouveau_channel *chan)
126{
127 struct drm_device *dev = chan->dev;
128 uint32_t tmp;
129
130 nv40_fifo_do_load_context(dev, chan->id);
131
132 /* Set channel active, and in DMA mode */
133 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1,
134 NV40_PFIFO_CACHE1_PUSH1_DMA | chan->id);
135 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 1);
136
137 /* Reset DMA_CTL_AT_INFO to INVALID */
138 tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_CTL) & ~(1 << 31);
139 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_CTL, tmp);
140
141 return 0;
142} 130}
143 131
144int 132static int
145nv40_fifo_unload_context(struct drm_device *dev) 133nv40_fifo_init(struct drm_device *dev, int engine)
146{ 134{
147 struct drm_nouveau_private *dev_priv = dev->dev_private; 135 struct drm_nouveau_private *dev_priv = dev->dev_private;
148 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; 136 struct nv40_fifo_priv *priv = nv_engine(dev, engine);
149 uint32_t fc, tmp;
150 int chid;
151
152 chid = pfifo->channel_id(dev);
153 if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
154 return 0;
155 fc = NV40_RAMFC(chid);
156
157 nv_wi32(dev, fc + 0, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
158 nv_wi32(dev, fc + 4, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
159 nv_wi32(dev, fc + 8, nv_rd32(dev, NV10_PFIFO_CACHE1_REF_CNT));
160 nv_wi32(dev, fc + 12, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE));
161 nv_wi32(dev, fc + 16, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT));
162 nv_wi32(dev, fc + 20, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_STATE));
163 tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH);
164 tmp |= nv_rd32(dev, 0x2058) & 0x30000000;
165 nv_wi32(dev, fc + 24, tmp);
166 nv_wi32(dev, fc + 28, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE));
167 nv_wi32(dev, fc + 32, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1));
168 nv_wi32(dev, fc + 36, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE));
169 tmp = nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP);
170 nv_wi32(dev, fc + 40, tmp);
171 nv_wi32(dev, fc + 44, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT));
172 nv_wi32(dev, fc + 48, nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE));
173 /* NVIDIA read 0x3228 first, then write DMA_GET here.. maybe something
174 * more involved depending on the value of 0x3228?
175 */
176 nv_wi32(dev, fc + 52, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
177 nv_wi32(dev, fc + 56, nv_rd32(dev, NV40_PFIFO_GRCTX_INSTANCE));
178 nv_wi32(dev, fc + 60, nv_rd32(dev, NV04_PFIFO_DMA_TIMESLICE) & 0x1ffff);
179 /* No idea what the below is for exactly, ripped from a mmio-trace */
180 nv_wi32(dev, fc + 64, nv_rd32(dev, NV40_PFIFO_UNK32E4));
181 /* NVIDIA do this next line twice.. bug? */
182 nv_wi32(dev, fc + 68, nv_rd32(dev, 0x32e8));
183 nv_wi32(dev, fc + 76, nv_rd32(dev, 0x2088));
184 nv_wi32(dev, fc + 80, nv_rd32(dev, 0x3300));
185#if 0 /* no real idea which is PUT/GET in UNK_48.. */
186 tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_GET);
187 tmp |= (nv_rd32(dev, NV04_PFIFO_CACHE1_PUT) << 16);
188 nv_wi32(dev, fc + 72, tmp);
189#endif
190 nv_wi32(dev, fc + 84, nv_rd32(dev, 0x330c));
191
192 nv40_fifo_do_load_context(dev, pfifo->channels - 1);
193 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1,
194 NV40_PFIFO_CACHE1_PUSH1_DMA | (pfifo->channels - 1));
195 return 0;
196}
197
198static void
199nv40_fifo_init_reset(struct drm_device *dev)
200{
201 int i; 137 int i;
202 138
203 nv_wr32(dev, NV03_PMC_ENABLE, 139 nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, 0);
204 nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PFIFO); 140 nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, NV_PMC_ENABLE_PFIFO);
205 nv_wr32(dev, NV03_PMC_ENABLE,
206 nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PFIFO);
207 141
208 nv_wr32(dev, 0x003224, 0x000f0078);
209 nv_wr32(dev, 0x003210, 0x00000000);
210 nv_wr32(dev, 0x003270, 0x00000000);
211 nv_wr32(dev, 0x003240, 0x00000000);
212 nv_wr32(dev, 0x003244, 0x00000000);
213 nv_wr32(dev, 0x003258, 0x00000000);
214 nv_wr32(dev, 0x002504, 0x00000000);
215 for (i = 0; i < 16; i++)
216 nv_wr32(dev, 0x002510 + (i * 4), 0x00000000);
217 nv_wr32(dev, 0x00250c, 0x0000ffff);
218 nv_wr32(dev, 0x002048, 0x00000000);
219 nv_wr32(dev, 0x003228, 0x00000000);
220 nv_wr32(dev, 0x0032e8, 0x00000000);
221 nv_wr32(dev, 0x002410, 0x00000000);
222 nv_wr32(dev, 0x002420, 0x00000000);
223 nv_wr32(dev, 0x002058, 0x00000001);
224 nv_wr32(dev, 0x00221c, 0x00000000);
225 /* something with 0x2084, read/modify/write, no change */
226 nv_wr32(dev, 0x002040, 0x000000ff); 142 nv_wr32(dev, 0x002040, 0x000000ff);
227 nv_wr32(dev, 0x002500, 0x00000000); 143 nv_wr32(dev, 0x002044, 0x2101ffff);
228 nv_wr32(dev, 0x003200, 0x00000000); 144 nv_wr32(dev, 0x002058, 0x00000001);
229
230 nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, 0x2101ffff);
231}
232
233static void
234nv40_fifo_init_ramxx(struct drm_device *dev)
235{
236 struct drm_nouveau_private *dev_priv = dev->dev_private;
237 145
238 nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ | 146 nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
239 ((dev_priv->ramht->bits - 9) << 16) | 147 ((dev_priv->ramht->bits - 9) << 16) |
@@ -244,64 +152,59 @@ nv40_fifo_init_ramxx(struct drm_device *dev)
244 case 0x47: 152 case 0x47:
245 case 0x49: 153 case 0x49:
246 case 0x4b: 154 case 0x4b:
247 nv_wr32(dev, 0x2230, 1); 155 nv_wr32(dev, 0x002230, 0x00000001);
248 break;
249 default:
250 break;
251 }
252
253 switch (dev_priv->chipset) {
254 case 0x40: 156 case 0x40:
255 case 0x41: 157 case 0x41:
256 case 0x42: 158 case 0x42:
257 case 0x43: 159 case 0x43:
258 case 0x45: 160 case 0x45:
259 case 0x47:
260 case 0x48: 161 case 0x48:
261 case 0x49: 162 nv_wr32(dev, 0x002220, 0x00030002);
262 case 0x4b:
263 nv_wr32(dev, NV40_PFIFO_RAMFC, 0x30002);
264 break; 163 break;
265 default: 164 default:
266 nv_wr32(dev, 0x2230, 0); 165 nv_wr32(dev, 0x002230, 0x00000000);
267 nv_wr32(dev, NV40_PFIFO_RAMFC, 166 nv_wr32(dev, 0x002220, ((dev_priv->vram_size - 512 * 1024 +
268 ((dev_priv->vram_size - 512 * 1024 + 167 dev_priv->ramfc->pinst) >> 16) |
269 dev_priv->ramfc->pinst) >> 16) | (3 << 16)); 168 0x00030000);
270 break; 169 break;
271 } 170 }
272}
273 171
274static void 172 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
275nv40_fifo_init_intr(struct drm_device *dev) 173
276{ 174 nv_wr32(dev, NV03_PFIFO_INTR_0, 0xffffffff);
277 nouveau_irq_register(dev, 8, nv04_fifo_isr); 175 nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xffffffff);
278 nv_wr32(dev, 0x002100, 0xffffffff); 176
279 nv_wr32(dev, 0x002140, 0xffffffff); 177 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
178 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
179 nv_wr32(dev, NV03_PFIFO_CACHES, 1);
180
181 for (i = 0; i < priv->base.channels; i++) {
182 if (dev_priv->channels.ptr[i])
183 nv_mask(dev, NV04_PFIFO_MODE, (1 << i), (1 << i));
184 }
185
186 return 0;
280} 187}
281 188
282int 189int
283nv40_fifo_init(struct drm_device *dev) 190nv40_fifo_create(struct drm_device *dev)
284{ 191{
285 struct drm_nouveau_private *dev_priv = dev->dev_private; 192 struct drm_nouveau_private *dev_priv = dev->dev_private;
286 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; 193 struct nv40_fifo_priv *priv;
287 int i;
288
289 nv40_fifo_init_reset(dev);
290 nv40_fifo_init_ramxx(dev);
291 194
292 nv40_fifo_do_load_context(dev, pfifo->channels - 1); 195 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
293 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1); 196 if (!priv)
294 197 return -ENOMEM;
295 nv40_fifo_init_intr(dev);
296 pfifo->enable(dev);
297 pfifo->reassign(dev, true);
298 198
299 for (i = 0; i < dev_priv->engine.fifo.channels; i++) { 199 priv->base.base.destroy = nv04_fifo_destroy;
300 if (dev_priv->channels.ptr[i]) { 200 priv->base.base.init = nv40_fifo_init;
301 uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE); 201 priv->base.base.fini = nv04_fifo_fini;
302 nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i)); 202 priv->base.base.context_new = nv40_fifo_context_new;
303 } 203 priv->base.base.context_del = nv04_fifo_context_del;
304 } 204 priv->base.channels = 31;
205 priv->ramfc_desc = nv40_ramfc;
206 dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
305 207
208 nouveau_irq_register(dev, 8, nv04_fifo_isr);
306 return 0; 209 return 0;
307} 210}
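
All three rewritten context_new() implementations (nv10, nv17, nv40) share one shape: hook the fresh context into chan->engctx first, then route every failure, and in these versions the success path too, through a single error label that invokes the engine's own context_del() when ret is set. The unwind logic lives in exactly one place, at the price that context_del() must cope with a half-built context (unmapped chan->user, NULL ramfc). A generic sketch of the idiom, with invented names and plain malloc standing in for the gpuobj/ioremap steps:

#include <stdio.h>
#include <stdlib.h>

/* Sketch only: single-label unwind via the destructor. */
struct ctx {
	void *user;	/* stands in for the ioremap'd control window */
	void *ramfc;	/* stands in for the fake gpuobj */
};

static void ctx_del(struct ctx *c)
{
	free(c->ramfc);		/* both tolerate NULL, like the kernel paths */
	free(c->user);
	free(c);
}

static int ctx_new(struct ctx **pctx, int fail_at)
{
	struct ctx *c = calloc(1, sizeof(*c));
	int ret = 0;

	if (!c)
		return -1;
	if (fail_at == 1 || !(c->user = malloc(64))) {
		ret = -1;
		goto error;
	}
	if (fail_at == 2 || !(c->ramfc = malloc(128))) {
		ret = -1;
		goto error;
	}
	*pctx = c;
error:
	if (ret)	/* success falls through harmlessly, as in the diff */
		ctx_del(c);
	return ret;
}

int main(void)
{
	struct ctx *c = NULL;

	printf("ok=%d fail=%d\n", ctx_new(&c, 0), ctx_new(&c, 2));
	if (c)
		ctx_del(c);
	return 0;
}
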
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index ba14a93d8afa..aa9e2df64a26 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -27,7 +27,7 @@
27#include "drmP.h" 27#include "drmP.h"
28#include "drm.h" 28#include "drm.h"
29#include "nouveau_drv.h" 29#include "nouveau_drv.h"
30#include "nouveau_grctx.h" 30#include "nouveau_fifo.h"
31#include "nouveau_ramht.h" 31#include "nouveau_ramht.h"
32 32
33struct nv40_graph_engine { 33struct nv40_graph_engine {
@@ -42,7 +42,6 @@ nv40_graph_context_new(struct nouveau_channel *chan, int engine)
42 struct drm_device *dev = chan->dev; 42 struct drm_device *dev = chan->dev;
43 struct drm_nouveau_private *dev_priv = dev->dev_private; 43 struct drm_nouveau_private *dev_priv = dev->dev_private;
44 struct nouveau_gpuobj *grctx = NULL; 44 struct nouveau_gpuobj *grctx = NULL;
45 struct nouveau_grctx ctx = {};
46 unsigned long flags; 45 unsigned long flags;
47 int ret; 46 int ret;
48 47
@@ -52,11 +51,7 @@ nv40_graph_context_new(struct nouveau_channel *chan, int engine)
52 return ret; 51 return ret;
53 52
54 /* Initialise default context values */ 53 /* Initialise default context values */
55 ctx.dev = chan->dev; 54 nv40_grctx_fill(dev, grctx);
56 ctx.mode = NOUVEAU_GRCTX_VALS;
57 ctx.data = grctx;
58 nv40_grctx_init(&ctx);
59
60 nv_wo32(grctx, 0, grctx->vinst); 55 nv_wo32(grctx, 0, grctx->vinst);
61 56
62 /* init grctx pointer in ramfc, and on PFIFO if channel is 57 /* init grctx pointer in ramfc, and on PFIFO if channel is
@@ -184,8 +179,7 @@ nv40_graph_init(struct drm_device *dev, int engine)
184 struct nv40_graph_engine *pgraph = nv_engine(dev, engine); 179 struct nv40_graph_engine *pgraph = nv_engine(dev, engine);
185 struct drm_nouveau_private *dev_priv = dev->dev_private; 180 struct drm_nouveau_private *dev_priv = dev->dev_private;
186 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; 181 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
187 struct nouveau_grctx ctx = {}; 182 uint32_t vramsz;
188 uint32_t vramsz, *cp;
189 int i, j; 183 int i, j;
190 184
191 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & 185 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
@@ -193,22 +187,8 @@ nv40_graph_init(struct drm_device *dev, int engine)
193 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | 187 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
194 NV_PMC_ENABLE_PGRAPH); 188 NV_PMC_ENABLE_PGRAPH);
195 189
196 cp = kmalloc(sizeof(*cp) * 256, GFP_KERNEL); 190 /* generate and upload context program */
197 if (!cp) 191 nv40_grctx_init(dev, &pgraph->grctx_size);
198 return -ENOMEM;
199
200 ctx.dev = dev;
201 ctx.mode = NOUVEAU_GRCTX_PROG;
202 ctx.data = cp;
203 ctx.ctxprog_max = 256;
204 nv40_grctx_init(&ctx);
205 pgraph->grctx_size = ctx.ctxvals_pos * 4;
206
207 nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
208 for (i = 0; i < ctx.ctxprog_len; i++)
209 nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]);
210
211 kfree(cp);
212 192
213 /* No context present currently */ 193 /* No context present currently */
214 nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000); 194 nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
@@ -366,13 +346,14 @@ nv40_graph_fini(struct drm_device *dev, int engine, bool suspend)
366static int 346static int
367nv40_graph_isr_chid(struct drm_device *dev, u32 inst) 347nv40_graph_isr_chid(struct drm_device *dev, u32 inst)
368{ 348{
349 struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
369 struct drm_nouveau_private *dev_priv = dev->dev_private; 350 struct drm_nouveau_private *dev_priv = dev->dev_private;
370 struct nouveau_gpuobj *grctx; 351 struct nouveau_gpuobj *grctx;
371 unsigned long flags; 352 unsigned long flags;
372 int i; 353 int i;
373 354
374 spin_lock_irqsave(&dev_priv->channels.lock, flags); 355 spin_lock_irqsave(&dev_priv->channels.lock, flags);
375 for (i = 0; i < dev_priv->engine.fifo.channels; i++) { 356 for (i = 0; i < pfifo->channels; i++) {
376 if (!dev_priv->channels.ptr[i]) 357 if (!dev_priv->channels.ptr[i])
377 continue; 358 continue;
378 grctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_GR]; 359 grctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_GR];
@@ -460,7 +441,6 @@ nv40_graph_create(struct drm_device *dev)
460 NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base); 441 NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
461 nouveau_irq_register(dev, 12, nv40_graph_isr); 442 nouveau_irq_register(dev, 12, nv40_graph_isr);
462 443
463 NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
464 NVOBJ_CLASS(dev, 0x0030, GR); /* null */ 444 NVOBJ_CLASS(dev, 0x0030, GR); /* null */
465 NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */ 445 NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
466 NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */ 446 NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
@@ -483,8 +463,5 @@ nv40_graph_create(struct drm_device *dev)
483 else 463 else
484 NVOBJ_CLASS(dev, 0x4097, GR); 464 NVOBJ_CLASS(dev, 0x4097, GR);
485 465
486 /* nvsw */
487 NVOBJ_CLASS(dev, 0x506e, SW);
488 NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
489 return 0; 466 return 0;
490} 467}
diff --git a/drivers/gpu/drm/nouveau/nv40_grctx.c b/drivers/gpu/drm/nouveau/nv40_grctx.c
index f70447d131d7..be0a74750fb1 100644
--- a/drivers/gpu/drm/nouveau/nv40_grctx.c
+++ b/drivers/gpu/drm/nouveau/nv40_grctx.c
@@ -595,8 +595,8 @@ nv40_graph_construct_shader(struct nouveau_grctx *ctx)
595 } 595 }
596} 596}
597 597
598void 598static void
599nv40_grctx_init(struct nouveau_grctx *ctx) 599nv40_grctx_generate(struct nouveau_grctx *ctx)
600{ 600{
601 /* decide whether we're loading/unloading the context */ 601 /* decide whether we're loading/unloading the context */
602 cp_bra (ctx, AUTO_SAVE, PENDING, cp_setup_save); 602 cp_bra (ctx, AUTO_SAVE, PENDING, cp_setup_save);
@@ -660,3 +660,31 @@ nv40_grctx_init(struct nouveau_grctx *ctx)
660 cp_out (ctx, CP_END); 660 cp_out (ctx, CP_END);
661} 661}
662 662
663void
664nv40_grctx_fill(struct drm_device *dev, struct nouveau_gpuobj *mem)
665{
666 nv40_grctx_generate(&(struct nouveau_grctx) {
667 .dev = dev,
668 .mode = NOUVEAU_GRCTX_VALS,
669 .data = mem,
670 });
671}
672
673void
674nv40_grctx_init(struct drm_device *dev, u32 *size)
675{
676 u32 ctxprog[256], i;
677 struct nouveau_grctx ctx = {
678 .dev = dev,
679 .mode = NOUVEAU_GRCTX_PROG,
680 .data = ctxprog,
681 .ctxprog_max = ARRAY_SIZE(ctxprog)
682 };
683
684 nv40_grctx_generate(&ctx);
685
686 nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
687 for (i = 0; i < ctx.ctxprog_len; i++)
688 nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, ctxprog[i]);
689 *size = ctx.ctxvals_pos * 4;
690}
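
The nv40_grctx_fill() hunk passes a C99 compound literal, an unnamed struct constructed at the call site, where the old code declared a local nouveau_grctx and assigned fields one by one. Unset members are zero-initialized, so the call carries only the fields that matter. The same shape with invented names:

#include <stdio.h>

/* Sketch only: a one-shot parameter block as a compound literal. */
struct params {
	int mode;
	int size;
	const char *tag;
};

static void generate(const struct params *p)
{
	printf("mode=%d size=%d tag=%s\n", p->mode, p->size, p->tag);
}

int main(void)
{
	/* the literal's lifetime covers the call, like &(struct
	 * nouveau_grctx){ ... } in nv40_grctx_fill() above */
	generate(&(struct params) {
		.mode = 1,
		.size = 256,
		.tag  = "vals",
	});
	return 0;
}

nv40_grctx_init() keeps a named struct instead because it reads ctx.ctxprog_len and ctx.ctxvals_pos back out after generation; the literal form only suits fire-and-forget calls.
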
diff --git a/drivers/gpu/drm/nouveau/nv40_pm.c b/drivers/gpu/drm/nouveau/nv40_pm.c
index c7615381c5d9..e66273aff493 100644
--- a/drivers/gpu/drm/nouveau/nv40_pm.c
+++ b/drivers/gpu/drm/nouveau/nv40_pm.c
@@ -27,6 +27,7 @@
27#include "nouveau_bios.h" 27#include "nouveau_bios.h"
28#include "nouveau_pm.h" 28#include "nouveau_pm.h"
29#include "nouveau_hw.h" 29#include "nouveau_hw.h"
30#include "nouveau_fifo.h"
30 31
31#define min2(a,b) ((a) < (b) ? (a) : (b)) 32#define min2(a,b) ((a) < (b) ? (a) : (b))
32 33
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index 701b927998bf..97a477b3d52d 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -79,15 +79,15 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
79 NV_ERROR(dev, "no space while blanking crtc\n"); 79 NV_ERROR(dev, "no space while blanking crtc\n");
80 return ret; 80 return ret;
81 } 81 }
82 BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2); 82 BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
83 OUT_RING(evo, NV50_EVO_CRTC_CLUT_MODE_BLANK); 83 OUT_RING(evo, NV50_EVO_CRTC_CLUT_MODE_BLANK);
84 OUT_RING(evo, 0); 84 OUT_RING(evo, 0);
85 if (dev_priv->chipset != 0x50) { 85 if (dev_priv->chipset != 0x50) {
86 BEGIN_RING(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1); 86 BEGIN_NV04(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
87 OUT_RING(evo, NV84_EVO_CRTC_CLUT_DMA_HANDLE_NONE); 87 OUT_RING(evo, NV84_EVO_CRTC_CLUT_DMA_HANDLE_NONE);
88 } 88 }
89 89
90 BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1); 90 BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
91 OUT_RING(evo, NV50_EVO_CRTC_FB_DMA_HANDLE_NONE); 91 OUT_RING(evo, NV50_EVO_CRTC_FB_DMA_HANDLE_NONE);
92 } else { 92 } else {
93 if (nv_crtc->cursor.visible) 93 if (nv_crtc->cursor.visible)
@@ -100,20 +100,20 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
100 NV_ERROR(dev, "no space while unblanking crtc\n"); 100 NV_ERROR(dev, "no space while unblanking crtc\n");
101 return ret; 101 return ret;
102 } 102 }
103 BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2); 103 BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
104 OUT_RING(evo, nv_crtc->lut.depth == 8 ? 104 OUT_RING(evo, nv_crtc->lut.depth == 8 ?
105 NV50_EVO_CRTC_CLUT_MODE_OFF : 105 NV50_EVO_CRTC_CLUT_MODE_OFF :
106 NV50_EVO_CRTC_CLUT_MODE_ON); 106 NV50_EVO_CRTC_CLUT_MODE_ON);
107 OUT_RING(evo, nv_crtc->lut.nvbo->bo.offset >> 8); 107 OUT_RING(evo, nv_crtc->lut.nvbo->bo.offset >> 8);
108 if (dev_priv->chipset != 0x50) { 108 if (dev_priv->chipset != 0x50) {
109 BEGIN_RING(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1); 109 BEGIN_NV04(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
110 OUT_RING(evo, NvEvoVRAM); 110 OUT_RING(evo, NvEvoVRAM);
111 } 111 }
112 112
113 BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, FB_OFFSET), 2); 113 BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, FB_OFFSET), 2);
114 OUT_RING(evo, nv_crtc->fb.offset >> 8); 114 OUT_RING(evo, nv_crtc->fb.offset >> 8);
115 OUT_RING(evo, 0); 115 OUT_RING(evo, 0);
116 BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1); 116 BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
117 if (dev_priv->chipset != 0x50) 117 if (dev_priv->chipset != 0x50)
118 if (nv_crtc->fb.tile_flags == 0x7a00 || 118 if (nv_crtc->fb.tile_flags == 0x7a00 ||
119 nv_crtc->fb.tile_flags == 0xfe00) 119 nv_crtc->fb.tile_flags == 0xfe00)
@@ -158,10 +158,10 @@ nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
 
 	ret = RING_SPACE(evo, 2 + (update ? 2 : 0));
 	if (ret == 0) {
-		BEGIN_RING(evo, 0, NV50_EVO_CRTC(head, DITHER_CTRL), 1);
+		BEGIN_NV04(evo, 0, NV50_EVO_CRTC(head, DITHER_CTRL), 1);
 		OUT_RING  (evo, mode);
 		if (update) {
-			BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+			BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
 			OUT_RING  (evo, 0);
 			FIRE_RING (evo);
 		}
@@ -193,11 +193,11 @@ nv50_crtc_set_color_vibrance(struct nouveau_crtc *nv_crtc, bool update)
 
 	hue = ((nv_crtc->vibrant_hue * 2047) / 100) & 0xfff;
 
-	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, COLOR_CTRL), 1);
+	BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, COLOR_CTRL), 1);
 	OUT_RING  (evo, (hue << 20) | (vib << 8));
 
 	if (update) {
-		BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+		BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
 		OUT_RING  (evo, 0);
 		FIRE_RING (evo);
 	}
@@ -311,9 +311,9 @@ nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
 	if (ret)
 		return ret;
 
-	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_CTRL), 1);
+	BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_CTRL), 1);
 	OUT_RING  (evo, ctrl);
-	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_RES1), 2);
+	BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_RES1), 2);
 	OUT_RING  (evo, oY << 16 | oX);
 	OUT_RING  (evo, oY << 16 | oX);
 
@@ -383,23 +383,15 @@ nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
 static void
 nv50_crtc_destroy(struct drm_crtc *crtc)
 {
-	struct drm_device *dev;
-	struct nouveau_crtc *nv_crtc;
-
-	if (!crtc)
-		return;
-
-	dev = crtc->dev;
-	nv_crtc = nouveau_crtc(crtc);
-
-	NV_DEBUG_KMS(dev, "\n");
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
 
-	drm_crtc_cleanup(&nv_crtc->base);
+	NV_DEBUG_KMS(crtc->dev, "\n");
 
 	nouveau_bo_unmap(nv_crtc->lut.nvbo);
 	nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
 	nouveau_bo_unmap(nv_crtc->cursor.nvbo);
 	nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
+	drm_crtc_cleanup(&nv_crtc->base);
 	kfree(nv_crtc);
 }
 
@@ -593,7 +585,7 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
 		if (ret)
 			return ret;
 
-		BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_DMA), 1);
+		BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_DMA), 1);
 		OUT_RING  (evo, fb->r_dma);
 	}
 
@@ -601,18 +593,18 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
 	if (ret)
 		return ret;
 
-	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_OFFSET), 5);
+	BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_OFFSET), 5);
 	OUT_RING  (evo, nv_crtc->fb.offset >> 8);
 	OUT_RING  (evo, 0);
 	OUT_RING  (evo, (drm_fb->height << 16) | drm_fb->width);
 	OUT_RING  (evo, fb->r_pitch);
 	OUT_RING  (evo, fb->r_format);
 
-	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CLUT_MODE), 1);
+	BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CLUT_MODE), 1);
 	OUT_RING  (evo, fb->base.depth == 8 ?
 		   NV50_EVO_CRTC_CLUT_MODE_OFF : NV50_EVO_CRTC_CLUT_MODE_ON);
 
-	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_POS), 1);
+	BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_POS), 1);
 	OUT_RING  (evo, (y << 16) | x);
 
 	if (nv_crtc->lut.depth != fb->base.depth) {
@@ -672,23 +664,23 @@ nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
 
 	ret = RING_SPACE(evo, 18);
 	if (ret == 0) {
-		BEGIN_RING(evo, 0, 0x0804 + head, 2);
+		BEGIN_NV04(evo, 0, 0x0804 + head, 2);
 		OUT_RING  (evo, 0x00800000 | mode->clock);
 		OUT_RING  (evo, (ilace == 2) ? 2 : 0);
-		BEGIN_RING(evo, 0, 0x0810 + head, 6);
+		BEGIN_NV04(evo, 0, 0x0810 + head, 6);
 		OUT_RING  (evo, 0x00000000); /* border colour */
 		OUT_RING  (evo, (vactive << 16) | hactive);
 		OUT_RING  (evo, ( vsynce << 16) | hsynce);
 		OUT_RING  (evo, (vblanke << 16) | hblanke);
 		OUT_RING  (evo, (vblanks << 16) | hblanks);
 		OUT_RING  (evo, (vblan2e << 16) | vblan2s);
-		BEGIN_RING(evo, 0, 0x082c + head, 1);
+		BEGIN_NV04(evo, 0, 0x082c + head, 1);
 		OUT_RING  (evo, 0x00000000);
-		BEGIN_RING(evo, 0, 0x0900 + head, 1);
+		BEGIN_NV04(evo, 0, 0x0900 + head, 1);
 		OUT_RING  (evo, 0x00000311); /* makes sync channel work */
-		BEGIN_RING(evo, 0, 0x08c8 + head, 1);
+		BEGIN_NV04(evo, 0, 0x08c8 + head, 1);
 		OUT_RING  (evo, (umode->vdisplay << 16) | umode->hdisplay);
-		BEGIN_RING(evo, 0, 0x08d4 + head, 1);
+		BEGIN_NV04(evo, 0, 0x08d4 + head, 1);
 		OUT_RING  (evo, 0x00000000); /* screen position */
 	}
 
@@ -755,21 +747,25 @@ nv50_crtc_create(struct drm_device *dev, int index)
 	if (!nv_crtc)
 		return -ENOMEM;
 
+	nv_crtc->index = index;
+	nv_crtc->set_dither = nv50_crtc_set_dither;
+	nv_crtc->set_scale = nv50_crtc_set_scale;
+	nv_crtc->set_color_vibrance = nv50_crtc_set_color_vibrance;
 	nv_crtc->color_vibrance = 50;
 	nv_crtc->vibrant_hue = 0;
-
-	/* Default CLUT parameters, will be activated on the hw upon
-	 * first mode set.
-	 */
+	nv_crtc->lut.depth = 0;
 	for (i = 0; i < 256; i++) {
 		nv_crtc->lut.r[i] = i << 8;
 		nv_crtc->lut.g[i] = i << 8;
 		nv_crtc->lut.b[i] = i << 8;
 	}
-	nv_crtc->lut.depth = 0;
+
+	drm_crtc_init(dev, &nv_crtc->base, &nv50_crtc_funcs);
+	drm_crtc_helper_add(&nv_crtc->base, &nv50_crtc_helper_funcs);
+	drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
 
 	ret = nouveau_bo_new(dev, 4096, 0x100, TTM_PL_FLAG_VRAM,
-			     0, 0x0000, &nv_crtc->lut.nvbo);
+			     0, 0x0000, NULL, &nv_crtc->lut.nvbo);
 	if (!ret) {
 		ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM);
 		if (!ret)
@@ -778,24 +774,12 @@ nv50_crtc_create(struct drm_device *dev, int index)
 			nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
 	}
 
-	if (ret) {
-		kfree(nv_crtc);
-		return ret;
-	}
-
-	nv_crtc->index = index;
+	if (ret)
+		goto out;
 
-	/* set function pointers */
-	nv_crtc->set_dither = nv50_crtc_set_dither;
-	nv_crtc->set_scale = nv50_crtc_set_scale;
-	nv_crtc->set_color_vibrance = nv50_crtc_set_color_vibrance;
-
-	drm_crtc_init(dev, &nv_crtc->base, &nv50_crtc_funcs);
-	drm_crtc_helper_add(&nv_crtc->base, &nv50_crtc_helper_funcs);
-	drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
 
 	ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
-			     0, 0x0000, &nv_crtc->cursor.nvbo);
+			     0, 0x0000, NULL, &nv_crtc->cursor.nvbo);
 	if (!ret) {
 		ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
 		if (!ret)
@@ -804,6 +788,12 @@ nv50_crtc_create(struct drm_device *dev, int index)
 			nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
 	}
 
+	if (ret)
+		goto out;
+
 	nv50_cursor_init(nv_crtc);
-	return 0;
+out:
+	if (ret)
+		nv50_crtc_destroy(&nv_crtc->base);
+	return ret;
 }
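
The nv50_crtc_create() rework above replaces the scattered early returns with a single `out:` label, funnelling every failure into nv50_crtc_destroy(); that only works because the destructor was also reordered to tolerate a partially constructed CRTC (dropping a reference on a never-allocated BO is a no-op). A minimal user-space sketch of the same single-exit pattern, with illustrative stand-in names rather than the kernel API:

#include <stdlib.h>

struct fake_crtc {
	void *lut_bo;
	void *cursor_bo;
};

static int alloc_bo(void **bo)
{
	*bo = malloc(4096);
	return *bo ? 0 : -1;		/* the driver would return -ENOMEM */
}

static void fake_crtc_destroy(struct fake_crtc *crtc)
{
	/* free(NULL) is a no-op, much like releasing an unallocated BO
	 * reference, so this is safe on partial construction */
	free(crtc->cursor_bo);
	free(crtc->lut_bo);
	free(crtc);
}

int fake_crtc_create(struct fake_crtc **out)
{
	struct fake_crtc *crtc = calloc(1, sizeof(*crtc));
	int ret;

	if (!crtc)
		return -1;

	ret = alloc_bo(&crtc->lut_bo);
	if (ret)
		goto out;
	ret = alloc_bo(&crtc->cursor_bo);
out:
	if (ret) {			/* one unwind path for every failure */
		fake_crtc_destroy(crtc);
		return ret;
	}
	*out = crtc;
	return 0;
}

The payoff is the same as in the patch: adding another allocation step later means one new `goto out`, not another hand-rolled unwind.
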
diff --git a/drivers/gpu/drm/nouveau/nv50_cursor.c b/drivers/gpu/drm/nouveau/nv50_cursor.c
index adfc9b607a50..af4ec7bf3670 100644
--- a/drivers/gpu/drm/nouveau/nv50_cursor.c
+++ b/drivers/gpu/drm/nouveau/nv50_cursor.c
@@ -53,15 +53,15 @@ nv50_cursor_show(struct nouveau_crtc *nv_crtc, bool update)
 	}
 
 	if (dev_priv->chipset != 0x50) {
-		BEGIN_RING(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
+		BEGIN_NV04(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
 		OUT_RING(evo, NvEvoVRAM);
 	}
-	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
+	BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
 	OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_SHOW);
 	OUT_RING(evo, nv_crtc->cursor.offset >> 8);
 
 	if (update) {
-		BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+		BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
 		OUT_RING(evo, 0);
 		FIRE_RING(evo);
 		nv_crtc->cursor.visible = true;
@@ -86,16 +86,16 @@ nv50_cursor_hide(struct nouveau_crtc *nv_crtc, bool update)
 		NV_ERROR(dev, "no space while hiding cursor\n");
 		return;
 	}
-	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
+	BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
 	OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_HIDE);
 	OUT_RING(evo, 0);
 	if (dev_priv->chipset != 0x50) {
-		BEGIN_RING(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
+		BEGIN_NV04(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
 		OUT_RING(evo, NV84_EVO_CRTC_CURSOR_DMA_HANDLE_NONE);
 	}
 
 	if (update) {
-		BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+		BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
 		OUT_RING(evo, 0);
 		FIRE_RING(evo);
 		nv_crtc->cursor.visible = false;
diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c
index 55c56330be6d..eb216a446b89 100644
--- a/drivers/gpu/drm/nouveau/nv50_dac.c
+++ b/drivers/gpu/drm/nouveau/nv50_dac.c
@@ -55,9 +55,9 @@ nv50_dac_disconnect(struct drm_encoder *encoder)
 		NV_ERROR(dev, "no space while disconnecting DAC\n");
 		return;
 	}
-	BEGIN_RING(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 1);
+	BEGIN_NV04(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 1);
 	OUT_RING  (evo, 0);
-	BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+	BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
 	OUT_RING  (evo, 0);
 
 	nv_encoder->crtc = NULL;
@@ -240,7 +240,7 @@ nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 		NV_ERROR(dev, "no space while connecting DAC\n");
 		return;
 	}
-	BEGIN_RING(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 2);
+	BEGIN_NV04(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 2);
 	OUT_RING(evo, mode_ctl);
 	OUT_RING(evo, mode_ctl2);
 
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 8b78b9cfa383..5c41612723b4 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -32,6 +32,7 @@
32#include "nouveau_fb.h" 32#include "nouveau_fb.h"
33#include "nouveau_fbcon.h" 33#include "nouveau_fbcon.h"
34#include "nouveau_ramht.h" 34#include "nouveau_ramht.h"
35#include "nouveau_software.h"
35#include "drm_crtc_helper.h" 36#include "drm_crtc_helper.h"
36 37
37static void nv50_display_isr(struct drm_device *); 38static void nv50_display_isr(struct drm_device *);
@@ -140,11 +141,11 @@ nv50_display_sync(struct drm_device *dev)
 
 	ret = RING_SPACE(evo, 6);
 	if (ret == 0) {
-		BEGIN_RING(evo, 0, 0x0084, 1);
+		BEGIN_NV04(evo, 0, 0x0084, 1);
 		OUT_RING  (evo, 0x80000000);
-		BEGIN_RING(evo, 0, 0x0080, 1);
+		BEGIN_NV04(evo, 0, 0x0080, 1);
 		OUT_RING  (evo, 0);
-		BEGIN_RING(evo, 0, 0x0084, 1);
+		BEGIN_NV04(evo, 0, 0x0084, 1);
 		OUT_RING  (evo, 0x00000000);
 
 		nv_wo32(disp->ntfy, 0x000, 0x00000000);
@@ -267,7 +268,7 @@ nv50_display_init(struct drm_device *dev)
 	ret = RING_SPACE(evo, 3);
 	if (ret)
 		return ret;
-	BEGIN_RING(evo, 0, NV50_EVO_UNK84, 2);
+	BEGIN_NV04(evo, 0, NV50_EVO_UNK84, 2);
 	OUT_RING  (evo, NV50_EVO_UNK84_NOTIFY_DISABLED);
 	OUT_RING  (evo, NvEvoSync);
 
@@ -292,7 +293,7 @@ nv50_display_fini(struct drm_device *dev)
 
 	ret = RING_SPACE(evo, 2);
 	if (ret == 0) {
-		BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+		BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
 		OUT_RING(evo, 0);
 	}
 	FIRE_RING(evo);
@@ -358,8 +359,11 @@ nv50_display_create(struct drm_device *dev)
 	dev_priv->engine.display.priv = priv;
 
 	/* Create CRTC objects */
-	for (i = 0; i < 2; i++)
-		nv50_crtc_create(dev, i);
+	for (i = 0; i < 2; i++) {
+		ret = nv50_crtc_create(dev, i);
+		if (ret)
+			return ret;
+	}
 
 	/* We setup the encoders from the BIOS table */
 	for (i = 0 ; i < dcb->entries; i++) {
@@ -438,13 +442,13 @@ nv50_display_flip_stop(struct drm_crtc *crtc)
 		return;
 	}
 
-	BEGIN_RING(evo, 0, 0x0084, 1);
+	BEGIN_NV04(evo, 0, 0x0084, 1);
 	OUT_RING  (evo, 0x00000000);
-	BEGIN_RING(evo, 0, 0x0094, 1);
+	BEGIN_NV04(evo, 0, 0x0094, 1);
 	OUT_RING  (evo, 0x00000000);
-	BEGIN_RING(evo, 0, 0x00c0, 1);
+	BEGIN_NV04(evo, 0, 0x00c0, 1);
 	OUT_RING  (evo, 0x00000000);
-	BEGIN_RING(evo, 0, 0x0080, 1);
+	BEGIN_NV04(evo, 0, 0x0080, 1);
 	OUT_RING  (evo, 0x00000000);
 	FIRE_RING (evo);
 }
@@ -474,28 +478,28 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 	}
 
 	if (dev_priv->chipset < 0xc0) {
-		BEGIN_RING(chan, 0, 0x0060, 2);
+		BEGIN_NV04(chan, 0, 0x0060, 2);
 		OUT_RING  (chan, NvEvoSema0 + nv_crtc->index);
 		OUT_RING  (chan, dispc->sem.offset);
-		BEGIN_RING(chan, 0, 0x006c, 1);
+		BEGIN_NV04(chan, 0, 0x006c, 1);
 		OUT_RING  (chan, 0xf00d0000 | dispc->sem.value);
-		BEGIN_RING(chan, 0, 0x0064, 2);
+		BEGIN_NV04(chan, 0, 0x0064, 2);
 		OUT_RING  (chan, dispc->sem.offset ^ 0x10);
 		OUT_RING  (chan, 0x74b1e000);
-		BEGIN_RING(chan, 0, 0x0060, 1);
+		BEGIN_NV04(chan, 0, 0x0060, 1);
 		if (dev_priv->chipset < 0x84)
 			OUT_RING  (chan, NvSema);
 		else
 			OUT_RING  (chan, chan->vram_handle);
 	} else {
-		u64 offset = chan->dispc_vma[nv_crtc->index].offset;
+		u64 offset = nvc0_software_crtc(chan, nv_crtc->index);
 		offset += dispc->sem.offset;
-		BEGIN_NVC0(chan, 2, 0, 0x0010, 4);
+		BEGIN_NVC0(chan, 0, 0x0010, 4);
 		OUT_RING  (chan, upper_32_bits(offset));
 		OUT_RING  (chan, lower_32_bits(offset));
 		OUT_RING  (chan, 0xf00d0000 | dispc->sem.value);
 		OUT_RING  (chan, 0x1002);
-		BEGIN_NVC0(chan, 2, 0, 0x0010, 4);
+		BEGIN_NVC0(chan, 0, 0x0010, 4);
 		OUT_RING  (chan, upper_32_bits(offset));
 		OUT_RING  (chan, lower_32_bits(offset ^ 0x10));
 		OUT_RING  (chan, 0x74b1e000);
@@ -508,40 +512,40 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 	}
 
 	/* queue the flip on the crtc's "display sync" channel */
-	BEGIN_RING(evo, 0, 0x0100, 1);
+	BEGIN_NV04(evo, 0, 0x0100, 1);
 	OUT_RING  (evo, 0xfffe0000);
 	if (chan) {
-		BEGIN_RING(evo, 0, 0x0084, 1);
+		BEGIN_NV04(evo, 0, 0x0084, 1);
 		OUT_RING  (evo, 0x00000100);
 	} else {
-		BEGIN_RING(evo, 0, 0x0084, 1);
+		BEGIN_NV04(evo, 0, 0x0084, 1);
 		OUT_RING  (evo, 0x00000010);
 		/* allows gamma somehow, PDISP will bitch at you if
 		 * you don't wait for vblank before changing this..
 		 */
-		BEGIN_RING(evo, 0, 0x00e0, 1);
+		BEGIN_NV04(evo, 0, 0x00e0, 1);
 		OUT_RING  (evo, 0x40000000);
 	}
-	BEGIN_RING(evo, 0, 0x0088, 4);
+	BEGIN_NV04(evo, 0, 0x0088, 4);
 	OUT_RING  (evo, dispc->sem.offset);
 	OUT_RING  (evo, 0xf00d0000 | dispc->sem.value);
 	OUT_RING  (evo, 0x74b1e000);
 	OUT_RING  (evo, NvEvoSync);
-	BEGIN_RING(evo, 0, 0x00a0, 2);
+	BEGIN_NV04(evo, 0, 0x00a0, 2);
 	OUT_RING  (evo, 0x00000000);
 	OUT_RING  (evo, 0x00000000);
-	BEGIN_RING(evo, 0, 0x00c0, 1);
+	BEGIN_NV04(evo, 0, 0x00c0, 1);
 	OUT_RING  (evo, nv_fb->r_dma);
-	BEGIN_RING(evo, 0, 0x0110, 2);
+	BEGIN_NV04(evo, 0, 0x0110, 2);
 	OUT_RING  (evo, 0x00000000);
 	OUT_RING  (evo, 0x00000000);
-	BEGIN_RING(evo, 0, 0x0800, 5);
+	BEGIN_NV04(evo, 0, 0x0800, 5);
 	OUT_RING  (evo, nv_fb->nvbo->bo.offset >> 8);
 	OUT_RING  (evo, 0);
 	OUT_RING  (evo, (fb->height << 16) | fb->width);
 	OUT_RING  (evo, nv_fb->r_pitch);
 	OUT_RING  (evo, nv_fb->r_format);
-	BEGIN_RING(evo, 0, 0x0080, 1);
+	BEGIN_NV04(evo, 0, 0x0080, 1);
 	OUT_RING  (evo, 0x00000000);
 	FIRE_RING (evo);
 
@@ -642,20 +646,7 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcb,
 static void
 nv50_display_vblank_crtc_handler(struct drm_device *dev, int crtc)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan, *tmp;
-
-	list_for_each_entry_safe(chan, tmp, &dev_priv->vbl_waiting,
-				 nvsw.vbl_wait) {
-		if (chan->nvsw.vblsem_head != crtc)
-			continue;
-
-		nouveau_bo_wr32(chan->notifier_bo, chan->nvsw.vblsem_offset,
-				chan->nvsw.vblsem_rval);
-		list_del(&chan->nvsw.vbl_wait);
-		drm_vblank_put(dev, crtc);
-	}
-
+	nouveau_software_vblank(dev, crtc);
 	drm_handle_vblank(dev, crtc);
 }
 
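
The deleted body of nv50_display_vblank_crtc_handler() documents what nouveau_software_vblank() is now expected to do on this path: walk the channels waiting on this CRTC's vblank, write each one's semaphore release value, and unlink it before drm_handle_vblank() fires. A user-space sketch of that walk, with the kernel list and locking machinery replaced by a plain singly linked list (the types here are stand-ins, not the driver's):

#include <stdint.h>

struct vbl_waiter {
	int head;			/* CRTC index the channel is waiting on */
	uint32_t *sem;			/* notifier word the channel polls */
	uint32_t rval;			/* value that releases the waiter */
	struct vbl_waiter *next;
};

static void software_vblank(struct vbl_waiter **list, int crtc)
{
	struct vbl_waiter **p = list;

	while (*p) {
		struct vbl_waiter *w = *p;

		if (w->head != crtc) {	/* waiting on some other CRTC */
			p = &w->next;
			continue;
		}
		*w->sem = w->rval;	/* wake it, like nouveau_bo_wr32() did */
		*p = w->next;		/* unlink, like list_del() */
	}
}

Moving this behind the new software-engine interface is what lets the same vblank bookkeeping serve nv50 and nvc0 display paths alike.
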
diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h
index 5d3dd14d2837..e9db9b97f041 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.h
+++ b/drivers/gpu/drm/nouveau/nv50_display.h
@@ -33,6 +33,7 @@
33#include "nouveau_dma.h" 33#include "nouveau_dma.h"
34#include "nouveau_reg.h" 34#include "nouveau_reg.h"
35#include "nouveau_crtc.h" 35#include "nouveau_crtc.h"
36#include "nouveau_software.h"
36#include "nv50_evo.h" 37#include "nv50_evo.h"
37 38
38struct nv50_display_crtc { 39struct nv50_display_crtc {
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c
index 9b962e989d7c..ddcd55595824 100644
--- a/drivers/gpu/drm/nouveau/nv50_evo.c
+++ b/drivers/gpu/drm/nouveau/nv50_evo.c
@@ -117,7 +117,7 @@ nv50_evo_channel_new(struct drm_device *dev, int chid,
 	evo->user_get = 4;
 	evo->user_put = 0;
 
-	ret = nouveau_bo_new(dev, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0,
+	ret = nouveau_bo_new(dev, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0, NULL,
 			     &evo->pushbuf_bo);
 	if (ret == 0)
 		ret = nouveau_bo_pin(evo->pushbuf_bo, TTM_PL_FLAG_VRAM);
@@ -333,7 +333,7 @@ nv50_evo_create(struct drm_device *dev)
 		goto err;
 
 	ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
-			     0, 0x0000, &dispc->sem.bo);
+			     0, 0x0000, NULL, &dispc->sem.bo);
 	if (!ret) {
 		ret = nouveau_bo_pin(dispc->sem.bo, TTM_PL_FLAG_VRAM);
 		if (!ret)
diff --git a/drivers/gpu/drm/nouveau/nv50_fb.c b/drivers/gpu/drm/nouveau/nv50_fb.c
index bdd2afe29205..f1e4b9e07d14 100644
--- a/drivers/gpu/drm/nouveau/nv50_fb.c
+++ b/drivers/gpu/drm/nouveau/nv50_fb.c
@@ -2,6 +2,7 @@
2#include "drm.h" 2#include "drm.h"
3#include "nouveau_drv.h" 3#include "nouveau_drv.h"
4#include "nouveau_drm.h" 4#include "nouveau_drm.h"
5#include "nouveau_fifo.h"
5 6
6struct nv50_fb_priv { 7struct nv50_fb_priv {
7 struct page *r100c08_page; 8 struct page *r100c08_page;
@@ -212,6 +213,7 @@ static struct nouveau_enum vm_fault[] = {
 void
 nv50_fb_vm_trap(struct drm_device *dev, int display)
 {
+	struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	const struct nouveau_enum *en, *cl;
 	unsigned long flags;
@@ -236,7 +238,7 @@ nv50_fb_vm_trap(struct drm_device *dev, int display)
 	/* lookup channel id */
 	chinst = (trap[2] << 16) | trap[1];
 	spin_lock_irqsave(&dev_priv->channels.lock, flags);
-	for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) {
+	for (ch = 0; ch < pfifo->channels; ch++) {
 		struct nouveau_channel *chan = dev_priv->channels.ptr[ch];
 
 		if (!chan || !chan->ramin)
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index dc75a7206524..e3c8b05dcae4 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -43,22 +43,22 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
 		return ret;
 
 	if (rect->rop != ROP_COPY) {
-		BEGIN_RING(chan, NvSub2D, 0x02ac, 1);
+		BEGIN_NV04(chan, NvSub2D, 0x02ac, 1);
 		OUT_RING(chan, 1);
 	}
-	BEGIN_RING(chan, NvSub2D, 0x0588, 1);
+	BEGIN_NV04(chan, NvSub2D, 0x0588, 1);
 	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
 	    info->fix.visual == FB_VISUAL_DIRECTCOLOR)
 		OUT_RING(chan, ((uint32_t *)info->pseudo_palette)[rect->color]);
 	else
 		OUT_RING(chan, rect->color);
-	BEGIN_RING(chan, NvSub2D, 0x0600, 4);
+	BEGIN_NV04(chan, NvSub2D, 0x0600, 4);
 	OUT_RING(chan, rect->dx);
 	OUT_RING(chan, rect->dy);
 	OUT_RING(chan, rect->dx + rect->width);
 	OUT_RING(chan, rect->dy + rect->height);
 	if (rect->rop != ROP_COPY) {
-		BEGIN_RING(chan, NvSub2D, 0x02ac, 1);
+		BEGIN_NV04(chan, NvSub2D, 0x02ac, 1);
 		OUT_RING(chan, 3);
 	}
 	FIRE_RING(chan);
@@ -78,14 +78,14 @@ nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
 	if (ret)
 		return ret;
 
-	BEGIN_RING(chan, NvSub2D, 0x0110, 1);
+	BEGIN_NV04(chan, NvSub2D, 0x0110, 1);
 	OUT_RING(chan, 0);
-	BEGIN_RING(chan, NvSub2D, 0x08b0, 4);
+	BEGIN_NV04(chan, NvSub2D, 0x08b0, 4);
 	OUT_RING(chan, region->dx);
 	OUT_RING(chan, region->dy);
 	OUT_RING(chan, region->width);
 	OUT_RING(chan, region->height);
-	BEGIN_RING(chan, NvSub2D, 0x08d0, 4);
+	BEGIN_NV04(chan, NvSub2D, 0x08d0, 4);
 	OUT_RING(chan, 0);
 	OUT_RING(chan, region->sx);
 	OUT_RING(chan, 0);
@@ -116,7 +116,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 	width = ALIGN(image->width, 32);
 	dwords = (width * image->height) >> 5;
 
-	BEGIN_RING(chan, NvSub2D, 0x0814, 2);
+	BEGIN_NV04(chan, NvSub2D, 0x0814, 2);
 	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
 	    info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
 		OUT_RING(chan, palette[image->bg_color] | mask);
@@ -125,10 +125,10 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 		OUT_RING(chan, image->bg_color);
 		OUT_RING(chan, image->fg_color);
 	}
-	BEGIN_RING(chan, NvSub2D, 0x0838, 2);
+	BEGIN_NV04(chan, NvSub2D, 0x0838, 2);
 	OUT_RING(chan, image->width);
 	OUT_RING(chan, image->height);
-	BEGIN_RING(chan, NvSub2D, 0x0850, 4);
+	BEGIN_NV04(chan, NvSub2D, 0x0850, 4);
 	OUT_RING(chan, 0);
 	OUT_RING(chan, image->dx);
 	OUT_RING(chan, 0);
@@ -143,7 +143,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 
 		dwords -= push;
 
-		BEGIN_RING(chan, NvSub2D, 0x40000860, push);
+		BEGIN_NI04(chan, NvSub2D, 0x0860, push);
 		OUT_RINGp(chan, data, push);
 		data += push;
 	}
@@ -199,60 +199,59 @@ nv50_fbcon_accel_init(struct fb_info *info)
 		return ret;
 	}
 
-	BEGIN_RING(chan, NvSub2D, 0x0000, 1);
+	BEGIN_NV04(chan, NvSub2D, 0x0000, 1);
 	OUT_RING(chan, Nv2D);
-	BEGIN_RING(chan, NvSub2D, 0x0180, 4);
-	OUT_RING(chan, NvNotify0);
+	BEGIN_NV04(chan, NvSub2D, 0x0184, 3);
 	OUT_RING(chan, chan->vram_handle);
 	OUT_RING(chan, chan->vram_handle);
 	OUT_RING(chan, chan->vram_handle);
-	BEGIN_RING(chan, NvSub2D, 0x0290, 1);
+	BEGIN_NV04(chan, NvSub2D, 0x0290, 1);
 	OUT_RING(chan, 0);
-	BEGIN_RING(chan, NvSub2D, 0x0888, 1);
+	BEGIN_NV04(chan, NvSub2D, 0x0888, 1);
 	OUT_RING(chan, 1);
-	BEGIN_RING(chan, NvSub2D, 0x02ac, 1);
+	BEGIN_NV04(chan, NvSub2D, 0x02ac, 1);
 	OUT_RING(chan, 3);
-	BEGIN_RING(chan, NvSub2D, 0x02a0, 1);
+	BEGIN_NV04(chan, NvSub2D, 0x02a0, 1);
 	OUT_RING(chan, 0x55);
-	BEGIN_RING(chan, NvSub2D, 0x08c0, 4);
+	BEGIN_NV04(chan, NvSub2D, 0x08c0, 4);
 	OUT_RING(chan, 0);
 	OUT_RING(chan, 1);
 	OUT_RING(chan, 0);
 	OUT_RING(chan, 1);
-	BEGIN_RING(chan, NvSub2D, 0x0580, 2);
+	BEGIN_NV04(chan, NvSub2D, 0x0580, 2);
 	OUT_RING(chan, 4);
 	OUT_RING(chan, format);
-	BEGIN_RING(chan, NvSub2D, 0x02e8, 2);
+	BEGIN_NV04(chan, NvSub2D, 0x02e8, 2);
 	OUT_RING(chan, 2);
 	OUT_RING(chan, 1);
-	BEGIN_RING(chan, NvSub2D, 0x0804, 1);
+	BEGIN_NV04(chan, NvSub2D, 0x0804, 1);
 	OUT_RING(chan, format);
-	BEGIN_RING(chan, NvSub2D, 0x0800, 1);
+	BEGIN_NV04(chan, NvSub2D, 0x0800, 1);
 	OUT_RING(chan, 1);
-	BEGIN_RING(chan, NvSub2D, 0x0808, 3);
+	BEGIN_NV04(chan, NvSub2D, 0x0808, 3);
 	OUT_RING(chan, 0);
 	OUT_RING(chan, 0);
 	OUT_RING(chan, 1);
-	BEGIN_RING(chan, NvSub2D, 0x081c, 1);
+	BEGIN_NV04(chan, NvSub2D, 0x081c, 1);
 	OUT_RING(chan, 1);
-	BEGIN_RING(chan, NvSub2D, 0x0840, 4);
+	BEGIN_NV04(chan, NvSub2D, 0x0840, 4);
 	OUT_RING(chan, 0);
 	OUT_RING(chan, 1);
 	OUT_RING(chan, 0);
 	OUT_RING(chan, 1);
-	BEGIN_RING(chan, NvSub2D, 0x0200, 2);
+	BEGIN_NV04(chan, NvSub2D, 0x0200, 2);
 	OUT_RING(chan, format);
 	OUT_RING(chan, 1);
-	BEGIN_RING(chan, NvSub2D, 0x0214, 5);
+	BEGIN_NV04(chan, NvSub2D, 0x0214, 5);
 	OUT_RING(chan, info->fix.line_length);
 	OUT_RING(chan, info->var.xres_virtual);
 	OUT_RING(chan, info->var.yres_virtual);
 	OUT_RING(chan, upper_32_bits(fb->vma.offset));
 	OUT_RING(chan, lower_32_bits(fb->vma.offset));
-	BEGIN_RING(chan, NvSub2D, 0x0230, 2);
+	BEGIN_NV04(chan, NvSub2D, 0x0230, 2);
 	OUT_RING(chan, format);
 	OUT_RING(chan, 1);
-	BEGIN_RING(chan, NvSub2D, 0x0244, 5);
+	BEGIN_NV04(chan, NvSub2D, 0x0244, 5);
 	OUT_RING(chan, info->fix.line_length);
 	OUT_RING(chan, info->var.xres_virtual);
 	OUT_RING(chan, info->var.yres_virtual);
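
The BEGIN_RING to BEGIN_NV04 rename running through this whole series is mechanical, but the imageblit hunk above shows why a separate BEGIN_NI04 exists: the old code passed 0x40000860 as the method, smuggling the non-incrementing bit in through the method argument. A sketch of how such method headers plausibly pack; the exact field layout lives in nouveau_dma.h and the shifts below are an assumption inferred from that literal:

#include <stdint.h>

/* Hypothetical NV04-style method header: one word carrying the dword
 * count, subchannel and method offset for the commands that follow. */
static inline uint32_t nv04_header(int subc, int mthd, int size)
{
	return ((uint32_t)size << 18) | ((uint32_t)subc << 13) | (uint32_t)mthd;
}

/* Non-incrementing variant: same header plus the bit the old code wrote
 * by hand as 0x40000000 in "0x40000860", so every data word lands on the
 * same method instead of consecutive ones. */
static inline uint32_t ni04_header(int subc, int mthd, int size)
{
	return 0x40000000 | nv04_header(subc, mthd, size);
}

Folding that bit into a named macro is the whole point of the rename: the method argument is now always just a method offset.
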
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index 3bc2a565c20b..55383b85db0b 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007 Ben Skeggs.
+ * Copyright (C) 2012 Ben Skeggs.
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining
@@ -27,480 +27,268 @@
27#include "drmP.h" 27#include "drmP.h"
28#include "drm.h" 28#include "drm.h"
29#include "nouveau_drv.h" 29#include "nouveau_drv.h"
30#include "nouveau_fifo.h"
30#include "nouveau_ramht.h" 31#include "nouveau_ramht.h"
31#include "nouveau_vm.h" 32#include "nouveau_vm.h"
32 33
33static void 34struct nv50_fifo_priv {
35 struct nouveau_fifo_priv base;
36 struct nouveau_gpuobj *playlist[2];
37 int cur_playlist;
38};
39
40struct nv50_fifo_chan {
41 struct nouveau_fifo_chan base;
42};
43
44void
34nv50_fifo_playlist_update(struct drm_device *dev) 45nv50_fifo_playlist_update(struct drm_device *dev)
35{ 46{
47 struct nv50_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
36 struct drm_nouveau_private *dev_priv = dev->dev_private; 48 struct drm_nouveau_private *dev_priv = dev->dev_private;
37 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
38 struct nouveau_gpuobj *cur; 49 struct nouveau_gpuobj *cur;
39 int i, nr; 50 int i, p;
40
41 NV_DEBUG(dev, "\n");
42 51
43 cur = pfifo->playlist[pfifo->cur_playlist]; 52 cur = priv->playlist[priv->cur_playlist];
44 pfifo->cur_playlist = !pfifo->cur_playlist; 53 priv->cur_playlist = !priv->cur_playlist;
45 54
46 /* We never schedule channel 0 or 127 */ 55 for (i = 0, p = 0; i < priv->base.channels; i++) {
47 for (i = 1, nr = 0; i < 127; i++) { 56 if (nv_rd32(dev, 0x002600 + (i * 4)) & 0x80000000)
48 if (dev_priv->channels.ptr[i] && 57 nv_wo32(cur, p++ * 4, i);
49 dev_priv->channels.ptr[i]->ramfc) {
50 nv_wo32(cur, (nr * 4), i);
51 nr++;
52 }
53 } 58 }
54 dev_priv->engine.instmem.flush(dev);
55
56 nv_wr32(dev, 0x32f4, cur->vinst >> 12);
57 nv_wr32(dev, 0x32ec, nr);
58 nv_wr32(dev, 0x2500, 0x101);
59}
60 59
61static void 60 dev_priv->engine.instmem.flush(dev);
62nv50_fifo_channel_enable(struct drm_device *dev, int channel)
63{
64 struct drm_nouveau_private *dev_priv = dev->dev_private;
65 struct nouveau_channel *chan = dev_priv->channels.ptr[channel];
66 uint32_t inst;
67
68 NV_DEBUG(dev, "ch%d\n", channel);
69
70 if (dev_priv->chipset == 0x50)
71 inst = chan->ramfc->vinst >> 12;
72 else
73 inst = chan->ramfc->vinst >> 8;
74 61
75 nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), inst | 62 nv_wr32(dev, 0x0032f4, cur->vinst >> 12);
76 NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED); 63 nv_wr32(dev, 0x0032ec, p);
64 nv_wr32(dev, 0x002500, 0x00000101);
77} 65}
78 66
79static void 67static int
80nv50_fifo_channel_disable(struct drm_device *dev, int channel) 68nv50_fifo_context_new(struct nouveau_channel *chan, int engine)
81{ 69{
70 struct nv50_fifo_priv *priv = nv_engine(chan->dev, engine);
71 struct nv50_fifo_chan *fctx;
72 struct drm_device *dev = chan->dev;
82 struct drm_nouveau_private *dev_priv = dev->dev_private; 73 struct drm_nouveau_private *dev_priv = dev->dev_private;
83 uint32_t inst; 74 u64 ib_offset = chan->pushbuf_base + chan->dma.ib_base * 4;
84 75 u64 instance = chan->ramin->vinst >> 12;
85 NV_DEBUG(dev, "ch%d\n", channel); 76 unsigned long flags;
77 int ret = 0, i;
86 78
87 if (dev_priv->chipset == 0x50) 79 fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
88 inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80; 80 if (!fctx)
89 else 81 return -ENOMEM;
90 inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84; 82 atomic_inc(&chan->vm->engref[engine]);
91 nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), inst);
92}
93 83
94static void 84 chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
95nv50_fifo_init_reset(struct drm_device *dev) 85 NV50_USER(chan->id), PAGE_SIZE);
96{ 86 if (!chan->user) {
97 uint32_t pmc_e = NV_PMC_ENABLE_PFIFO; 87 ret = -ENOMEM;
88 goto error;
89 }
98 90
99 NV_DEBUG(dev, "\n"); 91 for (i = 0; i < 0x100; i += 4)
92 nv_wo32(chan->ramin, i, 0x00000000);
93 nv_wo32(chan->ramin, 0x3c, 0x403f6078);
94 nv_wo32(chan->ramin, 0x40, 0x00000000);
95 nv_wo32(chan->ramin, 0x44, 0x01003fff);
96 nv_wo32(chan->ramin, 0x48, chan->pushbuf->cinst >> 4);
97 nv_wo32(chan->ramin, 0x50, lower_32_bits(ib_offset));
98 nv_wo32(chan->ramin, 0x54, upper_32_bits(ib_offset) |
99 drm_order(chan->dma.ib_max + 1) << 16);
100 nv_wo32(chan->ramin, 0x60, 0x7fffffff);
101 nv_wo32(chan->ramin, 0x78, 0x00000000);
102 nv_wo32(chan->ramin, 0x7c, 0x30000001);
103 nv_wo32(chan->ramin, 0x80, ((chan->ramht->bits - 9) << 27) |
104 (4 << 24) /* SEARCH_FULL */ |
105 (chan->ramht->gpuobj->cinst >> 4));
100 106
101 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~pmc_e); 107 dev_priv->engine.instmem.flush(dev);
102 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | pmc_e);
103}
104 108
105static void 109 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
106nv50_fifo_init_intr(struct drm_device *dev) 110 nv_wr32(dev, 0x002600 + (chan->id * 4), 0x80000000 | instance);
107{ 111 nv50_fifo_playlist_update(dev);
108 NV_DEBUG(dev, "\n"); 112 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
109 113
110 nouveau_irq_register(dev, 8, nv04_fifo_isr); 114error:
111 nv_wr32(dev, NV03_PFIFO_INTR_0, 0xFFFFFFFF); 115 if (ret)
112 nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF); 116 priv->base.base.context_del(chan, engine);
117 return ret;
113} 118}
114 119
115static void 120static bool
116nv50_fifo_init_context_table(struct drm_device *dev) 121nv50_fifo_kickoff(struct nouveau_channel *chan)
117{ 122{
118 struct drm_nouveau_private *dev_priv = dev->dev_private; 123 struct drm_device *dev = chan->dev;
119 int i; 124 bool done = true;
120 125 u32 me;
121 NV_DEBUG(dev, "\n"); 126
122 127 /* HW bug workaround:
123 for (i = 0; i < NV50_PFIFO_CTX_TABLE__SIZE; i++) { 128 *
124 if (dev_priv->channels.ptr[i]) 129 * PFIFO will hang forever if the connected engines don't report
125 nv50_fifo_channel_enable(dev, i); 130 * that they've processed the context switch request.
126 else 131 *
127 nv50_fifo_channel_disable(dev, i); 132 * In order for the kickoff to work, we need to ensure all the
133 * connected engines are in a state where they can answer.
134 *
135 * Newer chipsets don't seem to suffer from this issue, and well,
136 * there's also a "ignore these engines" bitmask reg we can use
137 * if we hit the issue there..
138 */
139
140 /* PME: make sure engine is enabled */
141 me = nv_mask(dev, 0x00b860, 0x00000001, 0x00000001);
142
143 /* do the kickoff... */
144 nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12);
145 if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff)) {
146 NV_INFO(dev, "PFIFO: channel %d unload timeout\n", chan->id);
147 done = false;
128 } 148 }
129 149
130 nv50_fifo_playlist_update(dev); 150 /* restore any engine states we changed, and exit */
151 nv_wr32(dev, 0x00b860, me);
152 return done;
131} 153}
132 154
133static void 155static void
134nv50_fifo_init_regs__nv(struct drm_device *dev) 156nv50_fifo_context_del(struct nouveau_channel *chan, int engine)
135{
136 NV_DEBUG(dev, "\n");
137
138 nv_wr32(dev, 0x250c, 0x6f3cfc34);
139}
140
141static void
142nv50_fifo_init_regs(struct drm_device *dev)
143{
144 NV_DEBUG(dev, "\n");
145
146 nv_wr32(dev, 0x2500, 0);
147 nv_wr32(dev, 0x3250, 0);
148 nv_wr32(dev, 0x3220, 0);
149 nv_wr32(dev, 0x3204, 0);
150 nv_wr32(dev, 0x3210, 0);
151 nv_wr32(dev, 0x3270, 0);
152 nv_wr32(dev, 0x2044, 0x01003fff);
153
154 /* Enable dummy channels setup by nv50_instmem.c */
155 nv50_fifo_channel_enable(dev, 0);
156 nv50_fifo_channel_enable(dev, 127);
157}
158
159int
160nv50_fifo_init(struct drm_device *dev)
161{ 157{
158 struct nv50_fifo_chan *fctx = chan->engctx[engine];
159 struct drm_device *dev = chan->dev;
162 struct drm_nouveau_private *dev_priv = dev->dev_private; 160 struct drm_nouveau_private *dev_priv = dev->dev_private;
163 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; 161 unsigned long flags;
164 int ret;
165 162
166 NV_DEBUG(dev, "\n"); 163 /* remove channel from playlist, will context switch if active */
164 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
165 nv_mask(dev, 0x002600 + (chan->id * 4), 0x80000000, 0x00000000);
166 nv50_fifo_playlist_update(dev);
167 167
168 if (pfifo->playlist[0]) { 168 /* tell any engines on this channel to unload their contexts */
169 pfifo->cur_playlist = !pfifo->cur_playlist; 169 nv50_fifo_kickoff(chan);
170 goto just_reset;
171 }
172 170
173 ret = nouveau_gpuobj_new(dev, NULL, 128*4, 0x1000, 171 nv_wr32(dev, 0x002600 + (chan->id * 4), 0x00000000);
174 NVOBJ_FLAG_ZERO_ALLOC, 172 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
175 &pfifo->playlist[0]);
176 if (ret) {
177 NV_ERROR(dev, "error creating playlist 0: %d\n", ret);
178 return ret;
179 }
180 173
181 ret = nouveau_gpuobj_new(dev, NULL, 128*4, 0x1000, 174 /* clean up */
182 NVOBJ_FLAG_ZERO_ALLOC, 175 if (chan->user) {
183 &pfifo->playlist[1]); 176 iounmap(chan->user);
184 if (ret) { 177 chan->user = NULL;
185 nouveau_gpuobj_ref(NULL, &pfifo->playlist[0]);
186 NV_ERROR(dev, "error creating playlist 1: %d\n", ret);
187 return ret;
188 } 178 }
189 179
190just_reset: 180 atomic_dec(&chan->vm->engref[engine]);
191 nv50_fifo_init_reset(dev); 181 chan->engctx[engine] = NULL;
192 nv50_fifo_init_intr(dev); 182 kfree(fctx);
193 nv50_fifo_init_context_table(dev);
194 nv50_fifo_init_regs__nv(dev);
195 nv50_fifo_init_regs(dev);
196 dev_priv->engine.fifo.enable(dev);
197 dev_priv->engine.fifo.reassign(dev, true);
198
199 return 0;
200} 183}
201 184
202void 185static int
203nv50_fifo_takedown(struct drm_device *dev) 186nv50_fifo_init(struct drm_device *dev, int engine)
204{ 187{
205 struct drm_nouveau_private *dev_priv = dev->dev_private; 188 struct drm_nouveau_private *dev_priv = dev->dev_private;
206 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; 189 u32 instance;
190 int i;
207 191
208 NV_DEBUG(dev, "\n"); 192 nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
193 nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
194 nv_wr32(dev, 0x00250c, 0x6f3cfc34);
195 nv_wr32(dev, 0x002044, 0x01003fff);
209 196
210 if (!pfifo->playlist[0]) 197 nv_wr32(dev, 0x002100, 0xffffffff);
211 return; 198 nv_wr32(dev, 0x002140, 0xffffffff);
212 199
213 nv_wr32(dev, 0x2140, 0x00000000); 200 for (i = 0; i < 128; i++) {
214 nouveau_irq_unregister(dev, 8); 201 struct nouveau_channel *chan = dev_priv->channels.ptr[i];
202 if (chan && chan->engctx[engine])
203 instance = 0x80000000 | chan->ramin->vinst >> 12;
204 else
205 instance = 0x00000000;
206 nv_wr32(dev, 0x002600 + (i * 4), instance);
207 }
215 208
216 nouveau_gpuobj_ref(NULL, &pfifo->playlist[0]); 209 nv50_fifo_playlist_update(dev);
217 nouveau_gpuobj_ref(NULL, &pfifo->playlist[1]);
218}
219 210
220int 211 nv_wr32(dev, 0x003200, 1);
221nv50_fifo_channel_id(struct drm_device *dev) 212 nv_wr32(dev, 0x003250, 1);
222{ 213 nv_wr32(dev, 0x002500, 1);
223 return nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & 214 return 0;
224 NV50_PFIFO_CACHE1_PUSH1_CHID_MASK;
225} 215}
226 216
227int 217static int
228nv50_fifo_create_context(struct nouveau_channel *chan) 218nv50_fifo_fini(struct drm_device *dev, int engine, bool suspend)
229{ 219{
230 struct drm_device *dev = chan->dev;
231 struct drm_nouveau_private *dev_priv = dev->dev_private; 220 struct drm_nouveau_private *dev_priv = dev->dev_private;
232 struct nouveau_gpuobj *ramfc = NULL; 221 struct nv50_fifo_priv *priv = nv_engine(dev, engine);
233 uint64_t ib_offset = chan->pushbuf_base + chan->dma.ib_base * 4; 222 int i;
234 unsigned long flags;
235 int ret;
236
237 NV_DEBUG(dev, "ch%d\n", chan->id);
238
239 if (dev_priv->chipset == 0x50) {
240 ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst,
241 chan->ramin->vinst, 0x100,
242 NVOBJ_FLAG_ZERO_ALLOC |
243 NVOBJ_FLAG_ZERO_FREE,
244 &chan->ramfc);
245 if (ret)
246 return ret;
247
248 ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst + 0x0400,
249 chan->ramin->vinst + 0x0400,
250 4096, 0, &chan->cache);
251 if (ret)
252 return ret;
253 } else {
254 ret = nouveau_gpuobj_new(dev, chan, 0x100, 256,
255 NVOBJ_FLAG_ZERO_ALLOC |
256 NVOBJ_FLAG_ZERO_FREE, &chan->ramfc);
257 if (ret)
258 return ret;
259
260 ret = nouveau_gpuobj_new(dev, chan, 4096, 1024,
261 0, &chan->cache);
262 if (ret)
263 return ret;
264 }
265 ramfc = chan->ramfc;
266 223
267 chan->user = ioremap(pci_resource_start(dev->pdev, 0) + 224 /* set playlist length to zero, fifo will unload context */
268 NV50_USER(chan->id), PAGE_SIZE); 225 nv_wr32(dev, 0x0032ec, 0);
269 if (!chan->user)
270 return -ENOMEM;
271 226
272 spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 227 /* tell all connected engines to unload their contexts */
273 228 for (i = 0; i < priv->base.channels; i++) {
274 nv_wo32(ramfc, 0x48, chan->pushbuf->cinst >> 4); 229 struct nouveau_channel *chan = dev_priv->channels.ptr[i];
275 nv_wo32(ramfc, 0x80, ((chan->ramht->bits - 9) << 27) | 230 if (chan && !nv50_fifo_kickoff(chan))
276 (4 << 24) /* SEARCH_FULL */ | 231 return -EBUSY;
277 (chan->ramht->gpuobj->cinst >> 4));
278 nv_wo32(ramfc, 0x44, 0x01003fff);
279 nv_wo32(ramfc, 0x60, 0x7fffffff);
280 nv_wo32(ramfc, 0x40, 0x00000000);
281 nv_wo32(ramfc, 0x7c, 0x30000001);
282 nv_wo32(ramfc, 0x78, 0x00000000);
283 nv_wo32(ramfc, 0x3c, 0x403f6078);
284 nv_wo32(ramfc, 0x50, lower_32_bits(ib_offset));
285 nv_wo32(ramfc, 0x54, upper_32_bits(ib_offset) |
286 drm_order(chan->dma.ib_max + 1) << 16);
287
288 if (dev_priv->chipset != 0x50) {
289 nv_wo32(chan->ramin, 0, chan->id);
290 nv_wo32(chan->ramin, 4, chan->ramfc->vinst >> 8);
291
292 nv_wo32(ramfc, 0x88, chan->cache->vinst >> 10);
293 nv_wo32(ramfc, 0x98, chan->ramin->vinst >> 12);
294 } 232 }
295 233
296 dev_priv->engine.instmem.flush(dev); 234 nv_wr32(dev, 0x002140, 0);
297
298 nv50_fifo_channel_enable(dev, chan->id);
299 nv50_fifo_playlist_update(dev);
300 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
301 return 0; 235 return 0;
302} 236}
303 237
304void 238void
305nv50_fifo_destroy_context(struct nouveau_channel *chan) 239nv50_fifo_tlb_flush(struct drm_device *dev, int engine)
306{ 240{
307 struct drm_device *dev = chan->dev; 241 nv50_vm_flush_engine(dev, 5);
308 struct drm_nouveau_private *dev_priv = dev->dev_private;
309 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
310 struct nouveau_gpuobj *ramfc = NULL;
311 unsigned long flags;
312
313 NV_DEBUG(dev, "ch%d\n", chan->id);
314
315 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
316 pfifo->reassign(dev, false);
317
318 /* Unload the context if it's the currently active one */
319 if (pfifo->channel_id(dev) == chan->id) {
320 pfifo->disable(dev);
321 pfifo->unload_context(dev);
322 pfifo->enable(dev);
323 }
324
325 /* This will ensure the channel is seen as disabled. */
326 nouveau_gpuobj_ref(chan->ramfc, &ramfc);
327 nouveau_gpuobj_ref(NULL, &chan->ramfc);
328 nv50_fifo_channel_disable(dev, chan->id);
329
330 /* Dummy channel, also used on ch 127 */
331 if (chan->id == 0)
332 nv50_fifo_channel_disable(dev, 127);
333 nv50_fifo_playlist_update(dev);
334
335 pfifo->reassign(dev, true);
336 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
337
338 /* Free the channel resources */
339 if (chan->user) {
340 iounmap(chan->user);
341 chan->user = NULL;
342 }
343 nouveau_gpuobj_ref(NULL, &ramfc);
344 nouveau_gpuobj_ref(NULL, &chan->cache);
345} 242}
346 243
347int 244void
348nv50_fifo_load_context(struct nouveau_channel *chan) 245nv50_fifo_destroy(struct drm_device *dev, int engine)
349{ 246{
350 struct drm_device *dev = chan->dev;
351 struct drm_nouveau_private *dev_priv = dev->dev_private; 247 struct drm_nouveau_private *dev_priv = dev->dev_private;
352 struct nouveau_gpuobj *ramfc = chan->ramfc; 248 struct nv50_fifo_priv *priv = nv_engine(dev, engine);
353 struct nouveau_gpuobj *cache = chan->cache;
354 int ptr, cnt;
355
356 NV_DEBUG(dev, "ch%d\n", chan->id);
357
358 nv_wr32(dev, 0x3330, nv_ro32(ramfc, 0x00));
359 nv_wr32(dev, 0x3334, nv_ro32(ramfc, 0x04));
360 nv_wr32(dev, 0x3240, nv_ro32(ramfc, 0x08));
361 nv_wr32(dev, 0x3320, nv_ro32(ramfc, 0x0c));
362 nv_wr32(dev, 0x3244, nv_ro32(ramfc, 0x10));
363 nv_wr32(dev, 0x3328, nv_ro32(ramfc, 0x14));
364 nv_wr32(dev, 0x3368, nv_ro32(ramfc, 0x18));
365 nv_wr32(dev, 0x336c, nv_ro32(ramfc, 0x1c));
366 nv_wr32(dev, 0x3370, nv_ro32(ramfc, 0x20));
367 nv_wr32(dev, 0x3374, nv_ro32(ramfc, 0x24));
368 nv_wr32(dev, 0x3378, nv_ro32(ramfc, 0x28));
369 nv_wr32(dev, 0x337c, nv_ro32(ramfc, 0x2c));
370 nv_wr32(dev, 0x3228, nv_ro32(ramfc, 0x30));
371 nv_wr32(dev, 0x3364, nv_ro32(ramfc, 0x34));
372 nv_wr32(dev, 0x32a0, nv_ro32(ramfc, 0x38));
373 nv_wr32(dev, 0x3224, nv_ro32(ramfc, 0x3c));
374 nv_wr32(dev, 0x324c, nv_ro32(ramfc, 0x40));
375 nv_wr32(dev, 0x2044, nv_ro32(ramfc, 0x44));
376 nv_wr32(dev, 0x322c, nv_ro32(ramfc, 0x48));
377 nv_wr32(dev, 0x3234, nv_ro32(ramfc, 0x4c));
378 nv_wr32(dev, 0x3340, nv_ro32(ramfc, 0x50));
379 nv_wr32(dev, 0x3344, nv_ro32(ramfc, 0x54));
380 nv_wr32(dev, 0x3280, nv_ro32(ramfc, 0x58));
381 nv_wr32(dev, 0x3254, nv_ro32(ramfc, 0x5c));
382 nv_wr32(dev, 0x3260, nv_ro32(ramfc, 0x60));
383 nv_wr32(dev, 0x3264, nv_ro32(ramfc, 0x64));
384 nv_wr32(dev, 0x3268, nv_ro32(ramfc, 0x68));
385 nv_wr32(dev, 0x326c, nv_ro32(ramfc, 0x6c));
386 nv_wr32(dev, 0x32e4, nv_ro32(ramfc, 0x70));
387 nv_wr32(dev, 0x3248, nv_ro32(ramfc, 0x74));
388 nv_wr32(dev, 0x2088, nv_ro32(ramfc, 0x78));
389 nv_wr32(dev, 0x2058, nv_ro32(ramfc, 0x7c));
390 nv_wr32(dev, 0x2210, nv_ro32(ramfc, 0x80));
391
392 cnt = nv_ro32(ramfc, 0x84);
393 for (ptr = 0; ptr < cnt; ptr++) {
394 nv_wr32(dev, NV40_PFIFO_CACHE1_METHOD(ptr),
395 nv_ro32(cache, (ptr * 8) + 0));
396 nv_wr32(dev, NV40_PFIFO_CACHE1_DATA(ptr),
397 nv_ro32(cache, (ptr * 8) + 4));
398 }
399 nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, cnt << 2);
400 nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
401
402 /* guessing that all the 0x34xx regs aren't on NV50 */
403 if (dev_priv->chipset != 0x50) {
404 nv_wr32(dev, 0x340c, nv_ro32(ramfc, 0x88));
405 nv_wr32(dev, 0x3400, nv_ro32(ramfc, 0x8c));
406 nv_wr32(dev, 0x3404, nv_ro32(ramfc, 0x90));
407 nv_wr32(dev, 0x3408, nv_ro32(ramfc, 0x94));
408 nv_wr32(dev, 0x3410, nv_ro32(ramfc, 0x98));
409 }
410 249
411 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, chan->id | (1<<16)); 250 nouveau_irq_unregister(dev, 8);
412 return 0; 251
252 nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
253 nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
254
255 dev_priv->eng[engine] = NULL;
256 kfree(priv);
413} 257}
414 258
415int 259int
416nv50_fifo_unload_context(struct drm_device *dev) 260nv50_fifo_create(struct drm_device *dev)
417{ 261{
418 struct drm_nouveau_private *dev_priv = dev->dev_private; 262 struct drm_nouveau_private *dev_priv = dev->dev_private;
419 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; 263 struct nv50_fifo_priv *priv;
420 struct nouveau_gpuobj *ramfc, *cache; 264 int ret;
421 struct nouveau_channel *chan = NULL;
422 int chid, get, put, ptr;
423
424 NV_DEBUG(dev, "\n");
425
426 chid = pfifo->channel_id(dev);
427 if (chid < 1 || chid >= dev_priv->engine.fifo.channels - 1)
428 return 0;
429
430 chan = dev_priv->channels.ptr[chid];
431 if (!chan) {
432 NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
433 return -EINVAL;
434 }
435 NV_DEBUG(dev, "ch%d\n", chan->id);
436 ramfc = chan->ramfc;
437 cache = chan->cache;
438
439 nv_wo32(ramfc, 0x00, nv_rd32(dev, 0x3330));
440 nv_wo32(ramfc, 0x04, nv_rd32(dev, 0x3334));
441 nv_wo32(ramfc, 0x08, nv_rd32(dev, 0x3240));
442 nv_wo32(ramfc, 0x0c, nv_rd32(dev, 0x3320));
443 nv_wo32(ramfc, 0x10, nv_rd32(dev, 0x3244));
444 nv_wo32(ramfc, 0x14, nv_rd32(dev, 0x3328));
445 nv_wo32(ramfc, 0x18, nv_rd32(dev, 0x3368));
446 nv_wo32(ramfc, 0x1c, nv_rd32(dev, 0x336c));
447 nv_wo32(ramfc, 0x20, nv_rd32(dev, 0x3370));
448 nv_wo32(ramfc, 0x24, nv_rd32(dev, 0x3374));
449 nv_wo32(ramfc, 0x28, nv_rd32(dev, 0x3378));
450 nv_wo32(ramfc, 0x2c, nv_rd32(dev, 0x337c));
451 nv_wo32(ramfc, 0x30, nv_rd32(dev, 0x3228));
452 nv_wo32(ramfc, 0x34, nv_rd32(dev, 0x3364));
453 nv_wo32(ramfc, 0x38, nv_rd32(dev, 0x32a0));
454 nv_wo32(ramfc, 0x3c, nv_rd32(dev, 0x3224));
455 nv_wo32(ramfc, 0x40, nv_rd32(dev, 0x324c));
456 nv_wo32(ramfc, 0x44, nv_rd32(dev, 0x2044));
457 nv_wo32(ramfc, 0x48, nv_rd32(dev, 0x322c));
458 nv_wo32(ramfc, 0x4c, nv_rd32(dev, 0x3234));
459 nv_wo32(ramfc, 0x50, nv_rd32(dev, 0x3340));
460 nv_wo32(ramfc, 0x54, nv_rd32(dev, 0x3344));
461 nv_wo32(ramfc, 0x58, nv_rd32(dev, 0x3280));
462 nv_wo32(ramfc, 0x5c, nv_rd32(dev, 0x3254));
463 nv_wo32(ramfc, 0x60, nv_rd32(dev, 0x3260));
464 nv_wo32(ramfc, 0x64, nv_rd32(dev, 0x3264));
465 nv_wo32(ramfc, 0x68, nv_rd32(dev, 0x3268));
466 nv_wo32(ramfc, 0x6c, nv_rd32(dev, 0x326c));
467 nv_wo32(ramfc, 0x70, nv_rd32(dev, 0x32e4));
468 nv_wo32(ramfc, 0x74, nv_rd32(dev, 0x3248));
469 nv_wo32(ramfc, 0x78, nv_rd32(dev, 0x2088));
470 nv_wo32(ramfc, 0x7c, nv_rd32(dev, 0x2058));
471 nv_wo32(ramfc, 0x80, nv_rd32(dev, 0x2210));
472
473 put = (nv_rd32(dev, NV03_PFIFO_CACHE1_PUT) & 0x7ff) >> 2;
474 get = (nv_rd32(dev, NV03_PFIFO_CACHE1_GET) & 0x7ff) >> 2;
475 ptr = 0;
476 while (put != get) {
477 nv_wo32(cache, ptr + 0,
478 nv_rd32(dev, NV40_PFIFO_CACHE1_METHOD(get)));
479 nv_wo32(cache, ptr + 4,
480 nv_rd32(dev, NV40_PFIFO_CACHE1_DATA(get)));
481 get = (get + 1) & 0x1ff;
482 ptr += 8;
483 }
484
485 /* guessing that all the 0x34xx regs aren't on NV50 */
486 if (dev_priv->chipset != 0x50) {
487 nv_wo32(ramfc, 0x84, ptr >> 3);
488 nv_wo32(ramfc, 0x88, nv_rd32(dev, 0x340c));
489 nv_wo32(ramfc, 0x8c, nv_rd32(dev, 0x3400));
490 nv_wo32(ramfc, 0x90, nv_rd32(dev, 0x3404));
491 nv_wo32(ramfc, 0x94, nv_rd32(dev, 0x3408));
492 nv_wo32(ramfc, 0x98, nv_rd32(dev, 0x3410));
493 }
494 265
495 dev_priv->engine.instmem.flush(dev); 266 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
267 if (!priv)
268 return -ENOMEM;
496 269
497 /*XXX: probably reload ch127 (NULL) state back too */ 270 priv->base.base.destroy = nv50_fifo_destroy;
498 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, 127); 271 priv->base.base.init = nv50_fifo_init;
499 return 0; 272 priv->base.base.fini = nv50_fifo_fini;
500} 273 priv->base.base.context_new = nv50_fifo_context_new;
274 priv->base.base.context_del = nv50_fifo_context_del;
275 priv->base.base.tlb_flush = nv50_fifo_tlb_flush;
276 priv->base.channels = 127;
277 dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
278
279 ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000,
280 NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[0]);
281 if (ret)
282 goto error;
283
284 ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000,
285 NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[1]);
286 if (ret)
287 goto error;
501 288
502void 289 nouveau_irq_register(dev, 8, nv04_fifo_isr);
503nv50_fifo_tlb_flush(struct drm_device *dev) 290error:
504{ 291 if (ret)
505 nv50_vm_flush_engine(dev, 5); 292 priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO);
293 return ret;
506} 294}
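
The new nv50_fifo.c is organised around the playlist: rather than enabling and disabling channels one register at a time, the driver scans the channel table at 0x002600 for entries with bit 31 set, packs the runnable channel ids into whichever of the two playlist buffers the hardware is not currently reading, and repoints the hardware. A user-space model of nv50_fifo_playlist_update() with the registers reduced to plain fields (the double-buffer bookkeeping mirrors the code above; names and everything else are illustrative):

#include <stdint.h>

#define NV50_CHANNELS 127

struct fifo_model {
	uint32_t chan_table[NV50_CHANNELS];	/* bit 31 = scheduled, like 0x002600 + i*4 */
	uint32_t playlist[2][NV50_CHANNELS];	/* double-buffered runlist */
	int cur_playlist;
	uint32_t *hw_playlist;			/* stands in for reg 0x0032f4 */
	uint32_t hw_playlist_len;		/* stands in for reg 0x0032ec */
};

static void playlist_update(struct fifo_model *f)
{
	/* pick the buffer the hardware is not pointed at, then flip */
	uint32_t *cur = f->playlist[f->cur_playlist];
	int i, p;

	f->cur_playlist = !f->cur_playlist;

	for (i = 0, p = 0; i < NV50_CHANNELS; i++) {
		if (f->chan_table[i] & 0x80000000)
			cur[p++] = i;
	}

	f->hw_playlist = cur;		/* the driver writes cur->vinst >> 12 */
	f->hw_playlist_len = p;
}

Keeping two buffers means the hardware can keep walking the old runlist while the new one is being filled, which is why context_new/context_del only need to flip the enable bit and call the update.
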
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 33d5711a918d..d9cc2f2638d6 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -27,8 +27,8 @@
27#include "drmP.h" 27#include "drmP.h"
28#include "drm.h" 28#include "drm.h"
29#include "nouveau_drv.h" 29#include "nouveau_drv.h"
30#include "nouveau_fifo.h"
30#include "nouveau_ramht.h" 31#include "nouveau_ramht.h"
31#include "nouveau_grctx.h"
32#include "nouveau_dma.h" 32#include "nouveau_dma.h"
33#include "nouveau_vm.h" 33#include "nouveau_vm.h"
34#include "nv50_evo.h" 34#include "nv50_evo.h"
@@ -40,86 +40,6 @@ struct nv50_graph_engine {
40 40 u32 grctx_size;
41 41};
42 42
43static void
44nv50_graph_fifo_access(struct drm_device *dev, bool enabled)
45{
46 const uint32_t mask = 0x00010001;
47
48 if (enabled)
49 nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | mask);
50 else
51 nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) & ~mask);
52}
53
54static struct nouveau_channel *
55nv50_graph_channel(struct drm_device *dev)
56{
57 struct drm_nouveau_private *dev_priv = dev->dev_private;
58 uint32_t inst;
59 int i;
60
61 /* Be sure we're not in the middle of a context switch or bad things
62 * will happen, such as unloading the wrong pgraph context.
63 */
64 if (!nv_wait(dev, 0x400300, 0x00000001, 0x00000000))
65 NV_ERROR(dev, "Ctxprog is still running\n");
66
67 inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
68 if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
69 return NULL;
70 inst = (inst & NV50_PGRAPH_CTXCTL_CUR_INSTANCE) << 12;
71
72 for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
73 struct nouveau_channel *chan = dev_priv->channels.ptr[i];
74
75 if (chan && chan->ramin && chan->ramin->vinst == inst)
76 return chan;
77 }
78
79 return NULL;
80}
81
82static int
83nv50_graph_do_load_context(struct drm_device *dev, uint32_t inst)
84{
85 uint32_t fifo = nv_rd32(dev, 0x400500);
86
87 nv_wr32(dev, 0x400500, fifo & ~1);
88 nv_wr32(dev, 0x400784, inst);
89 nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x40);
90 nv_wr32(dev, 0x400320, nv_rd32(dev, 0x400320) | 0x11);
91 nv_wr32(dev, 0x400040, 0xffffffff);
92 (void)nv_rd32(dev, 0x400040);
93 nv_wr32(dev, 0x400040, 0x00000000);
94 nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 1);
95
96 if (nouveau_wait_for_idle(dev))
97 nv_wr32(dev, 0x40032c, inst | (1<<31));
98 nv_wr32(dev, 0x400500, fifo);
99
100 return 0;
101}
102
103static int
104nv50_graph_unload_context(struct drm_device *dev)
105{
106 uint32_t inst;
107
108 inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
109 if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
110 return 0;
111 inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE;
112
113 nouveau_wait_for_idle(dev);
114 nv_wr32(dev, 0x400784, inst);
115 nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20);
116 nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01);
117 nouveau_wait_for_idle(dev);
118
119 nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst);
120 return 0;
121}
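
Both removed helpers key off NV50_PGRAPH_CTXCTL_CUR: a "loaded" flag in the top bit plus the current context's instance address stored in units of 4 KiB. A hedged sketch of that decode (the mask values are placeholders; the code above only relies on their roles):

#include <stdint.h>
#include <stdbool.h>

#define CTXCTL_CUR_LOADED   0x80000000u /* placeholder for the LOADED bit */
#define CTXCTL_CUR_INSTANCE 0x000fffffu /* placeholder for the INSTANCE field */

/* if a context is loaded, recover the byte address of its instance memory */
static bool pgraph_current_instance(uint32_t ctxctl_cur, uint64_t *inst)
{
        if (!(ctxctl_cur & CTXCTL_CUR_LOADED))
                return false;
        *inst = (uint64_t)(ctxctl_cur & CTXCTL_CUR_INSTANCE) << 12;
        return true;
}
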
122
123 43static int
124 44nv50_graph_init(struct drm_device *dev, int engine)
125 45{
@@ -211,12 +131,6 @@ nv50_graph_init(struct drm_device *dev, int engine)
211 131static int
212 132nv50_graph_fini(struct drm_device *dev, int engine, bool suspend)
213 133{
214 nv_mask(dev, 0x400500, 0x00010001, 0x00000000);
215 if (!nv_wait(dev, 0x400700, ~0, 0) && suspend) {
216 nv_mask(dev, 0x400500, 0x00010001, 0x00010001);
217 return -EBUSY;
218 }
219 nv50_graph_unload_context(dev);
220 134 nv_wr32(dev, 0x40013c, 0x00000000);
221 135 return 0;
222 136}
@@ -229,7 +143,6 @@ nv50_graph_context_new(struct nouveau_channel *chan, int engine)
229 143 struct nouveau_gpuobj *ramin = chan->ramin;
230 144 struct nouveau_gpuobj *grctx = NULL;
231 145 struct nv50_graph_engine *pgraph = nv_engine(dev, engine);
232 struct nouveau_grctx ctx = {};
233 146 int hdr, ret;
234 147
235 148 NV_DEBUG(dev, "ch%d\n", chan->id);
@@ -248,11 +161,7 @@ nv50_graph_context_new(struct nouveau_channel *chan, int engine)
248 161 nv_wo32(ramin, hdr + 0x10, 0);
249 162 nv_wo32(ramin, hdr + 0x14, 0x00010000);
250 163
251 ctx.dev = chan->dev;
252 ctx.mode = NOUVEAU_GRCTX_VALS;
253 ctx.data = grctx;
254 nv50_grctx_init(&ctx);
255
164 nv50_grctx_fill(dev, grctx);
256 165 nv_wo32(grctx, 0x00000, chan->ramin->vinst >> 12);
257 166
258 167 dev_priv->engine.instmem.flush(dev);
@@ -268,33 +177,14 @@ nv50_graph_context_del(struct nouveau_channel *chan, int engine)
268 177 struct nouveau_gpuobj *grctx = chan->engctx[engine];
269 178 struct drm_device *dev = chan->dev;
270 179 struct drm_nouveau_private *dev_priv = dev->dev_private;
271 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
272 180 int i, hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
273 unsigned long flags;
274
275 NV_DEBUG(dev, "ch%d\n", chan->id);
276
277 if (!chan->ramin)
278 return;
279
280 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
281 pfifo->reassign(dev, false);
282 nv50_graph_fifo_access(dev, false);
283
284 if (nv50_graph_channel(dev) == chan)
285 nv50_graph_unload_context(dev);
286 181
287 182 for (i = hdr; i < hdr + 24; i += 4)
288 183 nv_wo32(chan->ramin, i, 0);
289 184 dev_priv->engine.instmem.flush(dev);
290 185
291 nv50_graph_fifo_access(dev, true);
292 pfifo->reassign(dev, true);
293 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
294
295 nouveau_gpuobj_ref(NULL, &grctx);
296
297 186 atomic_dec(&chan->vm->engref[engine]);
187 nouveau_gpuobj_ref(NULL, &grctx);
298 188 chan->engctx[engine] = NULL;
299 189}
300 190
@@ -325,85 +215,6 @@ nv50_graph_object_new(struct nouveau_channel *chan, int engine,
325 215}
326 216
327 217static void
328nv50_graph_context_switch(struct drm_device *dev)
329{
330 uint32_t inst;
331
332 nv50_graph_unload_context(dev);
333
334 inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_NEXT);
335 inst &= NV50_PGRAPH_CTXCTL_NEXT_INSTANCE;
336 nv50_graph_do_load_context(dev, inst);
337
338 nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
339 NV40_PGRAPH_INTR_EN) | NV_PGRAPH_INTR_CONTEXT_SWITCH);
340}
341
342static int
343nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan,
344 u32 class, u32 mthd, u32 data)
345{
346 struct nouveau_gpuobj *gpuobj;
347
348 gpuobj = nouveau_ramht_find(chan, data);
349 if (!gpuobj)
350 return -ENOENT;
351
352 if (nouveau_notifier_offset(gpuobj, NULL))
353 return -EINVAL;
354
355 chan->nvsw.vblsem = gpuobj;
356 chan->nvsw.vblsem_offset = ~0;
357 return 0;
358}
359
360static int
361nv50_graph_nvsw_vblsem_offset(struct nouveau_channel *chan,
362 u32 class, u32 mthd, u32 data)
363{
364 if (nouveau_notifier_offset(chan->nvsw.vblsem, &data))
365 return -ERANGE;
366
367 chan->nvsw.vblsem_offset = data >> 2;
368 return 0;
369}
370
371static int
372nv50_graph_nvsw_vblsem_release_val(struct nouveau_channel *chan,
373 u32 class, u32 mthd, u32 data)
374{
375 chan->nvsw.vblsem_rval = data;
376 return 0;
377}
378
379static int
380nv50_graph_nvsw_vblsem_release(struct nouveau_channel *chan,
381 u32 class, u32 mthd, u32 data)
382{
383 struct drm_device *dev = chan->dev;
384 struct drm_nouveau_private *dev_priv = dev->dev_private;
385
386 if (!chan->nvsw.vblsem || chan->nvsw.vblsem_offset == ~0 || data > 1)
387 return -EINVAL;
388
389 drm_vblank_get(dev, data);
390
391 chan->nvsw.vblsem_head = data;
392 list_add(&chan->nvsw.vbl_wait, &dev_priv->vbl_waiting);
393
394 return 0;
395}
396
397static int
398nv50_graph_nvsw_mthd_page_flip(struct nouveau_channel *chan,
399 u32 class, u32 mthd, u32 data)
400{
401 nouveau_finish_page_flip(chan, NULL);
402 return 0;
403}
404
405
406 217static void
407 218nv50_graph_tlb_flush(struct drm_device *dev, int engine)
408 219{
409 220 nv50_vm_flush_engine(dev, 0);
@@ -514,6 +325,7 @@ struct nouveau_enum nv50_data_error_names[] = {
514 325 { 0x0000001f, "RT_BPP128_WITH_MS8", NULL },
515 326 { 0x00000021, "Z_OUT_OF_BOUNDS", NULL },
516 327 { 0x00000023, "XY_OUT_OF_BOUNDS", NULL },
328 { 0x00000024, "VP_ZERO_INPUTS", NULL },
517 329 { 0x00000027, "CP_MORE_PARAMS_THAN_SHARED", NULL },
518 330 { 0x00000028, "CP_NO_REG_SPACE_STRIPED", NULL },
519 331 { 0x00000029, "CP_NO_REG_SPACE_PACKED", NULL },
@@ -900,13 +712,14 @@ nv50_pgraph_trap_handler(struct drm_device *dev, u32 display, u64 inst, u32 chid
900 712int
901 713nv50_graph_isr_chid(struct drm_device *dev, u64 inst)
902 714{
715 struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
903 716 struct drm_nouveau_private *dev_priv = dev->dev_private;
904 717 struct nouveau_channel *chan;
905 718 unsigned long flags;
906 719 int i;
907 720
908 721 spin_lock_irqsave(&dev_priv->channels.lock, flags);
909 for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
722 for (i = 0; i < pfifo->channels; i++) {
910 723 chan = dev_priv->channels.ptr[i];
911 724 if (!chan || !chan->ramin)
912 725 continue;
@@ -939,15 +752,6 @@ nv50_graph_isr(struct drm_device *dev)
939 752 show &= ~0x00000010;
940 753 }
941 754
942 if (stat & 0x00001000) {
943 nv_wr32(dev, 0x400500, 0x00000000);
944 nv_wr32(dev, 0x400100, 0x00001000);
945 nv_mask(dev, 0x40013c, 0x00001000, 0x00000000);
946 nv50_graph_context_switch(dev);
947 stat &= ~0x00001000;
948 show &= ~0x00001000;
949 }
950
951 755 show = (show && nouveau_ratelimit()) ? show : 0;
952 756
953 757 if (show & 0x00100000) {
@@ -996,28 +800,21 @@ nv50_graph_create(struct drm_device *dev)
996 800{
997 801 struct drm_nouveau_private *dev_priv = dev->dev_private;
998 802 struct nv50_graph_engine *pgraph;
999 struct nouveau_grctx ctx = {};
1000 803 int ret;
1001 804
1002 805 pgraph = kzalloc(sizeof(*pgraph),GFP_KERNEL);
1003 806 if (!pgraph)
1004 807 return -ENOMEM;
1005 808
1006 ctx.dev = dev;
1007 ctx.mode = NOUVEAU_GRCTX_PROG;
1008 ctx.data = pgraph->ctxprog;
1009 ctx.ctxprog_max = ARRAY_SIZE(pgraph->ctxprog);
1010
1011 ret = nv50_grctx_init(&ctx);
809 ret = nv50_grctx_init(dev, pgraph->ctxprog, ARRAY_SIZE(pgraph->ctxprog),
810 &pgraph->ctxprog_size,
811 &pgraph->grctx_size);
1012 812 if (ret) {
1013 813 NV_ERROR(dev, "PGRAPH: ctxprog build failed\n");
1014 814 kfree(pgraph);
1015 815 return 0;
1016 816 }
1017 817
1018 pgraph->grctx_size = ctx.ctxvals_pos * 4;
1019 pgraph->ctxprog_size = ctx.ctxprog_len;
1020
1021 818 pgraph->base.destroy = nv50_graph_destroy;
1022 819 pgraph->base.init = nv50_graph_init;
1023 820 pgraph->base.fini = nv50_graph_fini;
@@ -1031,14 +828,6 @@ nv50_graph_create(struct drm_device *dev)
1031 828
1032 829 nouveau_irq_register(dev, 12, nv50_graph_isr);
1033 830
1034 /* NVSW really doesn't live here... */
1035 NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
1036 NVOBJ_MTHD (dev, 0x506e, 0x018c, nv50_graph_nvsw_dma_vblsem);
1037 NVOBJ_MTHD (dev, 0x506e, 0x0400, nv50_graph_nvsw_vblsem_offset);
1038 NVOBJ_MTHD (dev, 0x506e, 0x0404, nv50_graph_nvsw_vblsem_release_val);
1039 NVOBJ_MTHD (dev, 0x506e, 0x0408, nv50_graph_nvsw_vblsem_release);
1040 NVOBJ_MTHD (dev, 0x506e, 0x0500, nv50_graph_nvsw_mthd_page_flip);
1041
1042 831 NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
1043 832 NVOBJ_CLASS(dev, 0x0030, GR); /* null */
1044 833 NVOBJ_CLASS(dev, 0x5039, GR); /* m2mf */
diff --git a/drivers/gpu/drm/nouveau/nv50_grctx.c b/drivers/gpu/drm/nouveau/nv50_grctx.c
index 4b46d6968566..881e22b249fc 100644
--- a/drivers/gpu/drm/nouveau/nv50_grctx.c
+++ b/drivers/gpu/drm/nouveau/nv50_grctx.c
@@ -172,8 +172,8 @@ static void nv50_graph_construct_xfer2(struct nouveau_grctx *ctx);
172 172
173 173/* Main function: construct the ctxprog skeleton, call the other functions. */
174 174
175int
175static int
176nv50_grctx_init(struct nouveau_grctx *ctx)
176nv50_grctx_generate(struct nouveau_grctx *ctx)
177 177{
178 178 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
179 179
@@ -210,7 +210,7 @@ nv50_grctx_init(struct nouveau_grctx *ctx)
210 210 cp_name(ctx, cp_check_load);
211 211 cp_bra (ctx, AUTO_LOAD, PENDING, cp_setup_auto_load);
212 212 cp_bra (ctx, USER_LOAD, PENDING, cp_setup_load);
213 cp_bra (ctx, ALWAYS, TRUE, cp_exit);
213 cp_bra (ctx, ALWAYS, TRUE, cp_prepare_exit);
214 214
215 215 /* setup for context load */
216 216 cp_name(ctx, cp_setup_auto_load);
@@ -277,6 +277,33 @@ nv50_grctx_init(struct nouveau_grctx *ctx)
277 277 return 0;
278 278}
279 279
280void
281nv50_grctx_fill(struct drm_device *dev, struct nouveau_gpuobj *mem)
282{
283 nv50_grctx_generate(&(struct nouveau_grctx) {
284 .dev = dev,
285 .mode = NOUVEAU_GRCTX_VALS,
286 .data = mem,
287 });
288}
289
290int
291nv50_grctx_init(struct drm_device *dev, u32 *data, u32 max, u32 *len, u32 *cnt)
292{
293 struct nouveau_grctx ctx = {
294 .dev = dev,
295 .mode = NOUVEAU_GRCTX_PROG,
296 .data = data,
297 .ctxprog_max = max
298 };
299 int ret;
300
301 ret = nv50_grctx_generate(&ctx);
302 *cnt = ctx.ctxvals_pos * 4;
303 *len = ctx.ctxprog_len;
304 return ret;
305}
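
With the generate/fill split above, a PROG-mode caller only needs the two sizes the wrapper returns: the ctxprog length and the per-channel ctxvals size. A sketch of a hypothetical caller (buffer capacity is a placeholder; u32 is spelled uint32_t to keep the sketch standalone):

#include <stdint.h>

struct drm_device;

/* prototype of the wrapper defined above */
int nv50_grctx_init(struct drm_device *dev, uint32_t *data, uint32_t max,
                    uint32_t *len, uint32_t *cnt);

static int build_ctxprog(struct drm_device *dev)
{
        static uint32_t ctxprog[512];           /* placeholder capacity */
        uint32_t ctxprog_len, grctx_size;
        int ret;

        ret = nv50_grctx_init(dev, ctxprog, 512, &ctxprog_len, &grctx_size);
        if (ret)
                return ret;
        /* ctxprog[0..ctxprog_len) is what gets uploaded to PGRAPH;
         * grctx_size bytes must be allocated per channel for the ctxvals */
        return 0;
}
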
306
280 307/*
281 308 * Constructs MMIO part of ctxprog and ctxvals. Just a matter of knowing which
282 309 * registers to save/restore and the default values for them.
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index a7c12c94a5a6..0bba54f11800 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -83,7 +83,7 @@ nv50_channel_new(struct drm_device *dev, u32 size, struct nouveau_vm *vm,
83 83 return ret;
84 84 }
85 85
86 ret = drm_mm_init(&chan->ramin_heap, 0x6000, chan->ramin->size);
86 ret = drm_mm_init(&chan->ramin_heap, 0x6000, chan->ramin->size - 0x6000);
87 87 if (ret) {
88 88 nv50_channel_del(&chan);
89 89 return ret;
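
The one-liner above corrects a start/size mix-up: drm_mm_init() takes a start offset and a size, so reserving the first 0x6000 bytes of RAMIN for fixed structures also requires shrinking the managed size by 0x6000, or the heap hands out addresses past the end of the backing object. In general terms (kernel-style fragment, not a standalone program):

/* drm_mm_init(mm, start, size) manages the range [start, start + size).
 * Carving a 0x6000-byte reserved header out of an object of total_size
 * bytes therefore needs both arguments adjusted together: */
ret = drm_mm_init(&heap, 0x6000, total_size - 0x6000);  /* correct */
/* drm_mm_init(&heap, 0x6000, total_size) would manage addresses up to
 * total_size + 0x6000, i.e. 0x6000 bytes past the end of the object */
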
diff --git a/drivers/gpu/drm/nouveau/nv50_mpeg.c b/drivers/gpu/drm/nouveau/nv50_mpeg.c
index b57a2d180ad2..90e8ed22cfcb 100644
--- a/drivers/gpu/drm/nouveau/nv50_mpeg.c
+++ b/drivers/gpu/drm/nouveau/nv50_mpeg.c
@@ -77,27 +77,13 @@ nv50_mpeg_context_new(struct nouveau_channel *chan, int engine)
77 77static void
78 78nv50_mpeg_context_del(struct nouveau_channel *chan, int engine)
79 79{
80 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
81 80 struct nouveau_gpuobj *ctx = chan->engctx[engine];
82 81 struct drm_device *dev = chan->dev;
83 unsigned long flags;
84 u32 inst, i;
82 int i;
85
86 if (!chan->ramin)
87 return;
88
89 inst = chan->ramin->vinst >> 12;
90 inst |= 0x80000000;
91
92 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
93 nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
94 if (nv_rd32(dev, 0x00b318) == inst)
95 nv_mask(dev, 0x00b318, 0x80000000, 0x00000000);
96 nv_mask(dev, 0x00b32c, 0x00000001, 0x00000001);
97 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
98 83
99 84 for (i = 0x00; i <= 0x14; i += 4)
100 85 nv_wo32(chan->ramin, CTX_PTR(dev, i), 0x00000000);
86
101 87 nouveau_gpuobj_ref(NULL, &ctx);
102 88 chan->engctx[engine] = NULL;
103 89}
@@ -162,7 +148,6 @@ nv50_mpeg_init(struct drm_device *dev, int engine)
162 148static int
163 149nv50_mpeg_fini(struct drm_device *dev, int engine, bool suspend)
164 150{
165 /*XXX: context save for s/r */
166 151 nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
167 152 nv_wr32(dev, 0x00b140, 0x00000000);
168 153 return 0;
diff --git a/drivers/gpu/drm/nouveau/nv50_software.c b/drivers/gpu/drm/nouveau/nv50_software.c
new file mode 100644
index 000000000000..114d2517d4a8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_software.c
@@ -0,0 +1,214 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26
27#include "nouveau_drv.h"
28#include "nouveau_ramht.h"
29#include "nouveau_software.h"
30
31#include "nv50_display.h"
32
33struct nv50_software_priv {
34 struct nouveau_software_priv base;
35};
36
37struct nv50_software_chan {
38 struct nouveau_software_chan base;
39 struct {
40 struct nouveau_gpuobj *object;
41 } vblank;
42};
43
44static int
45mthd_dma_vblsem(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
46{
47 struct nv50_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW];
48 struct nouveau_gpuobj *gpuobj;
49
50 gpuobj = nouveau_ramht_find(chan, data);
51 if (!gpuobj)
52 return -ENOENT;
53
54 if (nouveau_notifier_offset(gpuobj, NULL))
55 return -EINVAL;
56
57 pch->vblank.object = gpuobj;
58 pch->base.vblank.offset = ~0;
59 return 0;
60}
61
62static int
63mthd_vblsem_offset(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
64{
65 struct nv50_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW];
66
67 if (nouveau_notifier_offset(pch->vblank.object, &data))
68 return -ERANGE;
69
70 pch->base.vblank.offset = data >> 2;
71 return 0;
72}
73
74static int
75mthd_vblsem_value(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
76{
77 struct nv50_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW];
78 pch->base.vblank.value = data;
79 return 0;
80}
81
82static int
83mthd_vblsem_release(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
84{
85 struct nv50_software_priv *psw = nv_engine(chan->dev, NVOBJ_ENGINE_SW);
86 struct nv50_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW];
87 struct drm_device *dev = chan->dev;
88
89 if (!pch->vblank.object || pch->base.vblank.offset == ~0 || data > 1)
90 return -EINVAL;
91
92 drm_vblank_get(dev, data);
93
94 pch->base.vblank.head = data;
95 list_add(&pch->base.vblank.list, &psw->base.vblank);
96 return 0;
97}
98
99static int
100mthd_flip(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
101{
102 nouveau_finish_page_flip(chan, NULL);
103 return 0;
104}
105
106static int
107nv50_software_context_new(struct nouveau_channel *chan, int engine)
108{
109 struct nv50_software_priv *psw = nv_engine(chan->dev, NVOBJ_ENGINE_SW);
110 struct nv50_display *pdisp = nv50_display(chan->dev);
111 struct nv50_software_chan *pch;
112 int ret = 0, i;
113
114 pch = kzalloc(sizeof(*pch), GFP_KERNEL);
115 if (!pch)
116 return -ENOMEM;
117
118 nouveau_software_context_new(&pch->base);
119 pch->base.vblank.bo = chan->notifier_bo;
120 chan->engctx[engine] = pch;
121
122 /* dma objects for display sync channel semaphore blocks */
123 for (i = 0; i < chan->dev->mode_config.num_crtc; i++) {
124 struct nv50_display_crtc *dispc = &pdisp->crtc[i];
125 struct nouveau_gpuobj *obj = NULL;
126
127 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
128 dispc->sem.bo->bo.offset, 0x1000,
129 NV_MEM_ACCESS_RW,
130 NV_MEM_TARGET_VRAM, &obj);
131 if (ret)
132 break;
133
134 ret = nouveau_ramht_insert(chan, NvEvoSema0 + i, obj);
135 nouveau_gpuobj_ref(NULL, &obj);
136 }
137
138 if (ret)
139 psw->base.base.context_del(chan, engine);
140 return ret;
141}
142
143static void
144nv50_software_context_del(struct nouveau_channel *chan, int engine)
145{
146 struct nv50_software_chan *pch = chan->engctx[engine];
147 chan->engctx[engine] = NULL;
148 kfree(pch);
149}
150
151static int
152nv50_software_object_new(struct nouveau_channel *chan, int engine,
153 u32 handle, u16 class)
154{
155 struct drm_device *dev = chan->dev;
156 struct nouveau_gpuobj *obj = NULL;
157 int ret;
158
159 ret = nouveau_gpuobj_new(dev, chan, 16, 16, 0, &obj);
160 if (ret)
161 return ret;
162 obj->engine = 0;
163 obj->class = class;
164
165 ret = nouveau_ramht_insert(chan, handle, obj);
166 nouveau_gpuobj_ref(NULL, &obj);
167 return ret;
168}
169
170static int
171nv50_software_init(struct drm_device *dev, int engine)
172{
173 return 0;
174}
175
176static int
177nv50_software_fini(struct drm_device *dev, int engine, bool suspend)
178{
179 return 0;
180}
181
182static void
183nv50_software_destroy(struct drm_device *dev, int engine)
184{
185 struct nv50_software_priv *psw = nv_engine(dev, engine);
186
187 NVOBJ_ENGINE_DEL(dev, SW);
188 kfree(psw);
189}
190
191int
192nv50_software_create(struct drm_device *dev)
193{
194 struct nv50_software_priv *psw = kzalloc(sizeof(*psw), GFP_KERNEL);
195 if (!psw)
196 return -ENOMEM;
197
198 psw->base.base.destroy = nv50_software_destroy;
199 psw->base.base.init = nv50_software_init;
200 psw->base.base.fini = nv50_software_fini;
201 psw->base.base.context_new = nv50_software_context_new;
202 psw->base.base.context_del = nv50_software_context_del;
203 psw->base.base.object_new = nv50_software_object_new;
204 nouveau_software_create(&psw->base);
205
206 NVOBJ_ENGINE_ADD(dev, SW, &psw->base.base);
207 NVOBJ_CLASS(dev, 0x506e, SW);
208 NVOBJ_MTHD (dev, 0x506e, 0x018c, mthd_dma_vblsem);
209 NVOBJ_MTHD (dev, 0x506e, 0x0400, mthd_vblsem_offset);
210 NVOBJ_MTHD (dev, 0x506e, 0x0404, mthd_vblsem_value);
211 NVOBJ_MTHD (dev, 0x506e, 0x0408, mthd_vblsem_release);
212 NVOBJ_MTHD (dev, 0x506e, 0x0500, mthd_flip);
213 return 0;
214}
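
The NVOBJ_MTHD() calls above register per-method software handlers for class 0x506e, keyed by (class, method offset) and invoked when PFIFO encounters a method the hardware does not handle. A toy version of such a dispatch table (types and the errno value are illustrative, not the driver's):

#include <stdint.h>
#include <stddef.h>

typedef int (*mthd_fn)(void *chan, uint32_t data);

struct mthd_entry {
        uint16_t oclass;        /* e.g. 0x506e */
        uint16_t mthd;          /* method offset, e.g. 0x0400 */
        mthd_fn func;
};

static int call_mthd(const struct mthd_entry *tbl, size_t n,
                     uint16_t oclass, uint16_t mthd,
                     void *chan, uint32_t data)
{
        size_t i;

        for (i = 0; i < n; i++) {
                if (tbl[i].oclass == oclass && tbl[i].mthd == mthd)
                        return tbl[i].func(chan, data);
        }
        return -22;             /* illustrative -EINVAL: no handler */
}
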
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index 274640212475..a9514eaa74c1 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -242,9 +242,9 @@ nv50_sor_disconnect(struct drm_encoder *encoder)
242 242 NV_ERROR(dev, "no space while disconnecting SOR\n");
243 243 return;
244 244 }
245 BEGIN_RING(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
245 BEGIN_NV04(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
246 246 OUT_RING (evo, 0);
247 BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
247 BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
248 248 OUT_RING (evo, 0);
249 249
250 250 nouveau_hdmi_mode_set(encoder, NULL);
@@ -430,7 +430,7 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
430 430 nv_encoder->crtc = NULL;
431 431 return;
432 432 }
433 BEGIN_RING(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
433 BEGIN_NV04(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
434 434 OUT_RING(evo, mode_ctl);
435 435}
436 436
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c
index 44fbac9c7d93..179bb42a635c 100644
--- a/drivers/gpu/drm/nouveau/nv50_vm.c
+++ b/drivers/gpu/drm/nouveau/nv50_vm.c
@@ -147,7 +147,6 @@ nv50_vm_flush(struct nouveau_vm *vm)
147 147{
148 148 struct drm_nouveau_private *dev_priv = vm->dev->dev_private;
149 149 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
150 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
151 150 int i;
152 151
153 152 pinstmem->flush(vm->dev);
@@ -158,7 +157,6 @@ nv50_vm_flush(struct nouveau_vm *vm)
158 157 return;
159 158 }
160 159
161 pfifo->tlb_flush(vm->dev);
162 160 for (i = 0; i < NVOBJ_ENGINE_NR; i++) {
163 161 if (atomic_read(&vm->engref[i]))
164 162 dev_priv->eng[i]->tlb_flush(vm->dev, i);
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
new file mode 100644
index 000000000000..c2f889b0d340
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -0,0 +1,177 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_dma.h"
28#include "nouveau_fifo.h"
29#include "nouveau_ramht.h"
30#include "nouveau_fence.h"
31
32struct nv84_fence_chan {
33 struct nouveau_fence_chan base;
34};
35
36struct nv84_fence_priv {
37 struct nouveau_fence_priv base;
38 struct nouveau_gpuobj *mem;
39};
40
41static int
42nv84_fence_emit(struct nouveau_fence *fence)
43{
44 struct nouveau_channel *chan = fence->channel;
45 int ret = RING_SPACE(chan, 7);
46 if (ret == 0) {
47 BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
48 OUT_RING (chan, NvSema);
49 BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
50 OUT_RING (chan, upper_32_bits(chan->id * 16));
51 OUT_RING (chan, lower_32_bits(chan->id * 16));
52 OUT_RING (chan, fence->sequence);
53 OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG);
54 FIRE_RING (chan);
55 }
56 return ret;
57}
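
Each channel owns one 16-byte slot in the shared semaphore buffer, so the address programmed into SEMAPHORE_ADDRESS_HIGH/LOW is simply chan->id * 16 split into 32-bit halves (the DMA object is based at the start of that buffer). The slot math on its own, with only the 16-byte stride taken from the code above:

#include <stdint.h>

#define FENCE_SLOT_STRIDE 16    /* one slot per channel, as above */

static void fence_slot_addr(int chid, uint32_t *hi, uint32_t *lo)
{
        uint64_t offset = (uint64_t)chid * FENCE_SLOT_STRIDE;
        *hi = (uint32_t)(offset >> 32);         /* upper_32_bits() */
        *lo = (uint32_t)offset;                 /* lower_32_bits() */
}
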
58
59
60static int
61nv84_fence_sync(struct nouveau_fence *fence,
62 struct nouveau_channel *prev, struct nouveau_channel *chan)
63{
64 int ret = RING_SPACE(chan, 7);
65 if (ret == 0) {
66 BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
67 OUT_RING (chan, NvSema);
68 BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
69 OUT_RING (chan, upper_32_bits(prev->id * 16));
70 OUT_RING (chan, lower_32_bits(prev->id * 16));
71 OUT_RING (chan, fence->sequence);
72 OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL);
73 FIRE_RING (chan);
74 }
75 return ret;
76}
77
78static u32
79nv84_fence_read(struct nouveau_channel *chan)
80{
81 struct nv84_fence_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_FENCE);
82 return nv_ro32(priv->mem, chan->id * 16);
83}
84
85static void
86nv84_fence_context_del(struct nouveau_channel *chan, int engine)
87{
88 struct nv84_fence_chan *fctx = chan->engctx[engine];
89 nouveau_fence_context_del(&fctx->base);
90 chan->engctx[engine] = NULL;
91 kfree(fctx);
92}
93
94static int
95nv84_fence_context_new(struct nouveau_channel *chan, int engine)
96{
97 struct nv84_fence_priv *priv = nv_engine(chan->dev, engine);
98 struct nv84_fence_chan *fctx;
99 struct nouveau_gpuobj *obj;
100 int ret;
101
102 fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
103 if (!fctx)
104 return -ENOMEM;
105
106 nouveau_fence_context_new(&fctx->base);
107
108 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY,
109 priv->mem->vinst, priv->mem->size,
110 NV_MEM_ACCESS_RW,
111 NV_MEM_TARGET_VRAM, &obj);
112 if (ret == 0) {
113 ret = nouveau_ramht_insert(chan, NvSema, obj);
114 nouveau_gpuobj_ref(NULL, &obj);
115 nv_wo32(priv->mem, chan->id * 16, 0x00000000);
116 }
117
118 if (ret)
119 nv84_fence_context_del(chan, engine);
120 return ret;
121}
122
123static int
124nv84_fence_fini(struct drm_device *dev, int engine, bool suspend)
125{
126 return 0;
127}
128
129static int
130nv84_fence_init(struct drm_device *dev, int engine)
131{
132 return 0;
133}
134
135static void
136nv84_fence_destroy(struct drm_device *dev, int engine)
137{
138 struct drm_nouveau_private *dev_priv = dev->dev_private;
139 struct nv84_fence_priv *priv = nv_engine(dev, engine);
140
141 nouveau_gpuobj_ref(NULL, &priv->mem);
142 dev_priv->eng[engine] = NULL;
143 kfree(priv);
144}
145
146int
147nv84_fence_create(struct drm_device *dev)
148{
149 struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
150 struct drm_nouveau_private *dev_priv = dev->dev_private;
151 struct nv84_fence_priv *priv;
152 int ret;
153
154 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
155 if (!priv)
156 return -ENOMEM;
157
158 priv->base.engine.destroy = nv84_fence_destroy;
159 priv->base.engine.init = nv84_fence_init;
160 priv->base.engine.fini = nv84_fence_fini;
161 priv->base.engine.context_new = nv84_fence_context_new;
162 priv->base.engine.context_del = nv84_fence_context_del;
163 priv->base.emit = nv84_fence_emit;
164 priv->base.sync = nv84_fence_sync;
165 priv->base.read = nv84_fence_read;
166 dev_priv->eng[NVOBJ_ENGINE_FENCE] = &priv->base.engine;
167
168 ret = nouveau_gpuobj_new(dev, NULL, 16 * pfifo->channels,
169 0x1000, 0, &priv->mem);
170 if (ret)
171 goto out;
172
173out:
174 if (ret)
175 nv84_fence_destroy(dev, NVOBJ_ENGINE_FENCE);
176 return ret;
177}
diff --git a/drivers/gpu/drm/nouveau/nv84_fifo.c b/drivers/gpu/drm/nouveau/nv84_fifo.c
new file mode 100644
index 000000000000..cc82d799fc3b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv84_fifo.c
@@ -0,0 +1,241 @@
1/*
2 * Copyright (C) 2012 Ben Skeggs.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm.h"
29#include "nouveau_drv.h"
30#include "nouveau_fifo.h"
31#include "nouveau_ramht.h"
32#include "nouveau_vm.h"
33
34struct nv84_fifo_priv {
35 struct nouveau_fifo_priv base;
36 struct nouveau_gpuobj *playlist[2];
37 int cur_playlist;
38};
39
40struct nv84_fifo_chan {
41 struct nouveau_fifo_chan base;
42 struct nouveau_gpuobj *ramfc;
43 struct nouveau_gpuobj *cache;
44};
45
46static int
47nv84_fifo_context_new(struct nouveau_channel *chan, int engine)
48{
49 struct nv84_fifo_priv *priv = nv_engine(chan->dev, engine);
50 struct nv84_fifo_chan *fctx;
51 struct drm_device *dev = chan->dev;
52 struct drm_nouveau_private *dev_priv = dev->dev_private;
53 u64 ib_offset = chan->pushbuf_base + chan->dma.ib_base * 4;
54 u64 instance;
55 unsigned long flags;
56 int ret;
57
58 fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
59 if (!fctx)
60 return -ENOMEM;
61 atomic_inc(&chan->vm->engref[engine]);
62
63 chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
64 NV50_USER(chan->id), PAGE_SIZE);
65 if (!chan->user) {
66 ret = -ENOMEM;
67 goto error;
68 }
69
70 ret = nouveau_gpuobj_new(dev, chan, 256, 256, NVOBJ_FLAG_ZERO_ALLOC |
71 NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc);
72 if (ret)
73 goto error;
74
75 instance = fctx->ramfc->vinst >> 8;
76
77 ret = nouveau_gpuobj_new(dev, chan, 4096, 1024, 0, &fctx->cache);
78 if (ret)
79 goto error;
80
81 nv_wo32(fctx->ramfc, 0x3c, 0x403f6078);
82 nv_wo32(fctx->ramfc, 0x40, 0x00000000);
83 nv_wo32(fctx->ramfc, 0x44, 0x01003fff);
84 nv_wo32(fctx->ramfc, 0x48, chan->pushbuf->cinst >> 4);
85 nv_wo32(fctx->ramfc, 0x50, lower_32_bits(ib_offset));
86 nv_wo32(fctx->ramfc, 0x54, upper_32_bits(ib_offset) |
87 drm_order(chan->dma.ib_max + 1) << 16);
88 nv_wo32(fctx->ramfc, 0x60, 0x7fffffff);
89 nv_wo32(fctx->ramfc, 0x78, 0x00000000);
90 nv_wo32(fctx->ramfc, 0x7c, 0x30000001);
91 nv_wo32(fctx->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
92 (4 << 24) /* SEARCH_FULL */ |
93 (chan->ramht->gpuobj->cinst >> 4));
94 nv_wo32(fctx->ramfc, 0x88, fctx->cache->vinst >> 10);
95 nv_wo32(fctx->ramfc, 0x98, chan->ramin->vinst >> 12);
96
97 nv_wo32(chan->ramin, 0x00, chan->id);
98 nv_wo32(chan->ramin, 0x04, fctx->ramfc->vinst >> 8);
99
100 dev_priv->engine.instmem.flush(dev);
101
102 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
103 nv_wr32(dev, 0x002600 + (chan->id * 4), 0x80000000 | instance);
104 nv50_fifo_playlist_update(dev);
105 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
106
107error:
108 if (ret)
109 priv->base.base.context_del(chan, engine);
110 return ret;
111}
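
RAMFC word 0x54 above packs the upper half of the indirect-buffer address together with log2 of the IB entry count in bits 16 and up; drm_order() is the kernel's log2-of-next-power-of-two helper. A hedged sketch of that packing (helper and field layout as used above, everything else illustrative):

#include <stdint.h>

/* ilog2 of a power-of-two entry count, like drm_order(ib_max + 1);
 * assumes n >= 1 */
static unsigned order(uint32_t n)
{
        unsigned o = 0;
        while ((1u << o) < n)
                o++;
        return o;
}

static uint32_t ramfc_ib_word(uint64_t ib_offset, uint32_t ib_entries)
{
        /* upper 32 bits of the IB address, entry-count order at bit 16 */
        return (uint32_t)(ib_offset >> 32) | (order(ib_entries) << 16);
}
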
112
113static void
114nv84_fifo_context_del(struct nouveau_channel *chan, int engine)
115{
116 struct nv84_fifo_chan *fctx = chan->engctx[engine];
117 struct drm_device *dev = chan->dev;
118 struct drm_nouveau_private *dev_priv = dev->dev_private;
119 unsigned long flags;
120
121 /* remove channel from playlist, will context switch if active */
122 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
123 nv_mask(dev, 0x002600 + (chan->id * 4), 0x80000000, 0x00000000);
124 nv50_fifo_playlist_update(dev);
125
126 /* tell any engines on this channel to unload their contexts */
127 nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12);
128 if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff))
129 NV_INFO(dev, "PFIFO: channel %d unload timeout\n", chan->id);
130
131 nv_wr32(dev, 0x002600 + (chan->id * 4), 0x00000000);
132 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
133
134 /* clean up */
135 if (chan->user) {
136 iounmap(chan->user);
137 chan->user = NULL;
138 }
139
140 nouveau_gpuobj_ref(NULL, &fctx->ramfc);
141 nouveau_gpuobj_ref(NULL, &fctx->cache);
142
143 atomic_dec(&chan->vm->engref[engine]);
144 chan->engctx[engine] = NULL;
145 kfree(fctx);
146}
147
148static int
149nv84_fifo_init(struct drm_device *dev, int engine)
150{
151 struct drm_nouveau_private *dev_priv = dev->dev_private;
152 struct nv84_fifo_chan *fctx;
153 u32 instance;
154 int i;
155
156 nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
157 nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
158 nv_wr32(dev, 0x00250c, 0x6f3cfc34);
159 nv_wr32(dev, 0x002044, 0x01003fff);
160
161 nv_wr32(dev, 0x002100, 0xffffffff);
162 nv_wr32(dev, 0x002140, 0xffffffff);
163
164 for (i = 0; i < 128; i++) {
165 struct nouveau_channel *chan = dev_priv->channels.ptr[i];
166 if (chan && (fctx = chan->engctx[engine]))
167 instance = 0x80000000 | fctx->ramfc->vinst >> 8;
168 else
169 instance = 0x00000000;
170 nv_wr32(dev, 0x002600 + (i * 4), instance);
171 }
172
173 nv50_fifo_playlist_update(dev);
174
175 nv_wr32(dev, 0x003200, 1);
176 nv_wr32(dev, 0x003250, 1);
177 nv_wr32(dev, 0x002500, 1);
178 return 0;
179}
180
181static int
182nv84_fifo_fini(struct drm_device *dev, int engine, bool suspend)
183{
184 struct drm_nouveau_private *dev_priv = dev->dev_private;
185 struct nv84_fifo_priv *priv = nv_engine(dev, engine);
186 int i;
187
188 /* set playlist length to zero, fifo will unload context */
189 nv_wr32(dev, 0x0032ec, 0);
190
191 /* tell all connected engines to unload their contexts */
192 for (i = 0; i < priv->base.channels; i++) {
193 struct nouveau_channel *chan = dev_priv->channels.ptr[i];
194 if (chan)
195 nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12);
196 if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff)) {
197 NV_INFO(dev, "PFIFO: channel %d unload timeout\n", i);
198 return -EBUSY;
199 }
200 }
201
202 nv_wr32(dev, 0x002140, 0);
203 return 0;
204}
205
206int
207nv84_fifo_create(struct drm_device *dev)
208{
209 struct drm_nouveau_private *dev_priv = dev->dev_private;
210 struct nv84_fifo_priv *priv;
211 int ret;
212
213 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
214 if (!priv)
215 return -ENOMEM;
216
217 priv->base.base.destroy = nv50_fifo_destroy;
218 priv->base.base.init = nv84_fifo_init;
219 priv->base.base.fini = nv84_fifo_fini;
220 priv->base.base.context_new = nv84_fifo_context_new;
221 priv->base.base.context_del = nv84_fifo_context_del;
222 priv->base.base.tlb_flush = nv50_fifo_tlb_flush;
223 priv->base.channels = 127;
224 dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
225
226 ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000,
227 NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[0]);
228 if (ret)
229 goto error;
230
231 ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000,
232 NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[1]);
233 if (ret)
234 goto error;
235
236 nouveau_irq_register(dev, 8, nv04_fifo_isr);
237error:
238 if (ret)
239 priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO);
240 return ret;
241}
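
Keeping two playlist objects plus a cur_playlist index is plain double buffering: the next channel list is built in the idle buffer and the hardware is then pointed at it, so PFIFO never walks a half-written list. nv50_fifo_playlist_update() itself is not part of this hunk; the sketch below is only an assumption about its general shape:

#include <stdint.h>

struct playlist {
        uint32_t entries[2][128];       /* two buffers, 128 channel slots */
        int cur;                        /* buffer the hardware currently reads */
};

/* assumed shape only: fill the idle buffer, then flip */
static void playlist_update(struct playlist *pl, const int *active, int n)
{
        int next = pl->cur ^ 1;
        int i;

        for (i = 0; i < n; i++)
                pl->entries[next][i] = (uint32_t)active[i];
        /* the real code would now write the buffer's address and length
         * to PFIFO registers before switching */
        pl->cur = next;
}
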
diff --git a/drivers/gpu/drm/nouveau/nv98_crypt.c b/drivers/gpu/drm/nouveau/nv98_crypt.c
index db94ff0a9fab..e25e13fb894e 100644
--- a/drivers/gpu/drm/nouveau/nv98_crypt.c
+++ b/drivers/gpu/drm/nouveau/nv98_crypt.c
@@ -23,21 +23,93 @@
23 23 */
24 24
25 25#include "drmP.h"
26
26 27#include "nouveau_drv.h"
27 28#include "nouveau_util.h"
28 29#include "nouveau_vm.h"
29 30#include "nouveau_ramht.h"
30 31
31struct nv98_crypt_engine {
32#include "nv98_crypt.fuc.h"
33
34struct nv98_crypt_priv {
32 35 struct nouveau_exec_engine base;
33 36};
34 37
38struct nv98_crypt_chan {
39 struct nouveau_gpuobj *mem;
40};
41
35 42static int
36nv98_crypt_fini(struct drm_device *dev, int engine, bool suspend)
43nv98_crypt_context_new(struct nouveau_channel *chan, int engine)
44{
45 struct drm_device *dev = chan->dev;
46 struct drm_nouveau_private *dev_priv = dev->dev_private;
47 struct nv98_crypt_priv *priv = nv_engine(dev, engine);
48 struct nv98_crypt_chan *cctx;
49 int ret;
50
51 cctx = chan->engctx[engine] = kzalloc(sizeof(*cctx), GFP_KERNEL);
52 if (!cctx)
53 return -ENOMEM;
54
55 atomic_inc(&chan->vm->engref[engine]);
56
57 ret = nouveau_gpuobj_new(dev, chan, 256, 0, NVOBJ_FLAG_ZERO_ALLOC |
58 NVOBJ_FLAG_ZERO_FREE, &cctx->mem);
59 if (ret)
60 goto error;
61
62 nv_wo32(chan->ramin, 0xa0, 0x00190000);
63 nv_wo32(chan->ramin, 0xa4, cctx->mem->vinst + cctx->mem->size - 1);
64 nv_wo32(chan->ramin, 0xa8, cctx->mem->vinst);
65 nv_wo32(chan->ramin, 0xac, 0x00000000);
66 nv_wo32(chan->ramin, 0xb0, 0x00000000);
67 nv_wo32(chan->ramin, 0xb4, 0x00000000);
68 dev_priv->engine.instmem.flush(dev);
69
70error:
71 if (ret)
72 priv->base.context_del(chan, engine);
73 return ret;
74}
75
76static void
77nv98_crypt_context_del(struct nouveau_channel *chan, int engine)
78{
79 struct nv98_crypt_chan *cctx = chan->engctx[engine];
80 int i;
81
82 for (i = 0xa0; i < 0xb4; i += 4)
83 nv_wo32(chan->ramin, i, 0x00000000);
84
85 nouveau_gpuobj_ref(NULL, &cctx->mem);
86
87 atomic_dec(&chan->vm->engref[engine]);
88 chan->engctx[engine] = NULL;
89 kfree(cctx);
90}
91
92static int
93nv98_crypt_object_new(struct nouveau_channel *chan, int engine,
94 u32 handle, u16 class)
37 95{
38 if (!(nv_rd32(dev, 0x000200) & 0x00004000))
39 return 0;
96 struct nv98_crypt_chan *cctx = chan->engctx[engine];
97
98 /* fuc engine doesn't need an object, our ramht code does.. */
99 cctx->mem->engine = 5;
100 cctx->mem->class = class;
101 return nouveau_ramht_insert(chan, handle, cctx->mem);
102}
40 103
104static void
105nv98_crypt_tlb_flush(struct drm_device *dev, int engine)
106{
107 nv50_vm_flush_engine(dev, 0x0a);
108}
109
110static int
111nv98_crypt_fini(struct drm_device *dev, int engine, bool suspend)
112{
41 113 nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
42 114 return 0;
43 115}
@@ -45,34 +117,100 @@ nv98_crypt_fini(struct drm_device *dev, int engine, bool suspend)
45 117static int
46 118nv98_crypt_init(struct drm_device *dev, int engine)
47 119{
120 int i;
121
122 /* reset! */
48 123 nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
49 124 nv_mask(dev, 0x000200, 0x00004000, 0x00004000);
125
126 /* wait for exit interrupt to signal */
127 nv_wait(dev, 0x087008, 0x00000010, 0x00000010);
128 nv_wr32(dev, 0x087004, 0x00000010);
129
130 /* upload microcode code and data segments */
131 nv_wr32(dev, 0x087ff8, 0x00100000);
132 for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_code); i++)
133 nv_wr32(dev, 0x087ff4, nv98_pcrypt_code[i]);
134
135 nv_wr32(dev, 0x087ff8, 0x00000000);
136 for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_data); i++)
137 nv_wr32(dev, 0x087ff4, nv98_pcrypt_data[i]);
138
139 /* start it running */
140 nv_wr32(dev, 0x08710c, 0x00000000);
141 nv_wr32(dev, 0x087104, 0x00000000); /* ENTRY */
142 nv_wr32(dev, 0x087100, 0x00000002); /* TRIGGER */
50 143 return 0;
51 144}
52 145
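
The upload loops in nv98_crypt_init() above use a classic index/data register pair: one write to 0x087ff8 selects the segment and starting offset, then each write to 0x087ff4 stores a word and auto-increments. The same access pattern, abstracted into plain C:

#include <stdint.h>
#include <stddef.h>

/* index/data upload, as in the code/data segment loops above; wr32() and
 * the register offsets stand in for the driver's own accessors */
static void fuc_upload(void (*wr32)(uint32_t reg, uint32_t val),
                       uint32_t base, const uint32_t *words, size_t n)
{
        size_t i;

        wr32(0x087ff8, base);           /* select segment + start offset */
        for (i = 0; i < n; i++)
                wr32(0x087ff4, words[i]); /* data port auto-increments */
}
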
146static struct nouveau_enum nv98_crypt_isr_error_name[] = {
147 { 0x0000, "ILLEGAL_MTHD" },
148 { 0x0001, "INVALID_BITFIELD" },
149 { 0x0002, "INVALID_ENUM" },
150 { 0x0003, "QUERY" },
151 {}
152};
153
154static void
155nv98_crypt_isr(struct drm_device *dev)
156{
157 u32 disp = nv_rd32(dev, 0x08701c);
158 u32 stat = nv_rd32(dev, 0x087008) & disp & ~(disp >> 16);
159 u32 inst = nv_rd32(dev, 0x087050) & 0x3fffffff;
160 u32 ssta = nv_rd32(dev, 0x087040) & 0x0000ffff;
161 u32 addr = nv_rd32(dev, 0x087040) >> 16;
162 u32 mthd = (addr & 0x07ff) << 2;
163 u32 subc = (addr & 0x3800) >> 11;
164 u32 data = nv_rd32(dev, 0x087044);
165 int chid = nv50_graph_isr_chid(dev, inst);
166
167 if (stat & 0x00000040) {
168 NV_INFO(dev, "PCRYPT: DISPATCH_ERROR [");
169 nouveau_enum_print(nv98_crypt_isr_error_name, ssta);
170 printk("] ch %d [0x%08x] subc %d mthd 0x%04x data 0x%08x\n",
171 chid, inst, subc, mthd, data);
172 nv_wr32(dev, 0x087004, 0x00000040);
173 stat &= ~0x00000040;
174 }
175
176 if (stat) {
177 NV_INFO(dev, "PCRYPT: unhandled intr 0x%08x\n", stat);
178 nv_wr32(dev, 0x087004, stat);
179 }
180
181 nv50_fb_vm_trap(dev, 1);
182}
183
53 184static void
54 185nv98_crypt_destroy(struct drm_device *dev, int engine)
55 186{
56 struct nv98_crypt_engine *pcrypt = nv_engine(dev, engine);
187 struct nv98_crypt_priv *priv = nv_engine(dev, engine);
57 188
189 nouveau_irq_unregister(dev, 14);
58 190 NVOBJ_ENGINE_DEL(dev, CRYPT);
59
191 kfree(priv);
60 kfree(pcrypt);
61 192}
62 193
63 194int
64 195nv98_crypt_create(struct drm_device *dev)
65 196{
66 struct nv98_crypt_engine *pcrypt;
197 struct nv98_crypt_priv *priv;
67 198
68 pcrypt = kzalloc(sizeof(*pcrypt), GFP_KERNEL);
199 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
69 if (!pcrypt)
200 if (!priv)
70 201 return -ENOMEM;
71 202
72 pcrypt->base.destroy = nv98_crypt_destroy;
203 priv->base.destroy = nv98_crypt_destroy;
73 pcrypt->base.init = nv98_crypt_init;
204 priv->base.init = nv98_crypt_init;
74 pcrypt->base.fini = nv98_crypt_fini;
205 priv->base.fini = nv98_crypt_fini;
206 priv->base.context_new = nv98_crypt_context_new;
207 priv->base.context_del = nv98_crypt_context_del;
208 priv->base.object_new = nv98_crypt_object_new;
209 priv->base.tlb_flush = nv98_crypt_tlb_flush;
210
211 nouveau_irq_register(dev, 14, nv98_crypt_isr);
75 212
76 NVOBJ_ENGINE_ADD(dev, CRYPT, &pcrypt->base);
213 NVOBJ_ENGINE_ADD(dev, CRYPT, &priv->base);
214 NVOBJ_CLASS(dev, 0x88b4, CRYPT);
77 215 return 0;
78 216}
diff --git a/drivers/gpu/drm/nouveau/nv98_crypt.fuc b/drivers/gpu/drm/nouveau/nv98_crypt.fuc
new file mode 100644
index 000000000000..7393813044de
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv98_crypt.fuc
@@ -0,0 +1,698 @@
1/*
2 * fuc microcode for nv98 pcrypt engine
3 * Copyright (C) 2010 Marcin Kościelnicki
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 */
19
20.section #nv98_pcrypt_data
21
22ctx_dma:
23ctx_dma_query: .b32 0
24ctx_dma_src: .b32 0
25ctx_dma_dst: .b32 0
26.equ #dma_count 3
27ctx_query_address_high: .b32 0
28ctx_query_address_low: .b32 0
29ctx_query_counter: .b32 0
30ctx_cond_address_high: .b32 0
31ctx_cond_address_low: .b32 0
32ctx_cond_off: .b32 0
33ctx_src_address_high: .b32 0
34ctx_src_address_low: .b32 0
35ctx_dst_address_high: .b32 0
36ctx_dst_address_low: .b32 0
37ctx_mode: .b32 0
38.align 16
39ctx_key: .skip 16
40ctx_iv: .skip 16
41
42.align 0x80
43swap:
44.skip 32
45
46.align 8
47common_cmd_dtable:
48.b32 #ctx_query_address_high + 0x20000 ~0xff
49.b32 #ctx_query_address_low + 0x20000 ~0xfffffff0
50.b32 #ctx_query_counter + 0x20000 ~0xffffffff
51.b32 #cmd_query_get + 0x00000 ~1
52.b32 #ctx_cond_address_high + 0x20000 ~0xff
53.b32 #ctx_cond_address_low + 0x20000 ~0xfffffff0
54.b32 #cmd_cond_mode + 0x00000 ~7
55.b32 #cmd_wrcache_flush + 0x00000 ~0
56.equ #common_cmd_max 0x88
57
58
59.align 8
60engine_cmd_dtable:
61.b32 #ctx_key + 0x0 + 0x20000 ~0xffffffff
62.b32 #ctx_key + 0x4 + 0x20000 ~0xffffffff
63.b32 #ctx_key + 0x8 + 0x20000 ~0xffffffff
64.b32 #ctx_key + 0xc + 0x20000 ~0xffffffff
65.b32 #ctx_iv + 0x0 + 0x20000 ~0xffffffff
66.b32 #ctx_iv + 0x4 + 0x20000 ~0xffffffff
67.b32 #ctx_iv + 0x8 + 0x20000 ~0xffffffff
68.b32 #ctx_iv + 0xc + 0x20000 ~0xffffffff
69.b32 #ctx_src_address_high + 0x20000 ~0xff
70.b32 #ctx_src_address_low + 0x20000 ~0xfffffff0
71.b32 #ctx_dst_address_high + 0x20000 ~0xff
72.b32 #ctx_dst_address_low + 0x20000 ~0xfffffff0
73.b32 #crypt_cmd_mode + 0x00000 ~0xf
74.b32 #crypt_cmd_length + 0x10000 ~0x0ffffff0
75.equ #engine_cmd_max 0xce
76
77.align 4
78crypt_dtable:
79.b16 #crypt_copy_prep #crypt_do_inout
80.b16 #crypt_store_prep #crypt_do_out
81.b16 #crypt_ecb_e_prep #crypt_do_inout
82.b16 #crypt_ecb_d_prep #crypt_do_inout
83.b16 #crypt_cbc_e_prep #crypt_do_inout
84.b16 #crypt_cbc_d_prep #crypt_do_inout
85.b16 #crypt_pcbc_e_prep #crypt_do_inout
86.b16 #crypt_pcbc_d_prep #crypt_do_inout
87.b16 #crypt_cfb_e_prep #crypt_do_inout
88.b16 #crypt_cfb_d_prep #crypt_do_inout
89.b16 #crypt_ofb_prep #crypt_do_inout
90.b16 #crypt_ctr_prep #crypt_do_inout
91.b16 #crypt_cbc_mac_prep #crypt_do_in
92.b16 #crypt_cmac_finish_complete_prep #crypt_do_in
93.b16 #crypt_cmac_finish_partial_prep #crypt_do_in
94
95.align 0x100
96
97.section #nv98_pcrypt_code
98
99 // $r0 is always set to 0 in our code - this allows some space savings.
100 clear b32 $r0
101
102 // set up the interrupt handler
103 mov $r1 #ih
104 mov $iv0 $r1
105
106 // init stack pointer
107 mov $sp $r0
108
109 // set interrupt dispatch - route timer, fifo, ctxswitch to i0, others to host
110 movw $r1 0xfff0
111 sethi $r1 0
112 mov $r2 0x400
113 iowr I[$r2 + 0x300] $r1
114
115 // enable the interrupts
116 or $r1 0xc
117 iowr I[$r2] $r1
118
119 // enable fifo access and context switching
120 mov $r1 3
121 mov $r2 0x1200
122 iowr I[$r2] $r1
123
124 // enable i0 delivery
125 bset $flags ie0
126
127 // sleep forever, waking only for interrupts.
128 bset $flags $p0
129 spin:
130 sleep $p0
131 bra #spin
132
133// i0 handler
134ih:
135 // see which interrupts we got
136 iord $r1 I[$r0 + 0x200]
137
138 and $r2 $r1 0x8
139 cmpu b32 $r2 0
140 bra e #noctx
141
142 // context switch... prepare the regs for xfer
143 mov $r2 0x7700
144 mov $xtargets $r2
145 mov $xdbase $r0
146 // 128-byte context.
147 mov $r2 0
148 sethi $r2 0x50000
149
150 // read current channel
151 mov $r3 0x1400
152 iord $r4 I[$r3]
153 // if bit 30 set, it's active, so we have to unload it first.
154 shl b32 $r5 $r4 1
155 cmps b32 $r5 0
156 bra nc #ctxload
157
158 // unload the current channel - save the context
159 xdst $r0 $r2
160 xdwait
161 // and clear bit 30, then write back
162 bclr $r4 0x1e
163 iowr I[$r3] $r4
164 // tell PFIFO we unloaded
165 mov $r4 1
166 iowr I[$r3 + 0x200] $r4
167
168 bra #noctx
169
170 ctxload:
171 // no channel loaded - perhaps we're requested to load one
172 iord $r4 I[$r3 + 0x100]
173 shl b32 $r15 $r4 1
174 cmps b32 $r15 0
175 // if bit 30 of the next channel is not set, PFIFO is probably just
176 // killing a context. do a faux load, without the active bit.
177 bra nc #dummyload
178
179 // ok, do a real context load.
180 xdld $r0 $r2
181 xdwait
182 mov $r5 #ctx_dma
183 mov $r6 #dma_count - 1
184 ctxload_dma_loop:
185 ld b32 $r7 D[$r5 + $r6 * 4]
186 add b32 $r8 $r6 0x180
187 shl b32 $r8 8
188 iowr I[$r8] $r7
189 sub b32 $r6 1
190 bra nc #ctxload_dma_loop
191
192 dummyload:
193 // tell PFIFO we're done
194 mov $r5 2
195 iowr I[$r3 + 0x200] $r5
196
197 noctx:
198 and $r2 $r1 0x4
199 cmpu b32 $r2 0
200 bra e #nocmd
201
202 // incoming fifo command.
203 mov $r3 0x1900
204 iord $r2 I[$r3 + 0x100]
205 iord $r3 I[$r3]
206 // extract the method
207 and $r4 $r2 0x7ff
208 // shift the addr to proper position if we need to interrupt later
209 shl b32 $r2 0x10
210
211 // mthd 0 and 0x100 [NAME, NOP]: ignore
212 and $r5 $r4 0x7bf
213 cmpu b32 $r5 0
214 bra e #cmddone
215
216 mov $r5 #engine_cmd_dtable - 0xc0 * 8
217 mov $r6 #engine_cmd_max
218 cmpu b32 $r4 0xc0
219 bra nc #dtable_cmd
220 mov $r5 #common_cmd_dtable - 0x80 * 8
221 mov $r6 #common_cmd_max
222 cmpu b32 $r4 0x80
223 bra nc #dtable_cmd
224 cmpu b32 $r4 0x60
225 bra nc #dma_cmd
226 cmpu b32 $r4 0x50
227 bra ne #illegal_mthd
228
229 // mthd 0x140: PM_TRIGGER
230 mov $r2 0x2200
231 clear b32 $r3
232 sethi $r3 0x20000
233 iowr I[$r2] $r3
234 bra #cmddone
235
236 dma_cmd:
237 // mthd 0x180...: DMA_*
238 cmpu b32 $r4 0x60+#dma_count
239 bra nc #illegal_mthd
240 shl b32 $r5 $r4 2
241 add b32 $r5 (#ctx_dma - 0x60 * 4) & 0xffff
242 bset $r3 0x1e
243 st b32 D[$r5] $r3
244 add b32 $r4 0x180 - 0x60
245 shl b32 $r4 8
246 iowr I[$r4] $r3
247 bra #cmddone
248
249 dtable_cmd:
250 cmpu b32 $r4 $r6
251 bra nc #illegal_mthd
252 shl b32 $r4 3
253 add b32 $r4 $r5
254 ld b32 $r5 D[$r4 + 4]
255 and $r5 $r3
256 cmpu b32 $r5 0
257 bra ne #invalid_bitfield
258 ld b16 $r5 D[$r4]
259 ld b16 $r6 D[$r4 + 2]
260 cmpu b32 $r6 2
261 bra e #cmd_setctx
262 ld b32 $r7 D[$r0 + #ctx_cond_off]
263 and $r6 $r7
264 cmpu b32 $r6 1
265 bra e #cmddone
266 call $r5
267 bra $p1 #dispatch_error
268 bra #cmddone
269
270 cmd_setctx:
271 st b32 D[$r5] $r3
272 bra #cmddone
273
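
Each dtable entry above is a pair of words: the first packs a 16-bit pointer (a context-variable offset or a handler address) with a small flag field in the upper half (the +0x20000 entries store their parameter straight into the context; a low flag bit makes a handler respect the COND state), and the second word is the mask of parameter bits that must be zero. A hedged C rendering of the decode that dtable_cmd performs, with the field meanings inferred from the code above:

#include <stdint.h>

/* one dispatch-table entry, as laid out in common/engine_cmd_dtable */
struct dtable_entry {
        uint32_t ptr_flags;     /* low 16: pointer; high 16: flags */
        uint32_t invalid_mask;  /* parameter bits that must be zero */
};

/* sketch: returns <0 for INVALID_BITFIELD, 0 otherwise */
static int dispatch(const struct dtable_entry *e, uint32_t param,
                    uint32_t *ctx_base, uint32_t cond_off,
                    int (*call)(uint16_t addr, uint32_t param))
{
        uint16_t ptr   = e->ptr_flags & 0xffff;
        uint16_t flags = e->ptr_flags >> 16;

        if (param & e->invalid_mask)
                return -1;                      /* INVALID_BITFIELD */
        if (flags == 2) {                       /* plain context store */
                ctx_base[ptr / 4] = param;
                return 0;
        }
        if ((flags & cond_off) == 1)            /* masked off by COND */
                return 0;
        return call(ptr, param);
}
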
274
275 invalid_bitfield:
276 or $r2 1
277 dispatch_error:
278 illegal_mthd:
279 mov $r4 0x1000
280 iowr I[$r4] $r2
281 iowr I[$r4 + 0x100] $r3
282 mov $r4 0x40
283 iowr I[$r0] $r4
284
285 im_loop:
286 iord $r4 I[$r0 + 0x200]
287 and $r4 0x40
288 cmpu b32 $r4 0
289 bra ne #im_loop
290
291 cmddone:
292 // remove the command from FIFO
293 mov $r3 0x1d00
294 mov $r4 1
295 iowr I[$r3] $r4
296
297 nocmd:
298 // ack the processed interrupts
299 and $r1 $r1 0xc
300 iowr I[$r0 + 0x100] $r1
301iret
302
303cmd_query_get:
304 // if bit 0 of param set, trigger interrupt afterwards.
305 setp $p1 $r3
306 or $r2 3
307
308 // read PTIMER, beware of races...
309 mov $r4 0xb00
310 ptimer_retry:
311 iord $r6 I[$r4 + 0x100]
312 iord $r5 I[$r4]
313 iord $r7 I[$r4 + 0x100]
314 cmpu b32 $r6 $r7
315 bra ne #ptimer_retry
316
317 // prepare the query structure
318 ld b32 $r4 D[$r0 + #ctx_query_counter]
319 st b32 D[$r0 + #swap + 0x0] $r4
320 st b32 D[$r0 + #swap + 0x4] $r0
321 st b32 D[$r0 + #swap + 0x8] $r5
322 st b32 D[$r0 + #swap + 0xc] $r6
323
324 // will use target 0, DMA_QUERY.
325 mov $xtargets $r0
326
327 ld b32 $r4 D[$r0 + #ctx_query_address_high]
328 shl b32 $r4 0x18
329 mov $xdbase $r4
330
331 ld b32 $r4 D[$r0 + #ctx_query_address_low]
332 mov $r5 #swap
333 sethi $r5 0x20000
334 xdst $r4 $r5
335 xdwait
336
337 ret
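
The ptimer_retry loop is the standard race-free read of a 64-bit counter exposed as two 32-bit registers: sample high, low, then high again, and retry if the high half moved in between. The same idiom in C, with register access abstracted:

#include <stdint.h>

/* read a 64-bit timer exposed as two 32-bit registers without tearing */
static uint64_t read_ptimer(uint32_t (*rd32)(uint32_t reg),
                            uint32_t lo_reg, uint32_t hi_reg)
{
        uint32_t hi, lo, hi2;

        do {
                hi  = rd32(hi_reg);
                lo  = rd32(lo_reg);
                hi2 = rd32(hi_reg);
        } while (hi != hi2);    /* high word rolled over mid-read: retry */
        return ((uint64_t)hi << 32) | lo;
}
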
338
339cmd_cond_mode:
340 // if >= 5, INVALID_ENUM
341 bset $flags $p1
342 or $r2 2
343 cmpu b32 $r3 5
344 bra nc #return
345
346 // otherwise, no error.
347 bclr $flags $p1
348
349 // if < 2, no QUERY object is involved
350 cmpu b32 $r3 2
351 bra nc #cmd_cond_mode_queryful
352
353 xor $r3 1
354 st b32 D[$r0 + #ctx_cond_off] $r3
355 return:
356 ret
357
358 cmd_cond_mode_queryful:
359 // ok, will need to pull a QUERY object, prepare offsets
360 ld b32 $r4 D[$r0 + #ctx_cond_address_high]
361 ld b32 $r5 D[$r0 + #ctx_cond_address_low]
362 and $r6 $r5 0xff
363 shr b32 $r5 8
364 shl b32 $r4 0x18
365 or $r4 $r5
366 mov $xdbase $r4
367 mov $xtargets $r0
368
369 // pull the first one
370 mov $r5 #swap
371 sethi $r5 0x20000
372 xdld $r6 $r5
373
374 // if == 2, only a single QUERY is involved...
375 cmpu b32 $r3 2
376 bra ne #cmd_cond_mode_double
377
378 xdwait
379 ld b32 $r4 D[$r0 + #swap + 4]
380 cmpu b32 $r4 0
381 xbit $r4 $flags z
382 st b32 D[$r0 + #ctx_cond_off] $r4
383 ret
384
385 // ok, we'll need to pull second one too
386 cmd_cond_mode_double:
387 add b32 $r6 0x10
388 add b32 $r5 0x10
389 xdld $r6 $r5
390 xdwait
391
392 // compare COUNTERs
393 ld b32 $r5 D[$r0 + #swap + 0x00]
394 ld b32 $r6 D[$r0 + #swap + 0x10]
395 cmpu b32 $r5 $r6
396 xbit $r4 $flags z
397
398 // compare RES values
399 ld b32 $r5 D[$r0 + #swap + 0x04]
400 ld b32 $r6 D[$r0 + #swap + 0x14]
401 cmpu b32 $r5 $r6
402 xbit $r5 $flags z
403 and $r4 $r5
404
405 // and negate or not, depending on mode
406 cmpu b32 $r3 3
407 xbit $r5 $flags z
408 xor $r4 $r5
409 st b32 D[$r0 + #ctx_cond_off] $r4
410 ret
411
412cmd_wrcache_flush:
413 bclr $flags $p1
414 mov $r2 0x2200
415 clear b32 $r3
416 sethi $r3 0x10000
417 iowr I[$r2] $r3
418 ret
419
420crypt_cmd_mode:
421 // if >= 0xf, INVALID_ENUM
422 bset $flags $p1
423 or $r2 2
424 cmpu b32 $r3 0xf
425 bra nc #crypt_cmd_mode_return
426
427 bclr $flags $p1
428 st b32 D[$r0 + #ctx_mode] $r3
429
430 crypt_cmd_mode_return:
431 ret
432
433crypt_cmd_length:
434 // nop if length == 0
435 cmpu b32 $r3 0
436 bra e #crypt_cmd_mode_return
437
438 // init key, IV
439 cxset 3
440 mov $r4 #ctx_key
441 sethi $r4 0x70000
442 xdst $r0 $r4
443 mov $r4 #ctx_iv
444 sethi $r4 0x60000
445 xdst $r0 $r4
446 xdwait
447 ckeyreg $c7
448
449 // prepare the targets
450 mov $r4 0x2100
451 mov $xtargets $r4
452
453 // prepare src address
454 ld b32 $r4 D[$r0 + #ctx_src_address_high]
455 ld b32 $r5 D[$r0 + #ctx_src_address_low]
456 shr b32 $r8 $r5 8
457 shl b32 $r4 0x18
458 or $r4 $r8
459 and $r5 $r5 0xff
460
461 // prepare dst address
462 ld b32 $r6 D[$r0 + #ctx_dst_address_high]
463 ld b32 $r7 D[$r0 + #ctx_dst_address_low]
464 shr b32 $r8 $r7 8
465 shl b32 $r6 0x18
466 or $r6 $r8
467 and $r7 $r7 0xff
468
469 // find the proper prep & do functions
470 ld b32 $r8 D[$r0 + #ctx_mode]
471 shl b32 $r8 2
472
473 // run prep
474 ld b16 $r9 D[$r8 + #crypt_dtable]
475 call $r9
476
477 // do it
478 ld b16 $r9 D[$r8 + #crypt_dtable + 2]
479 call $r9
480 cxset 1
481 xdwait
482 cxset 0x61
483 xdwait
484 xdwait
485
486 // update src address
487 shr b32 $r8 $r4 0x18
488 shl b32 $r9 $r4 8
489 add b32 $r9 $r5
490 adc b32 $r8 0
491 st b32 D[$r0 + #ctx_src_address_high] $r8
492 st b32 D[$r0 + #ctx_src_address_low] $r9
493
494 // update dst address
495 shr b32 $r8 $r6 0x18
496 shl b32 $r9 $r6 8
497 add b32 $r9 $r7
498 adc b32 $r8 0
499 st b32 D[$r0 + #ctx_dst_address_high] $r8
500 st b32 D[$r0 + #ctx_dst_address_low] $r9
501
502 // pull updated IV
503 cxset 2
504 mov $r4 #ctx_iv
505 sethi $r4 0x60000
506 xdld $r0 $r4
507 xdwait
508
509 ret
510
511
512crypt_copy_prep:
513 cs0begin 2
514 cxsin $c0
515 cxsout $c0
516 ret
517
518crypt_store_prep:
519 cs0begin 1
520 cxsout $c6
521 ret
522
523crypt_ecb_e_prep:
524 cs0begin 3
525 cxsin $c0
526 cenc $c0 $c0
527 cxsout $c0
528 ret
529
530crypt_ecb_d_prep:
531 ckexp $c7 $c7
532 cs0begin 3
533 cxsin $c0
534 cdec $c0 $c0
535 cxsout $c0
536 ret
537
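// CBC encrypt below: $c6 carries the IV/chaining value, so each
// block computes C[i] = E(P[i] ^ C[i-1])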
538crypt_cbc_e_prep:
539 cs0begin 4
540 cxsin $c0
541 cxor $c6 $c0
542 cenc $c6 $c6
543 cxsout $c6
544 ret
545
546crypt_cbc_d_prep:
547 ckexp $c7 $c7
548 cs0begin 5
549 cmov $c2 $c6
550 cxsin $c6
551 cdec $c0 $c6
552 cxor $c0 $c2
553 cxsout $c0
554 ret
555
556crypt_pcbc_e_prep:
557 cs0begin 5
558 cxsin $c0
559 cxor $c6 $c0
560 cenc $c6 $c6
561 cxsout $c6
562 cxor $c6 $c0
563 ret
564
565crypt_pcbc_d_prep:
566 ckexp $c7 $c7
567 cs0begin 5
568 cxsin $c0
569 cdec $c1 $c0
570 cxor $c6 $c1
571 cxsout $c6
572 cxor $c6 $c0
573 ret
574
575crypt_cfb_e_prep:
576 cs0begin 4
577 cenc $c6 $c6
578 cxsin $c0
579 cxor $c6 $c0
580 cxsout $c6
581 ret
582
583crypt_cfb_d_prep:
584 cs0begin 4
585 cenc $c0 $c6
586 cxsin $c6
587 cxor $c0 $c6
588 cxsout $c0
589 ret
590
591crypt_ofb_prep:
592 cs0begin 4
593 cenc $c6 $c6
594 cxsin $c0
595 cxor $c0 $c6
596 cxsout $c0
597 ret
598
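// CTR below: encrypt the counter held in $c6 into a keystream block,
// bump the counter, then XOR the keystream into the data stream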
599crypt_ctr_prep:
600 cs0begin 5
601 cenc $c1 $c6
602 cadd $c6 1
603 cxsin $c0
604 cxor $c0 $c1
605 cxsout $c0
606 ret
607
608crypt_cbc_mac_prep:
609 cs0begin 3
610 cxsin $c0
611 cxor $c6 $c0
612 cenc $c6 $c6
613 ret
614
615crypt_cmac_finish_complete_prep:
616 cs0begin 7
617 cxsin $c0
618 cxor $c6 $c0
619 cxor $c0 $c0
620 cenc $c0 $c0
621 cprecmac $c0 $c0
622 cxor $c6 $c0
623 cenc $c6 $c6
624 ret
625
626crypt_cmac_finish_partial_prep:
627 cs0begin 8
628 cxsin $c0
629 cxor $c6 $c0
630 cxor $c0 $c0
631 cenc $c0 $c0
632 cprecmac $c0 $c0
633 cprecmac $c0 $c0
634 cxor $c6 $c0
635 cenc $c6 $c6
636 ret
637
638// TODO
639crypt_do_in:
640 add b32 $r3 $r5
641 mov $xdbase $r4
642 mov $r9 #swap
643 sethi $r9 0x20000
644 crypt_do_in_loop:
645 xdld $r5 $r9
646 xdwait
647 cxset 0x22
648 xdst $r0 $r9
649 cs0exec 1
650 xdwait
651 add b32 $r5 0x10
652 cmpu b32 $r5 $r3
653 bra ne #crypt_do_in_loop
654 cxset 1
655 xdwait
656 ret
657
658crypt_do_out:
659 add b32 $r3 $r7
660 mov $xdbase $r6
661 mov $r9 #swap
662 sethi $r9 0x20000
663 crypt_do_out_loop:
664 cs0exec 1
665 cxset 0x61
666 xdld $r7 $r9
667 xdst $r7 $r9
668 cxset 1
669 xdwait
670 add b32 $r7 0x10
671 cmpu b32 $r7 $r3
672 bra ne #crypt_do_out_loop
673 ret
674
675crypt_do_inout:
676 add b32 $r3 $r5
677 mov $r9 #swap
678 sethi $r9 0x20000
679 crypt_do_inout_loop:
680 mov $xdbase $r4
681 xdld $r5 $r9
682 xdwait
683 cxset 0x21
684 xdst $r0 $r9
685 cs0exec 1
686 cxset 0x61
687 mov $xdbase $r6
688 xdld $r7 $r9
689 xdst $r7 $r9
690 cxset 1
691 xdwait
692 add b32 $r5 0x10
693 add b32 $r7 0x10
694 cmpu b32 $r5 $r3
695 bra ne #crypt_do_inout_loop
696 ret
697
698.align 0x100
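The ptimer_retry loop in cmd_query_get above is the classic torn-read guard for a 64-bit counter exposed as two 32-bit registers: sample the high word, then the low word, then the high word again, and retry if the high word moved in between. A minimal C sketch of the same pattern (the read32 accessor and register offsets are illustrative, not the real I/O interface):

    #include <stdint.h>

    /* hypothetical MMIO accessor -- stands in for the fuc "iord" above */
    extern uint32_t read32(uint32_t reg);

    #define PTIMER_LOW  0x0b00    /* illustrative offsets only */
    #define PTIMER_HIGH 0x0c00

    static uint64_t ptimer_read64(void)
    {
        uint32_t hi, lo, again;

        do {
            hi    = read32(PTIMER_HIGH);  /* sample high word      */
            lo    = read32(PTIMER_LOW);   /* sample low word       */
            again = read32(PTIMER_HIGH);  /* re-check for rollover */
        } while (hi != again);            /* low word wrapped mid-read: retry */

        return ((uint64_t)hi << 32) | lo;
    }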
diff --git a/drivers/gpu/drm/nouveau/nv98_crypt.fuc.h b/drivers/gpu/drm/nouveau/nv98_crypt.fuc.h
new file mode 100644
index 000000000000..38676c74e6e0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv98_crypt.fuc.h
@@ -0,0 +1,584 @@
1uint32_t nv98_pcrypt_data[] = {
2/* 0x0000: ctx_dma */
3/* 0x0000: ctx_dma_query */
4 0x00000000,
5/* 0x0004: ctx_dma_src */
6 0x00000000,
7/* 0x0008: ctx_dma_dst */
8 0x00000000,
9/* 0x000c: ctx_query_address_high */
10 0x00000000,
11/* 0x0010: ctx_query_address_low */
12 0x00000000,
13/* 0x0014: ctx_query_counter */
14 0x00000000,
15/* 0x0018: ctx_cond_address_high */
16 0x00000000,
17/* 0x001c: ctx_cond_address_low */
18 0x00000000,
19/* 0x0020: ctx_cond_off */
20 0x00000000,
21/* 0x0024: ctx_src_address_high */
22 0x00000000,
23/* 0x0028: ctx_src_address_low */
24 0x00000000,
25/* 0x002c: ctx_dst_address_high */
26 0x00000000,
27/* 0x0030: ctx_dst_address_low */
28 0x00000000,
29/* 0x0034: ctx_mode */
30 0x00000000,
31 0x00000000,
32 0x00000000,
33/* 0x0040: ctx_key */
34 0x00000000,
35 0x00000000,
36 0x00000000,
37 0x00000000,
38/* 0x0050: ctx_iv */
39 0x00000000,
40 0x00000000,
41 0x00000000,
42 0x00000000,
43 0x00000000,
44 0x00000000,
45 0x00000000,
46 0x00000000,
47 0x00000000,
48 0x00000000,
49 0x00000000,
50 0x00000000,
51/* 0x0080: swap */
52 0x00000000,
53 0x00000000,
54 0x00000000,
55 0x00000000,
56 0x00000000,
57 0x00000000,
58 0x00000000,
59 0x00000000,
60/* 0x00a0: common_cmd_dtable */
61 0x0002000c,
62 0xffffff00,
63 0x00020010,
64 0x0000000f,
65 0x00020014,
66 0x00000000,
67 0x00000192,
68 0xfffffffe,
69 0x00020018,
70 0xffffff00,
71 0x0002001c,
72 0x0000000f,
73 0x000001d7,
74 0xfffffff8,
75 0x00000260,
76 0xffffffff,
77/* 0x00e0: engine_cmd_dtable */
78 0x00020040,
79 0x00000000,
80 0x00020044,
81 0x00000000,
82 0x00020048,
83 0x00000000,
84 0x0002004c,
85 0x00000000,
86 0x00020050,
87 0x00000000,
88 0x00020054,
89 0x00000000,
90 0x00020058,
91 0x00000000,
92 0x0002005c,
93 0x00000000,
94 0x00020024,
95 0xffffff00,
96 0x00020028,
97 0x0000000f,
98 0x0002002c,
99 0xffffff00,
100 0x00020030,
101 0x0000000f,
102 0x00000271,
103 0xfffffff0,
104 0x00010285,
105 0xf000000f,
106/* 0x0150: crypt_dtable */
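/* each 32-bit entry below packs two 16-bit code addresses for one
 * crypt mode: the "do" routine in the high half and the "prep"
 * routine in the low half -- e.g. 0x04db0321 = crypt_do_inout
 * (0x04db) + crypt_copy_prep (0x0321); see crypt_cmd_length */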
107 0x04db0321,
108 0x04b1032f,
109 0x04db0339,
110 0x04db034b,
111 0x04db0361,
112 0x04db0377,
113 0x04db0395,
114 0x04db03af,
115 0x04db03cd,
116 0x04db03e3,
117 0x04db03f9,
118 0x04db040f,
119 0x04830429,
120 0x0483043b,
121 0x0483045d,
122 0x00000000,
123 0x00000000,
124 0x00000000,
125 0x00000000,
126 0x00000000,
127 0x00000000,
128 0x00000000,
129 0x00000000,
130 0x00000000,
131 0x00000000,
132 0x00000000,
133 0x00000000,
134 0x00000000,
135 0x00000000,
136 0x00000000,
137 0x00000000,
138 0x00000000,
139 0x00000000,
140 0x00000000,
141 0x00000000,
142 0x00000000,
143 0x00000000,
144 0x00000000,
145 0x00000000,
146 0x00000000,
147 0x00000000,
148 0x00000000,
149 0x00000000,
150 0x00000000,
151};
152
153uint32_t nv98_pcrypt_code[] = {
154 0x17f004bd,
155 0x0010fe35,
156 0xf10004fe,
157 0xf0fff017,
158 0x27f10013,
159 0x21d00400,
160 0x0c15f0c0,
161 0xf00021d0,
162 0x27f10317,
163 0x21d01200,
164 0x1031f400,
165/* 0x002f: spin */
166 0xf40031f4,
167 0x0ef40028,
168/* 0x0035: ih */
169 0x8001cffd,
170 0xb00812c4,
171 0x0bf40024,
172 0x0027f167,
173 0x002bfe77,
174 0xf00007fe,
175 0x23f00027,
176 0x0037f105,
177 0x0034cf14,
178 0xb0014594,
179 0x18f40055,
180 0x0602fa17,
181 0x4af003f8,
182 0x0034d01e,
183 0xd00147f0,
184 0x0ef48034,
185/* 0x0075: ctxload */
186 0x4034cf33,
187 0xb0014f94,
188 0x18f400f5,
189 0x0502fa21,
190 0x57f003f8,
191 0x0267f000,
192/* 0x008c: ctxload_dma_loop */
193 0xa07856bc,
194 0xb6018068,
195 0x87d00884,
196 0x0162b600,
197/* 0x009f: dummyload */
198 0xf0f018f4,
199 0x35d00257,
200/* 0x00a5: noctx */
201 0x0412c480,
202 0xf50024b0,
203 0xf100df0b,
204 0xcf190037,
205 0x33cf4032,
206 0xff24e400,
207 0x1024b607,
208 0x07bf45e4,
209 0xf50054b0,
210 0xf100b90b,
211 0xf1fae057,
212 0xb000ce67,
213 0x18f4c044,
214 0xa057f14d,
215 0x8867f1fc,
216 0x8044b000,
217 0xb03f18f4,
218 0x18f46044,
219 0x5044b019,
220 0xf1741bf4,
221 0xbd220027,
222 0x0233f034,
223 0xf50023d0,
224/* 0x0103: dma_cmd */
225 0xb000810e,
226 0x18f46344,
227 0x0245945e,
228 0xfe8050b7,
229 0x801e39f0,
230 0x40b70053,
231 0x44b60120,
232 0x0043d008,
233/* 0x0123: dtable_cmd */
234 0xb8600ef4,
235 0x18f40446,
236 0x0344b63e,
237 0x980045bb,
238 0x53fd0145,
239 0x0054b004,
240 0x58291bf4,
241 0x46580045,
242 0x0264b001,
243 0x98170bf4,
244 0x67fd0807,
245 0x0164b004,
246 0xf9300bf4,
247 0x0f01f455,
248/* 0x015b: cmd_setctx */
249 0x80280ef4,
250 0x0ef40053,
251/* 0x0161: invalid_bitfield */
252 0x0125f022,
253/* 0x0164: dispatch_error */
254/* 0x0164: illegal_mthd */
255 0x100047f1,
256 0xd00042d0,
257 0x47f04043,
258 0x0004d040,
259/* 0x0174: im_loop */
260 0xf08004cf,
261 0x44b04044,
262 0xf71bf400,
263/* 0x0180: cmddone */
264 0x1d0037f1,
265 0xd00147f0,
266/* 0x018a: nocmd */
267 0x11c40034,
268 0x4001d00c,
269/* 0x0192: cmd_query_get */
270 0x38f201f8,
271 0x0325f001,
272 0x0b0047f1,
273/* 0x019c: ptimer_retry */
274 0xcf4046cf,
275 0x47cf0045,
276 0x0467b840,
277 0x98f41bf4,
278 0x04800504,
279 0x21008020,
280 0x80220580,
281 0x0bfe2306,
282 0x03049800,
283 0xfe1844b6,
284 0x04980047,
285 0x8057f104,
286 0x0253f000,
287 0xf80645fa,
288/* 0x01d7: cmd_cond_mode */
289 0xf400f803,
290 0x25f00131,
291 0x0534b002,
292 0xf41218f4,
293 0x34b00132,
294 0x0b18f402,
295 0x800136f0,
296/* 0x01f2: return */
297 0x00f80803,
298/* 0x01f4: cmd_cond_mode_queryful */
299 0x98060498,
300 0x56c40705,
301 0x0855b6ff,
302 0xfd1844b6,
303 0x47fe0545,
304 0x000bfe00,
305 0x008057f1,
306 0xfa0253f0,
307 0x34b00565,
308 0x131bf402,
309 0x049803f8,
310 0x0044b021,
311 0x800b4cf0,
312 0x00f80804,
313/* 0x022c: cmd_cond_mode_double */
314 0xb61060b6,
315 0x65fa1050,
316 0x9803f805,
317 0x06982005,
318 0x0456b824,
319 0x980b4cf0,
320 0x06982105,
321 0x0456b825,
322 0xfd0b5cf0,
323 0x34b00445,
324 0x0b5cf003,
325 0x800645fd,
326 0x00f80804,
327/* 0x0260: cmd_wrcache_flush */
328 0xf10132f4,
329 0xbd220027,
330 0x0133f034,
331 0xf80023d0,
332/* 0x0271: crypt_cmd_mode */
333 0x0131f400,
334 0xb00225f0,
335 0x18f40f34,
336 0x0132f409,
337/* 0x0283: crypt_cmd_mode_return */
338 0xf80d0380,
339/* 0x0285: crypt_cmd_length */
340 0x0034b000,
341 0xf4fb0bf4,
342 0x47f0033c,
343 0x0743f040,
344 0xf00604fa,
345 0x43f05047,
346 0x0604fa06,
347 0x3cf503f8,
348 0x47f1c407,
349 0x4bfe2100,
350 0x09049800,
351 0x950a0598,
352 0x44b60858,
353 0x0548fd18,
354 0x98ff55c4,
355 0x07980b06,
356 0x0878950c,
357 0xfd1864b6,
358 0x77c40568,
359 0x0d0898ff,
360 0x580284b6,
361 0x95f9a889,
362 0xf9a98958,
363 0x013cf495,
364 0x3cf403f8,
365 0xf803f861,
366 0x18489503,
367 0xbb084994,
368 0x81b60095,
369 0x09088000,
370 0x950a0980,
371 0x69941868,
372 0x0097bb08,
373 0x800081b6,
374 0x09800b08,
375 0x023cf40c,
376 0xf05047f0,
377 0x04fa0643,
378 0xf803f805,
379/* 0x0321: crypt_copy_prep */
380 0x203cf500,
381 0x003cf594,
382 0x003cf588,
383/* 0x032f: crypt_store_prep */
384 0xf500f88c,
385 0xf594103c,
386 0xf88c063c,
387/* 0x0339: crypt_ecb_e_prep */
388 0x303cf500,
389 0x003cf594,
390 0x003cf588,
391 0x003cf5d0,
392/* 0x034b: crypt_ecb_d_prep */
393 0xf500f88c,
394 0xf5c8773c,
395 0xf594303c,
396 0xf588003c,
397 0xf5d4003c,
398 0xf88c003c,
399/* 0x0361: crypt_cbc_e_prep */
400 0x403cf500,
401 0x003cf594,
402 0x063cf588,
403 0x663cf5ac,
404 0x063cf5d0,
405/* 0x0377: crypt_cbc_d_prep */
406 0xf500f88c,
407 0xf5c8773c,
408 0xf594503c,
409 0xf584623c,
410 0xf588063c,
411 0xf5d4603c,
412 0xf5ac203c,
413 0xf88c003c,
414/* 0x0395: crypt_pcbc_e_prep */
415 0x503cf500,
416 0x003cf594,
417 0x063cf588,
418 0x663cf5ac,
419 0x063cf5d0,
420 0x063cf58c,
421/* 0x03af: crypt_pcbc_d_prep */
422 0xf500f8ac,
423 0xf5c8773c,
424 0xf594503c,
425 0xf588003c,
426 0xf5d4013c,
427 0xf5ac163c,
428 0xf58c063c,
429 0xf8ac063c,
430/* 0x03cd: crypt_cfb_e_prep */
431 0x403cf500,
432 0x663cf594,
433 0x003cf5d0,
434 0x063cf588,
435 0x063cf5ac,
436/* 0x03e3: crypt_cfb_d_prep */
437 0xf500f88c,
438 0xf594403c,
439 0xf5d0603c,
440 0xf588063c,
441 0xf5ac603c,
442 0xf88c003c,
443/* 0x03f9: crypt_ofb_prep */
444 0x403cf500,
445 0x663cf594,
446 0x003cf5d0,
447 0x603cf588,
448 0x003cf5ac,
449/* 0x040f: crypt_ctr_prep */
450 0xf500f88c,
451 0xf594503c,
452 0xf5d0613c,
453 0xf5b0163c,
454 0xf588003c,
455 0xf5ac103c,
456 0xf88c003c,
457/* 0x0429: crypt_cbc_mac_prep */
458 0x303cf500,
459 0x003cf594,
460 0x063cf588,
461 0x663cf5ac,
462/* 0x043b: crypt_cmac_finish_complete_prep */
463 0xf500f8d0,
464 0xf594703c,
465 0xf588003c,
466 0xf5ac063c,
467 0xf5ac003c,
468 0xf5d0003c,
469 0xf5bc003c,
470 0xf5ac063c,
471 0xf8d0663c,
472/* 0x045d: crypt_cmac_finish_partial_prep */
473 0x803cf500,
474 0x003cf594,
475 0x063cf588,
476 0x003cf5ac,
477 0x003cf5ac,
478 0x003cf5d0,
479 0x003cf5bc,
480 0x063cf5bc,
481 0x663cf5ac,
482/* 0x0483: crypt_do_in */
483 0xbb00f8d0,
484 0x47fe0035,
485 0x8097f100,
486 0x0293f000,
487/* 0x0490: crypt_do_in_loop */
488 0xf80559fa,
489 0x223cf403,
490 0xf50609fa,
491 0xf898103c,
492 0x1050b603,
493 0xf40453b8,
494 0x3cf4e91b,
495 0xf803f801,
496/* 0x04b1: crypt_do_out */
497 0x0037bb00,
498 0xf10067fe,
499 0xf0008097,
500/* 0x04be: crypt_do_out_loop */
501 0x3cf50293,
502 0x3cf49810,
503 0x0579fa61,
504 0xf40679fa,
505 0x03f8013c,
506 0xb81070b6,
507 0x1bf40473,
508/* 0x04db: crypt_do_inout */
509 0xbb00f8e8,
510 0x97f10035,
511 0x93f00080,
512/* 0x04e5: crypt_do_inout_loop */
513 0x0047fe02,
514 0xf80559fa,
515 0x213cf403,
516 0xf50609fa,
517 0xf498103c,
518 0x67fe613c,
519 0x0579fa00,
520 0xf40679fa,
521 0x03f8013c,
522 0xb61050b6,
523 0x53b81070,
524 0xd41bf404,
525 0x000000f8,
526 0x00000000,
527 0x00000000,
528 0x00000000,
529 0x00000000,
530 0x00000000,
531 0x00000000,
532 0x00000000,
533 0x00000000,
534 0x00000000,
535 0x00000000,
536 0x00000000,
537 0x00000000,
538 0x00000000,
539 0x00000000,
540 0x00000000,
541 0x00000000,
542 0x00000000,
543 0x00000000,
544 0x00000000,
545 0x00000000,
546 0x00000000,
547 0x00000000,
548 0x00000000,
549 0x00000000,
550 0x00000000,
551 0x00000000,
552 0x00000000,
553 0x00000000,
554 0x00000000,
555 0x00000000,
556 0x00000000,
557 0x00000000,
558 0x00000000,
559 0x00000000,
560 0x00000000,
561 0x00000000,
562 0x00000000,
563 0x00000000,
564 0x00000000,
565 0x00000000,
566 0x00000000,
567 0x00000000,
568 0x00000000,
569 0x00000000,
570 0x00000000,
571 0x00000000,
572 0x00000000,
573 0x00000000,
574 0x00000000,
575 0x00000000,
576 0x00000000,
577 0x00000000,
578 0x00000000,
579 0x00000000,
580 0x00000000,
581 0x00000000,
582 0x00000000,
583 0x00000000,
584};
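For readers decoding the tables at 0x00a0 (common_cmd_dtable) and 0x00e0 (engine_cmd_dtable) in the data segment above: the dtable_cmd handler in the fuc source implies an 8-byte entry per method. The C below is a reconstruction for illustration only, not a structure shipped by the driver:

    #include <stdint.h>

    /* per-method dispatch entry, as implied by dtable_cmd */
    struct dtable_entry {
        uint16_t target;        /* context offset, or handler code address   */
        uint16_t flags;         /* 2: store param at D[target] (cmd_setctx)  */
                                /* 1: call handler unless cond-rendering off */
                                /* 0: always call handler                    */
        uint32_t invalid_mask;  /* param bits that must be clear, else the   */
                                /* INVALID_BITFIELD error is raised          */
    };

    /* illustrative dispatch, mirroring the assembly control flow */
    static void dispatch(const struct dtable_entry *e, uint32_t param,
                         uint32_t *ctx, uint32_t cond_off,
                         void (*call)(uint16_t addr, uint32_t param))
    {
        if (param & e->invalid_mask)
            return;                         /* -> invalid_bitfield */
        if (e->flags == 2)
            ctx[e->target / 4] = param;     /* cmd_setctx path     */
        else if ((e->flags & cond_off) != 1)
            call(e->target, param);         /* handler path        */
    }

So, for example, the entry 0x00010285 / 0xf000000f pairs crypt_cmd_length (0x0285) with flag 1, which is why data transfers are skipped while conditional rendering is off.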
diff --git a/drivers/gpu/drm/nouveau/nva3_copy.c b/drivers/gpu/drm/nouveau/nva3_copy.c
index 8f356d58e409..0387dc7f4f42 100644
--- a/drivers/gpu/drm/nouveau/nva3_copy.c
+++ b/drivers/gpu/drm/nouveau/nva3_copy.c
@@ -79,29 +79,13 @@ static void
79nva3_copy_context_del(struct nouveau_channel *chan, int engine) 79nva3_copy_context_del(struct nouveau_channel *chan, int engine)
80{ 80{
81 struct nouveau_gpuobj *ctx = chan->engctx[engine]; 81 struct nouveau_gpuobj *ctx = chan->engctx[engine];
82 struct drm_device *dev = chan->dev; 82 int i;
83 u32 inst;
84
85 inst = (chan->ramin->vinst >> 12);
86 inst |= 0x40000000;
87
88 /* disable fifo access */
89 nv_wr32(dev, 0x104048, 0x00000000);
90 /* mark channel as unloaded if it's currently active */
91 if (nv_rd32(dev, 0x104050) == inst)
92 nv_mask(dev, 0x104050, 0x40000000, 0x00000000);
93 /* mark next channel as invalid if it's about to be loaded */
94 if (nv_rd32(dev, 0x104054) == inst)
95 nv_mask(dev, 0x104054, 0x40000000, 0x00000000);
96 /* restore fifo access */
97 nv_wr32(dev, 0x104048, 0x00000003);
98 83
99 for (inst = 0xc0; inst <= 0xd4; inst += 4) 84 for (i = 0xc0; i <= 0xd4; i += 4)
100 nv_wo32(chan->ramin, inst, 0x00000000); 85 nv_wo32(chan->ramin, i, 0x00000000);
101
102 nouveau_gpuobj_ref(NULL, &ctx);
103 86
104 atomic_dec(&chan->vm->engref[engine]); 87 atomic_dec(&chan->vm->engref[engine]);
88 nouveau_gpuobj_ref(NULL, &ctx);
105 chan->engctx[engine] = ctx; 89 chan->engctx[engine] = ctx;
106} 90}
107 91
@@ -143,13 +127,6 @@ static int
143nva3_copy_fini(struct drm_device *dev, int engine, bool suspend) 127nva3_copy_fini(struct drm_device *dev, int engine, bool suspend)
144{ 128{
145 nv_mask(dev, 0x104048, 0x00000003, 0x00000000); 129 nv_mask(dev, 0x104048, 0x00000003, 0x00000000);
146
147 /* trigger fuc context unload */
148 nv_wait(dev, 0x104008, 0x0000000c, 0x00000000);
149 nv_mask(dev, 0x104054, 0x40000000, 0x00000000);
150 nv_wr32(dev, 0x104000, 0x00000008);
151 nv_wait(dev, 0x104008, 0x00000008, 0x00000000);
152
153 nv_wr32(dev, 0x104014, 0xffffffff); 130 nv_wr32(dev, 0x104014, 0xffffffff);
154 return 0; 131 return 0;
155} 132}
diff --git a/drivers/gpu/drm/nouveau/nva3_pm.c b/drivers/gpu/drm/nouveau/nva3_pm.c
index 9e636e6ef6d7..798829353fb6 100644
--- a/drivers/gpu/drm/nouveau/nva3_pm.c
+++ b/drivers/gpu/drm/nouveau/nva3_pm.c
@@ -98,7 +98,9 @@ read_pll(struct drm_device *dev, int clk, u32 pll)
98 sclk = read_clk(dev, 0x10 + clk, false); 98 sclk = read_clk(dev, 0x10 + clk, false);
99 } 99 }
100 100
101 return sclk * N / (M * P); 101 if (M * P)
102 return sclk * N / (M * P);
103 return 0;
102} 104}
103 105
104struct creg { 106struct creg {
@@ -182,23 +184,26 @@ prog_pll(struct drm_device *dev, int clk, u32 pll, struct creg *reg)
182 const u32 src1 = 0x004160 + (clk * 4); 184 const u32 src1 = 0x004160 + (clk * 4);
183 const u32 ctrl = pll + 0; 185 const u32 ctrl = pll + 0;
184 const u32 coef = pll + 4; 186 const u32 coef = pll + 4;
185 u32 cntl;
186 187
187 if (!reg->clk && !reg->pll) { 188 if (!reg->clk && !reg->pll) {
188 NV_DEBUG(dev, "no clock for %02x\n", clk); 189 NV_DEBUG(dev, "no clock for %02x\n", clk);
189 return; 190 return;
190 } 191 }
191 192
192 cntl = nv_rd32(dev, ctrl) & 0xfffffff2;
193 if (reg->pll) { 193 if (reg->pll) {
194 nv_mask(dev, src0, 0x00000101, 0x00000101); 194 nv_mask(dev, src0, 0x00000101, 0x00000101);
195 nv_wr32(dev, coef, reg->pll); 195 nv_wr32(dev, coef, reg->pll);
196 nv_wr32(dev, ctrl, cntl | 0x00000015); 196 nv_mask(dev, ctrl, 0x00000015, 0x00000015);
197 nv_mask(dev, ctrl, 0x00000010, 0x00000000);
198 nv_wait(dev, ctrl, 0x00020000, 0x00020000);
199 nv_mask(dev, ctrl, 0x00000010, 0x00000010);
200 nv_mask(dev, ctrl, 0x00000008, 0x00000000);
197 nv_mask(dev, src1, 0x00000100, 0x00000000); 201 nv_mask(dev, src1, 0x00000100, 0x00000000);
198 nv_mask(dev, src1, 0x00000001, 0x00000000); 202 nv_mask(dev, src1, 0x00000001, 0x00000000);
199 } else { 203 } else {
200 nv_mask(dev, src1, 0x003f3141, 0x00000101 | reg->clk); 204 nv_mask(dev, src1, 0x003f3141, 0x00000101 | reg->clk);
201 nv_wr32(dev, ctrl, cntl | 0x0000001d); 205 nv_mask(dev, ctrl, 0x00000018, 0x00000018);
206 udelay(20);
202 nv_mask(dev, ctrl, 0x00000001, 0x00000000); 207 nv_mask(dev, ctrl, 0x00000001, 0x00000000);
203 nv_mask(dev, src0, 0x00000100, 0x00000000); 208 nv_mask(dev, src0, 0x00000100, 0x00000000);
204 nv_mask(dev, src0, 0x00000001, 0x00000000); 209 nv_mask(dev, src0, 0x00000001, 0x00000000);
@@ -230,17 +235,28 @@ nva3_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
230} 235}
231 236
232struct nva3_pm_state { 237struct nva3_pm_state {
238 struct nouveau_pm_level *perflvl;
239
233 struct creg nclk; 240 struct creg nclk;
234 struct creg sclk; 241 struct creg sclk;
235 struct creg mclk;
236 struct creg vdec; 242 struct creg vdec;
237 struct creg unka0; 243 struct creg unka0;
244
245 struct creg mclk;
246 u8 *rammap;
247 u8 rammap_ver;
248 u8 rammap_len;
249 u8 *ramcfg;
250 u8 ramcfg_len;
251 u32 r004018;
252 u32 r100760;
238}; 253};
239 254
240void * 255void *
241nva3_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl) 256nva3_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
242{ 257{
243 struct nva3_pm_state *info; 258 struct nva3_pm_state *info;
259 u8 ramcfg_cnt;
244 int ret; 260 int ret;
245 261
246 info = kzalloc(sizeof(*info), GFP_KERNEL); 262 info = kzalloc(sizeof(*info), GFP_KERNEL);
@@ -267,6 +283,20 @@ nva3_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
267 if (ret < 0) 283 if (ret < 0)
268 goto out; 284 goto out;
269 285
286 info->rammap = nouveau_perf_rammap(dev, perflvl->memory,
287 &info->rammap_ver,
288 &info->rammap_len,
289 &ramcfg_cnt, &info->ramcfg_len);
290 if (info->rammap_ver != 0x10 || info->rammap_len < 5)
291 info->rammap = NULL;
292
293 info->ramcfg = nouveau_perf_ramcfg(dev, perflvl->memory,
294 &info->rammap_ver,
295 &info->ramcfg_len);
296 if (info->rammap_ver != 0x10)
297 info->ramcfg = NULL;
298
299 info->perflvl = perflvl;
270out: 300out:
271 if (ret < 0) { 301 if (ret < 0) {
272 kfree(info); 302 kfree(info);
@@ -287,6 +317,240 @@ nva3_pm_grcp_idle(void *data)
287 return false; 317 return false;
288} 318}
289 319
320static void
321mclk_precharge(struct nouveau_mem_exec_func *exec)
322{
323 nv_wr32(exec->dev, 0x1002d4, 0x00000001);
324}
325
326static void
327mclk_refresh(struct nouveau_mem_exec_func *exec)
328{
329 nv_wr32(exec->dev, 0x1002d0, 0x00000001);
330}
331
332static void
333mclk_refresh_auto(struct nouveau_mem_exec_func *exec, bool enable)
334{
335 nv_wr32(exec->dev, 0x100210, enable ? 0x80000000 : 0x00000000);
336}
337
338static void
339mclk_refresh_self(struct nouveau_mem_exec_func *exec, bool enable)
340{
341 nv_wr32(exec->dev, 0x1002dc, enable ? 0x00000001 : 0x00000000);
342}
343
344static void
345mclk_wait(struct nouveau_mem_exec_func *exec, u32 nsec)
346{
347 volatile u32 post = nv_rd32(exec->dev, 0); (void)post;
348 udelay((nsec + 500) / 1000);
349}
350
351static u32
352mclk_mrg(struct nouveau_mem_exec_func *exec, int mr)
353{
354 if (mr <= 1)
355 return nv_rd32(exec->dev, 0x1002c0 + ((mr - 0) * 4));
356 if (mr <= 3)
357 return nv_rd32(exec->dev, 0x1002e0 + ((mr - 2) * 4));
358 return 0;
359}
360
361static void
362mclk_mrs(struct nouveau_mem_exec_func *exec, int mr, u32 data)
363{
364 struct drm_nouveau_private *dev_priv = exec->dev->dev_private;
365
366 if (mr <= 1) {
367 if (dev_priv->vram_rank_B)
368 nv_wr32(exec->dev, 0x1002c8 + ((mr - 0) * 4), data);
369 nv_wr32(exec->dev, 0x1002c0 + ((mr - 0) * 4), data);
370 } else
371 if (mr <= 3) {
372 if (dev_priv->vram_rank_B)
373 nv_wr32(exec->dev, 0x1002e8 + ((mr - 2) * 4), data);
374 nv_wr32(exec->dev, 0x1002e0 + ((mr - 2) * 4), data);
375 }
376}
377
378static void
379mclk_clock_set(struct nouveau_mem_exec_func *exec)
380{
381 struct drm_device *dev = exec->dev;
382 struct nva3_pm_state *info = exec->priv;
383 u32 ctrl;
384
385 ctrl = nv_rd32(dev, 0x004000);
386 if (!(ctrl & 0x00000008) && info->mclk.pll) {
387 nv_wr32(dev, 0x004000, (ctrl |= 0x00000008));
388 nv_mask(dev, 0x1110e0, 0x00088000, 0x00088000);
389 nv_wr32(dev, 0x004018, 0x00001000);
390 nv_wr32(dev, 0x004000, (ctrl &= ~0x00000001));
391 nv_wr32(dev, 0x004004, info->mclk.pll);
392 nv_wr32(dev, 0x004000, (ctrl |= 0x00000001));
393 udelay(64);
394 nv_wr32(dev, 0x004018, 0x00005000 | info->r004018);
395 udelay(20);
396 } else
397 if (!info->mclk.pll) {
398 nv_mask(dev, 0x004168, 0x003f3040, info->mclk.clk);
399 nv_wr32(dev, 0x004000, (ctrl |= 0x00000008));
400 nv_mask(dev, 0x1110e0, 0x00088000, 0x00088000);
401 nv_wr32(dev, 0x004018, 0x0000d000 | info->r004018);
402 }
403
404 if (info->rammap) {
405 if (info->ramcfg && (info->rammap[4] & 0x08)) {
406 u32 unk5a0 = (ROM16(info->ramcfg[5]) << 8) |
407 info->ramcfg[5];
408 u32 unk5a4 = ROM16(info->ramcfg[7]);
409 u32 unk804 = (info->ramcfg[9] & 0xf0) << 16 |
410 (info->ramcfg[3] & 0x0f) << 16 |
411 (info->ramcfg[9] & 0x0f) |
412 0x80000000;
413 nv_wr32(dev, 0x1005a0, unk5a0);
414 nv_wr32(dev, 0x1005a4, unk5a4);
415 nv_wr32(dev, 0x10f804, unk804);
416 nv_mask(dev, 0x10053c, 0x00001000, 0x00000000);
417 } else {
418 nv_mask(dev, 0x10053c, 0x00001000, 0x00001000);
419 nv_mask(dev, 0x10f804, 0x80000000, 0x00000000);
420 nv_mask(dev, 0x100760, 0x22222222, info->r100760);
421 nv_mask(dev, 0x1007a0, 0x22222222, info->r100760);
422 nv_mask(dev, 0x1007e0, 0x22222222, info->r100760);
423 }
424 }
425
426 if (info->mclk.pll) {
427 nv_mask(dev, 0x1110e0, 0x00088000, 0x00011000);
428 nv_wr32(dev, 0x004000, (ctrl &= ~0x00000008));
429 }
430}
431
432static void
433mclk_timing_set(struct nouveau_mem_exec_func *exec)
434{
435 struct drm_device *dev = exec->dev;
436 struct nva3_pm_state *info = exec->priv;
437 struct nouveau_pm_level *perflvl = info->perflvl;
438 int i;
439
440 for (i = 0; i < 9; i++)
441 nv_wr32(dev, 0x100220 + (i * 4), perflvl->timing.reg[i]);
442
443 if (info->ramcfg) {
444 u32 data = (info->ramcfg[2] & 0x08) ? 0x00000000 : 0x00001000;
445 nv_mask(dev, 0x100200, 0x00001000, data);
446 }
447
448 if (info->ramcfg) {
449 u32 unk714 = nv_rd32(dev, 0x100714) & ~0xf0000010;
450 u32 unk718 = nv_rd32(dev, 0x100718) & ~0x00000100;
451 u32 unk71c = nv_rd32(dev, 0x10071c) & ~0x00000100;
452 if ( (info->ramcfg[2] & 0x20))
453 unk714 |= 0xf0000000;
454 if (!(info->ramcfg[2] & 0x04))
455 unk714 |= 0x00000010;
456 nv_wr32(dev, 0x100714, unk714);
457
458 if (info->ramcfg[2] & 0x01)
459 unk71c |= 0x00000100;
460 nv_wr32(dev, 0x10071c, unk71c);
461
462 if (info->ramcfg[2] & 0x02)
463 unk718 |= 0x00000100;
464 nv_wr32(dev, 0x100718, unk718);
465
466 if (info->ramcfg[2] & 0x10)
467 nv_wr32(dev, 0x111100, 0x48000000); /*XXX*/
468 }
469}
470
471static void
472prog_mem(struct drm_device *dev, struct nva3_pm_state *info)
473{
474 struct nouveau_mem_exec_func exec = {
475 .dev = dev,
476 .precharge = mclk_precharge,
477 .refresh = mclk_refresh,
478 .refresh_auto = mclk_refresh_auto,
479 .refresh_self = mclk_refresh_self,
480 .wait = mclk_wait,
481 .mrg = mclk_mrg,
482 .mrs = mclk_mrs,
483 .clock_set = mclk_clock_set,
484 .timing_set = mclk_timing_set,
485 .priv = info
486 };
487 u32 ctrl;
488
489 /* XXX: where the fuck does 750MHz come from? */
490 if (info->perflvl->memory <= 750000) {
491 info->r004018 = 0x10000000;
492 info->r100760 = 0x22222222;
493 }
494
495 ctrl = nv_rd32(dev, 0x004000);
496 if (ctrl & 0x00000008) {
497 if (info->mclk.pll) {
498 nv_mask(dev, 0x004128, 0x00000101, 0x00000101);
499 nv_wr32(dev, 0x004004, info->mclk.pll);
500 nv_wr32(dev, 0x004000, (ctrl |= 0x00000001));
501 nv_wr32(dev, 0x004000, (ctrl &= 0xffffffef));
502 nv_wait(dev, 0x004000, 0x00020000, 0x00020000);
503 nv_wr32(dev, 0x004000, (ctrl |= 0x00000010));
504 nv_wr32(dev, 0x004018, 0x00005000 | info->r004018);
505 nv_wr32(dev, 0x004000, (ctrl |= 0x00000004));
506 }
507 } else {
508 u32 ssel = 0x00000101;
509 if (info->mclk.clk)
510 ssel |= info->mclk.clk;
511 else
512 ssel |= 0x00080000; /* 324MHz, shouldn't matter... */
513		nv_mask(dev, 0x004168, 0x003f3141, ssel);
514 }
515
516 if (info->ramcfg) {
517 if (info->ramcfg[2] & 0x10) {
518 nv_mask(dev, 0x111104, 0x00000600, 0x00000000);
519 } else {
520 nv_mask(dev, 0x111100, 0x40000000, 0x40000000);
521 nv_mask(dev, 0x111104, 0x00000180, 0x00000000);
522 }
523 }
524 if (info->rammap && !(info->rammap[4] & 0x02))
525 nv_mask(dev, 0x100200, 0x00000800, 0x00000000);
526 nv_wr32(dev, 0x611200, 0x00003300);
527	if (info->ramcfg && !(info->ramcfg[2] & 0x10))
528 nv_wr32(dev, 0x111100, 0x4c020000); /*XXX*/
529
530 nouveau_mem_exec(&exec, info->perflvl);
531
532 nv_wr32(dev, 0x611200, 0x00003330);
533 if (info->rammap && (info->rammap[4] & 0x02))
534 nv_mask(dev, 0x100200, 0x00000800, 0x00000800);
535 if (info->ramcfg) {
536 if (info->ramcfg[2] & 0x10) {
537 nv_mask(dev, 0x111104, 0x00000180, 0x00000180);
538 nv_mask(dev, 0x111100, 0x40000000, 0x00000000);
539 } else {
540 nv_mask(dev, 0x111104, 0x00000600, 0x00000600);
541 }
542 }
543
544 if (info->mclk.pll) {
545 nv_mask(dev, 0x004168, 0x00000001, 0x00000000);
546 nv_mask(dev, 0x004168, 0x00000100, 0x00000000);
547 } else {
548 nv_mask(dev, 0x004000, 0x00000001, 0x00000000);
549 nv_mask(dev, 0x004128, 0x00000001, 0x00000000);
550 nv_mask(dev, 0x004128, 0x00000100, 0x00000000);
551 }
552}
553
290int 554int
291nva3_pm_clocks_set(struct drm_device *dev, void *pre_state) 555nva3_pm_clocks_set(struct drm_device *dev, void *pre_state)
292{ 556{
@@ -316,18 +580,8 @@ nva3_pm_clocks_set(struct drm_device *dev, void *pre_state)
316 prog_clk(dev, 0x20, &info->unka0); 580 prog_clk(dev, 0x20, &info->unka0);
317 prog_clk(dev, 0x21, &info->vdec); 581 prog_clk(dev, 0x21, &info->vdec);
318 582
319 if (info->mclk.clk || info->mclk.pll) { 583 if (info->mclk.clk || info->mclk.pll)
320 nv_wr32(dev, 0x100210, 0); 584 prog_mem(dev, info);
321 nv_wr32(dev, 0x1002dc, 1);
322 nv_wr32(dev, 0x004018, 0x00001000);
323 prog_pll(dev, 0x02, 0x004000, &info->mclk);
324 if (nv_rd32(dev, 0x4000) & 0x00000008)
325 nv_wr32(dev, 0x004018, 0x1000d000);
326 else
327 nv_wr32(dev, 0x004018, 0x10005000);
328 nv_wr32(dev, 0x1002dc, 0);
329 nv_wr32(dev, 0x100210, 0x80000000);
330 }
331 585
332 ret = 0; 586 ret = 0;
333 587
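The interesting structural change here is that nva3 memory reclocking now plugs small per-chipset callbacks (precharge, refresh control, MRS access, clock and timing programming) into a shared sequencer via struct nouveau_mem_exec_func. The sequencing itself lives in nouveau_mem_exec(), which is not part of this hunk; the outline below is a hypothetical sketch of the usual DDR reclock dance, only to show why the callbacks are shaped the way they are:

    /* hypothetical outline of nouveau_mem_exec() -- the real ordering
     * lives in nouveau_mem.c and is not shown in this diff */
    static void mem_exec_sketch(struct nouveau_mem_exec_func *f,
                                struct nouveau_pm_level *perflvl)
    {
        f->refresh_auto(f, false);   /* stop auto-refresh           */
        f->precharge(f);             /* close all open banks        */
        f->refresh_self(f, true);    /* park DRAM in self-refresh   */
        f->clock_set(f);             /* switch MPLL / memory clock  */
        f->refresh_self(f, false);   /* wake the DRAM back up       */
        f->timing_set(f);            /* program the new timing set  */
        f->mrs(f, 0, f->mrg(f, 0));  /* rewrite mode registers      */
        f->refresh(f);               /* force a manual refresh      */
        f->refresh_auto(f, true);    /* resume auto-refresh         */
        f->wait(f, 12000);           /* let things settle (in ns)   */
    }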
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
index a495e48197ca..797159e7b7a6 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -43,22 +43,22 @@ nvc0_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
43 return ret; 43 return ret;
44 44
45 if (rect->rop != ROP_COPY) { 45 if (rect->rop != ROP_COPY) {
46 BEGIN_NVC0(chan, 2, NvSub2D, 0x02ac, 1); 46 BEGIN_NVC0(chan, NvSub2D, 0x02ac, 1);
47 OUT_RING (chan, 1); 47 OUT_RING (chan, 1);
48 } 48 }
49 BEGIN_NVC0(chan, 2, NvSub2D, 0x0588, 1); 49 BEGIN_NVC0(chan, NvSub2D, 0x0588, 1);
50 if (info->fix.visual == FB_VISUAL_TRUECOLOR || 50 if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
51 info->fix.visual == FB_VISUAL_DIRECTCOLOR) 51 info->fix.visual == FB_VISUAL_DIRECTCOLOR)
52 OUT_RING (chan, ((uint32_t *)info->pseudo_palette)[rect->color]); 52 OUT_RING (chan, ((uint32_t *)info->pseudo_palette)[rect->color]);
53 else 53 else
54 OUT_RING (chan, rect->color); 54 OUT_RING (chan, rect->color);
55 BEGIN_NVC0(chan, 2, NvSub2D, 0x0600, 4); 55 BEGIN_NVC0(chan, NvSub2D, 0x0600, 4);
56 OUT_RING (chan, rect->dx); 56 OUT_RING (chan, rect->dx);
57 OUT_RING (chan, rect->dy); 57 OUT_RING (chan, rect->dy);
58 OUT_RING (chan, rect->dx + rect->width); 58 OUT_RING (chan, rect->dx + rect->width);
59 OUT_RING (chan, rect->dy + rect->height); 59 OUT_RING (chan, rect->dy + rect->height);
60 if (rect->rop != ROP_COPY) { 60 if (rect->rop != ROP_COPY) {
61 BEGIN_NVC0(chan, 2, NvSub2D, 0x02ac, 1); 61 BEGIN_NVC0(chan, NvSub2D, 0x02ac, 1);
62 OUT_RING (chan, 3); 62 OUT_RING (chan, 3);
63 } 63 }
64 FIRE_RING(chan); 64 FIRE_RING(chan);
@@ -78,14 +78,14 @@ nvc0_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
78 if (ret) 78 if (ret)
79 return ret; 79 return ret;
80 80
81 BEGIN_NVC0(chan, 2, NvSub2D, 0x0110, 1); 81 BEGIN_NVC0(chan, NvSub2D, 0x0110, 1);
82 OUT_RING (chan, 0); 82 OUT_RING (chan, 0);
83 BEGIN_NVC0(chan, 2, NvSub2D, 0x08b0, 4); 83 BEGIN_NVC0(chan, NvSub2D, 0x08b0, 4);
84 OUT_RING (chan, region->dx); 84 OUT_RING (chan, region->dx);
85 OUT_RING (chan, region->dy); 85 OUT_RING (chan, region->dy);
86 OUT_RING (chan, region->width); 86 OUT_RING (chan, region->width);
87 OUT_RING (chan, region->height); 87 OUT_RING (chan, region->height);
88 BEGIN_NVC0(chan, 2, NvSub2D, 0x08d0, 4); 88 BEGIN_NVC0(chan, NvSub2D, 0x08d0, 4);
89 OUT_RING (chan, 0); 89 OUT_RING (chan, 0);
90 OUT_RING (chan, region->sx); 90 OUT_RING (chan, region->sx);
91 OUT_RING (chan, 0); 91 OUT_RING (chan, 0);
@@ -116,7 +116,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
116 width = ALIGN(image->width, 32); 116 width = ALIGN(image->width, 32);
117 dwords = (width * image->height) >> 5; 117 dwords = (width * image->height) >> 5;
118 118
119 BEGIN_NVC0(chan, 2, NvSub2D, 0x0814, 2); 119 BEGIN_NVC0(chan, NvSub2D, 0x0814, 2);
120 if (info->fix.visual == FB_VISUAL_TRUECOLOR || 120 if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
121 info->fix.visual == FB_VISUAL_DIRECTCOLOR) { 121 info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
122 OUT_RING (chan, palette[image->bg_color] | mask); 122 OUT_RING (chan, palette[image->bg_color] | mask);
@@ -125,10 +125,10 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
125 OUT_RING (chan, image->bg_color); 125 OUT_RING (chan, image->bg_color);
126 OUT_RING (chan, image->fg_color); 126 OUT_RING (chan, image->fg_color);
127 } 127 }
128 BEGIN_NVC0(chan, 2, NvSub2D, 0x0838, 2); 128 BEGIN_NVC0(chan, NvSub2D, 0x0838, 2);
129 OUT_RING (chan, image->width); 129 OUT_RING (chan, image->width);
130 OUT_RING (chan, image->height); 130 OUT_RING (chan, image->height);
131 BEGIN_NVC0(chan, 2, NvSub2D, 0x0850, 4); 131 BEGIN_NVC0(chan, NvSub2D, 0x0850, 4);
132 OUT_RING (chan, 0); 132 OUT_RING (chan, 0);
133 OUT_RING (chan, image->dx); 133 OUT_RING (chan, image->dx);
134 OUT_RING (chan, 0); 134 OUT_RING (chan, 0);
@@ -143,7 +143,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
143 143
144 dwords -= push; 144 dwords -= push;
145 145
146 BEGIN_NVC0(chan, 6, NvSub2D, 0x0860, push); 146 BEGIN_NIC0(chan, NvSub2D, 0x0860, push);
147 OUT_RINGp(chan, data, push); 147 OUT_RINGp(chan, data, push);
148 data += push; 148 data += push;
149 } 149 }
@@ -200,47 +200,47 @@ nvc0_fbcon_accel_init(struct fb_info *info)
200 return ret; 200 return ret;
201 } 201 }
202 202
203 BEGIN_NVC0(chan, 2, NvSub2D, 0x0000, 1); 203 BEGIN_NVC0(chan, NvSub2D, 0x0000, 1);
204 OUT_RING (chan, 0x0000902d); 204 OUT_RING (chan, 0x0000902d);
205 BEGIN_NVC0(chan, 2, NvSub2D, 0x0104, 2); 205 BEGIN_NVC0(chan, NvSub2D, 0x0104, 2);
206 OUT_RING (chan, upper_32_bits(chan->notifier_vma.offset)); 206 OUT_RING (chan, upper_32_bits(chan->notifier_vma.offset));
207 OUT_RING (chan, lower_32_bits(chan->notifier_vma.offset)); 207 OUT_RING (chan, lower_32_bits(chan->notifier_vma.offset));
208 BEGIN_NVC0(chan, 2, NvSub2D, 0x0290, 1); 208 BEGIN_NVC0(chan, NvSub2D, 0x0290, 1);
209 OUT_RING (chan, 0); 209 OUT_RING (chan, 0);
210 BEGIN_NVC0(chan, 2, NvSub2D, 0x0888, 1); 210 BEGIN_NVC0(chan, NvSub2D, 0x0888, 1);
211 OUT_RING (chan, 1); 211 OUT_RING (chan, 1);
212 BEGIN_NVC0(chan, 2, NvSub2D, 0x02ac, 1); 212 BEGIN_NVC0(chan, NvSub2D, 0x02ac, 1);
213 OUT_RING (chan, 3); 213 OUT_RING (chan, 3);
214 BEGIN_NVC0(chan, 2, NvSub2D, 0x02a0, 1); 214 BEGIN_NVC0(chan, NvSub2D, 0x02a0, 1);
215 OUT_RING (chan, 0x55); 215 OUT_RING (chan, 0x55);
216 BEGIN_NVC0(chan, 2, NvSub2D, 0x08c0, 4); 216 BEGIN_NVC0(chan, NvSub2D, 0x08c0, 4);
217 OUT_RING (chan, 0); 217 OUT_RING (chan, 0);
218 OUT_RING (chan, 1); 218 OUT_RING (chan, 1);
219 OUT_RING (chan, 0); 219 OUT_RING (chan, 0);
220 OUT_RING (chan, 1); 220 OUT_RING (chan, 1);
221 BEGIN_NVC0(chan, 2, NvSub2D, 0x0580, 2); 221 BEGIN_NVC0(chan, NvSub2D, 0x0580, 2);
222 OUT_RING (chan, 4); 222 OUT_RING (chan, 4);
223 OUT_RING (chan, format); 223 OUT_RING (chan, format);
224 BEGIN_NVC0(chan, 2, NvSub2D, 0x02e8, 2); 224 BEGIN_NVC0(chan, NvSub2D, 0x02e8, 2);
225 OUT_RING (chan, 2); 225 OUT_RING (chan, 2);
226 OUT_RING (chan, 1); 226 OUT_RING (chan, 1);
227 227
228 BEGIN_NVC0(chan, 2, NvSub2D, 0x0804, 1); 228 BEGIN_NVC0(chan, NvSub2D, 0x0804, 1);
229 OUT_RING (chan, format); 229 OUT_RING (chan, format);
230 BEGIN_NVC0(chan, 2, NvSub2D, 0x0800, 1); 230 BEGIN_NVC0(chan, NvSub2D, 0x0800, 1);
231 OUT_RING (chan, 1); 231 OUT_RING (chan, 1);
232 BEGIN_NVC0(chan, 2, NvSub2D, 0x0808, 3); 232 BEGIN_NVC0(chan, NvSub2D, 0x0808, 3);
233 OUT_RING (chan, 0); 233 OUT_RING (chan, 0);
234 OUT_RING (chan, 0); 234 OUT_RING (chan, 0);
235 OUT_RING (chan, 1); 235 OUT_RING (chan, 1);
236 BEGIN_NVC0(chan, 2, NvSub2D, 0x081c, 1); 236 BEGIN_NVC0(chan, NvSub2D, 0x081c, 1);
237 OUT_RING (chan, 1); 237 OUT_RING (chan, 1);
238 BEGIN_NVC0(chan, 2, NvSub2D, 0x0840, 4); 238 BEGIN_NVC0(chan, NvSub2D, 0x0840, 4);
239 OUT_RING (chan, 0); 239 OUT_RING (chan, 0);
240 OUT_RING (chan, 1); 240 OUT_RING (chan, 1);
241 OUT_RING (chan, 0); 241 OUT_RING (chan, 0);
242 OUT_RING (chan, 1); 242 OUT_RING (chan, 1);
243 BEGIN_NVC0(chan, 2, NvSub2D, 0x0200, 10); 243 BEGIN_NVC0(chan, NvSub2D, 0x0200, 10);
244 OUT_RING (chan, format); 244 OUT_RING (chan, format);
245 OUT_RING (chan, 1); 245 OUT_RING (chan, 1);
246 OUT_RING (chan, 0); 246 OUT_RING (chan, 0);
@@ -251,7 +251,7 @@ nvc0_fbcon_accel_init(struct fb_info *info)
251 OUT_RING (chan, info->var.yres_virtual); 251 OUT_RING (chan, info->var.yres_virtual);
252 OUT_RING (chan, upper_32_bits(fb->vma.offset)); 252 OUT_RING (chan, upper_32_bits(fb->vma.offset));
253 OUT_RING (chan, lower_32_bits(fb->vma.offset)); 253 OUT_RING (chan, lower_32_bits(fb->vma.offset));
254 BEGIN_NVC0(chan, 2, NvSub2D, 0x0230, 10); 254 BEGIN_NVC0(chan, NvSub2D, 0x0230, 10);
255 OUT_RING (chan, format); 255 OUT_RING (chan, format);
256 OUT_RING (chan, 1); 256 OUT_RING (chan, 1);
257 OUT_RING (chan, 0); 257 OUT_RING (chan, 0);
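Every hunk in this file is the same mechanical change: BEGIN_NVC0 lost its leading opcode argument, and the one non-incrementing burst (the glyph upload at method 0x0860) moved to the new BEGIN_NIC0 macro. As I read the Fermi pushbuffer format, the two macros reduce to command headers along these lines; this is a sketch of the bitfields, not a copy of the driver's nouveau_dma.h:

    /* sketch of Fermi (nvc0) command headers: mode in the top bits,
     * then word count, subchannel, and method address / 4 */
    #define NVC0_HDR(mode, count, subc, mthd) \
        (((mode) << 29) | ((count) << 16) | ((subc) << 13) | ((mthd) >> 2))

    /* increasing methods: the method address advances per data word */
    #define HDR_INC(count, subc, mthd)    NVC0_HDR(1, (count), (subc), (mthd))

    /* non-incrementing: every data word lands on the same method,
     * which is what the 0x0860 glyph upload wants */
    #define HDR_NONINC(count, subc, mthd) NVC0_HDR(3, (count), (subc), (mthd))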
diff --git a/drivers/gpu/drm/nouveau/nvc0_fence.c b/drivers/gpu/drm/nouveau/nvc0_fence.c
new file mode 100644
index 000000000000..47ab388a606e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_fence.c
@@ -0,0 +1,184 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_dma.h"
28#include "nouveau_fifo.h"
29#include "nouveau_ramht.h"
30#include "nouveau_fence.h"
31
32struct nvc0_fence_priv {
33 struct nouveau_fence_priv base;
34 struct nouveau_bo *bo;
35};
36
37struct nvc0_fence_chan {
38 struct nouveau_fence_chan base;
39 struct nouveau_vma vma;
40};
41
42static int
43nvc0_fence_emit(struct nouveau_fence *fence)
44{
45 struct nouveau_channel *chan = fence->channel;
46 struct nvc0_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE];
47 u64 addr = fctx->vma.offset + chan->id * 16;
48 int ret;
49
50 ret = RING_SPACE(chan, 5);
51 if (ret == 0) {
52 BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
53 OUT_RING (chan, upper_32_bits(addr));
54 OUT_RING (chan, lower_32_bits(addr));
55 OUT_RING (chan, fence->sequence);
56 OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG);
57 FIRE_RING (chan);
58 }
59
60 return ret;
61}
62
63static int
64nvc0_fence_sync(struct nouveau_fence *fence,
65 struct nouveau_channel *prev, struct nouveau_channel *chan)
66{
67 struct nvc0_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE];
68 u64 addr = fctx->vma.offset + prev->id * 16;
69 int ret;
70
71 ret = RING_SPACE(chan, 5);
72 if (ret == 0) {
73 BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
74 OUT_RING (chan, upper_32_bits(addr));
75 OUT_RING (chan, lower_32_bits(addr));
76 OUT_RING (chan, fence->sequence);
77 OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL |
78 NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD);
79 FIRE_RING (chan);
80 }
81
82 return ret;
83}
84
85static u32
86nvc0_fence_read(struct nouveau_channel *chan)
87{
88 struct nvc0_fence_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_FENCE);
89 return nouveau_bo_rd32(priv->bo, chan->id * 16/4);
90}
91
92static void
93nvc0_fence_context_del(struct nouveau_channel *chan, int engine)
94{
95 struct nvc0_fence_priv *priv = nv_engine(chan->dev, engine);
96 struct nvc0_fence_chan *fctx = chan->engctx[engine];
97
98 nouveau_bo_vma_del(priv->bo, &fctx->vma);
99 nouveau_fence_context_del(&fctx->base);
100 chan->engctx[engine] = NULL;
101 kfree(fctx);
102}
103
104static int
105nvc0_fence_context_new(struct nouveau_channel *chan, int engine)
106{
107 struct nvc0_fence_priv *priv = nv_engine(chan->dev, engine);
108 struct nvc0_fence_chan *fctx;
109 int ret;
110
111 fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
112 if (!fctx)
113 return -ENOMEM;
114
115 nouveau_fence_context_new(&fctx->base);
116
117 ret = nouveau_bo_vma_add(priv->bo, chan->vm, &fctx->vma);
118 if (ret)
119 nvc0_fence_context_del(chan, engine);
120
121 nouveau_bo_wr32(priv->bo, chan->id * 16/4, 0x00000000);
122 return ret;
123}
124
125static int
126nvc0_fence_fini(struct drm_device *dev, int engine, bool suspend)
127{
128 return 0;
129}
130
131static int
132nvc0_fence_init(struct drm_device *dev, int engine)
133{
134 return 0;
135}
136
137static void
138nvc0_fence_destroy(struct drm_device *dev, int engine)
139{
140 struct drm_nouveau_private *dev_priv = dev->dev_private;
141 struct nvc0_fence_priv *priv = nv_engine(dev, engine);
142
143 nouveau_bo_unmap(priv->bo);
144 nouveau_bo_ref(NULL, &priv->bo);
145 dev_priv->eng[engine] = NULL;
146 kfree(priv);
147}
148
149int
150nvc0_fence_create(struct drm_device *dev)
151{
152 struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
153 struct drm_nouveau_private *dev_priv = dev->dev_private;
154 struct nvc0_fence_priv *priv;
155 int ret;
156
157 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
158 if (!priv)
159 return -ENOMEM;
160
161 priv->base.engine.destroy = nvc0_fence_destroy;
162 priv->base.engine.init = nvc0_fence_init;
163 priv->base.engine.fini = nvc0_fence_fini;
164 priv->base.engine.context_new = nvc0_fence_context_new;
165 priv->base.engine.context_del = nvc0_fence_context_del;
166 priv->base.emit = nvc0_fence_emit;
167 priv->base.sync = nvc0_fence_sync;
168 priv->base.read = nvc0_fence_read;
169 dev_priv->eng[NVOBJ_ENGINE_FENCE] = &priv->base.engine;
170
171 ret = nouveau_bo_new(dev, 16 * pfifo->channels, 0, TTM_PL_FLAG_VRAM,
172 0, 0, NULL, &priv->bo);
173 if (ret == 0) {
174 ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
175 if (ret == 0)
176 ret = nouveau_bo_map(priv->bo);
177 if (ret)
178 nouveau_bo_ref(NULL, &priv->bo);
179 }
180
181 if (ret)
182 nvc0_fence_destroy(dev, NVOBJ_ENGINE_FENCE);
183 return ret;
184}
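The new fence engine carves one shared VRAM buffer into a 16-byte slot per channel: emit makes a channel write its own sequence number through a SEMAPHORE WRITE_LONG, sync makes the waiting channel's pushbuffer ACQUIRE_GEQUAL on the emitter's slot, and the CPU retires fences by reading the slot directly. The slot addressing, pulled out for clarity (the helper name is illustrative):

    #include <stdint.h>

    /* slot address for channel `chid`, matching the
     * `fctx->vma.offset + chan->id * 16` computations above */
    static inline uint64_t fence_slot(uint64_t bo_vma_offset, int chid)
    {
        return bo_vma_offset + (uint64_t)chid * 16;
    }

    /*
     * emit(A, seq):  WRITE_LONG seq       -> fence_slot(vma, A)
     * sync(B on A):  ACQUIRE_GEQUAL seq   at fence_slot(vma, A),
     *                ORed with YIELD so B's PFIFO slice isn't hogged
     * read(A):       CPU reads word A*16/4 of the bo to retire fences
     */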
diff --git a/drivers/gpu/drm/nouveau/nvc0_fifo.c b/drivers/gpu/drm/nouveau/nvc0_fifo.c
index 50d68a7a1379..7d85553d518c 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fifo.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fifo.c
@@ -26,10 +26,12 @@
26 26
27#include "nouveau_drv.h" 27#include "nouveau_drv.h"
28#include "nouveau_mm.h" 28#include "nouveau_mm.h"
29#include "nouveau_fifo.h"
29 30
30static void nvc0_fifo_isr(struct drm_device *); 31static void nvc0_fifo_isr(struct drm_device *);
31 32
32struct nvc0_fifo_priv { 33struct nvc0_fifo_priv {
34 struct nouveau_fifo_priv base;
33 struct nouveau_gpuobj *playlist[2]; 35 struct nouveau_gpuobj *playlist[2];
34 int cur_playlist; 36 int cur_playlist;
35 struct nouveau_vma user_vma; 37 struct nouveau_vma user_vma;
@@ -37,8 +39,8 @@ struct nvc0_fifo_priv {
37}; 39};
38 40
39struct nvc0_fifo_chan { 41struct nvc0_fifo_chan {
42 struct nouveau_fifo_chan base;
40 struct nouveau_gpuobj *user; 43 struct nouveau_gpuobj *user;
41 struct nouveau_gpuobj *ramfc;
42}; 44};
43 45
44static void 46static void
@@ -46,8 +48,7 @@ nvc0_fifo_playlist_update(struct drm_device *dev)
46{ 48{
47 struct drm_nouveau_private *dev_priv = dev->dev_private; 49 struct drm_nouveau_private *dev_priv = dev->dev_private;
48 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; 50 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
49 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; 51 struct nvc0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
50 struct nvc0_fifo_priv *priv = pfifo->priv;
51 struct nouveau_gpuobj *cur; 52 struct nouveau_gpuobj *cur;
52 int i, p; 53 int i, p;
53 54
@@ -69,59 +70,20 @@ nvc0_fifo_playlist_update(struct drm_device *dev)
69 NV_ERROR(dev, "PFIFO - playlist update failed\n"); 70 NV_ERROR(dev, "PFIFO - playlist update failed\n");
70} 71}
71 72
72void 73static int
73nvc0_fifo_disable(struct drm_device *dev) 74nvc0_fifo_context_new(struct nouveau_channel *chan, int engine)
74{
75}
76
77void
78nvc0_fifo_enable(struct drm_device *dev)
79{
80}
81
82bool
83nvc0_fifo_reassign(struct drm_device *dev, bool enable)
84{
85 return false;
86}
87
88bool
89nvc0_fifo_cache_pull(struct drm_device *dev, bool enable)
90{
91 return false;
92}
93
94int
95nvc0_fifo_channel_id(struct drm_device *dev)
96{
97 return 127;
98}
99
100int
101nvc0_fifo_create_context(struct nouveau_channel *chan)
102{ 75{
103 struct drm_device *dev = chan->dev; 76 struct drm_device *dev = chan->dev;
104 struct drm_nouveau_private *dev_priv = dev->dev_private; 77 struct drm_nouveau_private *dev_priv = dev->dev_private;
105 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; 78 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
106 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; 79 struct nvc0_fifo_priv *priv = nv_engine(dev, engine);
107 struct nvc0_fifo_priv *priv = pfifo->priv; 80 struct nvc0_fifo_chan *fctx;
108 struct nvc0_fifo_chan *fifoch;
109 u64 ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4; 81 u64 ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4;
110 int ret; 82 int ret, i;
111 83
112 chan->fifo_priv = kzalloc(sizeof(*fifoch), GFP_KERNEL); 84 fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
113 if (!chan->fifo_priv) 85 if (!fctx)
114 return -ENOMEM; 86 return -ENOMEM;
115 fifoch = chan->fifo_priv;
116
117 /* allocate vram for control regs, map into polling area */
118 ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000,
119 NVOBJ_FLAG_ZERO_ALLOC, &fifoch->user);
120 if (ret)
121 goto error;
122
123 nouveau_vm_map_at(&priv->user_vma, chan->id * 0x1000,
124 *(struct nouveau_mem **)fifoch->user->node);
125 87
126 chan->user = ioremap_wc(pci_resource_start(dev->pdev, 1) + 88 chan->user = ioremap_wc(pci_resource_start(dev->pdev, 1) +
127 priv->user_vma.offset + (chan->id * 0x1000), 89 priv->user_vma.offset + (chan->id * 0x1000),
@@ -131,176 +93,77 @@ nvc0_fifo_create_context(struct nouveau_channel *chan)
131 goto error; 93 goto error;
132 } 94 }
133 95
134 /* ramfc */ 96 /* allocate vram for control regs, map into polling area */
135 ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst, 97 ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000,
136 chan->ramin->vinst, 0x100, 98 NVOBJ_FLAG_ZERO_ALLOC, &fctx->user);
137 NVOBJ_FLAG_ZERO_ALLOC, &fifoch->ramfc);
138 if (ret) 99 if (ret)
139 goto error; 100 goto error;
140 101
141 nv_wo32(fifoch->ramfc, 0x08, lower_32_bits(fifoch->user->vinst)); 102 nouveau_vm_map_at(&priv->user_vma, chan->id * 0x1000,
142 nv_wo32(fifoch->ramfc, 0x0c, upper_32_bits(fifoch->user->vinst)); 103 *(struct nouveau_mem **)fctx->user->node);
143 nv_wo32(fifoch->ramfc, 0x10, 0x0000face); 104
144 nv_wo32(fifoch->ramfc, 0x30, 0xfffff902); 105 for (i = 0; i < 0x100; i += 4)
145 nv_wo32(fifoch->ramfc, 0x48, lower_32_bits(ib_virt)); 106 nv_wo32(chan->ramin, i, 0x00000000);
146 nv_wo32(fifoch->ramfc, 0x4c, drm_order(chan->dma.ib_max + 1) << 16 | 107 nv_wo32(chan->ramin, 0x08, lower_32_bits(fctx->user->vinst));
108 nv_wo32(chan->ramin, 0x0c, upper_32_bits(fctx->user->vinst));
109 nv_wo32(chan->ramin, 0x10, 0x0000face);
110 nv_wo32(chan->ramin, 0x30, 0xfffff902);
111 nv_wo32(chan->ramin, 0x48, lower_32_bits(ib_virt));
112 nv_wo32(chan->ramin, 0x4c, drm_order(chan->dma.ib_max + 1) << 16 |
147 upper_32_bits(ib_virt)); 113 upper_32_bits(ib_virt));
148 nv_wo32(fifoch->ramfc, 0x54, 0x00000002); 114 nv_wo32(chan->ramin, 0x54, 0x00000002);
149 nv_wo32(fifoch->ramfc, 0x84, 0x20400000); 115 nv_wo32(chan->ramin, 0x84, 0x20400000);
150 nv_wo32(fifoch->ramfc, 0x94, 0x30000001); 116 nv_wo32(chan->ramin, 0x94, 0x30000001);
151 nv_wo32(fifoch->ramfc, 0x9c, 0x00000100); 117 nv_wo32(chan->ramin, 0x9c, 0x00000100);
152 nv_wo32(fifoch->ramfc, 0xa4, 0x1f1f1f1f); 118 nv_wo32(chan->ramin, 0xa4, 0x1f1f1f1f);
153 nv_wo32(fifoch->ramfc, 0xa8, 0x1f1f1f1f); 119 nv_wo32(chan->ramin, 0xa8, 0x1f1f1f1f);
154 nv_wo32(fifoch->ramfc, 0xac, 0x0000001f); 120 nv_wo32(chan->ramin, 0xac, 0x0000001f);
155 nv_wo32(fifoch->ramfc, 0xb8, 0xf8000000); 121 nv_wo32(chan->ramin, 0xb8, 0xf8000000);
156 nv_wo32(fifoch->ramfc, 0xf8, 0x10003080); /* 0x002310 */ 122 nv_wo32(chan->ramin, 0xf8, 0x10003080); /* 0x002310 */
157 nv_wo32(fifoch->ramfc, 0xfc, 0x10000010); /* 0x002350 */ 123 nv_wo32(chan->ramin, 0xfc, 0x10000010); /* 0x002350 */
158 pinstmem->flush(dev); 124 pinstmem->flush(dev);
159 125
160 nv_wr32(dev, 0x003000 + (chan->id * 8), 0xc0000000 | 126 nv_wr32(dev, 0x003000 + (chan->id * 8), 0xc0000000 |
161 (chan->ramin->vinst >> 12)); 127 (chan->ramin->vinst >> 12));
162 nv_wr32(dev, 0x003004 + (chan->id * 8), 0x001f0001); 128 nv_wr32(dev, 0x003004 + (chan->id * 8), 0x001f0001);
163 nvc0_fifo_playlist_update(dev); 129 nvc0_fifo_playlist_update(dev);
164 return 0;
165 130
166error: 131error:
167 pfifo->destroy_context(chan); 132 if (ret)
133 priv->base.base.context_del(chan, engine);
168 return ret; 134 return ret;
169} 135}
170 136
171void 137static void
172nvc0_fifo_destroy_context(struct nouveau_channel *chan) 138nvc0_fifo_context_del(struct nouveau_channel *chan, int engine)
173{ 139{
140 struct nvc0_fifo_chan *fctx = chan->engctx[engine];
174 struct drm_device *dev = chan->dev; 141 struct drm_device *dev = chan->dev;
175 struct nvc0_fifo_chan *fifoch;
176 142
177 nv_mask(dev, 0x003004 + (chan->id * 8), 0x00000001, 0x00000000); 143 nv_mask(dev, 0x003004 + (chan->id * 8), 0x00000001, 0x00000000);
178 nv_wr32(dev, 0x002634, chan->id); 144 nv_wr32(dev, 0x002634, chan->id);
179 if (!nv_wait(dev, 0x0002634, 0xffffffff, chan->id)) 145 if (!nv_wait(dev, 0x0002634, 0xffffffff, chan->id))
180 NV_WARN(dev, "0x2634 != chid: 0x%08x\n", nv_rd32(dev, 0x2634)); 146 NV_WARN(dev, "0x2634 != chid: 0x%08x\n", nv_rd32(dev, 0x2634));
181
182 nvc0_fifo_playlist_update(dev); 147 nvc0_fifo_playlist_update(dev);
183
184 nv_wr32(dev, 0x003000 + (chan->id * 8), 0x00000000); 148 nv_wr32(dev, 0x003000 + (chan->id * 8), 0x00000000);
185 149
150 nouveau_gpuobj_ref(NULL, &fctx->user);
186 if (chan->user) { 151 if (chan->user) {
187 iounmap(chan->user); 152 iounmap(chan->user);
188 chan->user = NULL; 153 chan->user = NULL;
189 } 154 }
190 155
191 fifoch = chan->fifo_priv; 156 chan->engctx[engine] = NULL;
192 chan->fifo_priv = NULL; 157 kfree(fctx);
193 if (!fifoch)
194 return;
195
196 nouveau_gpuobj_ref(NULL, &fifoch->ramfc);
197 nouveau_gpuobj_ref(NULL, &fifoch->user);
198 kfree(fifoch);
199}
200
201int
202nvc0_fifo_load_context(struct nouveau_channel *chan)
203{
204 return 0;
205}
206
207int
208nvc0_fifo_unload_context(struct drm_device *dev)
209{
210 int i;
211
212 for (i = 0; i < 128; i++) {
213 if (!(nv_rd32(dev, 0x003004 + (i * 8)) & 1))
214 continue;
215
216 nv_mask(dev, 0x003004 + (i * 8), 0x00000001, 0x00000000);
217 nv_wr32(dev, 0x002634, i);
218 if (!nv_wait(dev, 0x002634, 0xffffffff, i)) {
219 NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n",
220 i, nv_rd32(dev, 0x002634));
221 return -EBUSY;
222 }
223 }
224
225 return 0;
226}
227
228static void
229nvc0_fifo_destroy(struct drm_device *dev)
230{
231 struct drm_nouveau_private *dev_priv = dev->dev_private;
232 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
233 struct nvc0_fifo_priv *priv;
234
235 priv = pfifo->priv;
236 if (!priv)
237 return;
238
239 nouveau_vm_put(&priv->user_vma);
240 nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
241 nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
242 kfree(priv);
243}
244
245void
246nvc0_fifo_takedown(struct drm_device *dev)
247{
248 nv_wr32(dev, 0x002140, 0x00000000);
249 nvc0_fifo_destroy(dev);
250} 158}
251 159
252static int 160static int
253nvc0_fifo_create(struct drm_device *dev) 161nvc0_fifo_init(struct drm_device *dev, int engine)
254{
255 struct drm_nouveau_private *dev_priv = dev->dev_private;
256 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
257 struct nvc0_fifo_priv *priv;
258 int ret;
259
260 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
261 if (!priv)
262 return -ENOMEM;
263 pfifo->priv = priv;
264
265 ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000, 0,
266 &priv->playlist[0]);
267 if (ret)
268 goto error;
269
270 ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000, 0,
271 &priv->playlist[1]);
272 if (ret)
273 goto error;
274
275 ret = nouveau_vm_get(dev_priv->bar1_vm, pfifo->channels * 0x1000,
276 12, NV_MEM_ACCESS_RW, &priv->user_vma);
277 if (ret)
278 goto error;
279
280 nouveau_irq_register(dev, 8, nvc0_fifo_isr);
281 NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
282 return 0;
283
284error:
285 nvc0_fifo_destroy(dev);
286 return ret;
287}
288
289int
290nvc0_fifo_init(struct drm_device *dev)
291{ 162{
292 struct drm_nouveau_private *dev_priv = dev->dev_private; 163 struct drm_nouveau_private *dev_priv = dev->dev_private;
293 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; 164 struct nvc0_fifo_priv *priv = nv_engine(dev, engine);
294 struct nouveau_channel *chan; 165 struct nouveau_channel *chan;
295 struct nvc0_fifo_priv *priv; 166 int i;
296 int ret, i;
297
298 if (!pfifo->priv) {
299 ret = nvc0_fifo_create(dev);
300 if (ret)
301 return ret;
302 }
303 priv = pfifo->priv;
304 167
305 /* reset PFIFO, enable all available PSUBFIFO areas */ 168 /* reset PFIFO, enable all available PSUBFIFO areas */
306 nv_mask(dev, 0x000200, 0x00000100, 0x00000000); 169 nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
@@ -338,7 +201,7 @@ nvc0_fifo_init(struct drm_device *dev)
338 /* restore PFIFO context table */ 201 /* restore PFIFO context table */
339 for (i = 0; i < 128; i++) { 202 for (i = 0; i < 128; i++) {
340 chan = dev_priv->channels.ptr[i]; 203 chan = dev_priv->channels.ptr[i];
341 if (!chan || !chan->fifo_priv) 204 if (!chan || !chan->engctx[engine])
342 continue; 205 continue;
343 206
344 nv_wr32(dev, 0x003000 + (i * 8), 0xc0000000 | 207 nv_wr32(dev, 0x003000 + (i * 8), 0xc0000000 |
@@ -350,6 +213,29 @@ nvc0_fifo_init(struct drm_device *dev)
350 return 0; 213 return 0;
351} 214}
352 215
216static int
217nvc0_fifo_fini(struct drm_device *dev, int engine, bool suspend)
218{
219 int i;
220
221 for (i = 0; i < 128; i++) {
222 if (!(nv_rd32(dev, 0x003004 + (i * 8)) & 1))
223 continue;
224
225 nv_mask(dev, 0x003004 + (i * 8), 0x00000001, 0x00000000);
226 nv_wr32(dev, 0x002634, i);
227 if (!nv_wait(dev, 0x002634, 0xffffffff, i)) {
228 NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n",
229 i, nv_rd32(dev, 0x002634));
230 return -EBUSY;
231 }
232 }
233
234 nv_wr32(dev, 0x002140, 0x00000000);
235 return 0;
236}
237
238
 struct nouveau_enum nvc0_fifo_fault_unit[] = {
 	{ 0x00, "PGRAPH" },
 	{ 0x03, "PEEPHOLE" },
@@ -439,13 +325,14 @@ nvc0_fifo_isr_vm_fault(struct drm_device *dev, int unit)
 static int
 nvc0_fifo_page_flip(struct drm_device *dev, u32 chid)
 {
+	struct nvc0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_channel *chan = NULL;
 	unsigned long flags;
 	int ret = -EINVAL;
 
 	spin_lock_irqsave(&dev_priv->channels.lock, flags);
-	if (likely(chid >= 0 && chid < dev_priv->engine.fifo.channels)) {
+	if (likely(chid >= 0 && chid < priv->base.channels)) {
 		chan = dev_priv->channels.ptr[chid];
 		if (likely(chan))
 			ret = nouveau_finish_page_flip(chan, NULL);
@@ -534,3 +421,56 @@ nvc0_fifo_isr(struct drm_device *dev)
 		nv_wr32(dev, 0x002140, 0);
 	}
 }
424
425static void
426nvc0_fifo_destroy(struct drm_device *dev, int engine)
427{
428 struct nvc0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
429 struct drm_nouveau_private *dev_priv = dev->dev_private;
430
431 nouveau_vm_put(&priv->user_vma);
432 nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
433 nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
434
435 dev_priv->eng[engine] = NULL;
436 kfree(priv);
437}
438
439int
440nvc0_fifo_create(struct drm_device *dev)
441{
442 struct drm_nouveau_private *dev_priv = dev->dev_private;
443 struct nvc0_fifo_priv *priv;
444 int ret;
445
446 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
447 if (!priv)
448 return -ENOMEM;
449
450 priv->base.base.destroy = nvc0_fifo_destroy;
451 priv->base.base.init = nvc0_fifo_init;
452 priv->base.base.fini = nvc0_fifo_fini;
453 priv->base.base.context_new = nvc0_fifo_context_new;
454 priv->base.base.context_del = nvc0_fifo_context_del;
455 priv->base.channels = 128;
456 dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
457
458 ret = nouveau_gpuobj_new(dev, NULL, 4096, 4096, 0, &priv->playlist[0]);
459 if (ret)
460 goto error;
461
462 ret = nouveau_gpuobj_new(dev, NULL, 4096, 4096, 0, &priv->playlist[1]);
463 if (ret)
464 goto error;
465
466 ret = nouveau_vm_get(dev_priv->bar1_vm, priv->base.channels * 0x1000,
467 12, NV_MEM_ACCESS_RW, &priv->user_vma);
468 if (ret)
469 goto error;
470
471 nouveau_irq_register(dev, 8, nvc0_fifo_isr);
472error:
473 if (ret)
474 priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO);
475 return ret;
476}
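
The shape of nvc0_fifo_create() above is the pattern this series converts every engine to: allocate a priv object that embeds the common engine vtable, wire the hooks and register the engine before any resource allocation, then funnel all failures through the engine's own destroy() hook so partially-built state is torn down in one place. A minimal standalone sketch of that idiom (the types below are illustrative stand-ins, not the driver's real ones):

    #include <stdlib.h>

    /* Stand-in for nouveau_exec_engine: only the hook this sketch needs. */
    struct engine {
    	void (*destroy)(struct engine *);
    	int *resource;			/* models playlists, user_vma, ... */
    };

    static void
    engine_destroy(struct engine *eng)
    {
    	free(eng->resource);		/* safe on a partially-built object */
    	free(eng);
    }

    static int
    engine_create(struct engine **peng)
    {
    	struct engine *eng = calloc(1, sizeof(*eng));
    	int ret = 0;

    	if (!eng)
    		return -1;
    	eng->destroy = engine_destroy;	/* hooks wired before any failure */

    	eng->resource = malloc(4096);	/* any later step may fail ... */
    	if (!eng->resource)
    		ret = -1;

    	if (ret) {			/* ... and all unwind through destroy() */
    		eng->destroy(eng);
    		eng = NULL;
    	}
    	*peng = eng;
    	return ret;
    }
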
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c
index 9066102d1159..2a01e6e47724 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.c
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.c
@@ -29,6 +29,7 @@
 
 #include "nouveau_drv.h"
 #include "nouveau_mm.h"
+#include "nouveau_fifo.h"
 
 #include "nvc0_graph.h"
 #include "nvc0_grhub.fuc.h"
@@ -620,13 +621,14 @@ nvc0_graph_init(struct drm_device *dev, int engine)
 int
 nvc0_graph_isr_chid(struct drm_device *dev, u64 inst)
 {
+	struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_channel *chan;
 	unsigned long flags;
 	int i;
 
 	spin_lock_irqsave(&dev_priv->channels.lock, flags);
-	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+	for (i = 0; i < pfifo->channels; i++) {
 		chan = dev_priv->channels.ptr[i];
 		if (!chan || !chan->ramin)
 			continue;
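
This hunk is typical of the series: rather than reaching into the old fixed dev_priv->engine.fifo structure, callers now fetch the FIFO through the generic engine table and read base fields such as channels. Conceptually the lookup is nothing more than an indexed table read, roughly as below (a simplified model of nv_engine(), not the literal helper):

    /* Engines register themselves in a flat table indexed by NVOBJ_ENGINE_*;
     * nv_engine() hands the slot back and callers cast it to their private
     * type (nouveau_fifo_priv, nvc0_graph_priv, ...). */
    static inline void *
    nv_engine_model(struct drm_nouveau_private *dev_priv, int engine)
    {
    	return dev_priv->eng[engine];
    }
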
diff --git a/drivers/gpu/drm/nouveau/nvc0_pm.c b/drivers/gpu/drm/nouveau/nvc0_pm.c
index ce65f81bb871..7c95c44e2887 100644
--- a/drivers/gpu/drm/nouveau/nvc0_pm.c
+++ b/drivers/gpu/drm/nouveau/nvc0_pm.c
@@ -164,7 +164,9 @@ struct nvc0_pm_clock {
 };
 
 struct nvc0_pm_state {
+	struct nouveau_pm_level *perflvl;
 	struct nvc0_pm_clock eng[16];
+	struct nvc0_pm_clock mem;
 };
 
 static u32
@@ -303,6 +305,48 @@ calc_clk(struct drm_device *dev, int clk, struct nvc0_pm_clock *info, u32 freq)
 	return 0;
 }
305 307
308static int
309calc_mem(struct drm_device *dev, struct nvc0_pm_clock *info, u32 freq)
310{
311 struct pll_lims pll;
312 int N, M, P, ret;
313 u32 ctrl;
314
315 /* mclk pll input freq comes from another pll, make sure it's on */
316 ctrl = nv_rd32(dev, 0x132020);
317 if (!(ctrl & 0x00000001)) {
318 /* if not, program it to 567MHz. nfi where this value comes
319 * from - it looks like it's in the pll limits table for
320 * 132000 but the binary driver ignores all my attempts to
321 * change this value.
322 */
323 nv_wr32(dev, 0x137320, 0x00000103);
324 nv_wr32(dev, 0x137330, 0x81200606);
325 nv_wait(dev, 0x132020, 0x00010000, 0x00010000);
326 nv_wr32(dev, 0x132024, 0x0001150f);
327 nv_mask(dev, 0x132020, 0x00000001, 0x00000001);
328 nv_wait(dev, 0x137390, 0x00020000, 0x00020000);
329 nv_mask(dev, 0x132020, 0x00000004, 0x00000004);
330 }
331
332 /* for the moment, until the clock tree is better understood, use
333 * pll mode for all clock frequencies
334 */
335 ret = get_pll_limits(dev, 0x132000, &pll);
336 if (ret == 0) {
337 pll.refclk = read_pll(dev, 0x132020);
338 if (pll.refclk) {
339 ret = nva3_calc_pll(dev, &pll, freq, &N, NULL, &M, &P);
340 if (ret > 0) {
341 info->coef = (P << 16) | (N << 8) | M;
342 return 0;
343 }
344 }
345 }
346
347 return -EINVAL;
348}
349
 void *
 nvc0_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 {
@@ -335,6 +379,15 @@ nvc0_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 		return ERR_PTR(ret);
 	}
 
+	if (perflvl->memory) {
+		ret = calc_mem(dev, &info->mem, perflvl->memory);
+		if (ret) {
+			kfree(info);
+			return ERR_PTR(ret);
+		}
+	}
+
+	info->perflvl = perflvl;
 	return info;
 }
 
@@ -375,12 +428,148 @@ prog_clk(struct drm_device *dev, int clk, struct nvc0_pm_clock *info)
 	nv_mask(dev, 0x137250 + (clk * 0x04), 0x00003f3f, info->mdiv);
 }
 
431static void
432mclk_precharge(struct nouveau_mem_exec_func *exec)
433{
434}
435
436static void
437mclk_refresh(struct nouveau_mem_exec_func *exec)
438{
439}
440
441static void
442mclk_refresh_auto(struct nouveau_mem_exec_func *exec, bool enable)
443{
444 nv_wr32(exec->dev, 0x10f210, enable ? 0x80000000 : 0x00000000);
445}
446
447static void
448mclk_refresh_self(struct nouveau_mem_exec_func *exec, bool enable)
449{
450}
451
452static void
453mclk_wait(struct nouveau_mem_exec_func *exec, u32 nsec)
454{
455 udelay((nsec + 500) / 1000);
456}
457
458static u32
459mclk_mrg(struct nouveau_mem_exec_func *exec, int mr)
460{
461 struct drm_device *dev = exec->dev;
462 struct drm_nouveau_private *dev_priv = dev->dev_private;
463 if (dev_priv->vram_type != NV_MEM_TYPE_GDDR5) {
464 if (mr <= 1)
465 return nv_rd32(dev, 0x10f300 + ((mr - 0) * 4));
466 return nv_rd32(dev, 0x10f320 + ((mr - 2) * 4));
467 } else {
468 if (mr == 0)
469 return nv_rd32(dev, 0x10f300 + (mr * 4));
470 else
471 if (mr <= 7)
472 return nv_rd32(dev, 0x10f32c + (mr * 4));
473 return nv_rd32(dev, 0x10f34c);
474 }
475}
476
477static void
478mclk_mrs(struct nouveau_mem_exec_func *exec, int mr, u32 data)
479{
480 struct drm_device *dev = exec->dev;
481 struct drm_nouveau_private *dev_priv = dev->dev_private;
482 if (dev_priv->vram_type != NV_MEM_TYPE_GDDR5) {
483 if (mr <= 1) {
484 nv_wr32(dev, 0x10f300 + ((mr - 0) * 4), data);
485 if (dev_priv->vram_rank_B)
486 nv_wr32(dev, 0x10f308 + ((mr - 0) * 4), data);
487 } else
488 if (mr <= 3) {
489 nv_wr32(dev, 0x10f320 + ((mr - 2) * 4), data);
490 if (dev_priv->vram_rank_B)
491 nv_wr32(dev, 0x10f328 + ((mr - 2) * 4), data);
492 }
493 } else {
494 if (mr == 0) nv_wr32(dev, 0x10f300 + (mr * 4), data);
495 else if (mr <= 7) nv_wr32(dev, 0x10f32c + (mr * 4), data);
496 else if (mr == 15) nv_wr32(dev, 0x10f34c, data);
497 }
498}
499
500static void
501mclk_clock_set(struct nouveau_mem_exec_func *exec)
502{
503 struct nvc0_pm_state *info = exec->priv;
504 struct drm_device *dev = exec->dev;
505 u32 ctrl = nv_rd32(dev, 0x132000);
506
507 nv_wr32(dev, 0x137360, 0x00000001);
508 nv_wr32(dev, 0x137370, 0x00000000);
509 nv_wr32(dev, 0x137380, 0x00000000);
510 if (ctrl & 0x00000001)
511 nv_wr32(dev, 0x132000, (ctrl &= ~0x00000001));
512
513 nv_wr32(dev, 0x132004, info->mem.coef);
514 nv_wr32(dev, 0x132000, (ctrl |= 0x00000001));
515 nv_wait(dev, 0x137390, 0x00000002, 0x00000002);
516 nv_wr32(dev, 0x132018, 0x00005000);
517
518 nv_wr32(dev, 0x137370, 0x00000001);
519 nv_wr32(dev, 0x137380, 0x00000001);
520 nv_wr32(dev, 0x137360, 0x00000000);
521}
522
523static void
524mclk_timing_set(struct nouveau_mem_exec_func *exec)
525{
526 struct nvc0_pm_state *info = exec->priv;
527 struct nouveau_pm_level *perflvl = info->perflvl;
528 int i;
529
530 for (i = 0; i < 5; i++)
531 nv_wr32(exec->dev, 0x10f290 + (i * 4), perflvl->timing.reg[i]);
532}
533
534static void
535prog_mem(struct drm_device *dev, struct nvc0_pm_state *info)
536{
537 struct drm_nouveau_private *dev_priv = dev->dev_private;
538 struct nouveau_mem_exec_func exec = {
539 .dev = dev,
540 .precharge = mclk_precharge,
541 .refresh = mclk_refresh,
542 .refresh_auto = mclk_refresh_auto,
543 .refresh_self = mclk_refresh_self,
544 .wait = mclk_wait,
545 .mrg = mclk_mrg,
546 .mrs = mclk_mrs,
547 .clock_set = mclk_clock_set,
548 .timing_set = mclk_timing_set,
549 .priv = info
550 };
551
552 if (dev_priv->chipset < 0xd0)
553 nv_wr32(dev, 0x611200, 0x00003300);
554 else
555 nv_wr32(dev, 0x62c000, 0x03030000);
556
557 nouveau_mem_exec(&exec, info->perflvl);
558
559 if (dev_priv->chipset < 0xd0)
560 nv_wr32(dev, 0x611200, 0x00003330);
561 else
562 nv_wr32(dev, 0x62c000, 0x03030300);
563}
 int
 nvc0_pm_clocks_set(struct drm_device *dev, void *data)
 {
 	struct nvc0_pm_state *info = data;
 	int i;
 
+	if (info->mem.coef)
+		prog_mem(dev, info);
+
 	for (i = 0; i < 16; i++) {
 		if (!info->eng[i].freq)
 			continue;
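
Note the split above: apart from making sure the mclk PLL's input clock is running, calc_mem() only computes and stashes a coefficient at pre-time; the disruptive register sequence happens later in prog_mem() through the nouveau_mem_exec() callback table, so a failed calculation aborts the reclock before memory is touched. For reference, the coefficient packs the dividers as (P << 16) | (N << 8) | M; assuming the refclk * N / (M * P) relationship that nva3_calc_pll() works with (an assumption here, the exact feedback layout lives inside that helper), decoding it back to a frequency looks like:

    /* Decode the coefficient packed by calc_mem().  Pure arithmetic, no
     * hardware access; returns 0 for an obviously unprogrammed value. */
    static unsigned int
    mpll_khz_from_coef(unsigned int refclk_khz, unsigned int coef)
    {
    	unsigned int m = (coef >>  0) & 0xff;
    	unsigned int n = (coef >>  8) & 0xff;
    	unsigned int p = (coef >> 16) & 0xff;

    	if (!m || !p)
    		return 0;
    	return refclk_khz * n / (m * p);
    }
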
diff --git a/drivers/gpu/drm/nouveau/nvc0_software.c b/drivers/gpu/drm/nouveau/nvc0_software.c
new file mode 100644
index 000000000000..93e8c164fec6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_software.c
@@ -0,0 +1,153 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26
27#include "nouveau_drv.h"
28#include "nouveau_ramht.h"
29#include "nouveau_software.h"
30
31#include "nv50_display.h"
32
33struct nvc0_software_priv {
34 struct nouveau_software_priv base;
35};
36
37struct nvc0_software_chan {
38 struct nouveau_software_chan base;
39 struct nouveau_vma dispc_vma[4];
40};
41
42u64
43nvc0_software_crtc(struct nouveau_channel *chan, int crtc)
44{
45 struct nvc0_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW];
46 return pch->dispc_vma[crtc].offset;
47}
48
49static int
50nvc0_software_context_new(struct nouveau_channel *chan, int engine)
51{
52 struct drm_device *dev = chan->dev;
53 struct drm_nouveau_private *dev_priv = dev->dev_private;
54 struct nvc0_software_priv *psw = nv_engine(dev, NVOBJ_ENGINE_SW);
55 struct nvc0_software_chan *pch;
56 int ret = 0, i;
57
58 pch = kzalloc(sizeof(*pch), GFP_KERNEL);
59 if (!pch)
60 return -ENOMEM;
61
62 nouveau_software_context_new(&pch->base);
63 chan->engctx[engine] = pch;
64
65 /* map display semaphore buffers into channel's vm */
66 for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) {
67 struct nouveau_bo *bo;
68 if (dev_priv->card_type >= NV_D0)
69 bo = nvd0_display_crtc_sema(dev, i);
70 else
71 bo = nv50_display(dev)->crtc[i].sem.bo;
72
73 ret = nouveau_bo_vma_add(bo, chan->vm, &pch->dispc_vma[i]);
74 }
75
76 if (ret)
77 psw->base.base.context_del(chan, engine);
78 return ret;
79}
80
81static void
82nvc0_software_context_del(struct nouveau_channel *chan, int engine)
83{
84 struct drm_device *dev = chan->dev;
85 struct drm_nouveau_private *dev_priv = dev->dev_private;
86 struct nvc0_software_chan *pch = chan->engctx[engine];
87 int i;
88
89 if (dev_priv->card_type >= NV_D0) {
90 for (i = 0; i < dev->mode_config.num_crtc; i++) {
91 struct nouveau_bo *bo = nvd0_display_crtc_sema(dev, i);
92 nouveau_bo_vma_del(bo, &pch->dispc_vma[i]);
93 }
94 } else
95 if (dev_priv->card_type >= NV_50) {
96 struct nv50_display *disp = nv50_display(dev);
97 for (i = 0; i < dev->mode_config.num_crtc; i++) {
98 struct nv50_display_crtc *dispc = &disp->crtc[i];
99 nouveau_bo_vma_del(dispc->sem.bo, &pch->dispc_vma[i]);
100 }
101 }
102
103 chan->engctx[engine] = NULL;
104 kfree(pch);
105}
106
107static int
108nvc0_software_object_new(struct nouveau_channel *chan, int engine,
109 u32 handle, u16 class)
110{
111 return 0;
112}
113
114static int
115nvc0_software_init(struct drm_device *dev, int engine)
116{
117 return 0;
118}
119
120static int
121nvc0_software_fini(struct drm_device *dev, int engine, bool suspend)
122{
123 return 0;
124}
125
126static void
127nvc0_software_destroy(struct drm_device *dev, int engine)
128{
129 struct nvc0_software_priv *psw = nv_engine(dev, engine);
130
131 NVOBJ_ENGINE_DEL(dev, SW);
132 kfree(psw);
133}
134
135int
136nvc0_software_create(struct drm_device *dev)
137{
138 struct nvc0_software_priv *psw = kzalloc(sizeof(*psw), GFP_KERNEL);
139 if (!psw)
140 return -ENOMEM;
141
142 psw->base.base.destroy = nvc0_software_destroy;
143 psw->base.base.init = nvc0_software_init;
144 psw->base.base.fini = nvc0_software_fini;
145 psw->base.base.context_new = nvc0_software_context_new;
146 psw->base.base.context_del = nvc0_software_context_del;
147 psw->base.base.object_new = nvc0_software_object_new;
148 nouveau_software_create(&psw->base);
149
150 NVOBJ_ENGINE_ADD(dev, SW, &psw->base.base);
151 NVOBJ_CLASS(dev, 0x906e, SW);
152 return 0;
153}
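
The payoff of this new SW engine context shows up in the nvd0_display.c hunk below: display code no longer needs the channel to cache per-CRTC semaphore mappings itself, it asks the software engine via nvc0_software_crtc(). A sketch of the consumer side (only the wrapper function here is invented; the rest are the patch's own symbols):

    /* GPU virtual address a page flip writes its semaphore release to:
     * the per-CRTC VMA mapped in nvc0_software_context_new(), plus the
     * semaphore's offset inside that buffer. */
    static u64
    flip_sema_addr(struct nouveau_channel *chan, int crtc, u32 sem_offset)
    {
    	return nvc0_software_crtc(chan, crtc) + sem_offset;
    }
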
diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
index 0247250939e8..c486d3ce3c2c 100644
--- a/drivers/gpu/drm/nouveau/nvd0_display.c
+++ b/drivers/gpu/drm/nouveau/nvd0_display.c
@@ -33,6 +33,7 @@
 #include "nouveau_crtc.h"
 #include "nouveau_dma.h"
 #include "nouveau_fb.h"
+#include "nouveau_software.h"
 #include "nv50_display.h"
 
 #define EVO_DMA_NR 9
@@ -284,8 +285,6 @@ nvd0_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 	u32 *push;
 	int ret;
 
-	evo_sync(crtc->dev, EVO_MASTER);
-
 	swap_interval <<= 4;
 	if (swap_interval == 0)
 		swap_interval |= 0x100;
@@ -300,15 +299,16 @@ nvd0_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 	if (ret)
 		return ret;
 
-	offset = chan->dispc_vma[nv_crtc->index].offset;
+
+	offset = nvc0_software_crtc(chan, nv_crtc->index);
 	offset += evo->sem.offset;
 
-	BEGIN_NVC0(chan, 2, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+	BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
 	OUT_RING (chan, upper_32_bits(offset));
 	OUT_RING (chan, lower_32_bits(offset));
 	OUT_RING (chan, 0xf00d0000 | evo->sem.value);
 	OUT_RING (chan, 0x1002);
-	BEGIN_NVC0(chan, 2, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+	BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
 	OUT_RING (chan, upper_32_bits(offset));
 	OUT_RING (chan, lower_32_bits(offset ^ 0x10));
 	OUT_RING (chan, 0x74b1e000);
@@ -882,7 +882,7 @@ nvd0_crtc_create(struct drm_device *dev, int index)
 	drm_mode_crtc_set_gamma_size(crtc, 256);
 
 	ret = nouveau_bo_new(dev, 64 * 64 * 4, 0x100, TTM_PL_FLAG_VRAM,
-			     0, 0x0000, &nv_crtc->cursor.nvbo);
+			     0, 0x0000, NULL, &nv_crtc->cursor.nvbo);
 	if (!ret) {
 		ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
 		if (!ret)
@@ -895,7 +895,7 @@ nvd0_crtc_create(struct drm_device *dev, int index)
 		goto out;
 
 	ret = nouveau_bo_new(dev, 8192, 0x100, TTM_PL_FLAG_VRAM,
-			     0, 0x0000, &nv_crtc->lut.nvbo);
+			     0, 0x0000, NULL, &nv_crtc->lut.nvbo);
 	if (!ret) {
 		ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM);
 		if (!ret)
@@ -2030,7 +2030,7 @@ nvd0_display_create(struct drm_device *dev)
 
 	/* small shared memory area we use for notifiers and semaphores */
 	ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
-			     0, 0x0000, &disp->sync);
+			     0, 0x0000, NULL, &disp->sync);
 	if (!ret) {
 		ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM);
 		if (!ret)
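
The only functional change in the last three hunks is the extra NULL passed to nouveau_bo_new(). Given the prime/dma-buf support arriving in this merge, the new parameter is presumably the scatter-gather table used when wrapping an imported buffer, i.e. a signature along these lines (inferred from the call sites, not quoted from the header):

    int nouveau_bo_new(struct drm_device *dev, int size, int align,
    		   u32 flags, u32 tile_mode, u32 tile_flags,
    		   struct sg_table *sg,	/* NULL for ordinary VRAM BOs */
    		   struct nouveau_bo **pnvbo);
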
diff --git a/drivers/gpu/drm/nouveau/nve0_fifo.c b/drivers/gpu/drm/nouveau/nve0_fifo.c
new file mode 100644
index 000000000000..1855ecbd843b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nve0_fifo.c
@@ -0,0 +1,423 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26
27#include "nouveau_drv.h"
28#include "nouveau_mm.h"
29#include "nouveau_fifo.h"
30
31#define NVE0_FIFO_ENGINE_NUM 32
32
33static void nve0_fifo_isr(struct drm_device *);
34
35struct nve0_fifo_engine {
36 struct nouveau_gpuobj *playlist[2];
37 int cur_playlist;
38};
39
40struct nve0_fifo_priv {
41 struct nouveau_fifo_priv base;
42 struct nve0_fifo_engine engine[NVE0_FIFO_ENGINE_NUM];
43 struct {
44 struct nouveau_gpuobj *mem;
45 struct nouveau_vma bar;
46 } user;
47 int spoon_nr;
48};
49
50struct nve0_fifo_chan {
51 struct nouveau_fifo_chan base;
52 u32 engine;
53};
54
55static void
56nve0_fifo_playlist_update(struct drm_device *dev, u32 engine)
57{
58 struct drm_nouveau_private *dev_priv = dev->dev_private;
59 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
60 struct nve0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
61 struct nve0_fifo_engine *peng = &priv->engine[engine];
62 struct nouveau_gpuobj *cur;
63 u32 match = (engine << 16) | 0x00000001;
64 int ret, i, p;
65
66 cur = peng->playlist[peng->cur_playlist];
67 if (unlikely(cur == NULL)) {
68 ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 0x1000, 0, &cur);
69 if (ret) {
70 NV_ERROR(dev, "PFIFO: playlist alloc failed\n");
71 return;
72 }
73
74 peng->playlist[peng->cur_playlist] = cur;
75 }
76
77 peng->cur_playlist = !peng->cur_playlist;
78
79 for (i = 0, p = 0; i < priv->base.channels; i++) {
80 u32 ctrl = nv_rd32(dev, 0x800004 + (i * 8)) & 0x001f0001;
81 if (ctrl != match)
82 continue;
83 nv_wo32(cur, p + 0, i);
84 nv_wo32(cur, p + 4, 0x00000000);
85 p += 8;
86 }
87 pinstmem->flush(dev);
88
89 nv_wr32(dev, 0x002270, cur->vinst >> 12);
90 nv_wr32(dev, 0x002274, (engine << 20) | (p >> 3));
91 if (!nv_wait(dev, 0x002284 + (engine * 4), 0x00100000, 0x00000000))
92 NV_ERROR(dev, "PFIFO: playlist %d update timeout\n", engine);
93}
94
95static int
96nve0_fifo_context_new(struct nouveau_channel *chan, int engine)
97{
98 struct drm_device *dev = chan->dev;
99 struct drm_nouveau_private *dev_priv = dev->dev_private;
100 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
101 struct nve0_fifo_priv *priv = nv_engine(dev, engine);
102 struct nve0_fifo_chan *fctx;
103 u64 usermem = priv->user.mem->vinst + chan->id * 512;
104 u64 ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4;
105 int ret = 0, i;
106
107 fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
108 if (!fctx)
109 return -ENOMEM;
110
111 fctx->engine = 0; /* PGRAPH */
112
113 /* allocate vram for control regs, map into polling area */
114 chan->user = ioremap_wc(pci_resource_start(dev->pdev, 1) +
115 priv->user.bar.offset + (chan->id * 512), 512);
116 if (!chan->user) {
117 ret = -ENOMEM;
118 goto error;
119 }
120
121 for (i = 0; i < 0x100; i += 4)
122 nv_wo32(chan->ramin, i, 0x00000000);
123 nv_wo32(chan->ramin, 0x08, lower_32_bits(usermem));
124 nv_wo32(chan->ramin, 0x0c, upper_32_bits(usermem));
125 nv_wo32(chan->ramin, 0x10, 0x0000face);
126 nv_wo32(chan->ramin, 0x30, 0xfffff902);
127 nv_wo32(chan->ramin, 0x48, lower_32_bits(ib_virt));
128 nv_wo32(chan->ramin, 0x4c, drm_order(chan->dma.ib_max + 1) << 16 |
129 upper_32_bits(ib_virt));
130 nv_wo32(chan->ramin, 0x84, 0x20400000);
131 nv_wo32(chan->ramin, 0x94, 0x30000001);
132 nv_wo32(chan->ramin, 0x9c, 0x00000100);
133 nv_wo32(chan->ramin, 0xac, 0x0000001f);
134 nv_wo32(chan->ramin, 0xe4, 0x00000000);
135 nv_wo32(chan->ramin, 0xe8, chan->id);
136 nv_wo32(chan->ramin, 0xf8, 0x10003080); /* 0x002310 */
137 nv_wo32(chan->ramin, 0xfc, 0x10000010); /* 0x002350 */
138 pinstmem->flush(dev);
139
140 nv_wr32(dev, 0x800000 + (chan->id * 8), 0x80000000 |
141 (chan->ramin->vinst >> 12));
142 nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000400, 0x00000400);
143 nve0_fifo_playlist_update(dev, fctx->engine);
144 nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000400, 0x00000400);
145
146error:
147 if (ret)
148 priv->base.base.context_del(chan, engine);
149 return ret;
150}
151
152static void
153nve0_fifo_context_del(struct nouveau_channel *chan, int engine)
154{
155 struct nve0_fifo_chan *fctx = chan->engctx[engine];
156 struct drm_device *dev = chan->dev;
157
158 nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000800, 0x00000800);
159 nv_wr32(dev, 0x002634, chan->id);
160 if (!nv_wait(dev, 0x0002634, 0xffffffff, chan->id))
161 NV_WARN(dev, "0x2634 != chid: 0x%08x\n", nv_rd32(dev, 0x2634));
162 nve0_fifo_playlist_update(dev, fctx->engine);
163 nv_wr32(dev, 0x800000 + (chan->id * 8), 0x00000000);
164
165 if (chan->user) {
166 iounmap(chan->user);
167 chan->user = NULL;
168 }
169
170 chan->engctx[NVOBJ_ENGINE_FIFO] = NULL;
171 kfree(fctx);
172}
173
174static int
175nve0_fifo_init(struct drm_device *dev, int engine)
176{
177 struct drm_nouveau_private *dev_priv = dev->dev_private;
178 struct nve0_fifo_priv *priv = nv_engine(dev, engine);
179 struct nve0_fifo_chan *fctx;
180 int i;
181
182 /* reset PFIFO, enable all available PSUBFIFO areas */
183 nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
184 nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
185 nv_wr32(dev, 0x000204, 0xffffffff);
186
187 priv->spoon_nr = hweight32(nv_rd32(dev, 0x000204));
188 NV_DEBUG(dev, "PFIFO: %d subfifo(s)\n", priv->spoon_nr);
189
190 /* PSUBFIFO[n] */
191 for (i = 0; i < priv->spoon_nr; i++) {
192 nv_mask(dev, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
193 nv_wr32(dev, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
194 nv_wr32(dev, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTR_EN */
195 }
196
197 nv_wr32(dev, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);
198
199 nv_wr32(dev, 0x002a00, 0xffffffff);
200 nv_wr32(dev, 0x002100, 0xffffffff);
201 nv_wr32(dev, 0x002140, 0xbfffffff);
202
203 /* restore PFIFO context table */
204 for (i = 0; i < priv->base.channels; i++) {
205 struct nouveau_channel *chan = dev_priv->channels.ptr[i];
206 if (!chan || !(fctx = chan->engctx[engine]))
207 continue;
208
209 nv_wr32(dev, 0x800000 + (i * 8), 0x80000000 |
210 (chan->ramin->vinst >> 12));
211 nv_mask(dev, 0x800004 + (i * 8), 0x00000400, 0x00000400);
212 nve0_fifo_playlist_update(dev, fctx->engine);
213 nv_mask(dev, 0x800004 + (i * 8), 0x00000400, 0x00000400);
214 }
215
216 return 0;
217}
218
219static int
220nve0_fifo_fini(struct drm_device *dev, int engine, bool suspend)
221{
222 struct nve0_fifo_priv *priv = nv_engine(dev, engine);
223 int i;
224
225 for (i = 0; i < priv->base.channels; i++) {
226 if (!(nv_rd32(dev, 0x800004 + (i * 8)) & 1))
227 continue;
228
229 nv_mask(dev, 0x800004 + (i * 8), 0x00000800, 0x00000800);
230 nv_wr32(dev, 0x002634, i);
231 if (!nv_wait(dev, 0x002634, 0xffffffff, i)) {
232 NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n",
233 i, nv_rd32(dev, 0x002634));
234 return -EBUSY;
235 }
236 }
237
238 nv_wr32(dev, 0x002140, 0x00000000);
239 return 0;
240}
241
242struct nouveau_enum nve0_fifo_fault_unit[] = {
243 {}
244};
245
246struct nouveau_enum nve0_fifo_fault_reason[] = {
247 { 0x00, "PT_NOT_PRESENT" },
248 { 0x01, "PT_TOO_SHORT" },
249 { 0x02, "PAGE_NOT_PRESENT" },
250 { 0x03, "VM_LIMIT_EXCEEDED" },
251 { 0x04, "NO_CHANNEL" },
252 { 0x05, "PAGE_SYSTEM_ONLY" },
253 { 0x06, "PAGE_READ_ONLY" },
254 { 0x0a, "COMPRESSED_SYSRAM" },
255 { 0x0c, "INVALID_STORAGE_TYPE" },
256 {}
257};
258
259struct nouveau_enum nve0_fifo_fault_hubclient[] = {
260 {}
261};
262
263struct nouveau_enum nve0_fifo_fault_gpcclient[] = {
264 {}
265};
266
267struct nouveau_bitfield nve0_fifo_subfifo_intr[] = {
268 { 0x00200000, "ILLEGAL_MTHD" },
269 { 0x00800000, "EMPTY_SUBC" },
270 {}
271};
272
273static void
274nve0_fifo_isr_vm_fault(struct drm_device *dev, int unit)
275{
276 u32 inst = nv_rd32(dev, 0x2800 + (unit * 0x10));
277 u32 valo = nv_rd32(dev, 0x2804 + (unit * 0x10));
278 u32 vahi = nv_rd32(dev, 0x2808 + (unit * 0x10));
279 u32 stat = nv_rd32(dev, 0x280c + (unit * 0x10));
280 u32 client = (stat & 0x00001f00) >> 8;
281
282 NV_INFO(dev, "PFIFO: %s fault at 0x%010llx [",
283 (stat & 0x00000080) ? "write" : "read", (u64)vahi << 32 | valo);
284 nouveau_enum_print(nve0_fifo_fault_reason, stat & 0x0000000f);
285 printk("] from ");
286 nouveau_enum_print(nve0_fifo_fault_unit, unit);
287 if (stat & 0x00000040) {
288 printk("/");
289 nouveau_enum_print(nve0_fifo_fault_hubclient, client);
290 } else {
291 printk("/GPC%d/", (stat & 0x1f000000) >> 24);
292 nouveau_enum_print(nve0_fifo_fault_gpcclient, client);
293 }
294 printk(" on channel 0x%010llx\n", (u64)inst << 12);
295}
296
297static void
298nve0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
299{
300 u32 stat = nv_rd32(dev, 0x040108 + (unit * 0x2000));
301 u32 addr = nv_rd32(dev, 0x0400c0 + (unit * 0x2000));
302 u32 data = nv_rd32(dev, 0x0400c4 + (unit * 0x2000));
303 u32 chid = nv_rd32(dev, 0x040120 + (unit * 0x2000)) & 0x7f;
304 u32 subc = (addr & 0x00070000) >> 16;
305 u32 mthd = (addr & 0x00003ffc);
306
307 NV_INFO(dev, "PSUBFIFO %d:", unit);
308 nouveau_bitfield_print(nve0_fifo_subfifo_intr, stat);
309 NV_INFO(dev, "PSUBFIFO %d: ch %d subc %d mthd 0x%04x data 0x%08x\n",
310 unit, chid, subc, mthd, data);
311
312 nv_wr32(dev, 0x0400c0 + (unit * 0x2000), 0x80600008);
313 nv_wr32(dev, 0x040108 + (unit * 0x2000), stat);
314}
315
316static void
317nve0_fifo_isr(struct drm_device *dev)
318{
319 u32 stat = nv_rd32(dev, 0x002100);
320
321 if (stat & 0x00000100) {
322 NV_INFO(dev, "PFIFO: unknown status 0x00000100\n");
323 nv_wr32(dev, 0x002100, 0x00000100);
324 stat &= ~0x00000100;
325 }
326
327 if (stat & 0x10000000) {
328 u32 units = nv_rd32(dev, 0x00259c);
329 u32 u = units;
330
331 while (u) {
332 int i = ffs(u) - 1;
333 nve0_fifo_isr_vm_fault(dev, i);
334 u &= ~(1 << i);
335 }
336
337 nv_wr32(dev, 0x00259c, units);
338 stat &= ~0x10000000;
339 }
340
341 if (stat & 0x20000000) {
342 u32 units = nv_rd32(dev, 0x0025a0);
343 u32 u = units;
344
345 while (u) {
346 int i = ffs(u) - 1;
347 nve0_fifo_isr_subfifo_intr(dev, i);
348 u &= ~(1 << i);
349 }
350
351 nv_wr32(dev, 0x0025a0, units);
352 stat &= ~0x20000000;
353 }
354
355 if (stat & 0x40000000) {
356 NV_INFO(dev, "PFIFO: unknown status 0x40000000\n");
357 nv_mask(dev, 0x002a00, 0x00000000, 0x00000000);
358 stat &= ~0x40000000;
359 }
360
361 if (stat) {
362 NV_INFO(dev, "PFIFO: unhandled status 0x%08x\n", stat);
363 nv_wr32(dev, 0x002100, stat);
364 nv_wr32(dev, 0x002140, 0);
365 }
366}
367
368static void
369nve0_fifo_destroy(struct drm_device *dev, int engine)
370{
371 struct drm_nouveau_private *dev_priv = dev->dev_private;
372 struct nve0_fifo_priv *priv = nv_engine(dev, engine);
373 int i;
374
375 nouveau_vm_put(&priv->user.bar);
376 nouveau_gpuobj_ref(NULL, &priv->user.mem);
377
378 for (i = 0; i < NVE0_FIFO_ENGINE_NUM; i++) {
379 nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[0]);
380 nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[1]);
381 }
382
383 dev_priv->eng[engine] = NULL;
384 kfree(priv);
385}
386
387int
388nve0_fifo_create(struct drm_device *dev)
389{
390 struct drm_nouveau_private *dev_priv = dev->dev_private;
391 struct nve0_fifo_priv *priv;
392 int ret;
393
394 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
395 if (!priv)
396 return -ENOMEM;
397
398 priv->base.base.destroy = nve0_fifo_destroy;
399 priv->base.base.init = nve0_fifo_init;
400 priv->base.base.fini = nve0_fifo_fini;
401 priv->base.base.context_new = nve0_fifo_context_new;
402 priv->base.base.context_del = nve0_fifo_context_del;
403 priv->base.channels = 4096;
404 dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
405
406 ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 512, 0x1000,
407 NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem);
408 if (ret)
409 goto error;
410
411 ret = nouveau_vm_get(dev_priv->bar1_vm, priv->user.mem->size,
412 12, NV_MEM_ACCESS_RW, &priv->user.bar);
413 if (ret)
414 goto error;
415
416 nouveau_vm_map(&priv->user.bar, *(struct nouveau_mem **)priv->user.mem->node);
417
418 nouveau_irq_register(dev, 8, nve0_fifo_isr);
419error:
420 if (ret)
421 priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO);
422 return ret;
423}
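
Two things distinguish this from the nvc0 PFIFO above: nve0 keeps a playlist pair per engine rather than one global pair, and nve0_fifo_playlist_update() rebuilds the runlist by scanning the per-channel 0x800004 control registers instead of driver-side state. The double buffering follows the usual pattern of building the new list in the buffer the hardware is not currently reading, then repointing the hardware. A standalone model of that pattern (plain memory standing in for gpuobjs and the 0x002270/0x002274 registers):

    #include <stdint.h>

    struct playlist {
    	uint32_t buf[2][4096];
    	int next;			/* index of the buffer to build into */
    };

    static const uint32_t *
    playlist_update(struct playlist *pl, const uint32_t *chids, int nr)
    {
    	uint32_t *cur = pl->buf[pl->next];
    	int i;

    	pl->next = !pl->next;		/* mirrors the cur_playlist toggle */
    	for (i = 0; i < nr; i++)
    		cur[i] = chids[i];	/* one entry per runnable channel */

    	/* handing 'cur' to the hardware = the playlist base/length writes */
    	return cur;
    }
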
diff --git a/drivers/gpu/drm/nouveau/nve0_graph.c b/drivers/gpu/drm/nouveau/nve0_graph.c
new file mode 100644
index 000000000000..8a8051b68f10
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nve0_graph.c
@@ -0,0 +1,831 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <linux/firmware.h>
26#include <linux/module.h>
27
28#include "drmP.h"
29
30#include "nouveau_drv.h"
31#include "nouveau_mm.h"
32#include "nouveau_fifo.h"
33
34#include "nve0_graph.h"
35
36static void
37nve0_graph_ctxctl_debug_unit(struct drm_device *dev, u32 base)
38{
39 NV_INFO(dev, "PGRAPH: %06x - done 0x%08x\n", base,
40 nv_rd32(dev, base + 0x400));
41 NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
42 nv_rd32(dev, base + 0x800), nv_rd32(dev, base + 0x804),
43 nv_rd32(dev, base + 0x808), nv_rd32(dev, base + 0x80c));
44 NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
45 nv_rd32(dev, base + 0x810), nv_rd32(dev, base + 0x814),
46 nv_rd32(dev, base + 0x818), nv_rd32(dev, base + 0x81c));
47}
48
49static void
50nve0_graph_ctxctl_debug(struct drm_device *dev)
51{
52 u32 gpcnr = nv_rd32(dev, 0x409604) & 0xffff;
53 u32 gpc;
54
55 nve0_graph_ctxctl_debug_unit(dev, 0x409000);
56 for (gpc = 0; gpc < gpcnr; gpc++)
57 nve0_graph_ctxctl_debug_unit(dev, 0x502000 + (gpc * 0x8000));
58}
59
60static int
61nve0_graph_load_context(struct nouveau_channel *chan)
62{
63 struct drm_device *dev = chan->dev;
64
65 nv_wr32(dev, 0x409840, 0x00000030);
66 nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
67 nv_wr32(dev, 0x409504, 0x00000003);
68 if (!nv_wait(dev, 0x409800, 0x00000010, 0x00000010))
69 NV_ERROR(dev, "PGRAPH: load_ctx timeout\n");
70
71 return 0;
72}
73
74static int
75nve0_graph_unload_context_to(struct drm_device *dev, u64 chan)
76{
77 nv_wr32(dev, 0x409840, 0x00000003);
78 nv_wr32(dev, 0x409500, 0x80000000 | chan >> 12);
79 nv_wr32(dev, 0x409504, 0x00000009);
80 if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000000)) {
81 NV_ERROR(dev, "PGRAPH: unload_ctx timeout\n");
82 return -EBUSY;
83 }
84
85 return 0;
86}
87
88static int
89nve0_graph_construct_context(struct nouveau_channel *chan)
90{
91 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
92 struct nve0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
93 struct nve0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
94 struct drm_device *dev = chan->dev;
95 int ret, i;
96 u32 *ctx;
97
98 ctx = kmalloc(priv->grctx_size, GFP_KERNEL);
99 if (!ctx)
100 return -ENOMEM;
101
102 nve0_graph_load_context(chan);
103
104 nv_wo32(grch->grctx, 0x1c, 1);
105 nv_wo32(grch->grctx, 0x20, 0);
106 nv_wo32(grch->grctx, 0x28, 0);
107 nv_wo32(grch->grctx, 0x2c, 0);
108 dev_priv->engine.instmem.flush(dev);
109
110 ret = nve0_grctx_generate(chan);
111 if (ret)
112 goto err;
113
114 ret = nve0_graph_unload_context_to(dev, chan->ramin->vinst);
115 if (ret)
116 goto err;
117
118 for (i = 0; i < priv->grctx_size; i += 4)
119 ctx[i / 4] = nv_ro32(grch->grctx, i);
120
121 priv->grctx_vals = ctx;
122 return 0;
123
124err:
125 kfree(ctx);
126 return ret;
127}
128
129static int
130nve0_graph_create_context_mmio_list(struct nouveau_channel *chan)
131{
132 struct nve0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
133 struct nve0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
134 struct drm_device *dev = chan->dev;
135 u32 magic[GPC_MAX][2];
136 u16 offset = 0x0000;
137 int gpc;
138 int ret;
139
140 ret = nouveau_gpuobj_new(dev, chan, 0x3000, 256, NVOBJ_FLAG_VM,
141 &grch->unk408004);
142 if (ret)
143 return ret;
144
145 ret = nouveau_gpuobj_new(dev, chan, 0x8000, 256, NVOBJ_FLAG_VM,
146 &grch->unk40800c);
147 if (ret)
148 return ret;
149
150 ret = nouveau_gpuobj_new(dev, chan, 384 * 1024, 4096,
151 NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER,
152 &grch->unk418810);
153 if (ret)
154 return ret;
155
156 ret = nouveau_gpuobj_new(dev, chan, 0x1000, 0, NVOBJ_FLAG_VM,
157 &grch->mmio);
158 if (ret)
159 return ret;
160
161#define mmio(r,v) do { \
162 nv_wo32(grch->mmio, (grch->mmio_nr * 8) + 0, (r)); \
163 nv_wo32(grch->mmio, (grch->mmio_nr * 8) + 4, (v)); \
164 grch->mmio_nr++; \
165} while (0)
166 mmio(0x40800c, grch->unk40800c->linst >> 8);
167 mmio(0x408010, 0x80000000);
168 mmio(0x419004, grch->unk40800c->linst >> 8);
169 mmio(0x419008, 0x00000000);
170 mmio(0x4064cc, 0x80000000);
171 mmio(0x408004, grch->unk408004->linst >> 8);
172 mmio(0x408008, 0x80000030);
173 mmio(0x418808, grch->unk408004->linst >> 8);
174 mmio(0x41880c, 0x80000030);
175 mmio(0x4064c8, 0x01800600);
176 mmio(0x418810, 0x80000000 | grch->unk418810->linst >> 12);
177 mmio(0x419848, 0x10000000 | grch->unk418810->linst >> 12);
178 mmio(0x405830, 0x02180648);
179 mmio(0x4064c4, 0x0192ffff);
180
181 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
182 u16 magic0 = 0x0218 * priv->tpc_nr[gpc];
183 u16 magic1 = 0x0648 * priv->tpc_nr[gpc];
184 magic[gpc][0] = 0x10000000 | (magic0 << 16) | offset;
185 magic[gpc][1] = 0x00000000 | (magic1 << 16);
186 offset += 0x0324 * priv->tpc_nr[gpc];
187 }
188
189 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
190 mmio(GPC_UNIT(gpc, 0x30c0), magic[gpc][0]);
191 mmio(GPC_UNIT(gpc, 0x30e4), magic[gpc][1] | offset);
192 offset += 0x07ff * priv->tpc_nr[gpc];
193 }
194
195 mmio(0x17e91c, 0x06060609);
196 mmio(0x17e920, 0x00090a05);
197#undef mmio
198 return 0;
199}
200
201static int
202nve0_graph_context_new(struct nouveau_channel *chan, int engine)
203{
204 struct drm_device *dev = chan->dev;
205 struct drm_nouveau_private *dev_priv = dev->dev_private;
206 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
207 struct nve0_graph_priv *priv = nv_engine(dev, engine);
208 struct nve0_graph_chan *grch;
209 struct nouveau_gpuobj *grctx;
210 int ret, i;
211
212 grch = kzalloc(sizeof(*grch), GFP_KERNEL);
213 if (!grch)
214 return -ENOMEM;
215 chan->engctx[NVOBJ_ENGINE_GR] = grch;
216
217 ret = nouveau_gpuobj_new(dev, chan, priv->grctx_size, 256,
218 NVOBJ_FLAG_VM | NVOBJ_FLAG_ZERO_ALLOC,
219 &grch->grctx);
220 if (ret)
221 goto error;
222 grctx = grch->grctx;
223
224 ret = nve0_graph_create_context_mmio_list(chan);
225 if (ret)
226 goto error;
227
228 nv_wo32(chan->ramin, 0x0210, lower_32_bits(grctx->linst) | 4);
229 nv_wo32(chan->ramin, 0x0214, upper_32_bits(grctx->linst));
230 pinstmem->flush(dev);
231
232 if (!priv->grctx_vals) {
233 ret = nve0_graph_construct_context(chan);
234 if (ret)
235 goto error;
236 }
237
238 for (i = 0; i < priv->grctx_size; i += 4)
239 nv_wo32(grctx, i, priv->grctx_vals[i / 4]);
240 nv_wo32(grctx, 0xf4, 0);
241 nv_wo32(grctx, 0xf8, 0);
242 nv_wo32(grctx, 0x10, grch->mmio_nr);
243 nv_wo32(grctx, 0x14, lower_32_bits(grch->mmio->linst));
244 nv_wo32(grctx, 0x18, upper_32_bits(grch->mmio->linst));
245 nv_wo32(grctx, 0x1c, 1);
246 nv_wo32(grctx, 0x20, 0);
247 nv_wo32(grctx, 0x28, 0);
248 nv_wo32(grctx, 0x2c, 0);
249
250 pinstmem->flush(dev);
251 return 0;
252
253error:
254 priv->base.context_del(chan, engine);
255 return ret;
256}
257
258static void
259nve0_graph_context_del(struct nouveau_channel *chan, int engine)
260{
261 struct nve0_graph_chan *grch = chan->engctx[engine];
262
263 nouveau_gpuobj_ref(NULL, &grch->mmio);
264 nouveau_gpuobj_ref(NULL, &grch->unk418810);
265 nouveau_gpuobj_ref(NULL, &grch->unk40800c);
266 nouveau_gpuobj_ref(NULL, &grch->unk408004);
267 nouveau_gpuobj_ref(NULL, &grch->grctx);
268 chan->engctx[engine] = NULL;
269}
270
271static int
272nve0_graph_object_new(struct nouveau_channel *chan, int engine,
273 u32 handle, u16 class)
274{
275 return 0;
276}
277
278static int
279nve0_graph_fini(struct drm_device *dev, int engine, bool suspend)
280{
281 return 0;
282}
283
284static void
285nve0_graph_init_obj418880(struct drm_device *dev)
286{
287 struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
288 int i;
289
290 nv_wr32(dev, GPC_BCAST(0x0880), 0x00000000);
291 nv_wr32(dev, GPC_BCAST(0x08a4), 0x00000000);
292 for (i = 0; i < 4; i++)
293 nv_wr32(dev, GPC_BCAST(0x0888) + (i * 4), 0x00000000);
294 nv_wr32(dev, GPC_BCAST(0x08b4), priv->unk4188b4->vinst >> 8);
295 nv_wr32(dev, GPC_BCAST(0x08b8), priv->unk4188b8->vinst >> 8);
296}
297
298static void
299nve0_graph_init_regs(struct drm_device *dev)
300{
301 nv_wr32(dev, 0x400080, 0x003083c2);
302 nv_wr32(dev, 0x400088, 0x0001ffe7);
303 nv_wr32(dev, 0x40008c, 0x00000000);
304 nv_wr32(dev, 0x400090, 0x00000030);
305 nv_wr32(dev, 0x40013c, 0x003901f7);
306 nv_wr32(dev, 0x400140, 0x00000100);
307 nv_wr32(dev, 0x400144, 0x00000000);
308 nv_wr32(dev, 0x400148, 0x00000110);
309 nv_wr32(dev, 0x400138, 0x00000000);
310 nv_wr32(dev, 0x400130, 0x00000000);
311 nv_wr32(dev, 0x400134, 0x00000000);
312 nv_wr32(dev, 0x400124, 0x00000002);
313}
314
315static void
316nve0_graph_init_units(struct drm_device *dev)
317{
318 nv_wr32(dev, 0x409ffc, 0x00000000);
319 nv_wr32(dev, 0x409c14, 0x00003e3e);
320 nv_wr32(dev, 0x409c24, 0x000f0000);
321
322 nv_wr32(dev, 0x404000, 0xc0000000);
323 nv_wr32(dev, 0x404600, 0xc0000000);
324 nv_wr32(dev, 0x408030, 0xc0000000);
325 nv_wr32(dev, 0x404490, 0xc0000000);
326 nv_wr32(dev, 0x406018, 0xc0000000);
327 nv_wr32(dev, 0x407020, 0xc0000000);
328 nv_wr32(dev, 0x405840, 0xc0000000);
329 nv_wr32(dev, 0x405844, 0x00ffffff);
330
331 nv_mask(dev, 0x419cc0, 0x00000008, 0x00000008);
332 nv_mask(dev, 0x419eb4, 0x00001000, 0x00001000);
333
334}
335
336static void
337nve0_graph_init_gpc_0(struct drm_device *dev)
338{
339 struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
340 const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total);
341 u32 data[TPC_MAX / 8];
342 u8 tpcnr[GPC_MAX];
343 int i, gpc, tpc;
344
345 nv_wr32(dev, GPC_UNIT(0, 0x3018), 0x00000001);
346
347 memset(data, 0x00, sizeof(data));
348 memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
349 for (i = 0, gpc = -1; i < priv->tpc_total; i++) {
350 do {
351 gpc = (gpc + 1) % priv->gpc_nr;
352 } while (!tpcnr[gpc]);
353 tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
354
355 data[i / 8] |= tpc << ((i % 8) * 4);
356 }
357
358 nv_wr32(dev, GPC_BCAST(0x0980), data[0]);
359 nv_wr32(dev, GPC_BCAST(0x0984), data[1]);
360 nv_wr32(dev, GPC_BCAST(0x0988), data[2]);
361 nv_wr32(dev, GPC_BCAST(0x098c), data[3]);
362
363 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
364 nv_wr32(dev, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 |
365 priv->tpc_nr[gpc]);
366 nv_wr32(dev, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tpc_total);
367 nv_wr32(dev, GPC_UNIT(gpc, 0x0918), magicgpc918);
368 }
369
370 nv_wr32(dev, GPC_BCAST(0x1bd4), magicgpc918);
371 nv_wr32(dev, GPC_BCAST(0x08ac), nv_rd32(dev, 0x100800));
372}
373
374static void
375nve0_graph_init_gpc_1(struct drm_device *dev)
376{
377 struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
378 int gpc, tpc;
379
380 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
381 nv_wr32(dev, GPC_UNIT(gpc, 0x3038), 0xc0000000);
382 nv_wr32(dev, GPC_UNIT(gpc, 0x0420), 0xc0000000);
383 nv_wr32(dev, GPC_UNIT(gpc, 0x0900), 0xc0000000);
384 nv_wr32(dev, GPC_UNIT(gpc, 0x1028), 0xc0000000);
385 nv_wr32(dev, GPC_UNIT(gpc, 0x0824), 0xc0000000);
386 for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
387 nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
388 nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
389 nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
390 nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
391 nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
392 nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
393 nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
394 }
395 nv_wr32(dev, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
396 nv_wr32(dev, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
397 }
398}
399
400static void
401nve0_graph_init_rop(struct drm_device *dev)
402{
403 struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
404 int rop;
405
406 for (rop = 0; rop < priv->rop_nr; rop++) {
407 nv_wr32(dev, ROP_UNIT(rop, 0x144), 0xc0000000);
408 nv_wr32(dev, ROP_UNIT(rop, 0x070), 0xc0000000);
409 nv_wr32(dev, ROP_UNIT(rop, 0x204), 0xffffffff);
410 nv_wr32(dev, ROP_UNIT(rop, 0x208), 0xffffffff);
411 }
412}
413
414static void
415nve0_graph_init_fuc(struct drm_device *dev, u32 fuc_base,
416 struct nve0_graph_fuc *code, struct nve0_graph_fuc *data)
417{
418 int i;
419
420 nv_wr32(dev, fuc_base + 0x01c0, 0x01000000);
421 for (i = 0; i < data->size / 4; i++)
422 nv_wr32(dev, fuc_base + 0x01c4, data->data[i]);
423
424 nv_wr32(dev, fuc_base + 0x0180, 0x01000000);
425 for (i = 0; i < code->size / 4; i++) {
426 if ((i & 0x3f) == 0)
427 nv_wr32(dev, fuc_base + 0x0188, i >> 6);
428 nv_wr32(dev, fuc_base + 0x0184, code->data[i]);
429 }
430}
431
432static int
433nve0_graph_init_ctxctl(struct drm_device *dev)
434{
435 struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
436 u32 r000260;
437
438 /* load fuc microcode */
439 r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
440 nve0_graph_init_fuc(dev, 0x409000, &priv->fuc409c, &priv->fuc409d);
441 nve0_graph_init_fuc(dev, 0x41a000, &priv->fuc41ac, &priv->fuc41ad);
442 nv_wr32(dev, 0x000260, r000260);
443
444 /* start both of them running */
445 nv_wr32(dev, 0x409840, 0xffffffff);
446 nv_wr32(dev, 0x41a10c, 0x00000000);
447 nv_wr32(dev, 0x40910c, 0x00000000);
448 nv_wr32(dev, 0x41a100, 0x00000002);
449 nv_wr32(dev, 0x409100, 0x00000002);
450 if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000001))
451 NV_INFO(dev, "0x409800 wait failed\n");
452
453 nv_wr32(dev, 0x409840, 0xffffffff);
454 nv_wr32(dev, 0x409500, 0x7fffffff);
455 nv_wr32(dev, 0x409504, 0x00000021);
456
457 nv_wr32(dev, 0x409840, 0xffffffff);
458 nv_wr32(dev, 0x409500, 0x00000000);
459 nv_wr32(dev, 0x409504, 0x00000010);
460 if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
461 NV_ERROR(dev, "fuc09 req 0x10 timeout\n");
462 return -EBUSY;
463 }
464 priv->grctx_size = nv_rd32(dev, 0x409800);
465
466 nv_wr32(dev, 0x409840, 0xffffffff);
467 nv_wr32(dev, 0x409500, 0x00000000);
468 nv_wr32(dev, 0x409504, 0x00000016);
469 if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
470 NV_ERROR(dev, "fuc09 req 0x16 timeout\n");
471 return -EBUSY;
472 }
473
474 nv_wr32(dev, 0x409840, 0xffffffff);
475 nv_wr32(dev, 0x409500, 0x00000000);
476 nv_wr32(dev, 0x409504, 0x00000025);
477 if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
478 NV_ERROR(dev, "fuc09 req 0x25 timeout\n");
479 return -EBUSY;
480 }
481
482 nv_wr32(dev, 0x409800, 0x00000000);
483 nv_wr32(dev, 0x409500, 0x00000001);
484 nv_wr32(dev, 0x409504, 0x00000030);
485 if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
486 NV_ERROR(dev, "fuc09 req 0x30 timeout\n");
487 return -EBUSY;
488 }
489
490 nv_wr32(dev, 0x409810, 0xb00095c8);
491 nv_wr32(dev, 0x409800, 0x00000000);
492 nv_wr32(dev, 0x409500, 0x00000001);
493 nv_wr32(dev, 0x409504, 0x00000031);
494 if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
495 NV_ERROR(dev, "fuc09 req 0x31 timeout\n");
496 return -EBUSY;
497 }
498
499 nv_wr32(dev, 0x409810, 0x00080420);
500 nv_wr32(dev, 0x409800, 0x00000000);
501 nv_wr32(dev, 0x409500, 0x00000001);
502 nv_wr32(dev, 0x409504, 0x00000032);
503 if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
504 NV_ERROR(dev, "fuc09 req 0x32 timeout\n");
505 return -EBUSY;
506 }
507
508 nv_wr32(dev, 0x409614, 0x00000070);
509 nv_wr32(dev, 0x409614, 0x00000770);
510 nv_wr32(dev, 0x40802c, 0x00000001);
511 return 0;
512}
513
514static int
515nve0_graph_init(struct drm_device *dev, int engine)
516{
517 int ret;
518
519 nv_mask(dev, 0x000200, 0x18001000, 0x00000000);
520 nv_mask(dev, 0x000200, 0x18001000, 0x18001000);
521
522 nve0_graph_init_obj418880(dev);
523 nve0_graph_init_regs(dev);
524 nve0_graph_init_gpc_0(dev);
525
526 nv_wr32(dev, 0x400500, 0x00010001);
527 nv_wr32(dev, 0x400100, 0xffffffff);
528 nv_wr32(dev, 0x40013c, 0xffffffff);
529
530 nve0_graph_init_units(dev);
531 nve0_graph_init_gpc_1(dev);
532 nve0_graph_init_rop(dev);
533
534 nv_wr32(dev, 0x400108, 0xffffffff);
535 nv_wr32(dev, 0x400138, 0xffffffff);
536 nv_wr32(dev, 0x400118, 0xffffffff);
537 nv_wr32(dev, 0x400130, 0xffffffff);
538 nv_wr32(dev, 0x40011c, 0xffffffff);
539 nv_wr32(dev, 0x400134, 0xffffffff);
540 nv_wr32(dev, 0x400054, 0x34ce3464);
541
542 ret = nve0_graph_init_ctxctl(dev);
543 if (ret)
544 return ret;
545
546 return 0;
547}
548
549int
550nve0_graph_isr_chid(struct drm_device *dev, u64 inst)
551{
552 struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
553 struct drm_nouveau_private *dev_priv = dev->dev_private;
554 struct nouveau_channel *chan;
555 unsigned long flags;
556 int i;
557
558 spin_lock_irqsave(&dev_priv->channels.lock, flags);
559 for (i = 0; i < pfifo->channels; i++) {
560 chan = dev_priv->channels.ptr[i];
561 if (!chan || !chan->ramin)
562 continue;
563
564 if (inst == chan->ramin->vinst)
565 break;
566 }
567 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
568 return i;
569}
570
571static void
572nve0_graph_ctxctl_isr(struct drm_device *dev)
573{
574 u32 ustat = nv_rd32(dev, 0x409c18);
575
576 if (ustat & 0x00000001)
577 NV_INFO(dev, "PGRAPH: CTXCTRL ucode error\n");
578 if (ustat & 0x00080000)
579 NV_INFO(dev, "PGRAPH: CTXCTRL watchdog timeout\n");
580 if (ustat & ~0x00080001)
581 NV_INFO(dev, "PGRAPH: CTXCTRL 0x%08x\n", ustat);
582
583 nve0_graph_ctxctl_debug(dev);
584 nv_wr32(dev, 0x409c20, ustat);
585}
586
587static void
588nve0_graph_trap_isr(struct drm_device *dev, int chid)
589{
590 struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
591 u32 trap = nv_rd32(dev, 0x400108);
592 int rop;
593
594 if (trap & 0x00000001) {
595 u32 stat = nv_rd32(dev, 0x404000);
596 NV_INFO(dev, "PGRAPH: DISPATCH ch %d 0x%08x\n", chid, stat);
597 nv_wr32(dev, 0x404000, 0xc0000000);
598 nv_wr32(dev, 0x400108, 0x00000001);
599 trap &= ~0x00000001;
600 }
601
602 if (trap & 0x00000010) {
603 u32 stat = nv_rd32(dev, 0x405840);
604 NV_INFO(dev, "PGRAPH: SHADER ch %d 0x%08x\n", chid, stat);
605 nv_wr32(dev, 0x405840, 0xc0000000);
606 nv_wr32(dev, 0x400108, 0x00000010);
607 trap &= ~0x00000010;
608 }
609
610 if (trap & 0x02000000) {
611 for (rop = 0; rop < priv->rop_nr; rop++) {
612 u32 statz = nv_rd32(dev, ROP_UNIT(rop, 0x070));
613 u32 statc = nv_rd32(dev, ROP_UNIT(rop, 0x144));
614 NV_INFO(dev, "PGRAPH: ROP%d ch %d 0x%08x 0x%08x\n",
615 rop, chid, statz, statc);
616 nv_wr32(dev, ROP_UNIT(rop, 0x070), 0xc0000000);
617 nv_wr32(dev, ROP_UNIT(rop, 0x144), 0xc0000000);
618 }
619 nv_wr32(dev, 0x400108, 0x02000000);
620 trap &= ~0x02000000;
621 }
622
623 if (trap) {
624 NV_INFO(dev, "PGRAPH: TRAP ch %d 0x%08x\n", chid, trap);
625 nv_wr32(dev, 0x400108, trap);
626 }
627}
628
629static void
630nve0_graph_isr(struct drm_device *dev)
631{
632 u64 inst = (u64)(nv_rd32(dev, 0x409b00) & 0x0fffffff) << 12;
633 u32 chid = nve0_graph_isr_chid(dev, inst);
634 u32 stat = nv_rd32(dev, 0x400100);
635 u32 addr = nv_rd32(dev, 0x400704);
636 u32 mthd = (addr & 0x00003ffc);
637 u32 subc = (addr & 0x00070000) >> 16;
638 u32 data = nv_rd32(dev, 0x400708);
639 u32 code = nv_rd32(dev, 0x400110);
640 u32 class = nv_rd32(dev, 0x404200 + (subc * 4));
641
642 if (stat & 0x00000010) {
643 if (nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data)) {
644 NV_INFO(dev, "PGRAPH: ILLEGAL_MTHD ch %d [0x%010llx] "
645 "subc %d class 0x%04x mthd 0x%04x "
646 "data 0x%08x\n",
647 chid, inst, subc, class, mthd, data);
648 }
649 nv_wr32(dev, 0x400100, 0x00000010);
650 stat &= ~0x00000010;
651 }
652
653 if (stat & 0x00000020) {
654 NV_INFO(dev, "PGRAPH: ILLEGAL_CLASS ch %d [0x%010llx] subc %d "
655 "class 0x%04x mthd 0x%04x data 0x%08x\n",
656 chid, inst, subc, class, mthd, data);
657 nv_wr32(dev, 0x400100, 0x00000020);
658 stat &= ~0x00000020;
659 }
660
661 if (stat & 0x00100000) {
662 NV_INFO(dev, "PGRAPH: DATA_ERROR [");
663 nouveau_enum_print(nv50_data_error_names, code);
664 printk("] ch %d [0x%010llx] subc %d class 0x%04x "
665 "mthd 0x%04x data 0x%08x\n",
666 chid, inst, subc, class, mthd, data);
667 nv_wr32(dev, 0x400100, 0x00100000);
668 stat &= ~0x00100000;
669 }
670
671 if (stat & 0x00200000) {
672 nve0_graph_trap_isr(dev, chid);
673 nv_wr32(dev, 0x400100, 0x00200000);
674 stat &= ~0x00200000;
675 }
676
677 if (stat & 0x00080000) {
678 nve0_graph_ctxctl_isr(dev);
679 nv_wr32(dev, 0x400100, 0x00080000);
680 stat &= ~0x00080000;
681 }
682
683 if (stat) {
684 NV_INFO(dev, "PGRAPH: unknown stat 0x%08x\n", stat);
685 nv_wr32(dev, 0x400100, stat);
686 }
687
688 nv_wr32(dev, 0x400500, 0x00010001);
689}
690
691static int
692nve0_graph_create_fw(struct drm_device *dev, const char *fwname,
693 struct nve0_graph_fuc *fuc)
694{
695 struct drm_nouveau_private *dev_priv = dev->dev_private;
696 const struct firmware *fw;
697 char f[32];
698 int ret;
699
700 snprintf(f, sizeof(f), "nouveau/nv%02x_%s", dev_priv->chipset, fwname);
701 ret = request_firmware(&fw, f, &dev->pdev->dev);
702 if (ret)
703 return ret;
704
705 fuc->size = fw->size;
706 fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
707 release_firmware(fw);
708 return (fuc->data != NULL) ? 0 : -ENOMEM;
709}
710
711static void
712nve0_graph_destroy_fw(struct nve0_graph_fuc *fuc)
713{
714 if (fuc->data) {
715 kfree(fuc->data);
716 fuc->data = NULL;
717 }
718}
719
720static void
721nve0_graph_destroy(struct drm_device *dev, int engine)
722{
723 struct nve0_graph_priv *priv = nv_engine(dev, engine);
724
725 nve0_graph_destroy_fw(&priv->fuc409c);
726 nve0_graph_destroy_fw(&priv->fuc409d);
727 nve0_graph_destroy_fw(&priv->fuc41ac);
728 nve0_graph_destroy_fw(&priv->fuc41ad);
729
730 nouveau_irq_unregister(dev, 12);
731
732 nouveau_gpuobj_ref(NULL, &priv->unk4188b8);
733 nouveau_gpuobj_ref(NULL, &priv->unk4188b4);
734
735 if (priv->grctx_vals)
736 kfree(priv->grctx_vals);
737
738 NVOBJ_ENGINE_DEL(dev, GR);
739 kfree(priv);
740}
741
742int
743nve0_graph_create(struct drm_device *dev)
744{
745 struct drm_nouveau_private *dev_priv = dev->dev_private;
746 struct nve0_graph_priv *priv;
747 int ret, gpc, i;
748 u32 kepler;
749
750 kepler = nve0_graph_class(dev);
751 if (!kepler) {
752 NV_ERROR(dev, "PGRAPH: unsupported chipset, please report!\n");
753 return 0;
754 }
755
756 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
757 if (!priv)
758 return -ENOMEM;
759
760 priv->base.destroy = nve0_graph_destroy;
761 priv->base.init = nve0_graph_init;
762 priv->base.fini = nve0_graph_fini;
763 priv->base.context_new = nve0_graph_context_new;
764 priv->base.context_del = nve0_graph_context_del;
765 priv->base.object_new = nve0_graph_object_new;
766
767 NVOBJ_ENGINE_ADD(dev, GR, &priv->base);
768 nouveau_irq_register(dev, 12, nve0_graph_isr);
769
770 NV_INFO(dev, "PGRAPH: using external firmware\n");
771 if (nve0_graph_create_fw(dev, "fuc409c", &priv->fuc409c) ||
772 nve0_graph_create_fw(dev, "fuc409d", &priv->fuc409d) ||
773 nve0_graph_create_fw(dev, "fuc41ac", &priv->fuc41ac) ||
774 nve0_graph_create_fw(dev, "fuc41ad", &priv->fuc41ad)) {
775 ret = 0;
776 goto error;
777 }
778
779 ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b4);
780 if (ret)
781 goto error;
782
783 ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b8);
784 if (ret)
785 goto error;
786
787 for (i = 0; i < 0x1000; i += 4) {
788 nv_wo32(priv->unk4188b4, i, 0x00000010);
789 nv_wo32(priv->unk4188b8, i, 0x00000010);
790 }
791
792 priv->gpc_nr = nv_rd32(dev, 0x409604) & 0x0000001f;
793 priv->rop_nr = (nv_rd32(dev, 0x409604) & 0x001f0000) >> 16;
794 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
795 priv->tpc_nr[gpc] = nv_rd32(dev, GPC_UNIT(gpc, 0x2608));
796 priv->tpc_total += priv->tpc_nr[gpc];
797 }
798
799 switch (dev_priv->chipset) {
800 case 0xe4:
801 if (priv->tpc_total == 8)
802 priv->magic_not_rop_nr = 3;
803 else
804 if (priv->tpc_total == 7)
805 priv->magic_not_rop_nr = 1;
806 break;
807 case 0xe7:
808 priv->magic_not_rop_nr = 1;
809 break;
810 default:
811 break;
812 }
813
814 if (!priv->magic_not_rop_nr) {
815 NV_ERROR(dev, "PGRAPH: unknown config: %d/%d/%d/%d, %d\n",
816 priv->tpc_nr[0], priv->tpc_nr[1], priv->tpc_nr[2],
817 priv->tpc_nr[3], priv->rop_nr);
818 priv->magic_not_rop_nr = 0x00;
819 }
820
821 NVOBJ_CLASS(dev, 0xa097, GR); /* subc 0: 3D */
822 NVOBJ_CLASS(dev, 0xa0c0, GR); /* subc 1: COMPUTE */
823 NVOBJ_CLASS(dev, 0xa040, GR); /* subc 2: P2MF */
824 NVOBJ_CLASS(dev, 0x902d, GR); /* subc 3: 2D */
825 NVOBJ_CLASS(dev, 0xa0b5, GR); /* subc 4: COPY */
826 return 0;
827
828error:
829 nve0_graph_destroy(dev, NVOBJ_ENGINE_GR);
830 return ret;
831}
diff --git a/drivers/gpu/drm/nouveau/nve0_graph.h b/drivers/gpu/drm/nouveau/nve0_graph.h
new file mode 100644
index 000000000000..2ba70449ba01
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nve0_graph.h
@@ -0,0 +1,89 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#ifndef __NVE0_GRAPH_H__
26#define __NVE0_GRAPH_H__
27
28#define GPC_MAX 4
29#define TPC_MAX 32
30
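/* PGRAPH register apertures: broadcast variants plus per-unit strides */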
31#define ROP_BCAST(r) (0x408800 + (r))
32#define ROP_UNIT(u, r) (0x410000 + (u) * 0x400 + (r))
33#define GPC_BCAST(r) (0x418000 + (r))
34#define GPC_UNIT(t, r) (0x500000 + (t) * 0x8000 + (r))
35#define TPC_UNIT(t, m, r) (0x504000 + (t) * 0x8000 + (m) * 0x800 + (r))
36
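/* an external firmware image for one of PGRAPH's microcontrollers */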
37struct nve0_graph_fuc {
38 u32 *data;
39 u32 size;
40};
41
42struct nve0_graph_priv {
43 struct nouveau_exec_engine base;
44
45 struct nve0_graph_fuc fuc409c;
46 struct nve0_graph_fuc fuc409d;
47 struct nve0_graph_fuc fuc41ac;
48 struct nve0_graph_fuc fuc41ad;
49
50 u8 gpc_nr;
51 u8 rop_nr;
52 u8 tpc_nr[GPC_MAX];
53 u8 tpc_total;
54
55 u32 grctx_size;
56 u32 *grctx_vals;
57 struct nouveau_gpuobj *unk4188b4;
58 struct nouveau_gpuobj *unk4188b8;
59
60 u8 magic_not_rop_nr;
61};
62
63struct nve0_graph_chan {
64 struct nouveau_gpuobj *grctx;
65 struct nouveau_gpuobj *unk408004; /* 0x418810 too */
66 struct nouveau_gpuobj *unk40800c; /* 0x419004 too */
67 struct nouveau_gpuobj *unk418810; /* 0x419848 too */
68 struct nouveau_gpuobj *mmio;
69 int mmio_nr;
70};
71
72int nve0_grctx_generate(struct nouveau_channel *);
73
74/* nve0_graph.c also uses this to determine the supported chipsets */
75static inline u32
76nve0_graph_class(struct drm_device *dev)
77{
78 struct drm_nouveau_private *dev_priv = dev->dev_private;
79
80 switch (dev_priv->chipset) {
81 case 0xe4:
82 case 0xe7:
83 return 0xa097;
84 default:
85 return 0;
86 }
87}
88
89#endif
diff --git a/drivers/gpu/drm/nouveau/nve0_grctx.c b/drivers/gpu/drm/nouveau/nve0_grctx.c
new file mode 100644
index 000000000000..d8cb360e92c1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nve0_grctx.c
@@ -0,0 +1,2777 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_mm.h"
28#include "nve0_graph.h"
29
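/*
 * Write a single (address, data) pair through PGRAPH's indirect command
 * interface, then spin until the busy bit in 0x400700 clears.
 */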
30static void
31nv_icmd(struct drm_device *dev, u32 icmd, u32 data)
32{
33 nv_wr32(dev, 0x400204, data);
34 nv_wr32(dev, 0x400200, icmd);
35 while (nv_rd32(dev, 0x400700) & 0x00000002) {}
36}
37
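/* default state uploaded via the ICMD interface; 0x400208 bit 31 gates
 * the upload and is cleared again once all values have been written */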
38static void
39nve0_grctx_generate_icmd(struct drm_device *dev)
40{
41 nv_wr32(dev, 0x400208, 0x80000000);
42 nv_icmd(dev, 0x001000, 0x00000004);
43 nv_icmd(dev, 0x000039, 0x00000000);
44 nv_icmd(dev, 0x00003a, 0x00000000);
45 nv_icmd(dev, 0x00003b, 0x00000000);
46 nv_icmd(dev, 0x0000a9, 0x0000ffff);
47 nv_icmd(dev, 0x000038, 0x0fac6881);
48 nv_icmd(dev, 0x00003d, 0x00000001);
49 nv_icmd(dev, 0x0000e8, 0x00000400);
50 nv_icmd(dev, 0x0000e9, 0x00000400);
51 nv_icmd(dev, 0x0000ea, 0x00000400);
52 nv_icmd(dev, 0x0000eb, 0x00000400);
53 nv_icmd(dev, 0x0000ec, 0x00000400);
54 nv_icmd(dev, 0x0000ed, 0x00000400);
55 nv_icmd(dev, 0x0000ee, 0x00000400);
56 nv_icmd(dev, 0x0000ef, 0x00000400);
57 nv_icmd(dev, 0x000078, 0x00000300);
58 nv_icmd(dev, 0x000079, 0x00000300);
59 nv_icmd(dev, 0x00007a, 0x00000300);
60 nv_icmd(dev, 0x00007b, 0x00000300);
61 nv_icmd(dev, 0x00007c, 0x00000300);
62 nv_icmd(dev, 0x00007d, 0x00000300);
63 nv_icmd(dev, 0x00007e, 0x00000300);
64 nv_icmd(dev, 0x00007f, 0x00000300);
65 nv_icmd(dev, 0x000050, 0x00000011);
66 nv_icmd(dev, 0x000058, 0x00000008);
67 nv_icmd(dev, 0x000059, 0x00000008);
68 nv_icmd(dev, 0x00005a, 0x00000008);
69 nv_icmd(dev, 0x00005b, 0x00000008);
70 nv_icmd(dev, 0x00005c, 0x00000008);
71 nv_icmd(dev, 0x00005d, 0x00000008);
72 nv_icmd(dev, 0x00005e, 0x00000008);
73 nv_icmd(dev, 0x00005f, 0x00000008);
74 nv_icmd(dev, 0x000208, 0x00000001);
75 nv_icmd(dev, 0x000209, 0x00000001);
76 nv_icmd(dev, 0x00020a, 0x00000001);
77 nv_icmd(dev, 0x00020b, 0x00000001);
78 nv_icmd(dev, 0x00020c, 0x00000001);
79 nv_icmd(dev, 0x00020d, 0x00000001);
80 nv_icmd(dev, 0x00020e, 0x00000001);
81 nv_icmd(dev, 0x00020f, 0x00000001);
82 nv_icmd(dev, 0x000081, 0x00000001);
83 nv_icmd(dev, 0x000085, 0x00000004);
84 nv_icmd(dev, 0x000088, 0x00000400);
85 nv_icmd(dev, 0x000090, 0x00000300);
86 nv_icmd(dev, 0x000098, 0x00001001);
87 nv_icmd(dev, 0x0000e3, 0x00000001);
88 nv_icmd(dev, 0x0000da, 0x00000001);
89 nv_icmd(dev, 0x0000f8, 0x00000003);
90 nv_icmd(dev, 0x0000fa, 0x00000001);
91 nv_icmd(dev, 0x00009f, 0x0000ffff);
92 nv_icmd(dev, 0x0000a0, 0x0000ffff);
93 nv_icmd(dev, 0x0000a1, 0x0000ffff);
94 nv_icmd(dev, 0x0000a2, 0x0000ffff);
95 nv_icmd(dev, 0x0000b1, 0x00000001);
96 nv_icmd(dev, 0x0000ad, 0x0000013e);
97 nv_icmd(dev, 0x0000e1, 0x00000010);
98 nv_icmd(dev, 0x000290, 0x00000000);
99 nv_icmd(dev, 0x000291, 0x00000000);
100 nv_icmd(dev, 0x000292, 0x00000000);
101 nv_icmd(dev, 0x000293, 0x00000000);
102 nv_icmd(dev, 0x000294, 0x00000000);
103 nv_icmd(dev, 0x000295, 0x00000000);
104 nv_icmd(dev, 0x000296, 0x00000000);
105 nv_icmd(dev, 0x000297, 0x00000000);
106 nv_icmd(dev, 0x000298, 0x00000000);
107 nv_icmd(dev, 0x000299, 0x00000000);
108 nv_icmd(dev, 0x00029a, 0x00000000);
109 nv_icmd(dev, 0x00029b, 0x00000000);
110 nv_icmd(dev, 0x00029c, 0x00000000);
111 nv_icmd(dev, 0x00029d, 0x00000000);
112 nv_icmd(dev, 0x00029e, 0x00000000);
113 nv_icmd(dev, 0x00029f, 0x00000000);
114 nv_icmd(dev, 0x0003b0, 0x00000000);
115 nv_icmd(dev, 0x0003b1, 0x00000000);
116 nv_icmd(dev, 0x0003b2, 0x00000000);
117 nv_icmd(dev, 0x0003b3, 0x00000000);
118 nv_icmd(dev, 0x0003b4, 0x00000000);
119 nv_icmd(dev, 0x0003b5, 0x00000000);
120 nv_icmd(dev, 0x0003b6, 0x00000000);
121 nv_icmd(dev, 0x0003b7, 0x00000000);
122 nv_icmd(dev, 0x0003b8, 0x00000000);
123 nv_icmd(dev, 0x0003b9, 0x00000000);
124 nv_icmd(dev, 0x0003ba, 0x00000000);
125 nv_icmd(dev, 0x0003bb, 0x00000000);
126 nv_icmd(dev, 0x0003bc, 0x00000000);
127 nv_icmd(dev, 0x0003bd, 0x00000000);
128 nv_icmd(dev, 0x0003be, 0x00000000);
129 nv_icmd(dev, 0x0003bf, 0x00000000);
130 nv_icmd(dev, 0x0002a0, 0x00000000);
131 nv_icmd(dev, 0x0002a1, 0x00000000);
132 nv_icmd(dev, 0x0002a2, 0x00000000);
133 nv_icmd(dev, 0x0002a3, 0x00000000);
134 nv_icmd(dev, 0x0002a4, 0x00000000);
135 nv_icmd(dev, 0x0002a5, 0x00000000);
136 nv_icmd(dev, 0x0002a6, 0x00000000);
137 nv_icmd(dev, 0x0002a7, 0x00000000);
138 nv_icmd(dev, 0x0002a8, 0x00000000);
139 nv_icmd(dev, 0x0002a9, 0x00000000);
140 nv_icmd(dev, 0x0002aa, 0x00000000);
141 nv_icmd(dev, 0x0002ab, 0x00000000);
142 nv_icmd(dev, 0x0002ac, 0x00000000);
143 nv_icmd(dev, 0x0002ad, 0x00000000);
144 nv_icmd(dev, 0x0002ae, 0x00000000);
145 nv_icmd(dev, 0x0002af, 0x00000000);
146 nv_icmd(dev, 0x000420, 0x00000000);
147 nv_icmd(dev, 0x000421, 0x00000000);
148 nv_icmd(dev, 0x000422, 0x00000000);
149 nv_icmd(dev, 0x000423, 0x00000000);
150 nv_icmd(dev, 0x000424, 0x00000000);
151 nv_icmd(dev, 0x000425, 0x00000000);
152 nv_icmd(dev, 0x000426, 0x00000000);
153 nv_icmd(dev, 0x000427, 0x00000000);
154 nv_icmd(dev, 0x000428, 0x00000000);
155 nv_icmd(dev, 0x000429, 0x00000000);
156 nv_icmd(dev, 0x00042a, 0x00000000);
157 nv_icmd(dev, 0x00042b, 0x00000000);
158 nv_icmd(dev, 0x00042c, 0x00000000);
159 nv_icmd(dev, 0x00042d, 0x00000000);
160 nv_icmd(dev, 0x00042e, 0x00000000);
161 nv_icmd(dev, 0x00042f, 0x00000000);
162 nv_icmd(dev, 0x0002b0, 0x00000000);
163 nv_icmd(dev, 0x0002b1, 0x00000000);
164 nv_icmd(dev, 0x0002b2, 0x00000000);
165 nv_icmd(dev, 0x0002b3, 0x00000000);
166 nv_icmd(dev, 0x0002b4, 0x00000000);
167 nv_icmd(dev, 0x0002b5, 0x00000000);
168 nv_icmd(dev, 0x0002b6, 0x00000000);
169 nv_icmd(dev, 0x0002b7, 0x00000000);
170 nv_icmd(dev, 0x0002b8, 0x00000000);
171 nv_icmd(dev, 0x0002b9, 0x00000000);
172 nv_icmd(dev, 0x0002ba, 0x00000000);
173 nv_icmd(dev, 0x0002bb, 0x00000000);
174 nv_icmd(dev, 0x0002bc, 0x00000000);
175 nv_icmd(dev, 0x0002bd, 0x00000000);
176 nv_icmd(dev, 0x0002be, 0x00000000);
177 nv_icmd(dev, 0x0002bf, 0x00000000);
178 nv_icmd(dev, 0x000430, 0x00000000);
179 nv_icmd(dev, 0x000431, 0x00000000);
180 nv_icmd(dev, 0x000432, 0x00000000);
181 nv_icmd(dev, 0x000433, 0x00000000);
182 nv_icmd(dev, 0x000434, 0x00000000);
183 nv_icmd(dev, 0x000435, 0x00000000);
184 nv_icmd(dev, 0x000436, 0x00000000);
185 nv_icmd(dev, 0x000437, 0x00000000);
186 nv_icmd(dev, 0x000438, 0x00000000);
187 nv_icmd(dev, 0x000439, 0x00000000);
188 nv_icmd(dev, 0x00043a, 0x00000000);
189 nv_icmd(dev, 0x00043b, 0x00000000);
190 nv_icmd(dev, 0x00043c, 0x00000000);
191 nv_icmd(dev, 0x00043d, 0x00000000);
192 nv_icmd(dev, 0x00043e, 0x00000000);
193 nv_icmd(dev, 0x00043f, 0x00000000);
194 nv_icmd(dev, 0x0002c0, 0x00000000);
195 nv_icmd(dev, 0x0002c1, 0x00000000);
196 nv_icmd(dev, 0x0002c2, 0x00000000);
197 nv_icmd(dev, 0x0002c3, 0x00000000);
198 nv_icmd(dev, 0x0002c4, 0x00000000);
199 nv_icmd(dev, 0x0002c5, 0x00000000);
200 nv_icmd(dev, 0x0002c6, 0x00000000);
201 nv_icmd(dev, 0x0002c7, 0x00000000);
202 nv_icmd(dev, 0x0002c8, 0x00000000);
203 nv_icmd(dev, 0x0002c9, 0x00000000);
204 nv_icmd(dev, 0x0002ca, 0x00000000);
205 nv_icmd(dev, 0x0002cb, 0x00000000);
206 nv_icmd(dev, 0x0002cc, 0x00000000);
207 nv_icmd(dev, 0x0002cd, 0x00000000);
208 nv_icmd(dev, 0x0002ce, 0x00000000);
209 nv_icmd(dev, 0x0002cf, 0x00000000);
210 nv_icmd(dev, 0x0004d0, 0x00000000);
211 nv_icmd(dev, 0x0004d1, 0x00000000);
212 nv_icmd(dev, 0x0004d2, 0x00000000);
213 nv_icmd(dev, 0x0004d3, 0x00000000);
214 nv_icmd(dev, 0x0004d4, 0x00000000);
215 nv_icmd(dev, 0x0004d5, 0x00000000);
216 nv_icmd(dev, 0x0004d6, 0x00000000);
217 nv_icmd(dev, 0x0004d7, 0x00000000);
218 nv_icmd(dev, 0x0004d8, 0x00000000);
219 nv_icmd(dev, 0x0004d9, 0x00000000);
220 nv_icmd(dev, 0x0004da, 0x00000000);
221 nv_icmd(dev, 0x0004db, 0x00000000);
222 nv_icmd(dev, 0x0004dc, 0x00000000);
223 nv_icmd(dev, 0x0004dd, 0x00000000);
224 nv_icmd(dev, 0x0004de, 0x00000000);
225 nv_icmd(dev, 0x0004df, 0x00000000);
226 nv_icmd(dev, 0x000720, 0x00000000);
227 nv_icmd(dev, 0x000721, 0x00000000);
228 nv_icmd(dev, 0x000722, 0x00000000);
229 nv_icmd(dev, 0x000723, 0x00000000);
230 nv_icmd(dev, 0x000724, 0x00000000);
231 nv_icmd(dev, 0x000725, 0x00000000);
232 nv_icmd(dev, 0x000726, 0x00000000);
233 nv_icmd(dev, 0x000727, 0x00000000);
234 nv_icmd(dev, 0x000728, 0x00000000);
235 nv_icmd(dev, 0x000729, 0x00000000);
236 nv_icmd(dev, 0x00072a, 0x00000000);
237 nv_icmd(dev, 0x00072b, 0x00000000);
238 nv_icmd(dev, 0x00072c, 0x00000000);
239 nv_icmd(dev, 0x00072d, 0x00000000);
240 nv_icmd(dev, 0x00072e, 0x00000000);
241 nv_icmd(dev, 0x00072f, 0x00000000);
242 nv_icmd(dev, 0x0008c0, 0x00000000);
243 nv_icmd(dev, 0x0008c1, 0x00000000);
244 nv_icmd(dev, 0x0008c2, 0x00000000);
245 nv_icmd(dev, 0x0008c3, 0x00000000);
246 nv_icmd(dev, 0x0008c4, 0x00000000);
247 nv_icmd(dev, 0x0008c5, 0x00000000);
248 nv_icmd(dev, 0x0008c6, 0x00000000);
249 nv_icmd(dev, 0x0008c7, 0x00000000);
250 nv_icmd(dev, 0x0008c8, 0x00000000);
251 nv_icmd(dev, 0x0008c9, 0x00000000);
252 nv_icmd(dev, 0x0008ca, 0x00000000);
253 nv_icmd(dev, 0x0008cb, 0x00000000);
254 nv_icmd(dev, 0x0008cc, 0x00000000);
255 nv_icmd(dev, 0x0008cd, 0x00000000);
256 nv_icmd(dev, 0x0008ce, 0x00000000);
257 nv_icmd(dev, 0x0008cf, 0x00000000);
258 nv_icmd(dev, 0x000890, 0x00000000);
259 nv_icmd(dev, 0x000891, 0x00000000);
260 nv_icmd(dev, 0x000892, 0x00000000);
261 nv_icmd(dev, 0x000893, 0x00000000);
262 nv_icmd(dev, 0x000894, 0x00000000);
263 nv_icmd(dev, 0x000895, 0x00000000);
264 nv_icmd(dev, 0x000896, 0x00000000);
265 nv_icmd(dev, 0x000897, 0x00000000);
266 nv_icmd(dev, 0x000898, 0x00000000);
267 nv_icmd(dev, 0x000899, 0x00000000);
268 nv_icmd(dev, 0x00089a, 0x00000000);
269 nv_icmd(dev, 0x00089b, 0x00000000);
270 nv_icmd(dev, 0x00089c, 0x00000000);
271 nv_icmd(dev, 0x00089d, 0x00000000);
272 nv_icmd(dev, 0x00089e, 0x00000000);
273 nv_icmd(dev, 0x00089f, 0x00000000);
274 nv_icmd(dev, 0x0008e0, 0x00000000);
275 nv_icmd(dev, 0x0008e1, 0x00000000);
276 nv_icmd(dev, 0x0008e2, 0x00000000);
277 nv_icmd(dev, 0x0008e3, 0x00000000);
278 nv_icmd(dev, 0x0008e4, 0x00000000);
279 nv_icmd(dev, 0x0008e5, 0x00000000);
280 nv_icmd(dev, 0x0008e6, 0x00000000);
281 nv_icmd(dev, 0x0008e7, 0x00000000);
282 nv_icmd(dev, 0x0008e8, 0x00000000);
283 nv_icmd(dev, 0x0008e9, 0x00000000);
284 nv_icmd(dev, 0x0008ea, 0x00000000);
285 nv_icmd(dev, 0x0008eb, 0x00000000);
286 nv_icmd(dev, 0x0008ec, 0x00000000);
287 nv_icmd(dev, 0x0008ed, 0x00000000);
288 nv_icmd(dev, 0x0008ee, 0x00000000);
289 nv_icmd(dev, 0x0008ef, 0x00000000);
290 nv_icmd(dev, 0x0008a0, 0x00000000);
291 nv_icmd(dev, 0x0008a1, 0x00000000);
292 nv_icmd(dev, 0x0008a2, 0x00000000);
293 nv_icmd(dev, 0x0008a3, 0x00000000);
294 nv_icmd(dev, 0x0008a4, 0x00000000);
295 nv_icmd(dev, 0x0008a5, 0x00000000);
296 nv_icmd(dev, 0x0008a6, 0x00000000);
297 nv_icmd(dev, 0x0008a7, 0x00000000);
298 nv_icmd(dev, 0x0008a8, 0x00000000);
299 nv_icmd(dev, 0x0008a9, 0x00000000);
300 nv_icmd(dev, 0x0008aa, 0x00000000);
301 nv_icmd(dev, 0x0008ab, 0x00000000);
302 nv_icmd(dev, 0x0008ac, 0x00000000);
303 nv_icmd(dev, 0x0008ad, 0x00000000);
304 nv_icmd(dev, 0x0008ae, 0x00000000);
305 nv_icmd(dev, 0x0008af, 0x00000000);
306 nv_icmd(dev, 0x0008f0, 0x00000000);
307 nv_icmd(dev, 0x0008f1, 0x00000000);
308 nv_icmd(dev, 0x0008f2, 0x00000000);
309 nv_icmd(dev, 0x0008f3, 0x00000000);
310 nv_icmd(dev, 0x0008f4, 0x00000000);
311 nv_icmd(dev, 0x0008f5, 0x00000000);
312 nv_icmd(dev, 0x0008f6, 0x00000000);
313 nv_icmd(dev, 0x0008f7, 0x00000000);
314 nv_icmd(dev, 0x0008f8, 0x00000000);
315 nv_icmd(dev, 0x0008f9, 0x00000000);
316 nv_icmd(dev, 0x0008fa, 0x00000000);
317 nv_icmd(dev, 0x0008fb, 0x00000000);
318 nv_icmd(dev, 0x0008fc, 0x00000000);
319 nv_icmd(dev, 0x0008fd, 0x00000000);
320 nv_icmd(dev, 0x0008fe, 0x00000000);
321 nv_icmd(dev, 0x0008ff, 0x00000000);
322 nv_icmd(dev, 0x00094c, 0x000000ff);
323 nv_icmd(dev, 0x00094d, 0xffffffff);
324 nv_icmd(dev, 0x00094e, 0x00000002);
325 nv_icmd(dev, 0x0002ec, 0x00000001);
326 nv_icmd(dev, 0x000303, 0x00000001);
327 nv_icmd(dev, 0x0002e6, 0x00000001);
328 nv_icmd(dev, 0x000466, 0x00000052);
329 nv_icmd(dev, 0x000301, 0x3f800000);
330 nv_icmd(dev, 0x000304, 0x30201000);
331 nv_icmd(dev, 0x000305, 0x70605040);
332 nv_icmd(dev, 0x000306, 0xb8a89888);
333 nv_icmd(dev, 0x000307, 0xf8e8d8c8);
334 nv_icmd(dev, 0x00030a, 0x00ffff00);
335 nv_icmd(dev, 0x00030b, 0x0000001a);
336 nv_icmd(dev, 0x00030c, 0x00000001);
337 nv_icmd(dev, 0x000318, 0x00000001);
338 nv_icmd(dev, 0x000340, 0x00000000);
339 nv_icmd(dev, 0x000375, 0x00000001);
340 nv_icmd(dev, 0x00037d, 0x00000006);
341 nv_icmd(dev, 0x0003a0, 0x00000002);
342 nv_icmd(dev, 0x0003aa, 0x00000001);
343 nv_icmd(dev, 0x0003a9, 0x00000001);
344 nv_icmd(dev, 0x000380, 0x00000001);
345 nv_icmd(dev, 0x000383, 0x00000011);
346 nv_icmd(dev, 0x000360, 0x00000040);
347 nv_icmd(dev, 0x000366, 0x00000000);
348 nv_icmd(dev, 0x000367, 0x00000000);
349 nv_icmd(dev, 0x000368, 0x00000fff);
350 nv_icmd(dev, 0x000370, 0x00000000);
351 nv_icmd(dev, 0x000371, 0x00000000);
352 nv_icmd(dev, 0x000372, 0x000fffff);
353 nv_icmd(dev, 0x00037a, 0x00000012);
354 nv_icmd(dev, 0x000619, 0x00000003);
355 nv_icmd(dev, 0x000811, 0x00000003);
356 nv_icmd(dev, 0x000812, 0x00000004);
357 nv_icmd(dev, 0x000813, 0x00000006);
358 nv_icmd(dev, 0x000814, 0x00000008);
359 nv_icmd(dev, 0x000815, 0x0000000b);
360 nv_icmd(dev, 0x000800, 0x00000001);
361 nv_icmd(dev, 0x000801, 0x00000001);
362 nv_icmd(dev, 0x000802, 0x00000001);
363 nv_icmd(dev, 0x000803, 0x00000001);
364 nv_icmd(dev, 0x000804, 0x00000001);
365 nv_icmd(dev, 0x000805, 0x00000001);
366 nv_icmd(dev, 0x000632, 0x00000001);
367 nv_icmd(dev, 0x000633, 0x00000002);
368 nv_icmd(dev, 0x000634, 0x00000003);
369 nv_icmd(dev, 0x000635, 0x00000004);
370 nv_icmd(dev, 0x000654, 0x3f800000);
371 nv_icmd(dev, 0x000657, 0x3f800000);
372 nv_icmd(dev, 0x000655, 0x3f800000);
373 nv_icmd(dev, 0x000656, 0x3f800000);
374 nv_icmd(dev, 0x0006cd, 0x3f800000);
375 nv_icmd(dev, 0x0007f5, 0x3f800000);
376 nv_icmd(dev, 0x0007dc, 0x39291909);
377 nv_icmd(dev, 0x0007dd, 0x79695949);
378 nv_icmd(dev, 0x0007de, 0xb9a99989);
379 nv_icmd(dev, 0x0007df, 0xf9e9d9c9);
380 nv_icmd(dev, 0x0007e8, 0x00003210);
381 nv_icmd(dev, 0x0007e9, 0x00007654);
382 nv_icmd(dev, 0x0007ea, 0x00000098);
383 nv_icmd(dev, 0x0007ec, 0x39291909);
384 nv_icmd(dev, 0x0007ed, 0x79695949);
385 nv_icmd(dev, 0x0007ee, 0xb9a99989);
386 nv_icmd(dev, 0x0007ef, 0xf9e9d9c9);
387 nv_icmd(dev, 0x0007f0, 0x00003210);
388 nv_icmd(dev, 0x0007f1, 0x00007654);
389 nv_icmd(dev, 0x0007f2, 0x00000098);
390 nv_icmd(dev, 0x0005a5, 0x00000001);
391 nv_icmd(dev, 0x000980, 0x00000000);
392 nv_icmd(dev, 0x000981, 0x00000000);
393 nv_icmd(dev, 0x000982, 0x00000000);
394 nv_icmd(dev, 0x000983, 0x00000000);
395 nv_icmd(dev, 0x000984, 0x00000000);
396 nv_icmd(dev, 0x000985, 0x00000000);
397 nv_icmd(dev, 0x000986, 0x00000000);
398 nv_icmd(dev, 0x000987, 0x00000000);
399 nv_icmd(dev, 0x000988, 0x00000000);
400 nv_icmd(dev, 0x000989, 0x00000000);
401 nv_icmd(dev, 0x00098a, 0x00000000);
402 nv_icmd(dev, 0x00098b, 0x00000000);
403 nv_icmd(dev, 0x00098c, 0x00000000);
404 nv_icmd(dev, 0x00098d, 0x00000000);
405 nv_icmd(dev, 0x00098e, 0x00000000);
406 nv_icmd(dev, 0x00098f, 0x00000000);
407 nv_icmd(dev, 0x000990, 0x00000000);
408 nv_icmd(dev, 0x000991, 0x00000000);
409 nv_icmd(dev, 0x000992, 0x00000000);
410 nv_icmd(dev, 0x000993, 0x00000000);
411 nv_icmd(dev, 0x000994, 0x00000000);
412 nv_icmd(dev, 0x000995, 0x00000000);
413 nv_icmd(dev, 0x000996, 0x00000000);
414 nv_icmd(dev, 0x000997, 0x00000000);
415 nv_icmd(dev, 0x000998, 0x00000000);
416 nv_icmd(dev, 0x000999, 0x00000000);
417 nv_icmd(dev, 0x00099a, 0x00000000);
418 nv_icmd(dev, 0x00099b, 0x00000000);
419 nv_icmd(dev, 0x00099c, 0x00000000);
420 nv_icmd(dev, 0x00099d, 0x00000000);
421 nv_icmd(dev, 0x00099e, 0x00000000);
422 nv_icmd(dev, 0x00099f, 0x00000000);
423 nv_icmd(dev, 0x0009a0, 0x00000000);
424 nv_icmd(dev, 0x0009a1, 0x00000000);
425 nv_icmd(dev, 0x0009a2, 0x00000000);
426 nv_icmd(dev, 0x0009a3, 0x00000000);
427 nv_icmd(dev, 0x0009a4, 0x00000000);
428 nv_icmd(dev, 0x0009a5, 0x00000000);
429 nv_icmd(dev, 0x0009a6, 0x00000000);
430 nv_icmd(dev, 0x0009a7, 0x00000000);
431 nv_icmd(dev, 0x0009a8, 0x00000000);
432 nv_icmd(dev, 0x0009a9, 0x00000000);
433 nv_icmd(dev, 0x0009aa, 0x00000000);
434 nv_icmd(dev, 0x0009ab, 0x00000000);
435 nv_icmd(dev, 0x0009ac, 0x00000000);
436 nv_icmd(dev, 0x0009ad, 0x00000000);
437 nv_icmd(dev, 0x0009ae, 0x00000000);
438 nv_icmd(dev, 0x0009af, 0x00000000);
439 nv_icmd(dev, 0x0009b0, 0x00000000);
440 nv_icmd(dev, 0x0009b1, 0x00000000);
441 nv_icmd(dev, 0x0009b2, 0x00000000);
442 nv_icmd(dev, 0x0009b3, 0x00000000);
443 nv_icmd(dev, 0x0009b4, 0x00000000);
444 nv_icmd(dev, 0x0009b5, 0x00000000);
445 nv_icmd(dev, 0x0009b6, 0x00000000);
446 nv_icmd(dev, 0x0009b7, 0x00000000);
447 nv_icmd(dev, 0x0009b8, 0x00000000);
448 nv_icmd(dev, 0x0009b9, 0x00000000);
449 nv_icmd(dev, 0x0009ba, 0x00000000);
450 nv_icmd(dev, 0x0009bb, 0x00000000);
451 nv_icmd(dev, 0x0009bc, 0x00000000);
452 nv_icmd(dev, 0x0009bd, 0x00000000);
453 nv_icmd(dev, 0x0009be, 0x00000000);
454 nv_icmd(dev, 0x0009bf, 0x00000000);
455 nv_icmd(dev, 0x0009c0, 0x00000000);
456 nv_icmd(dev, 0x0009c1, 0x00000000);
457 nv_icmd(dev, 0x0009c2, 0x00000000);
458 nv_icmd(dev, 0x0009c3, 0x00000000);
459 nv_icmd(dev, 0x0009c4, 0x00000000);
460 nv_icmd(dev, 0x0009c5, 0x00000000);
461 nv_icmd(dev, 0x0009c6, 0x00000000);
462 nv_icmd(dev, 0x0009c7, 0x00000000);
463 nv_icmd(dev, 0x0009c8, 0x00000000);
464 nv_icmd(dev, 0x0009c9, 0x00000000);
465 nv_icmd(dev, 0x0009ca, 0x00000000);
466 nv_icmd(dev, 0x0009cb, 0x00000000);
467 nv_icmd(dev, 0x0009cc, 0x00000000);
468 nv_icmd(dev, 0x0009cd, 0x00000000);
469 nv_icmd(dev, 0x0009ce, 0x00000000);
470 nv_icmd(dev, 0x0009cf, 0x00000000);
471 nv_icmd(dev, 0x0009d0, 0x00000000);
472 nv_icmd(dev, 0x0009d1, 0x00000000);
473 nv_icmd(dev, 0x0009d2, 0x00000000);
474 nv_icmd(dev, 0x0009d3, 0x00000000);
475 nv_icmd(dev, 0x0009d4, 0x00000000);
476 nv_icmd(dev, 0x0009d5, 0x00000000);
477 nv_icmd(dev, 0x0009d6, 0x00000000);
478 nv_icmd(dev, 0x0009d7, 0x00000000);
479 nv_icmd(dev, 0x0009d8, 0x00000000);
480 nv_icmd(dev, 0x0009d9, 0x00000000);
481 nv_icmd(dev, 0x0009da, 0x00000000);
482 nv_icmd(dev, 0x0009db, 0x00000000);
483 nv_icmd(dev, 0x0009dc, 0x00000000);
484 nv_icmd(dev, 0x0009dd, 0x00000000);
485 nv_icmd(dev, 0x0009de, 0x00000000);
486 nv_icmd(dev, 0x0009df, 0x00000000);
487 nv_icmd(dev, 0x0009e0, 0x00000000);
488 nv_icmd(dev, 0x0009e1, 0x00000000);
489 nv_icmd(dev, 0x0009e2, 0x00000000);
490 nv_icmd(dev, 0x0009e3, 0x00000000);
491 nv_icmd(dev, 0x0009e4, 0x00000000);
492 nv_icmd(dev, 0x0009e5, 0x00000000);
493 nv_icmd(dev, 0x0009e6, 0x00000000);
494 nv_icmd(dev, 0x0009e7, 0x00000000);
495 nv_icmd(dev, 0x0009e8, 0x00000000);
496 nv_icmd(dev, 0x0009e9, 0x00000000);
497 nv_icmd(dev, 0x0009ea, 0x00000000);
498 nv_icmd(dev, 0x0009eb, 0x00000000);
499 nv_icmd(dev, 0x0009ec, 0x00000000);
500 nv_icmd(dev, 0x0009ed, 0x00000000);
501 nv_icmd(dev, 0x0009ee, 0x00000000);
502 nv_icmd(dev, 0x0009ef, 0x00000000);
503 nv_icmd(dev, 0x0009f0, 0x00000000);
504 nv_icmd(dev, 0x0009f1, 0x00000000);
505 nv_icmd(dev, 0x0009f2, 0x00000000);
506 nv_icmd(dev, 0x0009f3, 0x00000000);
507 nv_icmd(dev, 0x0009f4, 0x00000000);
508 nv_icmd(dev, 0x0009f5, 0x00000000);
509 nv_icmd(dev, 0x0009f6, 0x00000000);
510 nv_icmd(dev, 0x0009f7, 0x00000000);
511 nv_icmd(dev, 0x0009f8, 0x00000000);
512 nv_icmd(dev, 0x0009f9, 0x00000000);
513 nv_icmd(dev, 0x0009fa, 0x00000000);
514 nv_icmd(dev, 0x0009fb, 0x00000000);
515 nv_icmd(dev, 0x0009fc, 0x00000000);
516 nv_icmd(dev, 0x0009fd, 0x00000000);
517 nv_icmd(dev, 0x0009fe, 0x00000000);
518 nv_icmd(dev, 0x0009ff, 0x00000000);
519 nv_icmd(dev, 0x000468, 0x00000004);
520 nv_icmd(dev, 0x00046c, 0x00000001);
521 nv_icmd(dev, 0x000470, 0x00000000);
522 nv_icmd(dev, 0x000471, 0x00000000);
523 nv_icmd(dev, 0x000472, 0x00000000);
524 nv_icmd(dev, 0x000473, 0x00000000);
525 nv_icmd(dev, 0x000474, 0x00000000);
526 nv_icmd(dev, 0x000475, 0x00000000);
527 nv_icmd(dev, 0x000476, 0x00000000);
528 nv_icmd(dev, 0x000477, 0x00000000);
529 nv_icmd(dev, 0x000478, 0x00000000);
530 nv_icmd(dev, 0x000479, 0x00000000);
531 nv_icmd(dev, 0x00047a, 0x00000000);
532 nv_icmd(dev, 0x00047b, 0x00000000);
533 nv_icmd(dev, 0x00047c, 0x00000000);
534 nv_icmd(dev, 0x00047d, 0x00000000);
535 nv_icmd(dev, 0x00047e, 0x00000000);
536 nv_icmd(dev, 0x00047f, 0x00000000);
537 nv_icmd(dev, 0x000480, 0x00000000);
538 nv_icmd(dev, 0x000481, 0x00000000);
539 nv_icmd(dev, 0x000482, 0x00000000);
540 nv_icmd(dev, 0x000483, 0x00000000);
541 nv_icmd(dev, 0x000484, 0x00000000);
542 nv_icmd(dev, 0x000485, 0x00000000);
543 nv_icmd(dev, 0x000486, 0x00000000);
544 nv_icmd(dev, 0x000487, 0x00000000);
545 nv_icmd(dev, 0x000488, 0x00000000);
546 nv_icmd(dev, 0x000489, 0x00000000);
547 nv_icmd(dev, 0x00048a, 0x00000000);
548 nv_icmd(dev, 0x00048b, 0x00000000);
549 nv_icmd(dev, 0x00048c, 0x00000000);
550 nv_icmd(dev, 0x00048d, 0x00000000);
551 nv_icmd(dev, 0x00048e, 0x00000000);
552 nv_icmd(dev, 0x00048f, 0x00000000);
553 nv_icmd(dev, 0x000490, 0x00000000);
554 nv_icmd(dev, 0x000491, 0x00000000);
555 nv_icmd(dev, 0x000492, 0x00000000);
556 nv_icmd(dev, 0x000493, 0x00000000);
557 nv_icmd(dev, 0x000494, 0x00000000);
558 nv_icmd(dev, 0x000495, 0x00000000);
559 nv_icmd(dev, 0x000496, 0x00000000);
560 nv_icmd(dev, 0x000497, 0x00000000);
561 nv_icmd(dev, 0x000498, 0x00000000);
562 nv_icmd(dev, 0x000499, 0x00000000);
563 nv_icmd(dev, 0x00049a, 0x00000000);
564 nv_icmd(dev, 0x00049b, 0x00000000);
565 nv_icmd(dev, 0x00049c, 0x00000000);
566 nv_icmd(dev, 0x00049d, 0x00000000);
567 nv_icmd(dev, 0x00049e, 0x00000000);
568 nv_icmd(dev, 0x00049f, 0x00000000);
569 nv_icmd(dev, 0x0004a0, 0x00000000);
570 nv_icmd(dev, 0x0004a1, 0x00000000);
571 nv_icmd(dev, 0x0004a2, 0x00000000);
572 nv_icmd(dev, 0x0004a3, 0x00000000);
573 nv_icmd(dev, 0x0004a4, 0x00000000);
574 nv_icmd(dev, 0x0004a5, 0x00000000);
575 nv_icmd(dev, 0x0004a6, 0x00000000);
576 nv_icmd(dev, 0x0004a7, 0x00000000);
577 nv_icmd(dev, 0x0004a8, 0x00000000);
578 nv_icmd(dev, 0x0004a9, 0x00000000);
579 nv_icmd(dev, 0x0004aa, 0x00000000);
580 nv_icmd(dev, 0x0004ab, 0x00000000);
581 nv_icmd(dev, 0x0004ac, 0x00000000);
582 nv_icmd(dev, 0x0004ad, 0x00000000);
583 nv_icmd(dev, 0x0004ae, 0x00000000);
584 nv_icmd(dev, 0x0004af, 0x00000000);
585 nv_icmd(dev, 0x0004b0, 0x00000000);
586 nv_icmd(dev, 0x0004b1, 0x00000000);
587 nv_icmd(dev, 0x0004b2, 0x00000000);
588 nv_icmd(dev, 0x0004b3, 0x00000000);
589 nv_icmd(dev, 0x0004b4, 0x00000000);
590 nv_icmd(dev, 0x0004b5, 0x00000000);
591 nv_icmd(dev, 0x0004b6, 0x00000000);
592 nv_icmd(dev, 0x0004b7, 0x00000000);
593 nv_icmd(dev, 0x0004b8, 0x00000000);
594 nv_icmd(dev, 0x0004b9, 0x00000000);
595 nv_icmd(dev, 0x0004ba, 0x00000000);
596 nv_icmd(dev, 0x0004bb, 0x00000000);
597 nv_icmd(dev, 0x0004bc, 0x00000000);
598 nv_icmd(dev, 0x0004bd, 0x00000000);
599 nv_icmd(dev, 0x0004be, 0x00000000);
600 nv_icmd(dev, 0x0004bf, 0x00000000);
601 nv_icmd(dev, 0x0004c0, 0x00000000);
602 nv_icmd(dev, 0x0004c1, 0x00000000);
603 nv_icmd(dev, 0x0004c2, 0x00000000);
604 nv_icmd(dev, 0x0004c3, 0x00000000);
605 nv_icmd(dev, 0x0004c4, 0x00000000);
606 nv_icmd(dev, 0x0004c5, 0x00000000);
607 nv_icmd(dev, 0x0004c6, 0x00000000);
608 nv_icmd(dev, 0x0004c7, 0x00000000);
609 nv_icmd(dev, 0x0004c8, 0x00000000);
610 nv_icmd(dev, 0x0004c9, 0x00000000);
611 nv_icmd(dev, 0x0004ca, 0x00000000);
612 nv_icmd(dev, 0x0004cb, 0x00000000);
613 nv_icmd(dev, 0x0004cc, 0x00000000);
614 nv_icmd(dev, 0x0004cd, 0x00000000);
615 nv_icmd(dev, 0x0004ce, 0x00000000);
616 nv_icmd(dev, 0x0004cf, 0x00000000);
617 nv_icmd(dev, 0x000510, 0x3f800000);
618 nv_icmd(dev, 0x000511, 0x3f800000);
619 nv_icmd(dev, 0x000512, 0x3f800000);
620 nv_icmd(dev, 0x000513, 0x3f800000);
621 nv_icmd(dev, 0x000514, 0x3f800000);
622 nv_icmd(dev, 0x000515, 0x3f800000);
623 nv_icmd(dev, 0x000516, 0x3f800000);
624 nv_icmd(dev, 0x000517, 0x3f800000);
625 nv_icmd(dev, 0x000518, 0x3f800000);
626 nv_icmd(dev, 0x000519, 0x3f800000);
627 nv_icmd(dev, 0x00051a, 0x3f800000);
628 nv_icmd(dev, 0x00051b, 0x3f800000);
629 nv_icmd(dev, 0x00051c, 0x3f800000);
630 nv_icmd(dev, 0x00051d, 0x3f800000);
631 nv_icmd(dev, 0x00051e, 0x3f800000);
632 nv_icmd(dev, 0x00051f, 0x3f800000);
633 nv_icmd(dev, 0x000520, 0x000002b6);
634 nv_icmd(dev, 0x000529, 0x00000001);
635 nv_icmd(dev, 0x000530, 0xffff0000);
636 nv_icmd(dev, 0x000531, 0xffff0000);
637 nv_icmd(dev, 0x000532, 0xffff0000);
638 nv_icmd(dev, 0x000533, 0xffff0000);
639 nv_icmd(dev, 0x000534, 0xffff0000);
640 nv_icmd(dev, 0x000535, 0xffff0000);
641 nv_icmd(dev, 0x000536, 0xffff0000);
642 nv_icmd(dev, 0x000537, 0xffff0000);
643 nv_icmd(dev, 0x000538, 0xffff0000);
644 nv_icmd(dev, 0x000539, 0xffff0000);
645 nv_icmd(dev, 0x00053a, 0xffff0000);
646 nv_icmd(dev, 0x00053b, 0xffff0000);
647 nv_icmd(dev, 0x00053c, 0xffff0000);
648 nv_icmd(dev, 0x00053d, 0xffff0000);
649 nv_icmd(dev, 0x00053e, 0xffff0000);
650 nv_icmd(dev, 0x00053f, 0xffff0000);
651 nv_icmd(dev, 0x000585, 0x0000003f);
652 nv_icmd(dev, 0x000576, 0x00000003);
653 nv_icmd(dev, 0x00057b, 0x00000059);
654 nv_icmd(dev, 0x000586, 0x00000040);
655 nv_icmd(dev, 0x000582, 0x00000080);
656 nv_icmd(dev, 0x000583, 0x00000080);
657 nv_icmd(dev, 0x0005c2, 0x00000001);
658 nv_icmd(dev, 0x000638, 0x00000001);
659 nv_icmd(dev, 0x000639, 0x00000001);
660 nv_icmd(dev, 0x00063a, 0x00000002);
661 nv_icmd(dev, 0x00063b, 0x00000001);
662 nv_icmd(dev, 0x00063c, 0x00000001);
663 nv_icmd(dev, 0x00063d, 0x00000002);
664 nv_icmd(dev, 0x00063e, 0x00000001);
665 nv_icmd(dev, 0x0008b8, 0x00000001);
666 nv_icmd(dev, 0x0008b9, 0x00000001);
667 nv_icmd(dev, 0x0008ba, 0x00000001);
668 nv_icmd(dev, 0x0008bb, 0x00000001);
669 nv_icmd(dev, 0x0008bc, 0x00000001);
670 nv_icmd(dev, 0x0008bd, 0x00000001);
671 nv_icmd(dev, 0x0008be, 0x00000001);
672 nv_icmd(dev, 0x0008bf, 0x00000001);
673 nv_icmd(dev, 0x000900, 0x00000001);
674 nv_icmd(dev, 0x000901, 0x00000001);
675 nv_icmd(dev, 0x000902, 0x00000001);
676 nv_icmd(dev, 0x000903, 0x00000001);
677 nv_icmd(dev, 0x000904, 0x00000001);
678 nv_icmd(dev, 0x000905, 0x00000001);
679 nv_icmd(dev, 0x000906, 0x00000001);
680 nv_icmd(dev, 0x000907, 0x00000001);
681 nv_icmd(dev, 0x000908, 0x00000002);
682 nv_icmd(dev, 0x000909, 0x00000002);
683 nv_icmd(dev, 0x00090a, 0x00000002);
684 nv_icmd(dev, 0x00090b, 0x00000002);
685 nv_icmd(dev, 0x00090c, 0x00000002);
686 nv_icmd(dev, 0x00090d, 0x00000002);
687 nv_icmd(dev, 0x00090e, 0x00000002);
688 nv_icmd(dev, 0x00090f, 0x00000002);
689 nv_icmd(dev, 0x000910, 0x00000001);
690 nv_icmd(dev, 0x000911, 0x00000001);
691 nv_icmd(dev, 0x000912, 0x00000001);
692 nv_icmd(dev, 0x000913, 0x00000001);
693 nv_icmd(dev, 0x000914, 0x00000001);
694 nv_icmd(dev, 0x000915, 0x00000001);
695 nv_icmd(dev, 0x000916, 0x00000001);
696 nv_icmd(dev, 0x000917, 0x00000001);
697 nv_icmd(dev, 0x000918, 0x00000001);
698 nv_icmd(dev, 0x000919, 0x00000001);
699 nv_icmd(dev, 0x00091a, 0x00000001);
700 nv_icmd(dev, 0x00091b, 0x00000001);
701 nv_icmd(dev, 0x00091c, 0x00000001);
702 nv_icmd(dev, 0x00091d, 0x00000001);
703 nv_icmd(dev, 0x00091e, 0x00000001);
704 nv_icmd(dev, 0x00091f, 0x00000001);
705 nv_icmd(dev, 0x000920, 0x00000002);
706 nv_icmd(dev, 0x000921, 0x00000002);
707 nv_icmd(dev, 0x000922, 0x00000002);
708 nv_icmd(dev, 0x000923, 0x00000002);
709 nv_icmd(dev, 0x000924, 0x00000002);
710 nv_icmd(dev, 0x000925, 0x00000002);
711 nv_icmd(dev, 0x000926, 0x00000002);
712 nv_icmd(dev, 0x000927, 0x00000002);
713 nv_icmd(dev, 0x000928, 0x00000001);
714 nv_icmd(dev, 0x000929, 0x00000001);
715 nv_icmd(dev, 0x00092a, 0x00000001);
716 nv_icmd(dev, 0x00092b, 0x00000001);
717 nv_icmd(dev, 0x00092c, 0x00000001);
718 nv_icmd(dev, 0x00092d, 0x00000001);
719 nv_icmd(dev, 0x00092e, 0x00000001);
720 nv_icmd(dev, 0x00092f, 0x00000001);
721 nv_icmd(dev, 0x000648, 0x00000001);
722 nv_icmd(dev, 0x000649, 0x00000001);
723 nv_icmd(dev, 0x00064a, 0x00000001);
724 nv_icmd(dev, 0x00064b, 0x00000001);
725 nv_icmd(dev, 0x00064c, 0x00000001);
726 nv_icmd(dev, 0x00064d, 0x00000001);
727 nv_icmd(dev, 0x00064e, 0x00000001);
728 nv_icmd(dev, 0x00064f, 0x00000001);
729 nv_icmd(dev, 0x000650, 0x00000001);
730 nv_icmd(dev, 0x000658, 0x0000000f);
731 nv_icmd(dev, 0x0007ff, 0x0000000a);
732 nv_icmd(dev, 0x00066a, 0x40000000);
733 nv_icmd(dev, 0x00066b, 0x10000000);
734 nv_icmd(dev, 0x00066c, 0xffff0000);
735 nv_icmd(dev, 0x00066d, 0xffff0000);
736 nv_icmd(dev, 0x0007af, 0x00000008);
737 nv_icmd(dev, 0x0007b0, 0x00000008);
738 nv_icmd(dev, 0x0007f6, 0x00000001);
739 nv_icmd(dev, 0x0006b2, 0x00000055);
740 nv_icmd(dev, 0x0007ad, 0x00000003);
741 nv_icmd(dev, 0x000937, 0x00000001);
742 nv_icmd(dev, 0x000971, 0x00000008);
743 nv_icmd(dev, 0x000972, 0x00000040);
744 nv_icmd(dev, 0x000973, 0x0000012c);
745 nv_icmd(dev, 0x00097c, 0x00000040);
746 nv_icmd(dev, 0x000979, 0x00000003);
747 nv_icmd(dev, 0x000975, 0x00000020);
748 nv_icmd(dev, 0x000976, 0x00000001);
749 nv_icmd(dev, 0x000977, 0x00000020);
750 nv_icmd(dev, 0x000978, 0x00000001);
751 nv_icmd(dev, 0x000957, 0x00000003);
752 nv_icmd(dev, 0x00095e, 0x20164010);
753 nv_icmd(dev, 0x00095f, 0x00000020);
754 nv_icmd(dev, 0x00097d, 0x00000020);
755 nv_icmd(dev, 0x000683, 0x00000006);
756 nv_icmd(dev, 0x000685, 0x003fffff);
757 nv_icmd(dev, 0x000687, 0x003fffff);
758 nv_icmd(dev, 0x0006a0, 0x00000005);
759 nv_icmd(dev, 0x000840, 0x00400008);
760 nv_icmd(dev, 0x000841, 0x08000080);
761 nv_icmd(dev, 0x000842, 0x00400008);
762 nv_icmd(dev, 0x000843, 0x08000080);
763 nv_icmd(dev, 0x000818, 0x00000000);
764 nv_icmd(dev, 0x000819, 0x00000000);
765 nv_icmd(dev, 0x00081a, 0x00000000);
766 nv_icmd(dev, 0x00081b, 0x00000000);
767 nv_icmd(dev, 0x00081c, 0x00000000);
768 nv_icmd(dev, 0x00081d, 0x00000000);
769 nv_icmd(dev, 0x00081e, 0x00000000);
770 nv_icmd(dev, 0x00081f, 0x00000000);
771 nv_icmd(dev, 0x000848, 0x00000000);
772 nv_icmd(dev, 0x000849, 0x00000000);
773 nv_icmd(dev, 0x00084a, 0x00000000);
774 nv_icmd(dev, 0x00084b, 0x00000000);
775 nv_icmd(dev, 0x00084c, 0x00000000);
776 nv_icmd(dev, 0x00084d, 0x00000000);
777 nv_icmd(dev, 0x00084e, 0x00000000);
778 nv_icmd(dev, 0x00084f, 0x00000000);
779 nv_icmd(dev, 0x000850, 0x00000000);
780 nv_icmd(dev, 0x000851, 0x00000000);
781 nv_icmd(dev, 0x000852, 0x00000000);
782 nv_icmd(dev, 0x000853, 0x00000000);
783 nv_icmd(dev, 0x000854, 0x00000000);
784 nv_icmd(dev, 0x000855, 0x00000000);
785 nv_icmd(dev, 0x000856, 0x00000000);
786 nv_icmd(dev, 0x000857, 0x00000000);
787 nv_icmd(dev, 0x000738, 0x00000000);
788 nv_icmd(dev, 0x0006aa, 0x00000001);
789 nv_icmd(dev, 0x0006ab, 0x00000002);
790 nv_icmd(dev, 0x0006ac, 0x00000080);
791 nv_icmd(dev, 0x0006ad, 0x00000100);
792 nv_icmd(dev, 0x0006ae, 0x00000100);
793 nv_icmd(dev, 0x0006b1, 0x00000011);
794 nv_icmd(dev, 0x0006bb, 0x000000cf);
795 nv_icmd(dev, 0x0006ce, 0x2a712488);
796 nv_icmd(dev, 0x000739, 0x4085c000);
797 nv_icmd(dev, 0x00073a, 0x00000080);
798 nv_icmd(dev, 0x000786, 0x80000100);
799 nv_icmd(dev, 0x00073c, 0x00010100);
800 nv_icmd(dev, 0x00073d, 0x02800000);
801 nv_icmd(dev, 0x000787, 0x000000cf);
802 nv_icmd(dev, 0x00078c, 0x00000008);
803 nv_icmd(dev, 0x000792, 0x00000001);
804 nv_icmd(dev, 0x000794, 0x00000001);
805 nv_icmd(dev, 0x000795, 0x00000001);
806 nv_icmd(dev, 0x000796, 0x00000001);
807 nv_icmd(dev, 0x000797, 0x000000cf);
808 nv_icmd(dev, 0x000836, 0x00000001);
809 nv_icmd(dev, 0x00079a, 0x00000002);
810 nv_icmd(dev, 0x000833, 0x04444480);
811 nv_icmd(dev, 0x0007a1, 0x00000001);
812 nv_icmd(dev, 0x0007a3, 0x00000001);
813 nv_icmd(dev, 0x0007a4, 0x00000001);
814 nv_icmd(dev, 0x0007a5, 0x00000001);
815 nv_icmd(dev, 0x000831, 0x00000004);
816 nv_icmd(dev, 0x000b07, 0x00000002);
817 nv_icmd(dev, 0x000b08, 0x00000100);
818 nv_icmd(dev, 0x000b09, 0x00000100);
819 nv_icmd(dev, 0x000b0a, 0x00000001);
820 nv_icmd(dev, 0x000a04, 0x000000ff);
821 nv_icmd(dev, 0x000a0b, 0x00000040);
822 nv_icmd(dev, 0x00097f, 0x00000100);
823 nv_icmd(dev, 0x000a02, 0x00000001);
824 nv_icmd(dev, 0x000809, 0x00000007);
825 nv_icmd(dev, 0x00c221, 0x00000040);
826 nv_icmd(dev, 0x00c1b0, 0x0000000f);
827 nv_icmd(dev, 0x00c1b1, 0x0000000f);
828 nv_icmd(dev, 0x00c1b2, 0x0000000f);
829 nv_icmd(dev, 0x00c1b3, 0x0000000f);
830 nv_icmd(dev, 0x00c1b4, 0x0000000f);
831 nv_icmd(dev, 0x00c1b5, 0x0000000f);
832 nv_icmd(dev, 0x00c1b6, 0x0000000f);
833 nv_icmd(dev, 0x00c1b7, 0x0000000f);
834 nv_icmd(dev, 0x00c1b8, 0x0fac6881);
835 nv_icmd(dev, 0x00c1b9, 0x00fac688);
836 nv_icmd(dev, 0x00c401, 0x00000001);
837 nv_icmd(dev, 0x00c402, 0x00010001);
838 nv_icmd(dev, 0x00c403, 0x00000001);
839 nv_icmd(dev, 0x00c404, 0x00000001);
840 nv_icmd(dev, 0x00c40e, 0x00000020);
841 nv_icmd(dev, 0x00c500, 0x00000003);
842 nv_icmd(dev, 0x01e100, 0x00000001);
843 nv_icmd(dev, 0x001000, 0x00000002);
844 nv_icmd(dev, 0x0006aa, 0x00000001);
845 nv_icmd(dev, 0x0006ad, 0x00000100);
846 nv_icmd(dev, 0x0006ae, 0x00000100);
847 nv_icmd(dev, 0x0006b1, 0x00000011);
848 nv_icmd(dev, 0x00078c, 0x00000008);
849 nv_icmd(dev, 0x000792, 0x00000001);
850 nv_icmd(dev, 0x000794, 0x00000001);
851 nv_icmd(dev, 0x000795, 0x00000001);
852 nv_icmd(dev, 0x000796, 0x00000001);
853 nv_icmd(dev, 0x000797, 0x000000cf);
854 nv_icmd(dev, 0x00079a, 0x00000002);
855 nv_icmd(dev, 0x000833, 0x04444480);
856 nv_icmd(dev, 0x0007a1, 0x00000001);
857 nv_icmd(dev, 0x0007a3, 0x00000001);
858 nv_icmd(dev, 0x0007a4, 0x00000001);
859 nv_icmd(dev, 0x0007a5, 0x00000001);
860 nv_icmd(dev, 0x000831, 0x00000004);
861 nv_icmd(dev, 0x01e100, 0x00000001);
862 nv_icmd(dev, 0x001000, 0x00000008);
863 nv_icmd(dev, 0x000039, 0x00000000);
864 nv_icmd(dev, 0x00003a, 0x00000000);
865 nv_icmd(dev, 0x00003b, 0x00000000);
866 nv_icmd(dev, 0x000380, 0x00000001);
867 nv_icmd(dev, 0x000366, 0x00000000);
868 nv_icmd(dev, 0x000367, 0x00000000);
869 nv_icmd(dev, 0x000368, 0x00000fff);
870 nv_icmd(dev, 0x000370, 0x00000000);
871 nv_icmd(dev, 0x000371, 0x00000000);
872 nv_icmd(dev, 0x000372, 0x000fffff);
873 nv_icmd(dev, 0x000813, 0x00000006);
874 nv_icmd(dev, 0x000814, 0x00000008);
875 nv_icmd(dev, 0x000957, 0x00000003);
876 nv_icmd(dev, 0x000818, 0x00000000);
877 nv_icmd(dev, 0x000819, 0x00000000);
878 nv_icmd(dev, 0x00081a, 0x00000000);
879 nv_icmd(dev, 0x00081b, 0x00000000);
880 nv_icmd(dev, 0x00081c, 0x00000000);
881 nv_icmd(dev, 0x00081d, 0x00000000);
882 nv_icmd(dev, 0x00081e, 0x00000000);
883 nv_icmd(dev, 0x00081f, 0x00000000);
884 nv_icmd(dev, 0x000848, 0x00000000);
885 nv_icmd(dev, 0x000849, 0x00000000);
886 nv_icmd(dev, 0x00084a, 0x00000000);
887 nv_icmd(dev, 0x00084b, 0x00000000);
888 nv_icmd(dev, 0x00084c, 0x00000000);
889 nv_icmd(dev, 0x00084d, 0x00000000);
890 nv_icmd(dev, 0x00084e, 0x00000000);
891 nv_icmd(dev, 0x00084f, 0x00000000);
892 nv_icmd(dev, 0x000850, 0x00000000);
893 nv_icmd(dev, 0x000851, 0x00000000);
894 nv_icmd(dev, 0x000852, 0x00000000);
895 nv_icmd(dev, 0x000853, 0x00000000);
896 nv_icmd(dev, 0x000854, 0x00000000);
897 nv_icmd(dev, 0x000855, 0x00000000);
898 nv_icmd(dev, 0x000856, 0x00000000);
899 nv_icmd(dev, 0x000857, 0x00000000);
900 nv_icmd(dev, 0x000738, 0x00000000);
901 nv_icmd(dev, 0x000b07, 0x00000002);
902 nv_icmd(dev, 0x000b08, 0x00000100);
903 nv_icmd(dev, 0x000b09, 0x00000100);
904 nv_icmd(dev, 0x000b0a, 0x00000001);
905 nv_icmd(dev, 0x000a04, 0x000000ff);
906 nv_icmd(dev, 0x00097f, 0x00000100);
907 nv_icmd(dev, 0x000a02, 0x00000001);
908 nv_icmd(dev, 0x000809, 0x00000007);
909 nv_icmd(dev, 0x00c221, 0x00000040);
910 nv_icmd(dev, 0x00c401, 0x00000001);
911 nv_icmd(dev, 0x00c402, 0x00010001);
912 nv_icmd(dev, 0x00c403, 0x00000001);
913 nv_icmd(dev, 0x00c404, 0x00000001);
914 nv_icmd(dev, 0x00c40e, 0x00000020);
915 nv_icmd(dev, 0x00c500, 0x00000003);
916 nv_icmd(dev, 0x01e100, 0x00000001);
917 nv_icmd(dev, 0x001000, 0x00000001);
918 nv_icmd(dev, 0x000b07, 0x00000002);
919 nv_icmd(dev, 0x000b08, 0x00000100);
920 nv_icmd(dev, 0x000b09, 0x00000100);
921 nv_icmd(dev, 0x000b0a, 0x00000001);
922 nv_icmd(dev, 0x01e100, 0x00000001);
923 nv_wr32(dev, 0x400208, 0x00000000);
924}
925
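/*
 * Issue a single method call to the graphics engine: data goes in
 * 0x40448c, then the trigger word encoding method and class in 0x404488.
 */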
926static void
927nv_mthd(struct drm_device *dev, u32 class, u32 mthd, u32 data)
928{
929 nv_wr32(dev, 0x40448c, data);
930 nv_wr32(dev, 0x404488, 0x80000000 | (mthd << 14) | class);
931}
932
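/* per-method default state for the 0xa097 (Kepler 3D) graphics class */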
933static void
934nve0_grctx_generate_a097(struct drm_device *dev)
935{
936 nv_mthd(dev, 0xa097, 0x0800, 0x00000000);
937 nv_mthd(dev, 0xa097, 0x0840, 0x00000000);
938 nv_mthd(dev, 0xa097, 0x0880, 0x00000000);
939 nv_mthd(dev, 0xa097, 0x08c0, 0x00000000);
940 nv_mthd(dev, 0xa097, 0x0900, 0x00000000);
941 nv_mthd(dev, 0xa097, 0x0940, 0x00000000);
942 nv_mthd(dev, 0xa097, 0x0980, 0x00000000);
943 nv_mthd(dev, 0xa097, 0x09c0, 0x00000000);
944 nv_mthd(dev, 0xa097, 0x0804, 0x00000000);
945 nv_mthd(dev, 0xa097, 0x0844, 0x00000000);
946 nv_mthd(dev, 0xa097, 0x0884, 0x00000000);
947 nv_mthd(dev, 0xa097, 0x08c4, 0x00000000);
948 nv_mthd(dev, 0xa097, 0x0904, 0x00000000);
949 nv_mthd(dev, 0xa097, 0x0944, 0x00000000);
950 nv_mthd(dev, 0xa097, 0x0984, 0x00000000);
951 nv_mthd(dev, 0xa097, 0x09c4, 0x00000000);
952 nv_mthd(dev, 0xa097, 0x0808, 0x00000400);
953 nv_mthd(dev, 0xa097, 0x0848, 0x00000400);
954 nv_mthd(dev, 0xa097, 0x0888, 0x00000400);
955 nv_mthd(dev, 0xa097, 0x08c8, 0x00000400);
956 nv_mthd(dev, 0xa097, 0x0908, 0x00000400);
957 nv_mthd(dev, 0xa097, 0x0948, 0x00000400);
958 nv_mthd(dev, 0xa097, 0x0988, 0x00000400);
959 nv_mthd(dev, 0xa097, 0x09c8, 0x00000400);
960 nv_mthd(dev, 0xa097, 0x080c, 0x00000300);
961 nv_mthd(dev, 0xa097, 0x084c, 0x00000300);
962 nv_mthd(dev, 0xa097, 0x088c, 0x00000300);
963 nv_mthd(dev, 0xa097, 0x08cc, 0x00000300);
964 nv_mthd(dev, 0xa097, 0x090c, 0x00000300);
965 nv_mthd(dev, 0xa097, 0x094c, 0x00000300);
966 nv_mthd(dev, 0xa097, 0x098c, 0x00000300);
967 nv_mthd(dev, 0xa097, 0x09cc, 0x00000300);
968 nv_mthd(dev, 0xa097, 0x0810, 0x000000cf);
969 nv_mthd(dev, 0xa097, 0x0850, 0x00000000);
970 nv_mthd(dev, 0xa097, 0x0890, 0x00000000);
971 nv_mthd(dev, 0xa097, 0x08d0, 0x00000000);
972 nv_mthd(dev, 0xa097, 0x0910, 0x00000000);
973 nv_mthd(dev, 0xa097, 0x0950, 0x00000000);
974 nv_mthd(dev, 0xa097, 0x0990, 0x00000000);
975 nv_mthd(dev, 0xa097, 0x09d0, 0x00000000);
976 nv_mthd(dev, 0xa097, 0x0814, 0x00000040);
977 nv_mthd(dev, 0xa097, 0x0854, 0x00000040);
978 nv_mthd(dev, 0xa097, 0x0894, 0x00000040);
979 nv_mthd(dev, 0xa097, 0x08d4, 0x00000040);
980 nv_mthd(dev, 0xa097, 0x0914, 0x00000040);
981 nv_mthd(dev, 0xa097, 0x0954, 0x00000040);
982 nv_mthd(dev, 0xa097, 0x0994, 0x00000040);
983 nv_mthd(dev, 0xa097, 0x09d4, 0x00000040);
984 nv_mthd(dev, 0xa097, 0x0818, 0x00000001);
985 nv_mthd(dev, 0xa097, 0x0858, 0x00000001);
986 nv_mthd(dev, 0xa097, 0x0898, 0x00000001);
987 nv_mthd(dev, 0xa097, 0x08d8, 0x00000001);
988 nv_mthd(dev, 0xa097, 0x0918, 0x00000001);
989 nv_mthd(dev, 0xa097, 0x0958, 0x00000001);
990 nv_mthd(dev, 0xa097, 0x0998, 0x00000001);
991 nv_mthd(dev, 0xa097, 0x09d8, 0x00000001);
992 nv_mthd(dev, 0xa097, 0x081c, 0x00000000);
993 nv_mthd(dev, 0xa097, 0x085c, 0x00000000);
994 nv_mthd(dev, 0xa097, 0x089c, 0x00000000);
995 nv_mthd(dev, 0xa097, 0x08dc, 0x00000000);
996 nv_mthd(dev, 0xa097, 0x091c, 0x00000000);
997 nv_mthd(dev, 0xa097, 0x095c, 0x00000000);
998 nv_mthd(dev, 0xa097, 0x099c, 0x00000000);
999 nv_mthd(dev, 0xa097, 0x09dc, 0x00000000);
1000 nv_mthd(dev, 0xa097, 0x0820, 0x00000000);
1001 nv_mthd(dev, 0xa097, 0x0860, 0x00000000);
1002 nv_mthd(dev, 0xa097, 0x08a0, 0x00000000);
1003 nv_mthd(dev, 0xa097, 0x08e0, 0x00000000);
1004 nv_mthd(dev, 0xa097, 0x0920, 0x00000000);
1005 nv_mthd(dev, 0xa097, 0x0960, 0x00000000);
1006 nv_mthd(dev, 0xa097, 0x09a0, 0x00000000);
1007 nv_mthd(dev, 0xa097, 0x09e0, 0x00000000);
1008 nv_mthd(dev, 0xa097, 0x1c00, 0x00000000);
1009 nv_mthd(dev, 0xa097, 0x1c10, 0x00000000);
1010 nv_mthd(dev, 0xa097, 0x1c20, 0x00000000);
1011 nv_mthd(dev, 0xa097, 0x1c30, 0x00000000);
1012 nv_mthd(dev, 0xa097, 0x1c40, 0x00000000);
1013 nv_mthd(dev, 0xa097, 0x1c50, 0x00000000);
1014 nv_mthd(dev, 0xa097, 0x1c60, 0x00000000);
1015 nv_mthd(dev, 0xa097, 0x1c70, 0x00000000);
1016 nv_mthd(dev, 0xa097, 0x1c80, 0x00000000);
1017 nv_mthd(dev, 0xa097, 0x1c90, 0x00000000);
1018 nv_mthd(dev, 0xa097, 0x1ca0, 0x00000000);
1019 nv_mthd(dev, 0xa097, 0x1cb0, 0x00000000);
1020 nv_mthd(dev, 0xa097, 0x1cc0, 0x00000000);
1021 nv_mthd(dev, 0xa097, 0x1cd0, 0x00000000);
1022 nv_mthd(dev, 0xa097, 0x1ce0, 0x00000000);
1023 nv_mthd(dev, 0xa097, 0x1cf0, 0x00000000);
1024 nv_mthd(dev, 0xa097, 0x1c04, 0x00000000);
1025 nv_mthd(dev, 0xa097, 0x1c14, 0x00000000);
1026 nv_mthd(dev, 0xa097, 0x1c24, 0x00000000);
1027 nv_mthd(dev, 0xa097, 0x1c34, 0x00000000);
1028 nv_mthd(dev, 0xa097, 0x1c44, 0x00000000);
1029 nv_mthd(dev, 0xa097, 0x1c54, 0x00000000);
1030 nv_mthd(dev, 0xa097, 0x1c64, 0x00000000);
1031 nv_mthd(dev, 0xa097, 0x1c74, 0x00000000);
1032 nv_mthd(dev, 0xa097, 0x1c84, 0x00000000);
1033 nv_mthd(dev, 0xa097, 0x1c94, 0x00000000);
1034 nv_mthd(dev, 0xa097, 0x1ca4, 0x00000000);
1035 nv_mthd(dev, 0xa097, 0x1cb4, 0x00000000);
1036 nv_mthd(dev, 0xa097, 0x1cc4, 0x00000000);
1037 nv_mthd(dev, 0xa097, 0x1cd4, 0x00000000);
1038 nv_mthd(dev, 0xa097, 0x1ce4, 0x00000000);
1039 nv_mthd(dev, 0xa097, 0x1cf4, 0x00000000);
1040 nv_mthd(dev, 0xa097, 0x1c08, 0x00000000);
1041 nv_mthd(dev, 0xa097, 0x1c18, 0x00000000);
1042 nv_mthd(dev, 0xa097, 0x1c28, 0x00000000);
1043 nv_mthd(dev, 0xa097, 0x1c38, 0x00000000);
1044 nv_mthd(dev, 0xa097, 0x1c48, 0x00000000);
1045 nv_mthd(dev, 0xa097, 0x1c58, 0x00000000);
1046 nv_mthd(dev, 0xa097, 0x1c68, 0x00000000);
1047 nv_mthd(dev, 0xa097, 0x1c78, 0x00000000);
1048 nv_mthd(dev, 0xa097, 0x1c88, 0x00000000);
1049 nv_mthd(dev, 0xa097, 0x1c98, 0x00000000);
1050 nv_mthd(dev, 0xa097, 0x1ca8, 0x00000000);
1051 nv_mthd(dev, 0xa097, 0x1cb8, 0x00000000);
1052 nv_mthd(dev, 0xa097, 0x1cc8, 0x00000000);
1053 nv_mthd(dev, 0xa097, 0x1cd8, 0x00000000);
1054 nv_mthd(dev, 0xa097, 0x1ce8, 0x00000000);
1055 nv_mthd(dev, 0xa097, 0x1cf8, 0x00000000);
1056 nv_mthd(dev, 0xa097, 0x1c0c, 0x00000000);
1057 nv_mthd(dev, 0xa097, 0x1c1c, 0x00000000);
1058 nv_mthd(dev, 0xa097, 0x1c2c, 0x00000000);
1059 nv_mthd(dev, 0xa097, 0x1c3c, 0x00000000);
1060 nv_mthd(dev, 0xa097, 0x1c4c, 0x00000000);
1061 nv_mthd(dev, 0xa097, 0x1c5c, 0x00000000);
1062 nv_mthd(dev, 0xa097, 0x1c6c, 0x00000000);
1063 nv_mthd(dev, 0xa097, 0x1c7c, 0x00000000);
1064 nv_mthd(dev, 0xa097, 0x1c8c, 0x00000000);
1065 nv_mthd(dev, 0xa097, 0x1c9c, 0x00000000);
1066 nv_mthd(dev, 0xa097, 0x1cac, 0x00000000);
1067 nv_mthd(dev, 0xa097, 0x1cbc, 0x00000000);
1068 nv_mthd(dev, 0xa097, 0x1ccc, 0x00000000);
1069 nv_mthd(dev, 0xa097, 0x1cdc, 0x00000000);
1070 nv_mthd(dev, 0xa097, 0x1cec, 0x00000000);
1071 nv_mthd(dev, 0xa097, 0x1cfc, 0x00000000);
1072 nv_mthd(dev, 0xa097, 0x1d00, 0x00000000);
1073 nv_mthd(dev, 0xa097, 0x1d10, 0x00000000);
1074 nv_mthd(dev, 0xa097, 0x1d20, 0x00000000);
1075 nv_mthd(dev, 0xa097, 0x1d30, 0x00000000);
1076 nv_mthd(dev, 0xa097, 0x1d40, 0x00000000);
1077 nv_mthd(dev, 0xa097, 0x1d50, 0x00000000);
1078 nv_mthd(dev, 0xa097, 0x1d60, 0x00000000);
1079 nv_mthd(dev, 0xa097, 0x1d70, 0x00000000);
1080 nv_mthd(dev, 0xa097, 0x1d80, 0x00000000);
1081 nv_mthd(dev, 0xa097, 0x1d90, 0x00000000);
1082 nv_mthd(dev, 0xa097, 0x1da0, 0x00000000);
1083 nv_mthd(dev, 0xa097, 0x1db0, 0x00000000);
1084 nv_mthd(dev, 0xa097, 0x1dc0, 0x00000000);
1085 nv_mthd(dev, 0xa097, 0x1dd0, 0x00000000);
1086 nv_mthd(dev, 0xa097, 0x1de0, 0x00000000);
1087 nv_mthd(dev, 0xa097, 0x1df0, 0x00000000);
1088 nv_mthd(dev, 0xa097, 0x1d04, 0x00000000);
1089 nv_mthd(dev, 0xa097, 0x1d14, 0x00000000);
1090 nv_mthd(dev, 0xa097, 0x1d24, 0x00000000);
1091 nv_mthd(dev, 0xa097, 0x1d34, 0x00000000);
1092 nv_mthd(dev, 0xa097, 0x1d44, 0x00000000);
1093 nv_mthd(dev, 0xa097, 0x1d54, 0x00000000);
1094 nv_mthd(dev, 0xa097, 0x1d64, 0x00000000);
1095 nv_mthd(dev, 0xa097, 0x1d74, 0x00000000);
1096 nv_mthd(dev, 0xa097, 0x1d84, 0x00000000);
1097 nv_mthd(dev, 0xa097, 0x1d94, 0x00000000);
1098 nv_mthd(dev, 0xa097, 0x1da4, 0x00000000);
1099 nv_mthd(dev, 0xa097, 0x1db4, 0x00000000);
1100 nv_mthd(dev, 0xa097, 0x1dc4, 0x00000000);
1101 nv_mthd(dev, 0xa097, 0x1dd4, 0x00000000);
1102 nv_mthd(dev, 0xa097, 0x1de4, 0x00000000);
1103 nv_mthd(dev, 0xa097, 0x1df4, 0x00000000);
1104 nv_mthd(dev, 0xa097, 0x1d08, 0x00000000);
1105 nv_mthd(dev, 0xa097, 0x1d18, 0x00000000);
1106 nv_mthd(dev, 0xa097, 0x1d28, 0x00000000);
1107 nv_mthd(dev, 0xa097, 0x1d38, 0x00000000);
1108 nv_mthd(dev, 0xa097, 0x1d48, 0x00000000);
1109 nv_mthd(dev, 0xa097, 0x1d58, 0x00000000);
1110 nv_mthd(dev, 0xa097, 0x1d68, 0x00000000);
1111 nv_mthd(dev, 0xa097, 0x1d78, 0x00000000);
1112 nv_mthd(dev, 0xa097, 0x1d88, 0x00000000);
1113 nv_mthd(dev, 0xa097, 0x1d98, 0x00000000);
1114 nv_mthd(dev, 0xa097, 0x1da8, 0x00000000);
1115 nv_mthd(dev, 0xa097, 0x1db8, 0x00000000);
1116 nv_mthd(dev, 0xa097, 0x1dc8, 0x00000000);
1117 nv_mthd(dev, 0xa097, 0x1dd8, 0x00000000);
1118 nv_mthd(dev, 0xa097, 0x1de8, 0x00000000);
1119 nv_mthd(dev, 0xa097, 0x1df8, 0x00000000);
1120 nv_mthd(dev, 0xa097, 0x1d0c, 0x00000000);
1121 nv_mthd(dev, 0xa097, 0x1d1c, 0x00000000);
1122 nv_mthd(dev, 0xa097, 0x1d2c, 0x00000000);
1123 nv_mthd(dev, 0xa097, 0x1d3c, 0x00000000);
1124 nv_mthd(dev, 0xa097, 0x1d4c, 0x00000000);
1125 nv_mthd(dev, 0xa097, 0x1d5c, 0x00000000);
1126 nv_mthd(dev, 0xa097, 0x1d6c, 0x00000000);
1127 nv_mthd(dev, 0xa097, 0x1d7c, 0x00000000);
1128 nv_mthd(dev, 0xa097, 0x1d8c, 0x00000000);
1129 nv_mthd(dev, 0xa097, 0x1d9c, 0x00000000);
1130 nv_mthd(dev, 0xa097, 0x1dac, 0x00000000);
1131 nv_mthd(dev, 0xa097, 0x1dbc, 0x00000000);
1132 nv_mthd(dev, 0xa097, 0x1dcc, 0x00000000);
1133 nv_mthd(dev, 0xa097, 0x1ddc, 0x00000000);
1134 nv_mthd(dev, 0xa097, 0x1dec, 0x00000000);
1135 nv_mthd(dev, 0xa097, 0x1dfc, 0x00000000);
1136 nv_mthd(dev, 0xa097, 0x1f00, 0x00000000);
1137 nv_mthd(dev, 0xa097, 0x1f08, 0x00000000);
1138 nv_mthd(dev, 0xa097, 0x1f10, 0x00000000);
1139 nv_mthd(dev, 0xa097, 0x1f18, 0x00000000);
1140 nv_mthd(dev, 0xa097, 0x1f20, 0x00000000);
1141 nv_mthd(dev, 0xa097, 0x1f28, 0x00000000);
1142 nv_mthd(dev, 0xa097, 0x1f30, 0x00000000);
1143 nv_mthd(dev, 0xa097, 0x1f38, 0x00000000);
1144 nv_mthd(dev, 0xa097, 0x1f40, 0x00000000);
1145 nv_mthd(dev, 0xa097, 0x1f48, 0x00000000);
1146 nv_mthd(dev, 0xa097, 0x1f50, 0x00000000);
1147 nv_mthd(dev, 0xa097, 0x1f58, 0x00000000);
1148 nv_mthd(dev, 0xa097, 0x1f60, 0x00000000);
1149 nv_mthd(dev, 0xa097, 0x1f68, 0x00000000);
1150 nv_mthd(dev, 0xa097, 0x1f70, 0x00000000);
1151 nv_mthd(dev, 0xa097, 0x1f78, 0x00000000);
1152 nv_mthd(dev, 0xa097, 0x1f04, 0x00000000);
1153 nv_mthd(dev, 0xa097, 0x1f0c, 0x00000000);
1154 nv_mthd(dev, 0xa097, 0x1f14, 0x00000000);
1155 nv_mthd(dev, 0xa097, 0x1f1c, 0x00000000);
1156 nv_mthd(dev, 0xa097, 0x1f24, 0x00000000);
1157 nv_mthd(dev, 0xa097, 0x1f2c, 0x00000000);
1158 nv_mthd(dev, 0xa097, 0x1f34, 0x00000000);
1159 nv_mthd(dev, 0xa097, 0x1f3c, 0x00000000);
1160 nv_mthd(dev, 0xa097, 0x1f44, 0x00000000);
1161 nv_mthd(dev, 0xa097, 0x1f4c, 0x00000000);
1162 nv_mthd(dev, 0xa097, 0x1f54, 0x00000000);
1163 nv_mthd(dev, 0xa097, 0x1f5c, 0x00000000);
1164 nv_mthd(dev, 0xa097, 0x1f64, 0x00000000);
1165 nv_mthd(dev, 0xa097, 0x1f6c, 0x00000000);
1166 nv_mthd(dev, 0xa097, 0x1f74, 0x00000000);
1167 nv_mthd(dev, 0xa097, 0x1f7c, 0x00000000);
1168 nv_mthd(dev, 0xa097, 0x1f80, 0x00000000);
1169 nv_mthd(dev, 0xa097, 0x1f88, 0x00000000);
1170 nv_mthd(dev, 0xa097, 0x1f90, 0x00000000);
1171 nv_mthd(dev, 0xa097, 0x1f98, 0x00000000);
1172 nv_mthd(dev, 0xa097, 0x1fa0, 0x00000000);
1173 nv_mthd(dev, 0xa097, 0x1fa8, 0x00000000);
1174 nv_mthd(dev, 0xa097, 0x1fb0, 0x00000000);
1175 nv_mthd(dev, 0xa097, 0x1fb8, 0x00000000);
1176 nv_mthd(dev, 0xa097, 0x1fc0, 0x00000000);
1177 nv_mthd(dev, 0xa097, 0x1fc8, 0x00000000);
1178 nv_mthd(dev, 0xa097, 0x1fd0, 0x00000000);
1179 nv_mthd(dev, 0xa097, 0x1fd8, 0x00000000);
1180 nv_mthd(dev, 0xa097, 0x1fe0, 0x00000000);
1181 nv_mthd(dev, 0xa097, 0x1fe8, 0x00000000);
1182 nv_mthd(dev, 0xa097, 0x1ff0, 0x00000000);
1183 nv_mthd(dev, 0xa097, 0x1ff8, 0x00000000);
1184 nv_mthd(dev, 0xa097, 0x1f84, 0x00000000);
1185 nv_mthd(dev, 0xa097, 0x1f8c, 0x00000000);
1186 nv_mthd(dev, 0xa097, 0x1f94, 0x00000000);
1187 nv_mthd(dev, 0xa097, 0x1f9c, 0x00000000);
1188 nv_mthd(dev, 0xa097, 0x1fa4, 0x00000000);
1189 nv_mthd(dev, 0xa097, 0x1fac, 0x00000000);
1190 nv_mthd(dev, 0xa097, 0x1fb4, 0x00000000);
1191 nv_mthd(dev, 0xa097, 0x1fbc, 0x00000000);
1192 nv_mthd(dev, 0xa097, 0x1fc4, 0x00000000);
1193 nv_mthd(dev, 0xa097, 0x1fcc, 0x00000000);
1194 nv_mthd(dev, 0xa097, 0x1fd4, 0x00000000);
1195 nv_mthd(dev, 0xa097, 0x1fdc, 0x00000000);
1196 nv_mthd(dev, 0xa097, 0x1fe4, 0x00000000);
1197 nv_mthd(dev, 0xa097, 0x1fec, 0x00000000);
1198 nv_mthd(dev, 0xa097, 0x1ff4, 0x00000000);
1199 nv_mthd(dev, 0xa097, 0x1ffc, 0x00000000);
1200 nv_mthd(dev, 0xa097, 0x2000, 0x00000000);
1201 nv_mthd(dev, 0xa097, 0x2040, 0x00000011);
1202 nv_mthd(dev, 0xa097, 0x2080, 0x00000020);
1203 nv_mthd(dev, 0xa097, 0x20c0, 0x00000030);
1204 nv_mthd(dev, 0xa097, 0x2100, 0x00000040);
1205 nv_mthd(dev, 0xa097, 0x2140, 0x00000051);
1206 nv_mthd(dev, 0xa097, 0x200c, 0x00000001);
1207 nv_mthd(dev, 0xa097, 0x204c, 0x00000001);
1208 nv_mthd(dev, 0xa097, 0x208c, 0x00000001);
	nv_mthd(dev, 0xa097, 0x20cc, 0x00000001);
	nv_mthd(dev, 0xa097, 0x210c, 0x00000001);
	nv_mthd(dev, 0xa097, 0x214c, 0x00000001);
	nv_mthd(dev, 0xa097, 0x2010, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2050, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2090, 0x00000001);
	nv_mthd(dev, 0xa097, 0x20d0, 0x00000002);
	nv_mthd(dev, 0xa097, 0x2110, 0x00000003);
	nv_mthd(dev, 0xa097, 0x2150, 0x00000004);
	nv_mthd(dev, 0xa097, 0x0380, 0x00000000);
	nv_mthd(dev, 0xa097, 0x03a0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x03c0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x03e0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0384, 0x00000000);
	nv_mthd(dev, 0xa097, 0x03a4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x03c4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x03e4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0388, 0x00000000);
	nv_mthd(dev, 0xa097, 0x03a8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x03c8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x03e8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x038c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x03ac, 0x00000000);
	nv_mthd(dev, 0xa097, 0x03cc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x03ec, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0700, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0710, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0720, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0730, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0704, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0714, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0724, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0734, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0708, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0718, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0728, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0738, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2800, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2804, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2808, 0x00000000);
	nv_mthd(dev, 0xa097, 0x280c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2810, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2814, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2818, 0x00000000);
	nv_mthd(dev, 0xa097, 0x281c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2820, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2824, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2828, 0x00000000);
	nv_mthd(dev, 0xa097, 0x282c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2830, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2834, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2838, 0x00000000);
	nv_mthd(dev, 0xa097, 0x283c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2840, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2844, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2848, 0x00000000);
	nv_mthd(dev, 0xa097, 0x284c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2850, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2854, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2858, 0x00000000);
	nv_mthd(dev, 0xa097, 0x285c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2860, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2864, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2868, 0x00000000);
	nv_mthd(dev, 0xa097, 0x286c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2870, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2874, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2878, 0x00000000);
	nv_mthd(dev, 0xa097, 0x287c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2880, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2884, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2888, 0x00000000);
	nv_mthd(dev, 0xa097, 0x288c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2890, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2894, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2898, 0x00000000);
	nv_mthd(dev, 0xa097, 0x289c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x28a0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x28a4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x28a8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x28ac, 0x00000000);
	nv_mthd(dev, 0xa097, 0x28b0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x28b4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x28b8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x28bc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x28c0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x28c4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x28c8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x28cc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x28d0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x28d4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x28d8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x28dc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x28e0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x28e4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x28e8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x28ec, 0x00000000);
	nv_mthd(dev, 0xa097, 0x28f0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x28f4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x28f8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x28fc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2900, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2904, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2908, 0x00000000);
	nv_mthd(dev, 0xa097, 0x290c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2910, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2914, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2918, 0x00000000);
	nv_mthd(dev, 0xa097, 0x291c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2920, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2924, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2928, 0x00000000);
	nv_mthd(dev, 0xa097, 0x292c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2930, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2934, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2938, 0x00000000);
	nv_mthd(dev, 0xa097, 0x293c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2940, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2944, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2948, 0x00000000);
	nv_mthd(dev, 0xa097, 0x294c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2950, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2954, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2958, 0x00000000);
	nv_mthd(dev, 0xa097, 0x295c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2960, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2964, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2968, 0x00000000);
	nv_mthd(dev, 0xa097, 0x296c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2970, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2974, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2978, 0x00000000);
	nv_mthd(dev, 0xa097, 0x297c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2980, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2984, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2988, 0x00000000);
	nv_mthd(dev, 0xa097, 0x298c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2990, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2994, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2998, 0x00000000);
	nv_mthd(dev, 0xa097, 0x299c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x29a0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x29a4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x29a8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x29ac, 0x00000000);
	nv_mthd(dev, 0xa097, 0x29b0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x29b4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x29b8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x29bc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x29c0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x29c4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x29c8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x29cc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x29d0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x29d4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x29d8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x29dc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x29e0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x29e4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x29e8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x29ec, 0x00000000);
	nv_mthd(dev, 0xa097, 0x29f0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x29f4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x29f8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x29fc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a00, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a20, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a40, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a60, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a80, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0aa0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0ac0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0ae0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b00, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b20, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b40, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b60, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b80, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0ba0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0bc0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0be0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a04, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a24, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a44, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a64, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a84, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0aa4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0ac4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0ae4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b04, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b24, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b44, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b64, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b84, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0ba4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0bc4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0be4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a08, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a28, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a48, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a68, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a88, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0aa8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0ac8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0ae8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b08, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b28, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b48, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b68, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b88, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0ba8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0bc8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0be8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a0c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a2c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a4c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a6c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a8c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0aac, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0acc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0aec, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b0c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b2c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b4c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b6c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b8c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0bac, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0bcc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0bec, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a10, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a30, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a50, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a70, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a90, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0ab0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0ad0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0af0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b10, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b30, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b50, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b70, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b90, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0bb0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0bd0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0bf0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a14, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a34, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a54, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a74, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a94, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0ab4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0ad4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0af4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b14, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b34, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b54, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b74, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b94, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0bb4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0bd4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0bf4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c00, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c10, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c20, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c30, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c40, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c50, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c60, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c70, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c80, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c90, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0ca0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0cb0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0cc0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0cd0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0ce0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0cf0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c04, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c14, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c24, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c34, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c44, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c54, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c64, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c74, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c84, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c94, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0ca4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0cb4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0cc4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0cd4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0ce4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0cf4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c08, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c18, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c28, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c38, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c48, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c58, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c68, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c78, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c88, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c98, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0ca8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0cb8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0cc8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0cd8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0ce8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0cf8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c0c, 0x3f800000);
	nv_mthd(dev, 0xa097, 0x0c1c, 0x3f800000);
	nv_mthd(dev, 0xa097, 0x0c2c, 0x3f800000);
	nv_mthd(dev, 0xa097, 0x0c3c, 0x3f800000);
	nv_mthd(dev, 0xa097, 0x0c4c, 0x3f800000);
	nv_mthd(dev, 0xa097, 0x0c5c, 0x3f800000);
	nv_mthd(dev, 0xa097, 0x0c6c, 0x3f800000);
	nv_mthd(dev, 0xa097, 0x0c7c, 0x3f800000);
	nv_mthd(dev, 0xa097, 0x0c8c, 0x3f800000);
	nv_mthd(dev, 0xa097, 0x0c9c, 0x3f800000);
	nv_mthd(dev, 0xa097, 0x0cac, 0x3f800000);
	nv_mthd(dev, 0xa097, 0x0cbc, 0x3f800000);
	nv_mthd(dev, 0xa097, 0x0ccc, 0x3f800000);
	nv_mthd(dev, 0xa097, 0x0cdc, 0x3f800000);
	nv_mthd(dev, 0xa097, 0x0cec, 0x3f800000);
	nv_mthd(dev, 0xa097, 0x0cfc, 0x3f800000);
	nv_mthd(dev, 0xa097, 0x0d00, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0d08, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0d10, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0d18, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0d20, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0d28, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0d30, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0d38, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0d04, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0d0c, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0d14, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0d1c, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0d24, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0d2c, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0d34, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0d3c, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0e00, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0e10, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0e20, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0e30, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0e40, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0e50, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0e60, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0e70, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0e80, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0e90, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0ea0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0eb0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0ec0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0ed0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0ee0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0ef0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0e04, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0e14, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0e24, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0e34, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0e44, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0e54, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0e64, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0e74, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0e84, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0e94, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0ea4, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0eb4, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0ec4, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0ed4, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0ee4, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0ef4, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0e08, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0e18, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0e28, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0e38, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0e48, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0e58, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0e68, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0e78, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0e88, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0e98, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0ea8, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0eb8, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0ec8, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0ed8, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0ee8, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0ef8, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0d40, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0d48, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0d50, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0d58, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0d44, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0d4c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0d54, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0d5c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1e00, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1e20, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1e40, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1e60, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1e80, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1ea0, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1ec0, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1ee0, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1e04, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1e24, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1e44, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1e64, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1e84, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1ea4, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1ec4, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1ee4, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1e08, 0x00000002);
	nv_mthd(dev, 0xa097, 0x1e28, 0x00000002);
	nv_mthd(dev, 0xa097, 0x1e48, 0x00000002);
	nv_mthd(dev, 0xa097, 0x1e68, 0x00000002);
	nv_mthd(dev, 0xa097, 0x1e88, 0x00000002);
	nv_mthd(dev, 0xa097, 0x1ea8, 0x00000002);
	nv_mthd(dev, 0xa097, 0x1ec8, 0x00000002);
	nv_mthd(dev, 0xa097, 0x1ee8, 0x00000002);
	nv_mthd(dev, 0xa097, 0x1e0c, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1e2c, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1e4c, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1e6c, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1e8c, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1eac, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1ecc, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1eec, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1e10, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1e30, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1e50, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1e70, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1e90, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1eb0, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1ed0, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1ef0, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1e14, 0x00000002);
	nv_mthd(dev, 0xa097, 0x1e34, 0x00000002);
	nv_mthd(dev, 0xa097, 0x1e54, 0x00000002);
	nv_mthd(dev, 0xa097, 0x1e74, 0x00000002);
	nv_mthd(dev, 0xa097, 0x1e94, 0x00000002);
	nv_mthd(dev, 0xa097, 0x1eb4, 0x00000002);
	nv_mthd(dev, 0xa097, 0x1ed4, 0x00000002);
	nv_mthd(dev, 0xa097, 0x1ef4, 0x00000002);
	nv_mthd(dev, 0xa097, 0x1e18, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1e38, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1e58, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1e78, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1e98, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1eb8, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1ed8, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1ef8, 0x00000001);
	nv_mthd(dev, 0xa097, 0x3400, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3404, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3408, 0x00000000);
	nv_mthd(dev, 0xa097, 0x340c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3410, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3414, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3418, 0x00000000);
	nv_mthd(dev, 0xa097, 0x341c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3420, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3424, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3428, 0x00000000);
	nv_mthd(dev, 0xa097, 0x342c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3430, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3434, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3438, 0x00000000);
	nv_mthd(dev, 0xa097, 0x343c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3440, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3444, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3448, 0x00000000);
	nv_mthd(dev, 0xa097, 0x344c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3450, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3454, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3458, 0x00000000);
	nv_mthd(dev, 0xa097, 0x345c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3460, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3464, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3468, 0x00000000);
	nv_mthd(dev, 0xa097, 0x346c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3470, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3474, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3478, 0x00000000);
	nv_mthd(dev, 0xa097, 0x347c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3480, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3484, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3488, 0x00000000);
	nv_mthd(dev, 0xa097, 0x348c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3490, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3494, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3498, 0x00000000);
	nv_mthd(dev, 0xa097, 0x349c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x34a0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x34a4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x34a8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x34ac, 0x00000000);
	nv_mthd(dev, 0xa097, 0x34b0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x34b4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x34b8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x34bc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x34c0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x34c4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x34c8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x34cc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x34d0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x34d4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x34d8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x34dc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x34e0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x34e4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x34e8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x34ec, 0x00000000);
	nv_mthd(dev, 0xa097, 0x34f0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x34f4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x34f8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x34fc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3500, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3504, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3508, 0x00000000);
	nv_mthd(dev, 0xa097, 0x350c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3510, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3514, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3518, 0x00000000);
	nv_mthd(dev, 0xa097, 0x351c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3520, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3524, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3528, 0x00000000);
	nv_mthd(dev, 0xa097, 0x352c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3530, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3534, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3538, 0x00000000);
	nv_mthd(dev, 0xa097, 0x353c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3540, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3544, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3548, 0x00000000);
	nv_mthd(dev, 0xa097, 0x354c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3550, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3554, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3558, 0x00000000);
	nv_mthd(dev, 0xa097, 0x355c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3560, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3564, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3568, 0x00000000);
	nv_mthd(dev, 0xa097, 0x356c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3570, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3574, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3578, 0x00000000);
	nv_mthd(dev, 0xa097, 0x357c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3580, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3584, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3588, 0x00000000);
	nv_mthd(dev, 0xa097, 0x358c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3590, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3594, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3598, 0x00000000);
	nv_mthd(dev, 0xa097, 0x359c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x35a0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x35a4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x35a8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x35ac, 0x00000000);
	nv_mthd(dev, 0xa097, 0x35b0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x35b4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x35b8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x35bc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x35c0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x35c4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x35c8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x35cc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x35d0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x35d4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x35d8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x35dc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x35e0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x35e4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x35e8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x35ec, 0x00000000);
	nv_mthd(dev, 0xa097, 0x35f0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x35f4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x35f8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x35fc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x030c, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1944, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1514, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0d68, 0x0000ffff);
	nv_mthd(dev, 0xa097, 0x121c, 0x0fac6881);
	nv_mthd(dev, 0xa097, 0x0fac, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1538, 0x00000001);
	nv_mthd(dev, 0xa097, 0x0fe0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0fe4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0fe8, 0x00000014);
	nv_mthd(dev, 0xa097, 0x0fec, 0x00000040);
	nv_mthd(dev, 0xa097, 0x0ff0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x179c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1228, 0x00000400);
	nv_mthd(dev, 0xa097, 0x122c, 0x00000300);
	nv_mthd(dev, 0xa097, 0x1230, 0x00010001);
	nv_mthd(dev, 0xa097, 0x07f8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x15b4, 0x00000001);
	nv_mthd(dev, 0xa097, 0x15cc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1534, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0fb0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x15d0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x153c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x16b4, 0x00000003);
	nv_mthd(dev, 0xa097, 0x0fbc, 0x0000ffff);
	nv_mthd(dev, 0xa097, 0x0fc0, 0x0000ffff);
	nv_mthd(dev, 0xa097, 0x0fc4, 0x0000ffff);
	nv_mthd(dev, 0xa097, 0x0fc8, 0x0000ffff);
	nv_mthd(dev, 0xa097, 0x0df8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0dfc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1948, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1970, 0x00000001);
	nv_mthd(dev, 0xa097, 0x161c, 0x000009f0);
	nv_mthd(dev, 0xa097, 0x0dcc, 0x00000010);
	nv_mthd(dev, 0xa097, 0x163c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x15e4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1160, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x1164, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x1168, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x116c, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x1170, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x1174, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x1178, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x117c, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x1180, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x1184, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x1188, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x118c, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x1190, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x1194, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x1198, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x119c, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x11a0, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x11a4, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x11a8, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x11ac, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x11b0, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x11b4, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x11b8, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x11bc, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x11c0, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x11c4, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x11c8, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x11cc, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x11d0, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x11d4, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x11d8, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x11dc, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x1880, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1884, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1888, 0x00000000);
	nv_mthd(dev, 0xa097, 0x188c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1890, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1894, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1898, 0x00000000);
	nv_mthd(dev, 0xa097, 0x189c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x18a0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x18a4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x18a8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x18ac, 0x00000000);
	nv_mthd(dev, 0xa097, 0x18b0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x18b4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x18b8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x18bc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x18c0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x18c4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x18c8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x18cc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x18d0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x18d4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x18d8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x18dc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x18e0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x18e4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x18e8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x18ec, 0x00000000);
	nv_mthd(dev, 0xa097, 0x18f0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x18f4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x18f8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x18fc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0f84, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0f88, 0x00000000);
	nv_mthd(dev, 0xa097, 0x17c8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x17cc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x17d0, 0x000000ff);
	nv_mthd(dev, 0xa097, 0x17d4, 0xffffffff);
	nv_mthd(dev, 0xa097, 0x17d8, 0x00000002);
	nv_mthd(dev, 0xa097, 0x17dc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x15f4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x15f8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1434, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1438, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0d74, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0dec, 0x00000001);
	nv_mthd(dev, 0xa097, 0x13a4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1318, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1644, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0748, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0de8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1648, 0x00000000);
	nv_mthd(dev, 0xa097, 0x12a4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1120, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1124, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1128, 0x00000000);
	nv_mthd(dev, 0xa097, 0x112c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1118, 0x00000000);
	nv_mthd(dev, 0xa097, 0x164c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1658, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1910, 0x00000290);
	nv_mthd(dev, 0xa097, 0x1518, 0x00000000);
	nv_mthd(dev, 0xa097, 0x165c, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1520, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1604, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1570, 0x00000000);
	nv_mthd(dev, 0xa097, 0x13b0, 0x3f800000);
	nv_mthd(dev, 0xa097, 0x13b4, 0x3f800000);
	nv_mthd(dev, 0xa097, 0x020c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1670, 0x30201000);
	nv_mthd(dev, 0xa097, 0x1674, 0x70605040);
	nv_mthd(dev, 0xa097, 0x1678, 0xb8a89888);
	nv_mthd(dev, 0xa097, 0x167c, 0xf8e8d8c8);
	nv_mthd(dev, 0xa097, 0x166c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1680, 0x00ffff00);
	nv_mthd(dev, 0xa097, 0x12d0, 0x00000003);
	nv_mthd(dev, 0xa097, 0x12d4, 0x00000002);
	nv_mthd(dev, 0xa097, 0x1684, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1688, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0dac, 0x00001b02);
	nv_mthd(dev, 0xa097, 0x0db0, 0x00001b02);
	nv_mthd(dev, 0xa097, 0x0db4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x168c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x15bc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x156c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x187c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1110, 0x00000001);
	nv_mthd(dev, 0xa097, 0x0dc0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0dc4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0dc8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1234, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1690, 0x00000000);
	nv_mthd(dev, 0xa097, 0x12ac, 0x00000001);
	nv_mthd(dev, 0xa097, 0x0790, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0794, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0798, 0x00000000);
	nv_mthd(dev, 0xa097, 0x079c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x07a0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x077c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1000, 0x00000010);
	nv_mthd(dev, 0xa097, 0x10fc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1290, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0218, 0x00000010);
	nv_mthd(dev, 0xa097, 0x12d8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x12dc, 0x00000010);
	nv_mthd(dev, 0xa097, 0x0d94, 0x00000001);
	nv_mthd(dev, 0xa097, 0x155c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1560, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1564, 0x00000fff);
	nv_mthd(dev, 0xa097, 0x1574, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1578, 0x00000000);
	nv_mthd(dev, 0xa097, 0x157c, 0x000fffff);
	nv_mthd(dev, 0xa097, 0x1354, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1610, 0x00000012);
	nv_mthd(dev, 0xa097, 0x1608, 0x00000000);
	nv_mthd(dev, 0xa097, 0x160c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x260c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x07ac, 0x00000000);
	nv_mthd(dev, 0xa097, 0x162c, 0x00000003);
	nv_mthd(dev, 0xa097, 0x0210, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0320, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0324, 0x3f800000);
	nv_mthd(dev, 0xa097, 0x0328, 0x3f800000);
	nv_mthd(dev, 0xa097, 0x032c, 0x3f800000);
	nv_mthd(dev, 0xa097, 0x0330, 0x3f800000);
	nv_mthd(dev, 0xa097, 0x0334, 0x3f800000);
	nv_mthd(dev, 0xa097, 0x0338, 0x3f800000);
	nv_mthd(dev, 0xa097, 0x0750, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0760, 0x39291909);
	nv_mthd(dev, 0xa097, 0x0764, 0x79695949);
	nv_mthd(dev, 0xa097, 0x0768, 0xb9a99989);
	nv_mthd(dev, 0xa097, 0x076c, 0xf9e9d9c9);
	nv_mthd(dev, 0xa097, 0x0770, 0x30201000);
	nv_mthd(dev, 0xa097, 0x0774, 0x70605040);
	nv_mthd(dev, 0xa097, 0x0778, 0x00009080);
	nv_mthd(dev, 0xa097, 0x0780, 0x39291909);
	nv_mthd(dev, 0xa097, 0x0784, 0x79695949);
	nv_mthd(dev, 0xa097, 0x0788, 0xb9a99989);
	nv_mthd(dev, 0xa097, 0x078c, 0xf9e9d9c9);
	nv_mthd(dev, 0xa097, 0x07d0, 0x30201000);
	nv_mthd(dev, 0xa097, 0x07d4, 0x70605040);
	nv_mthd(dev, 0xa097, 0x07d8, 0x00009080);
	nv_mthd(dev, 0xa097, 0x037c, 0x00000001);
	nv_mthd(dev, 0xa097, 0x0740, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0744, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2600, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1918, 0x00000000);
	nv_mthd(dev, 0xa097, 0x191c, 0x00000900);
	nv_mthd(dev, 0xa097, 0x1920, 0x00000405);
	nv_mthd(dev, 0xa097, 0x1308, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1924, 0x00000000);
	nv_mthd(dev, 0xa097, 0x13ac, 0x00000000);
	nv_mthd(dev, 0xa097, 0x192c, 0x00000001);
	nv_mthd(dev, 0xa097, 0x193c, 0x00002c1c);
	nv_mthd(dev, 0xa097, 0x0d7c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0f8c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x02c0, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1510, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1940, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0ff4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0ff8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x194c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1950, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1968, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1590, 0x0000003f);
	nv_mthd(dev, 0xa097, 0x07e8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x07ec, 0x00000000);
	nv_mthd(dev, 0xa097, 0x07f0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x07f4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x196c, 0x00000011);
	nv_mthd(dev, 0xa097, 0x02e4, 0x0000b001);
	nv_mthd(dev, 0xa097, 0x036c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0370, 0x00000000);
	nv_mthd(dev, 0xa097, 0x197c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0fcc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0fd0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x02d8, 0x00000040);
	nv_mthd(dev, 0xa097, 0x1980, 0x00000080);
	nv_mthd(dev, 0xa097, 0x1504, 0x00000080);
	nv_mthd(dev, 0xa097, 0x1984, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0300, 0x00000001);
	nv_mthd(dev, 0xa097, 0x13a8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x12ec, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1310, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1314, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1380, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1384, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1388, 0x00000001);
	nv_mthd(dev, 0xa097, 0x138c, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1390, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1394, 0x00000000);
	nv_mthd(dev, 0xa097, 0x139c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1398, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1594, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1598, 0x00000001);
	nv_mthd(dev, 0xa097, 0x159c, 0x00000001);
	nv_mthd(dev, 0xa097, 0x15a0, 0x00000001);
	nv_mthd(dev, 0xa097, 0x15a4, 0x00000001);
	nv_mthd(dev, 0xa097, 0x0f54, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0f58, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0f5c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x19bc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0f9c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0fa0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x12cc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x12e8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x130c, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1360, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1364, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1368, 0x00000000);
	nv_mthd(dev, 0xa097, 0x136c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1370, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1374, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1378, 0x00000000);
	nv_mthd(dev, 0xa097, 0x137c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x133c, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1340, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1344, 0x00000002);
	nv_mthd(dev, 0xa097, 0x1348, 0x00000001);
	nv_mthd(dev, 0xa097, 0x134c, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1350, 0x00000002);
	nv_mthd(dev, 0xa097, 0x1358, 0x00000001);
	nv_mthd(dev, 0xa097, 0x12e4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x131c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1320, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1324, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1328, 0x00000000);
	nv_mthd(dev, 0xa097, 0x19c0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1140, 0x00000000);
	nv_mthd(dev, 0xa097, 0x19c4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x19c8, 0x00001500);
	nv_mthd(dev, 0xa097, 0x135c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0f90, 0x00000000);
	nv_mthd(dev, 0xa097, 0x19e0, 0x00000001);
	nv_mthd(dev, 0xa097, 0x19e4, 0x00000001);
	nv_mthd(dev, 0xa097, 0x19e8, 0x00000001);
	nv_mthd(dev, 0xa097, 0x19ec, 0x00000001);
	nv_mthd(dev, 0xa097, 0x19f0, 0x00000001);
	nv_mthd(dev, 0xa097, 0x19f4, 0x00000001);
	nv_mthd(dev, 0xa097, 0x19f8, 0x00000001);
	nv_mthd(dev, 0xa097, 0x19fc, 0x00000001);
	nv_mthd(dev, 0xa097, 0x19cc, 0x00000001);
	nv_mthd(dev, 0xa097, 0x15b8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1a00, 0x00001111);
	nv_mthd(dev, 0xa097, 0x1a04, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1a08, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1a0c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1a10, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1a14, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1a18, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1a1c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0d6c, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0d70, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x10f8, 0x00001010);
	nv_mthd(dev, 0xa097, 0x0d80, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0d84, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0d88, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0d8c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0d90, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0da0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x07a4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x07a8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1508, 0x80000000);
	nv_mthd(dev, 0xa097, 0x150c, 0x40000000);
	nv_mthd(dev, 0xa097, 0x1668, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0318, 0x00000008);
	nv_mthd(dev, 0xa097, 0x031c, 0x00000008);
	nv_mthd(dev, 0xa097, 0x0d9c, 0x00000001);
	nv_mthd(dev, 0xa097, 0x0374, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0378, 0x00000020);
	nv_mthd(dev, 0xa097, 0x07dc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x074c, 0x00000055);
	nv_mthd(dev, 0xa097, 0x1420, 0x00000003);
	nv_mthd(dev, 0xa097, 0x17bc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x17c0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x17c4, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1008, 0x00000008);
	nv_mthd(dev, 0xa097, 0x100c, 0x00000040);
	nv_mthd(dev, 0xa097, 0x1010, 0x0000012c);
	nv_mthd(dev, 0xa097, 0x0d60, 0x00000040);
	nv_mthd(dev, 0xa097, 0x075c, 0x00000003);
	nv_mthd(dev, 0xa097, 0x1018, 0x00000020);
	nv_mthd(dev, 0xa097, 0x101c, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1020, 0x00000020);
	nv_mthd(dev, 0xa097, 0x1024, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1444, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1448, 0x00000000);
	nv_mthd(dev, 0xa097, 0x144c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0360, 0x20164010);
	nv_mthd(dev, 0xa097, 0x0364, 0x00000020);
	nv_mthd(dev, 0xa097, 0x0368, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0de4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0204, 0x00000006);
	nv_mthd(dev, 0xa097, 0x0208, 0x00000000);
	nv_mthd(dev, 0xa097, 0x02cc, 0x003fffff);
	nv_mthd(dev, 0xa097, 0x02d0, 0x003fffff);
	nv_mthd(dev, 0xa097, 0x1220, 0x00000005);
	nv_mthd(dev, 0xa097, 0x0fdc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0f98, 0x00400008);
	nv_mthd(dev, 0xa097, 0x1284, 0x08000080);
	nv_mthd(dev, 0xa097, 0x1450, 0x00400008);
	nv_mthd(dev, 0xa097, 0x1454, 0x08000080);
	nv_mthd(dev, 0xa097, 0x0214, 0x00000000);
}

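/*
 * Default method state for the 0x902d (2D) class.  Like the 0xa097
 * methods above, these values appear to mirror the hardware's
 * post-reset defaults; most method meanings are undocumented.
 */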
static void
nve0_grctx_generate_902d(struct drm_device *dev)
{
	nv_mthd(dev, 0x902d, 0x0200, 0x000000cf);
	nv_mthd(dev, 0x902d, 0x0204, 0x00000001);
	nv_mthd(dev, 0x902d, 0x0208, 0x00000020);
	nv_mthd(dev, 0x902d, 0x020c, 0x00000001);
	nv_mthd(dev, 0x902d, 0x0210, 0x00000000);
	nv_mthd(dev, 0x902d, 0x0214, 0x00000080);
	nv_mthd(dev, 0x902d, 0x0218, 0x00000100);
	nv_mthd(dev, 0x902d, 0x021c, 0x00000100);
	nv_mthd(dev, 0x902d, 0x0220, 0x00000000);
	nv_mthd(dev, 0x902d, 0x0224, 0x00000000);
	nv_mthd(dev, 0x902d, 0x0230, 0x000000cf);
	nv_mthd(dev, 0x902d, 0x0234, 0x00000001);
	nv_mthd(dev, 0x902d, 0x0238, 0x00000020);
	nv_mthd(dev, 0x902d, 0x023c, 0x00000001);
	nv_mthd(dev, 0x902d, 0x0244, 0x00000080);
	nv_mthd(dev, 0x902d, 0x0248, 0x00000100);
	nv_mthd(dev, 0x902d, 0x024c, 0x00000100);
	nv_mthd(dev, 0x902d, 0x3410, 0x00000000);
}

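/*
 * The generate_unkXXxx() helpers below write reset defaults into fixed,
 * largely undocumented PGRAPH register ranges (hence the "unk" names);
 * the addresses and values were presumably captured from hardware
 * state traces.
 */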
static void
nve0_graph_generate_unk40xx(struct drm_device *dev)
{
	nv_wr32(dev, 0x404010, 0x0);
	nv_wr32(dev, 0x404014, 0x0);
	nv_wr32(dev, 0x404018, 0x0);
	nv_wr32(dev, 0x40401c, 0x0);
	nv_wr32(dev, 0x404020, 0x0);
	nv_wr32(dev, 0x404024, 0xe000);
	nv_wr32(dev, 0x404028, 0x0);
	nv_wr32(dev, 0x4040a8, 0x0);
	nv_wr32(dev, 0x4040ac, 0x0);
	nv_wr32(dev, 0x4040b0, 0x0);
	nv_wr32(dev, 0x4040b4, 0x0);
	nv_wr32(dev, 0x4040b8, 0x0);
	nv_wr32(dev, 0x4040bc, 0x0);
	nv_wr32(dev, 0x4040c0, 0x0);
	nv_wr32(dev, 0x4040c4, 0x0);
	nv_wr32(dev, 0x4040c8, 0xf800008f);
	nv_wr32(dev, 0x4040d0, 0x0);
	nv_wr32(dev, 0x4040d4, 0x0);
	nv_wr32(dev, 0x4040d8, 0x0);
	nv_wr32(dev, 0x4040dc, 0x0);
	nv_wr32(dev, 0x4040e0, 0x0);
	nv_wr32(dev, 0x4040e4, 0x0);
	nv_wr32(dev, 0x4040e8, 0x1000);
	nv_wr32(dev, 0x4040f8, 0x0);
	nv_wr32(dev, 0x404130, 0x0);
	nv_wr32(dev, 0x404134, 0x0);
	nv_wr32(dev, 0x404138, 0x20000040);
	nv_wr32(dev, 0x404150, 0x2e);
	nv_wr32(dev, 0x404154, 0x400);
	nv_wr32(dev, 0x404158, 0x200);
	nv_wr32(dev, 0x404164, 0x55);
	nv_wr32(dev, 0x4041a0, 0x0);
	nv_wr32(dev, 0x4041a4, 0x0);
	nv_wr32(dev, 0x4041a8, 0x0);
	nv_wr32(dev, 0x4041ac, 0x0);
	nv_wr32(dev, 0x404200, 0x0);
	nv_wr32(dev, 0x404204, 0x0);
	nv_wr32(dev, 0x404208, 0x0);
	nv_wr32(dev, 0x40420c, 0x0);
}

static void
nve0_graph_generate_unk44xx(struct drm_device *dev)
{
	nv_wr32(dev, 0x404404, 0x0);
	nv_wr32(dev, 0x404408, 0x0);
	nv_wr32(dev, 0x40440c, 0x0);
	nv_wr32(dev, 0x404410, 0x0);
	nv_wr32(dev, 0x404414, 0x0);
	nv_wr32(dev, 0x404418, 0x0);
	nv_wr32(dev, 0x40441c, 0x0);
	nv_wr32(dev, 0x404420, 0x0);
	nv_wr32(dev, 0x404424, 0x0);
	nv_wr32(dev, 0x404428, 0x0);
	nv_wr32(dev, 0x40442c, 0x0);
	nv_wr32(dev, 0x404430, 0x0);
	nv_wr32(dev, 0x404434, 0x0);
	nv_wr32(dev, 0x404438, 0x0);
	nv_wr32(dev, 0x404460, 0x0);
	nv_wr32(dev, 0x404464, 0x0);
	nv_wr32(dev, 0x404468, 0xffffff);
	nv_wr32(dev, 0x40446c, 0x0);
	nv_wr32(dev, 0x404480, 0x1);
	nv_wr32(dev, 0x404498, 0x1);
}

static void
nve0_graph_generate_unk46xx(struct drm_device *dev)
{
	nv_wr32(dev, 0x404604, 0x14);
	nv_wr32(dev, 0x404608, 0x0);
	nv_wr32(dev, 0x40460c, 0x3fff);
	nv_wr32(dev, 0x404610, 0x100);
	nv_wr32(dev, 0x404618, 0x0);
	nv_wr32(dev, 0x40461c, 0x0);
	nv_wr32(dev, 0x404620, 0x0);
	nv_wr32(dev, 0x404624, 0x0);
	nv_wr32(dev, 0x40462c, 0x0);
	nv_wr32(dev, 0x404630, 0x0);
	nv_wr32(dev, 0x404640, 0x0);
	nv_wr32(dev, 0x404654, 0x0);
	nv_wr32(dev, 0x404660, 0x0);
	nv_wr32(dev, 0x404678, 0x0);
	nv_wr32(dev, 0x40467c, 0x2);
	nv_wr32(dev, 0x404680, 0x0);
	nv_wr32(dev, 0x404684, 0x0);
	nv_wr32(dev, 0x404688, 0x0);
	nv_wr32(dev, 0x40468c, 0x0);
	nv_wr32(dev, 0x404690, 0x0);
	nv_wr32(dev, 0x404694, 0x0);
	nv_wr32(dev, 0x404698, 0x0);
	nv_wr32(dev, 0x40469c, 0x0);
	nv_wr32(dev, 0x4046a0, 0x7f0080);
	nv_wr32(dev, 0x4046a4, 0x0);
	nv_wr32(dev, 0x4046a8, 0x0);
	nv_wr32(dev, 0x4046ac, 0x0);
	nv_wr32(dev, 0x4046b0, 0x0);
	nv_wr32(dev, 0x4046b4, 0x0);
	nv_wr32(dev, 0x4046b8, 0x0);
	nv_wr32(dev, 0x4046bc, 0x0);
	nv_wr32(dev, 0x4046c0, 0x0);
	nv_wr32(dev, 0x4046c8, 0x0);
	nv_wr32(dev, 0x4046cc, 0x0);
	nv_wr32(dev, 0x4046d0, 0x0);
}

static void
nve0_graph_generate_unk47xx(struct drm_device *dev)
{
	nv_wr32(dev, 0x404700, 0x0);
	nv_wr32(dev, 0x404704, 0x0);
	nv_wr32(dev, 0x404708, 0x0);
	nv_wr32(dev, 0x404718, 0x0);
	nv_wr32(dev, 0x40471c, 0x0);
	nv_wr32(dev, 0x404720, 0x0);
	nv_wr32(dev, 0x404724, 0x0);
	nv_wr32(dev, 0x404728, 0x0);
	nv_wr32(dev, 0x40472c, 0x0);
	nv_wr32(dev, 0x404730, 0x0);
	nv_wr32(dev, 0x404734, 0x100);
	nv_wr32(dev, 0x404738, 0x0);
	nv_wr32(dev, 0x40473c, 0x0);
	nv_wr32(dev, 0x404744, 0x0);
	nv_wr32(dev, 0x404748, 0x0);
	nv_wr32(dev, 0x404754, 0x0);
}

static void
nve0_graph_generate_unk58xx(struct drm_device *dev)
{
	nv_wr32(dev, 0x405800, 0xf8000bf);
	nv_wr32(dev, 0x405830, 0x2180648);
	nv_wr32(dev, 0x405834, 0x8000000);
	nv_wr32(dev, 0x405838, 0x0);
	nv_wr32(dev, 0x405854, 0x0);
	nv_wr32(dev, 0x405870, 0x1);
	nv_wr32(dev, 0x405874, 0x1);
	nv_wr32(dev, 0x405878, 0x1);
	nv_wr32(dev, 0x40587c, 0x1);
	nv_wr32(dev, 0x405a00, 0x0);
	nv_wr32(dev, 0x405a04, 0x0);
	nv_wr32(dev, 0x405a18, 0x0);
	nv_wr32(dev, 0x405b00, 0x0);
	nv_wr32(dev, 0x405b10, 0x1000);
}

static void
nve0_graph_generate_unk60xx(struct drm_device *dev)
{
	nv_wr32(dev, 0x406020, 0x4103c1);
	nv_wr32(dev, 0x406028, 0x1);
	nv_wr32(dev, 0x40602c, 0x1);
	nv_wr32(dev, 0x406030, 0x1);
	nv_wr32(dev, 0x406034, 0x1);
}

static void
nve0_graph_generate_unk64xx(struct drm_device *dev)
{
	nv_wr32(dev, 0x4064a8, 0x0);
	nv_wr32(dev, 0x4064ac, 0x3fff);
	nv_wr32(dev, 0x4064b4, 0x0);
	nv_wr32(dev, 0x4064b8, 0x0);
	nv_wr32(dev, 0x4064c0, 0x801a00f0);
	nv_wr32(dev, 0x4064c4, 0x192ffff);
	nv_wr32(dev, 0x4064c8, 0x1800600);
	nv_wr32(dev, 0x4064cc, 0x0);
	nv_wr32(dev, 0x4064d0, 0x0);
	nv_wr32(dev, 0x4064d4, 0x0);
	nv_wr32(dev, 0x4064d8, 0x0);
	nv_wr32(dev, 0x4064dc, 0x0);
	nv_wr32(dev, 0x4064e0, 0x0);
	nv_wr32(dev, 0x4064e4, 0x0);
	nv_wr32(dev, 0x4064e8, 0x0);
	nv_wr32(dev, 0x4064ec, 0x0);
	nv_wr32(dev, 0x4064fc, 0x22a);
}

static void
nve0_graph_generate_unk70xx(struct drm_device *dev)
{
	nv_wr32(dev, 0x407040, 0x0);
}

static void
nve0_graph_generate_unk78xx(struct drm_device *dev)
{
	nv_wr32(dev, 0x407804, 0x23);
	nv_wr32(dev, 0x40780c, 0xa418820);
	nv_wr32(dev, 0x407810, 0x62080e6);
	nv_wr32(dev, 0x407814, 0x20398a4);
	nv_wr32(dev, 0x407818, 0xe629062);
	nv_wr32(dev, 0x40781c, 0xa418820);
	nv_wr32(dev, 0x407820, 0xe6);
	nv_wr32(dev, 0x4078bc, 0x103);
}

static void
nve0_graph_generate_unk80xx(struct drm_device *dev)
{
	nv_wr32(dev, 0x408000, 0x0);
	nv_wr32(dev, 0x408004, 0x0);
	nv_wr32(dev, 0x408008, 0x30);
	nv_wr32(dev, 0x40800c, 0x0);
	nv_wr32(dev, 0x408010, 0x0);
	nv_wr32(dev, 0x408014, 0x69);
	nv_wr32(dev, 0x408018, 0xe100e100);
	nv_wr32(dev, 0x408064, 0x0);
}

static void
nve0_graph_generate_unk88xx(struct drm_device *dev)
{
	nv_wr32(dev, 0x408800, 0x2802a3c);
	nv_wr32(dev, 0x408804, 0x40);
	nv_wr32(dev, 0x408808, 0x1043e005);
	nv_wr32(dev, 0x408840, 0xb);
	nv_wr32(dev, 0x408900, 0x3080b801);
	nv_wr32(dev, 0x408904, 0x62000001);
	nv_wr32(dev, 0x408908, 0xc8102f);
	nv_wr32(dev, 0x408980, 0x11d);
}

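/* Reset defaults for the GPC broadcast register range (0x418xxx). */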
static void
nve0_graph_generate_gpc(struct drm_device *dev)
{
	nv_wr32(dev, 0x418380, 0x16);
	nv_wr32(dev, 0x418400, 0x38004e00);
	nv_wr32(dev, 0x418404, 0x71e0ffff);
	nv_wr32(dev, 0x41840c, 0x1008);
	nv_wr32(dev, 0x418410, 0xfff0fff);
	nv_wr32(dev, 0x418414, 0x2200fff);
	nv_wr32(dev, 0x418450, 0x0);
	nv_wr32(dev, 0x418454, 0x0);
	nv_wr32(dev, 0x418458, 0x0);
	nv_wr32(dev, 0x41845c, 0x0);
	nv_wr32(dev, 0x418460, 0x0);
	nv_wr32(dev, 0x418464, 0x0);
	nv_wr32(dev, 0x418468, 0x1);
	nv_wr32(dev, 0x41846c, 0x0);
	nv_wr32(dev, 0x418470, 0x0);
	nv_wr32(dev, 0x418600, 0x1f);
	nv_wr32(dev, 0x418684, 0xf);
	nv_wr32(dev, 0x418700, 0x2);
	nv_wr32(dev, 0x418704, 0x80);
	nv_wr32(dev, 0x418708, 0x0);
	nv_wr32(dev, 0x41870c, 0x0);
	nv_wr32(dev, 0x418710, 0x0);
	nv_wr32(dev, 0x418800, 0x7006860a);
	nv_wr32(dev, 0x418808, 0x0);
	nv_wr32(dev, 0x41880c, 0x0);
	nv_wr32(dev, 0x418810, 0x0);
	nv_wr32(dev, 0x418828, 0x44);
	nv_wr32(dev, 0x418830, 0x10000001);
	nv_wr32(dev, 0x4188d8, 0x8);
	nv_wr32(dev, 0x4188e0, 0x1000000);
	nv_wr32(dev, 0x4188e8, 0x0);
	nv_wr32(dev, 0x4188ec, 0x0);
	nv_wr32(dev, 0x4188f0, 0x0);
	nv_wr32(dev, 0x4188f4, 0x0);
	nv_wr32(dev, 0x4188f8, 0x0);
	nv_wr32(dev, 0x4188fc, 0x20100018);
	nv_wr32(dev, 0x41891c, 0xff00ff);
	nv_wr32(dev, 0x418924, 0x0);
	nv_wr32(dev, 0x418928, 0xffff00);
	nv_wr32(dev, 0x41892c, 0xff00);
	nv_wr32(dev, 0x418a00, 0x0);
	nv_wr32(dev, 0x418a04, 0x0);
	nv_wr32(dev, 0x418a08, 0x0);
	nv_wr32(dev, 0x418a0c, 0x10000);
	nv_wr32(dev, 0x418a10, 0x0);
	nv_wr32(dev, 0x418a14, 0x0);
	nv_wr32(dev, 0x418a18, 0x0);
	nv_wr32(dev, 0x418a20, 0x0);
	nv_wr32(dev, 0x418a24, 0x0);
	nv_wr32(dev, 0x418a28, 0x0);
	nv_wr32(dev, 0x418a2c, 0x10000);
	nv_wr32(dev, 0x418a30, 0x0);
	nv_wr32(dev, 0x418a34, 0x0);
	nv_wr32(dev, 0x418a38, 0x0);
	nv_wr32(dev, 0x418a40, 0x0);
	nv_wr32(dev, 0x418a44, 0x0);
	nv_wr32(dev, 0x418a48, 0x0);
	nv_wr32(dev, 0x418a4c, 0x10000);
	nv_wr32(dev, 0x418a50, 0x0);
	nv_wr32(dev, 0x418a54, 0x0);
	nv_wr32(dev, 0x418a58, 0x0);
	nv_wr32(dev, 0x418a60, 0x0);
	nv_wr32(dev, 0x418a64, 0x0);
	nv_wr32(dev, 0x418a68, 0x0);
	nv_wr32(dev, 0x418a6c, 0x10000);
	nv_wr32(dev, 0x418a70, 0x0);
	nv_wr32(dev, 0x418a74, 0x0);
	nv_wr32(dev, 0x418a78, 0x0);
	nv_wr32(dev, 0x418a80, 0x0);
	nv_wr32(dev, 0x418a84, 0x0);
	nv_wr32(dev, 0x418a88, 0x0);
	nv_wr32(dev, 0x418a8c, 0x10000);
	nv_wr32(dev, 0x418a90, 0x0);
	nv_wr32(dev, 0x418a94, 0x0);
	nv_wr32(dev, 0x418a98, 0x0);
	nv_wr32(dev, 0x418aa0, 0x0);
	nv_wr32(dev, 0x418aa4, 0x0);
	nv_wr32(dev, 0x418aa8, 0x0);
	nv_wr32(dev, 0x418aac, 0x10000);
	nv_wr32(dev, 0x418ab0, 0x0);
	nv_wr32(dev, 0x418ab4, 0x0);
	nv_wr32(dev, 0x418ab8, 0x0);
	nv_wr32(dev, 0x418ac0, 0x0);
	nv_wr32(dev, 0x418ac4, 0x0);
	nv_wr32(dev, 0x418ac8, 0x0);
	nv_wr32(dev, 0x418acc, 0x10000);
	nv_wr32(dev, 0x418ad0, 0x0);
	nv_wr32(dev, 0x418ad4, 0x0);
	nv_wr32(dev, 0x418ad8, 0x0);
	nv_wr32(dev, 0x418ae0, 0x0);
	nv_wr32(dev, 0x418ae4, 0x0);
	nv_wr32(dev, 0x418ae8, 0x0);
	nv_wr32(dev, 0x418aec, 0x10000);
	nv_wr32(dev, 0x418af0, 0x0);
	nv_wr32(dev, 0x418af4, 0x0);
	nv_wr32(dev, 0x418af8, 0x0);
	nv_wr32(dev, 0x418b00, 0x6);
	nv_wr32(dev, 0x418b08, 0xa418820);
	nv_wr32(dev, 0x418b0c, 0x62080e6);
	nv_wr32(dev, 0x418b10, 0x20398a4);
	nv_wr32(dev, 0x418b14, 0xe629062);
	nv_wr32(dev, 0x418b18, 0xa418820);
	nv_wr32(dev, 0x418b1c, 0xe6);
	nv_wr32(dev, 0x418bb8, 0x103);
	nv_wr32(dev, 0x418c08, 0x1);
	nv_wr32(dev, 0x418c10, 0x0);
	nv_wr32(dev, 0x418c14, 0x0);
	nv_wr32(dev, 0x418c18, 0x0);
	nv_wr32(dev, 0x418c1c, 0x0);
	nv_wr32(dev, 0x418c20, 0x0);
	nv_wr32(dev, 0x418c24, 0x0);
	nv_wr32(dev, 0x418c28, 0x0);
	nv_wr32(dev, 0x418c2c, 0x0);
	nv_wr32(dev, 0x418c40, 0xffffffff);
	nv_wr32(dev, 0x418c6c, 0x1);
	nv_wr32(dev, 0x418c80, 0x20200004);
	nv_wr32(dev, 0x418c8c, 0x1);
	nv_wr32(dev, 0x419000, 0x780);
	nv_wr32(dev, 0x419004, 0x0);
	nv_wr32(dev, 0x419008, 0x0);
	nv_wr32(dev, 0x419014, 0x4);
}

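/* Reset defaults for the TPC broadcast register range (0x419xxx). */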
static void
nve0_graph_generate_tpc(struct drm_device *dev)
{
	nv_wr32(dev, 0x419848, 0x0);
	nv_wr32(dev, 0x419864, 0x129);
	nv_wr32(dev, 0x419888, 0x0);
	nv_wr32(dev, 0x419a00, 0xf0);
	nv_wr32(dev, 0x419a04, 0x1);
	nv_wr32(dev, 0x419a08, 0x21);
	nv_wr32(dev, 0x419a0c, 0x20000);
	nv_wr32(dev, 0x419a10, 0x0);
	nv_wr32(dev, 0x419a14, 0x200);
	nv_wr32(dev, 0x419a1c, 0xc000);
	nv_wr32(dev, 0x419a20, 0x800);
	nv_wr32(dev, 0x419a30, 0x1);
	nv_wr32(dev, 0x419ac4, 0x37f440);
	nv_wr32(dev, 0x419c00, 0xa);
	nv_wr32(dev, 0x419c04, 0x80000006);
	nv_wr32(dev, 0x419c08, 0x2);
	nv_wr32(dev, 0x419c20, 0x0);
	nv_wr32(dev, 0x419c24, 0x84210);
	nv_wr32(dev, 0x419c28, 0x3efbefbe);
	nv_wr32(dev, 0x419ce8, 0x0);
	nv_wr32(dev, 0x419cf4, 0x3203);
	nv_wr32(dev, 0x419e04, 0x0);
	nv_wr32(dev, 0x419e08, 0x0);
	nv_wr32(dev, 0x419e0c, 0x0);
	nv_wr32(dev, 0x419e10, 0x402);
	nv_wr32(dev, 0x419e44, 0x13eff2);
	nv_wr32(dev, 0x419e48, 0x0);
	nv_wr32(dev, 0x419e4c, 0x7f);
	nv_wr32(dev, 0x419e50, 0x0);
	nv_wr32(dev, 0x419e54, 0x0);
	nv_wr32(dev, 0x419e58, 0x0);
	nv_wr32(dev, 0x419e5c, 0x0);
	nv_wr32(dev, 0x419e60, 0x0);
	nv_wr32(dev, 0x419e64, 0x0);
	nv_wr32(dev, 0x419e68, 0x0);
	nv_wr32(dev, 0x419e6c, 0x0);
	nv_wr32(dev, 0x419e70, 0x0);
	nv_wr32(dev, 0x419e74, 0x0);
	nv_wr32(dev, 0x419e78, 0x0);
	nv_wr32(dev, 0x419e7c, 0x0);
	nv_wr32(dev, 0x419e80, 0x0);
	nv_wr32(dev, 0x419e84, 0x0);
	nv_wr32(dev, 0x419e88, 0x0);
	nv_wr32(dev, 0x419e8c, 0x0);
	nv_wr32(dev, 0x419e90, 0x0);
	nv_wr32(dev, 0x419e94, 0x0);
	nv_wr32(dev, 0x419e98, 0x0);
	nv_wr32(dev, 0x419eac, 0x1fcf);
	nv_wr32(dev, 0x419eb0, 0xd3f);
	nv_wr32(dev, 0x419ec8, 0x1304f);
	nv_wr32(dev, 0x419f30, 0x0);
	nv_wr32(dev, 0x419f34, 0x0);
	nv_wr32(dev, 0x419f38, 0x0);
	nv_wr32(dev, 0x419f3c, 0x0);
	nv_wr32(dev, 0x419f40, 0x0);
	nv_wr32(dev, 0x419f44, 0x0);
	nv_wr32(dev, 0x419f48, 0x0);
	nv_wr32(dev, 0x419f4c, 0x0);
	nv_wr32(dev, 0x419f58, 0x0);
	nv_wr32(dev, 0x419f78, 0xb);
}

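/* Defaults for the 0x41bxxx range, seemingly a further TPC-related unit. */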
static void
nve0_graph_generate_tpcunk(struct drm_device *dev)
{
	nv_wr32(dev, 0x41be24, 0x6);
	nv_wr32(dev, 0x41bec0, 0x12180000);
	nv_wr32(dev, 0x41bec4, 0x37f7f);
	nv_wr32(dev, 0x41bee4, 0x6480430);
	nv_wr32(dev, 0x41bf00, 0xa418820);
	nv_wr32(dev, 0x41bf04, 0x62080e6);
	nv_wr32(dev, 0x41bf08, 0x20398a4);
	nv_wr32(dev, 0x41bf0c, 0xe629062);
	nv_wr32(dev, 0x41bf10, 0xa418820);
	nv_wr32(dev, 0x41bf14, 0xe6);
	nv_wr32(dev, 0x41bfd0, 0x900103);
	nv_wr32(dev, 0x41bfe0, 0x400001);
	nv_wr32(dev, 0x41bfe4, 0x0);
}

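/*
 * Top-level golden-context setup: program static defaults through the
 * helpers above, replay the channel's mmio list, then derive the
 * per-GPC/TPC configuration from the unit counts probed at driver load.
 */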
int
nve0_grctx_generate(struct nouveau_channel *chan)
{
	struct nve0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
	struct nve0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
	struct drm_device *dev = chan->dev;
	u32 data[6] = {}, data2[2] = {}, tmp;
	u32 tpc_set = 0, tpc_mask = 0;
	u8 tpcnr[GPC_MAX], a, b;
	u8 shift, ntpcv;
	int i, gpc, tpc, id;

	nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
	nv_wr32(dev, 0x400204, 0x00000000);
	nv_wr32(dev, 0x400208, 0x00000000);

	nve0_graph_generate_unk40xx(dev);
	nve0_graph_generate_unk44xx(dev);
	nve0_graph_generate_unk46xx(dev);
	nve0_graph_generate_unk47xx(dev);
	nve0_graph_generate_unk58xx(dev);
	nve0_graph_generate_unk60xx(dev);
	nve0_graph_generate_unk64xx(dev);
	nve0_graph_generate_unk70xx(dev);
	nve0_graph_generate_unk78xx(dev);
	nve0_graph_generate_unk80xx(dev);
	nve0_graph_generate_unk88xx(dev);
	nve0_graph_generate_gpc(dev);
	nve0_graph_generate_tpc(dev);
	nve0_graph_generate_tpcunk(dev);

	nv_wr32(dev, 0x404154, 0x0);

2654 for (i = 0; i < grch->mmio_nr * 8; i += 8) {
2655 u32 reg = nv_ro32(grch->mmio, i + 0);
2656 u32 val = nv_ro32(grch->mmio, i + 4);
2657 nv_wr32(dev, reg, val);
2658 }
2659
2660 nv_wr32(dev, 0x418c6c, 0x1);
2661 nv_wr32(dev, 0x41980c, 0x10);
2662 nv_wr32(dev, 0x41be08, 0x4);
2663 nv_wr32(dev, 0x4064c0, 0x801a00f0);
2664 nv_wr32(dev, 0x405800, 0xf8000bf);
2665 nv_wr32(dev, 0x419c00, 0xa);
2666
2667 for (tpc = 0, id = 0; tpc < 4; tpc++) {
2668 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
2669 if (tpc < priv->tpc_nr[gpc]) {
2670 nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x0698), id);
2671 nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x04e8), id);
2672 nv_wr32(dev, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
2673 nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x0088), id++);
2674 }
2675
2676 nv_wr32(dev, GPC_UNIT(gpc, 0x0c08), priv->tpc_nr[gpc]);
2677 nv_wr32(dev, GPC_UNIT(gpc, 0x0c8c), priv->tpc_nr[gpc]);
2678 }
2679 }
2680
2681 tmp = 0;
2682 for (i = 0; i < priv->gpc_nr; i++)
2683 tmp |= priv->tpc_nr[i] << (i * 4);
2684 nv_wr32(dev, 0x406028, tmp);
2685 nv_wr32(dev, 0x405870, tmp);
2686
2687 nv_wr32(dev, 0x40602c, 0x0);
2688 nv_wr32(dev, 0x405874, 0x0);
2689 nv_wr32(dev, 0x406030, 0x0);
2690 nv_wr32(dev, 0x405878, 0x0);
2691 nv_wr32(dev, 0x406034, 0x0);
2692 nv_wr32(dev, 0x40587c, 0x0);
2693
2694 /* calculate first set of magics */
2695 memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
2696
2697 gpc = -1;
2698 for (tpc = 0; tpc < priv->tpc_total; tpc++) {
2699 do {
2700 gpc = (gpc + 1) % priv->gpc_nr;
2701 } while (!tpcnr[gpc]);
2702 tpcnr[gpc]--;
2703
2704 data[tpc / 6] |= gpc << ((tpc % 6) * 5);
2705 }
2706
2707 for (; tpc < 32; tpc++)
2708 data[tpc / 6] |= 7 << ((tpc % 6) * 5);
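	/* XXX: reading of the two loops above, for illustration only: each
	 * of the 32 TPC slots holds a 5-bit GPC index, six slots per dword
	 * of data[], with unused slots parked at 7; e.g. two GPCs with two
	 * TPCs each round-robin to 0,1,0,1 and give
	 * data[0] = 0 | (1 << 5) | (0 << 10) | (1 << 15) | (7 << 20) | (7 << 25).
	 */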

	/* and the second... */
	shift = 0;
	ntpcv = priv->tpc_total;
	while (!(ntpcv & (1 << 4))) {
		ntpcv <<= 1;
		shift++;
	}
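	/* XXX: for illustration: this scales tpc_total up into the range
	 * [16,31] (until bit 4 is set), recording the scale factor in
	 * 'shift'; e.g. tpc_total = 8 gives ntpcv = 16 and shift = 1.
	 */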

	data2[0] = ntpcv << 16;
	data2[0] |= shift << 21;
	data2[0] |= (((1 << (0 + 5)) % ntpcv) << 24);
	data2[0] |= priv->tpc_total << 8;
	data2[0] |= priv->magic_not_rop_nr;
	for (i = 1; i < 7; i++)
		data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5);

	/* and write it all to the various parts of PGRAPH */
	nv_wr32(dev, 0x418bb8, (priv->tpc_total << 8) | priv->magic_not_rop_nr);
	for (i = 0; i < 6; i++)
		nv_wr32(dev, 0x418b08 + (i * 4), data[i]);

	nv_wr32(dev, 0x41bfd0, data2[0]);
	nv_wr32(dev, 0x41bfe4, data2[1]);
	for (i = 0; i < 6; i++)
		nv_wr32(dev, 0x41bf00 + (i * 4), data[i]);

	nv_wr32(dev, 0x4078bc, (priv->tpc_total << 8) | priv->magic_not_rop_nr);
	for (i = 0; i < 6; i++)
		nv_wr32(dev, 0x40780c + (i * 4), data[i]);


	memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
	for (gpc = 0; gpc < priv->gpc_nr; gpc++)
		tpc_mask |= ((1 << priv->tpc_nr[gpc]) - 1) << (gpc * 8);

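	/* XXX: best guess at the loop below: slot i of 0x406800 holds the
	 * TPCs enabled so far (one more bit roughly every 32 / tpc_total
	 * iterations), while the matching 0x406c00 slot holds the
	 * complement within tpc_mask.
	 */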
	for (i = 0, gpc = -1, b = -1; i < 32; i++) {
		a = (i * (priv->tpc_total - 1)) / 32;
		if (a != b) {
			b = a;
			do {
				gpc = (gpc + 1) % priv->gpc_nr;
			} while (!tpcnr[gpc]);
			tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;

			tpc_set |= 1 << ((gpc * 8) + tpc);
		}

		nv_wr32(dev, 0x406800 + (i * 0x20), tpc_set);
		nv_wr32(dev, 0x406c00 + (i * 0x20), tpc_set ^ tpc_mask);
	}

	for (i = 0; i < 8; i++)
		nv_wr32(dev, 0x4064d0 + (i * 0x04), 0x00000000);

	nv_wr32(dev, 0x405b00, 0x201);
	nv_wr32(dev, 0x408850, 0x2);
	nv_wr32(dev, 0x408958, 0x2);
	nv_wr32(dev, 0x419f78, 0xa);

	nve0_grctx_generate_icmd(dev);
	nve0_grctx_generate_a097(dev);
	nve0_grctx_generate_902d(dev);

	nv_mask(dev, 0x000260, 0x00000001, 0x00000001);
	nv_wr32(dev, 0x418800, 0x7026860a); //XXX
	nv_wr32(dev, 0x41be10, 0x00bb8bc7); //XXX
	return 0;
}
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 9d83729956ff..a6598fd66423 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -70,8 +70,9 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
 	r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
 	r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
 	evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \
-	radeon_trace_points.o ni.o cayman_blit_shaders.o atombios_encoders.o \
-	radeon_semaphore.o radeon_sa.o atombios_i2c.o si.o si_blit_shaders.o
+	evergreen_hdmi.o radeon_trace_points.o ni.o cayman_blit_shaders.o \
+	atombios_encoders.o radeon_semaphore.o radeon_sa.o atombios_i2c.o si.o \
+	si_blit_shaders.o radeon_prime.o
 
 radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
 radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index af1054f8202a..01d77d1554f4 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -591,8 +591,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
 		if (encoder->crtc == crtc) {
 			radeon_encoder = to_radeon_encoder(encoder);
 			connector = radeon_get_connector_for_encoder(encoder);
-			/* if (connector && connector->display_info.bpc)
-				bpc = connector->display_info.bpc; */
+			bpc = radeon_get_monitor_bpc(connector);
 			encoder_mode = atombios_get_encoder_mode(encoder);
 			is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock);
 			if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
@@ -968,9 +967,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
 		struct radeon_connector_atom_dig *dig_connector =
 			radeon_connector->con_priv;
 		int dp_clock;
-
-		/* if (connector->display_info.bpc)
-			bpc = connector->display_info.bpc; */
+		bpc = radeon_get_monitor_bpc(connector);
 
 		switch (encoder_mode) {
 		case ATOM_ENCODER_MODE_DP_MST:
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index c57d85664e77..5131b3b0f7d2 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -405,13 +405,10 @@ static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
 /* get bpc from the EDID */
 static int convert_bpc_to_bpp(int bpc)
 {
-#if 0
 	if (bpc == 0)
 		return 24;
 	else
 		return bpc * 3;
-#endif
-	return 24;
 }
 
 /* get the max pix clock supported by the link rate and lane num */
@@ -463,7 +460,7 @@ static int radeon_dp_get_dp_lane_number(struct drm_connector *connector,
 					u8 dpcd[DP_DPCD_SIZE],
 					int pix_clock)
 {
-	int bpp = convert_bpc_to_bpp(connector->display_info.bpc);
+	int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
 	int max_link_rate = dp_get_max_link_rate(dpcd);
 	int max_lane_num = dp_get_max_lane_number(dpcd);
 	int lane_num;
@@ -482,7 +479,7 @@ static int radeon_dp_get_dp_link_clock(struct drm_connector *connector,
 				       u8 dpcd[DP_DPCD_SIZE],
 				       int pix_clock)
 {
-	int bpp = convert_bpc_to_bpp(connector->display_info.bpc);
+	int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
 	int lane_num, max_pix_clock;
 
 	if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
@@ -533,6 +530,23 @@ u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector)
 					  dig_connector->dp_i2c_bus->rec.i2c_id, 0);
 }
 
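+/* XXX: the OUI is the sink's IEEE vendor identifier; per the DP spec it
+ * sits at DPCD 0x400 (sink) and 0x500 (branch), and is only meaningful
+ * when the sink advertises DP_OUI_SUPPORT, hence the check below. */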
+static void radeon_dp_probe_oui(struct radeon_connector *radeon_connector)
+{
+	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+	u8 buf[3];
+
+	if (!(dig_connector->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
+		return;
+
+	if (radeon_dp_aux_native_read(radeon_connector, DP_SINK_OUI, buf, 3, 0))
+		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
+			      buf[0], buf[1], buf[2]);
+
+	if (radeon_dp_aux_native_read(radeon_connector, DP_BRANCH_OUI, buf, 3, 0))
+		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
+			      buf[0], buf[1], buf[2]);
+}
+
 bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
 {
 	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
@@ -546,6 +560,9 @@ bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
 		for (i = 0; i < 8; i++)
 			DRM_DEBUG_KMS("%02x ", msg[i]);
 		DRM_DEBUG_KMS("\n");
+
+		radeon_dp_probe_oui(radeon_connector);
+
 		return true;
 	}
 	dig_connector->dpcd[0] = 0;
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 2d39f9977e00..e7b1ec5ae8c6 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -545,7 +545,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo
 		dp_clock = dig_connector->dp_clock;
 		dp_lane_count = dig_connector->dp_lane_count;
 		hpd_id = radeon_connector->hpd.hpd;
-		/* bpc = connector->display_info.bpc; */
+		bpc = radeon_get_monitor_bpc(connector);
 	}
 
 	/* no dig encoder assigned */
@@ -1163,7 +1163,7 @@ atombios_external_encoder_setup(struct drm_encoder *encoder,
 		dp_lane_count = dig_connector->dp_lane_count;
 		connector_object_id =
 			(radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
-		/* bpc = connector->display_info.bpc; */
+		bpc = radeon_get_monitor_bpc(connector);
 	}
 
 	memset(&args, 0, sizeof(args));
@@ -1926,7 +1926,10 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
 
 	if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
 		r600_hdmi_enable(encoder);
-		r600_hdmi_setmode(encoder, adjusted_mode);
+		if (ASIC_IS_DCE4(rdev))
+			evergreen_hdmi_setmode(encoder, adjusted_mode);
+		else
+			r600_hdmi_setmode(encoder, adjusted_mode);
 	}
 }
 
@@ -2081,6 +2084,7 @@ radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder)
 
 static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
 {
+	struct radeon_device *rdev = encoder->dev->dev_private;
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
 
@@ -2089,8 +2093,16 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
 	    (radeon_encoder_get_dp_bridge_encoder_id(encoder) !=
 	     ENCODER_OBJECT_ID_NONE)) {
 		struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-		if (dig)
+		if (dig) {
 			dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder);
+			if (radeon_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT) {
+				if (rdev->family >= CHIP_R600)
+					dig->afmt = rdev->mode_info.afmt[dig->dig_encoder];
+				else
+					/* RS600/690/740 have only 1 afmt block */
+					dig->afmt = rdev->mode_info.afmt[0];
+			}
+		}
 	}
 
 	radeon_atom_output_lock(encoder, true);
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index cfa372cb1cb3..58991af90502 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2424,27 +2424,18 @@ bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *rin
 	u32 srbm_status;
 	u32 grbm_status;
 	u32 grbm_status_se0, grbm_status_se1;
-	struct r100_gpu_lockup *lockup = &rdev->config.evergreen.lockup;
-	int r;
 
 	srbm_status = RREG32(SRBM_STATUS);
 	grbm_status = RREG32(GRBM_STATUS);
 	grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
 	grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
 	if (!(grbm_status & GUI_ACTIVE)) {
-		r100_gpu_lockup_update(lockup, ring);
+		radeon_ring_lockup_update(ring);
 		return false;
 	}
 	/* force CP activities */
-	r = radeon_ring_lock(rdev, ring, 2);
-	if (!r) {
-		/* PACKET2 NOP */
-		radeon_ring_write(ring, 0x80000000);
-		radeon_ring_write(ring, 0x80000000);
-		radeon_ring_unlock_commit(rdev, ring);
-	}
-	ring->rptr = RREG32(CP_RB_RPTR);
-	return r100_gpu_cp_is_lockup(rdev, lockup, ring);
+	radeon_ring_force_activity(rdev, ring);
+	return radeon_ring_test_lockup(rdev, ring);
 }
 
 static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
@@ -2594,6 +2585,7 @@ int evergreen_irq_set(struct radeon_device *rdev)
 	u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
 	u32 grbm_int_cntl = 0;
 	u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
+	u32 afmt1 = 0, afmt2 = 0, afmt3 = 0, afmt4 = 0, afmt5 = 0, afmt6 = 0;
 
 	if (!rdev->irq.installed) {
 		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -2614,6 +2606,13 @@ int evergreen_irq_set(struct radeon_device *rdev)
 	hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
 	hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
 
+	afmt1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+	afmt2 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+	afmt3 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+	afmt4 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+	afmt5 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+	afmt6 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+
 	if (rdev->family >= CHIP_CAYMAN) {
 		/* enable CP interrupts on all rings */
 		if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
@@ -2690,6 +2689,30 @@ int evergreen_irq_set(struct radeon_device *rdev)
 		DRM_DEBUG("evergreen_irq_set: hpd 6\n");
 		hpd6 |= DC_HPDx_INT_EN;
 	}
+	if (rdev->irq.afmt[0]) {
+		DRM_DEBUG("evergreen_irq_set: hdmi 0\n");
+		afmt1 |= AFMT_AZ_FORMAT_WTRIG_MASK;
+	}
+	if (rdev->irq.afmt[1]) {
+		DRM_DEBUG("evergreen_irq_set: hdmi 1\n");
+		afmt2 |= AFMT_AZ_FORMAT_WTRIG_MASK;
+	}
+	if (rdev->irq.afmt[2]) {
+		DRM_DEBUG("evergreen_irq_set: hdmi 2\n");
+		afmt3 |= AFMT_AZ_FORMAT_WTRIG_MASK;
+	}
+	if (rdev->irq.afmt[3]) {
+		DRM_DEBUG("evergreen_irq_set: hdmi 3\n");
+		afmt4 |= AFMT_AZ_FORMAT_WTRIG_MASK;
+	}
+	if (rdev->irq.afmt[4]) {
+		DRM_DEBUG("evergreen_irq_set: hdmi 4\n");
+		afmt5 |= AFMT_AZ_FORMAT_WTRIG_MASK;
+	}
+	if (rdev->irq.afmt[5]) {
+		DRM_DEBUG("evergreen_irq_set: hdmi 5\n");
+		afmt6 |= AFMT_AZ_FORMAT_WTRIG_MASK;
+	}
 	if (rdev->irq.gui_idle) {
 		DRM_DEBUG("gui idle\n");
 		grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
@@ -2732,6 +2755,13 @@ int evergreen_irq_set(struct radeon_device *rdev)
 	WREG32(DC_HPD5_INT_CONTROL, hpd5);
 	WREG32(DC_HPD6_INT_CONTROL, hpd6);
 
+	WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, afmt1);
+	WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, afmt2);
+	WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, afmt3);
+	WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, afmt4);
+	WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, afmt5);
+	WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, afmt6);
+
 	return 0;
 }
 
@@ -2756,6 +2786,13 @@ static void evergreen_irq_ack(struct radeon_device *rdev)
 		rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
 	}
 
+	rdev->irq.stat_regs.evergreen.afmt_status1 = RREG32(AFMT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
+	rdev->irq.stat_regs.evergreen.afmt_status2 = RREG32(AFMT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
+	rdev->irq.stat_regs.evergreen.afmt_status3 = RREG32(AFMT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
+	rdev->irq.stat_regs.evergreen.afmt_status4 = RREG32(AFMT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
+	rdev->irq.stat_regs.evergreen.afmt_status5 = RREG32(AFMT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
+	rdev->irq.stat_regs.evergreen.afmt_status6 = RREG32(AFMT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
+
 	if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
 		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
 	if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
@@ -2829,6 +2866,36 @@ static void evergreen_irq_ack(struct radeon_device *rdev)
 		tmp |= DC_HPDx_INT_ACK;
 		WREG32(DC_HPD6_INT_CONTROL, tmp);
 	}
+	if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) {
+		tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
+		tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
+		WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, tmp);
+	}
+	if (rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG) {
+		tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
+		tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
+		WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, tmp);
+	}
+	if (rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG) {
+		tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
+		tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
+		WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, tmp);
+	}
+	if (rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG) {
+		tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
+		tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
+		WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, tmp);
+	}
+	if (rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG) {
+		tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
+		tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
+		WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, tmp);
+	}
+	if (rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG) {
+		tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
+		tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
+		WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, tmp);
+	}
 }
 
 void evergreen_irq_disable(struct radeon_device *rdev)
@@ -2878,6 +2945,7 @@ int evergreen_irq_process(struct radeon_device *rdev)
 	u32 ring_index;
 	unsigned long flags;
 	bool queue_hotplug = false;
+	bool queue_hdmi = false;
 
 	if (!rdev->ih.enabled || rdev->shutdown)
 		return IRQ_NONE;
@@ -3111,6 +3179,55 @@ restart_ih:
 			break;
 		}
 		break;
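+	/* XXX: on DCE4 the six AFMT/HDMI blocks share IH source id 44, one
+	 * src_data per block; the handler below only clears the cached
+	 * write-trigger bit and defers the real work to audio_work. */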
+	case 44: /* hdmi */
+		switch (src_data) {
+		case 0:
+			if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) {
+				rdev->irq.stat_regs.evergreen.afmt_status1 &= ~AFMT_AZ_FORMAT_WTRIG;
+				queue_hdmi = true;
+				DRM_DEBUG("IH: HDMI0\n");
+			}
+			break;
+		case 1:
+			if (rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG) {
+				rdev->irq.stat_regs.evergreen.afmt_status2 &= ~AFMT_AZ_FORMAT_WTRIG;
+				queue_hdmi = true;
+				DRM_DEBUG("IH: HDMI1\n");
+			}
+			break;
+		case 2:
+			if (rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG) {
+				rdev->irq.stat_regs.evergreen.afmt_status3 &= ~AFMT_AZ_FORMAT_WTRIG;
+				queue_hdmi = true;
+				DRM_DEBUG("IH: HDMI2\n");
+			}
+			break;
+		case 3:
+			if (rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG) {
+				rdev->irq.stat_regs.evergreen.afmt_status4 &= ~AFMT_AZ_FORMAT_WTRIG;
+				queue_hdmi = true;
+				DRM_DEBUG("IH: HDMI3\n");
+			}
+			break;
+		case 4:
+			if (rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG) {
+				rdev->irq.stat_regs.evergreen.afmt_status5 &= ~AFMT_AZ_FORMAT_WTRIG;
+				queue_hdmi = true;
+				DRM_DEBUG("IH: HDMI4\n");
+			}
+			break;
+		case 5:
+			if (rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG) {
+				rdev->irq.stat_regs.evergreen.afmt_status6 &= ~AFMT_AZ_FORMAT_WTRIG;
+				queue_hdmi = true;
+				DRM_DEBUG("IH: HDMI5\n");
+			}
+			break;
+		default:
+			DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+			break;
+		}
+		break;
 	case 176: /* CP_INT in ring buffer */
 	case 177: /* CP_INT in IB1 */
 	case 178: /* CP_INT in IB2 */
@@ -3154,6 +3271,8 @@ restart_ih:
 		goto restart_ih;
 	if (queue_hotplug)
 		schedule_work(&rdev->hotplug_work);
+	if (queue_hdmi)
+		schedule_work(&rdev->audio_work);
 	rdev->ih.rptr = rptr;
 	WREG32(IH_RB_RPTR, rdev->ih.rptr);
 	spin_unlock_irqrestore(&rdev->ih.lock, flags);
@@ -3248,12 +3367,9 @@ static int evergreen_startup(struct radeon_device *rdev)
 	if (r)
 		return r;
 
-	r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
-	if (r) {
-		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
-		rdev->accel_working = false;
-		return r;
-	}
+	r = radeon_ib_ring_tests(rdev);
+	if (r)
+		return r;
 
 	r = r600_audio_init(rdev);
 	if (r) {
@@ -3319,10 +3435,6 @@ int evergreen_init(struct radeon_device *rdev)
 {
 	int r;
 
-	/* This don't do much */
-	r = radeon_gem_init(rdev);
-	if (r)
-		return r;
 	/* Read BIOS */
 	if (!radeon_get_bios(rdev)) {
 		if (ASIC_IS_AVIVO(rdev))
@@ -3434,7 +3546,6 @@ void evergreen_fini(struct radeon_device *rdev)
 	evergreen_pcie_gart_fini(rdev);
 	r600_vram_scratch_fini(rdev);
 	radeon_gem_fini(rdev);
-	radeon_semaphore_driver_fini(rdev);
 	radeon_fence_driver_fini(rdev);
 	radeon_agp_fini(rdev);
 	radeon_bo_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
index 222acd2d33df..1e96bd458cfd 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -637,7 +637,6 @@ int evergreen_blit_init(struct radeon_device *rdev)
 	if (rdev->r600_blit.shader_obj)
 		goto done;
 
-	mutex_init(&rdev->r600_blit.mutex);
 	rdev->r600_blit.state_offset = 0;
 
 	if (rdev->family < CHIP_CAYMAN)
@@ -669,7 +668,7 @@ int evergreen_blit_init(struct radeon_device *rdev)
 	obj_size = ALIGN(obj_size, 256);
 
 	r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
-			     &rdev->r600_blit.shader_obj);
+			     NULL, &rdev->r600_blit.shader_obj);
 	if (r) {
 		DRM_ERROR("evergreen failed to allocate shader\n");
 		return r;
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 70089d32b80f..4e7dd2b4843d 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -1057,7 +1057,7 @@ static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
 	uint32_t header, h_idx, reg, wait_reg_mem_info;
 	volatile uint32_t *ib;
 
-	ib = p->ib->ptr;
+	ib = p->ib.ptr;
 
 	/* parse the WAIT_REG_MEM */
 	r = evergreen_cs_packet_parse(p, &wait_reg_mem, p->idx);
@@ -1215,7 +1215,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 		if (!(evergreen_reg_safe_bm[i] & m))
 			return 0;
 	}
-	ib = p->ib->ptr;
+	ib = p->ib.ptr;
 	switch (reg) {
 	/* force following reg to 0 in an attempt to disable out buffer
 	 * which will need us to better understand how it works to perform
@@ -1896,7 +1896,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
 	u32 idx_value;
 
 	track = (struct evergreen_cs_track *)p->track;
-	ib = p->ib->ptr;
+	ib = p->ib.ptr;
 	idx = pkt->idx + 1;
 	idx_value = radeon_get_ib_value(p, idx);
 
@@ -2610,8 +2610,8 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
 		}
 	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
 #if 0
-	for (r = 0; r < p->ib->length_dw; r++) {
-		printk(KERN_INFO "%05d 0x%08X\n", r, p->ib->ptr[r]);
+	for (r = 0; r < p->ib.length_dw; r++) {
+		printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
 		mdelay(1);
 	}
 #endif
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
new file mode 100644
index 000000000000..a51f880985f8
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -0,0 +1,216 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Christian König.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ *          Rafał Miłecki
+ */
+#include "drmP.h"
+#include "radeon_drm.h"
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "evergreend.h"
+#include "atom.h"
+
+/*
+ * update the N and CTS parameters for a given pixel clock rate
+ */
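+/* XXX: per the HDMI spec the sink regenerates the audio clock as
+ * 128*fs = f_TMDS * N / CTS; a 74.25 MHz mode at 48 kHz, for example,
+ * uses N = 6144 and CTS = 74250000 * 6144 / (128 * 48000) = 74250.
+ */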
+static void evergreen_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_hdmi_acr acr = r600_hdmi_acr(clock);
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	uint32_t offset = dig->afmt->offset;
+
+	WREG32(HDMI_ACR_32_0 + offset, HDMI_ACR_CTS_32(acr.cts_32khz));
+	WREG32(HDMI_ACR_32_1 + offset, acr.n_32khz);
+
+	WREG32(HDMI_ACR_44_0 + offset, HDMI_ACR_CTS_44(acr.cts_44_1khz));
+	WREG32(HDMI_ACR_44_1 + offset, acr.n_44_1khz);
+
+	WREG32(HDMI_ACR_48_0 + offset, HDMI_ACR_CTS_48(acr.cts_48khz));
+	WREG32(HDMI_ACR_48_1 + offset, acr.n_48khz);
+}
+
+/*
+ * calculate the crc for a given info frame
+ */
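+/* XXX: the "crc" is really the CEA-861 infoframe checksum: the byte is
+ * chosen so that type + version + length + payload + checksum == 0
+ * (mod 256); e.g. if the other bytes sum to 0x73 (mod 256), frame[0]
+ * becomes 0x100 - 0x73 = 0x8d.
+ */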
+static void evergreen_hdmi_infoframe_checksum(uint8_t packetType,
+					      uint8_t versionNumber,
+					      uint8_t length,
+					      uint8_t *frame)
+{
+	int i;
+	frame[0] = packetType + versionNumber + length;
+	for (i = 1; i <= length; i++)
+		frame[0] += frame[i];
+	frame[0] = 0x100 - frame[0];
+}
+
+/*
+ * build a HDMI Video Info Frame
+ */
+static void evergreen_hdmi_videoinfoframe(
+	struct drm_encoder *encoder,
+	uint8_t color_format,
+	int active_information_present,
+	uint8_t active_format_aspect_ratio,
+	uint8_t scan_information,
+	uint8_t colorimetry,
+	uint8_t ex_colorimetry,
+	uint8_t quantization,
+	int ITC,
+	uint8_t picture_aspect_ratio,
+	uint8_t video_format_identification,
+	uint8_t pixel_repetition,
+	uint8_t non_uniform_picture_scaling,
+	uint8_t bar_info_data_valid,
+	uint16_t top_bar,
+	uint16_t bottom_bar,
+	uint16_t left_bar,
+	uint16_t right_bar
+)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	uint32_t offset = dig->afmt->offset;
+
+	uint8_t frame[14];
+
+	frame[0x0] = 0;
+	frame[0x1] =
+		(scan_information & 0x3) |
+		((bar_info_data_valid & 0x3) << 2) |
+		((active_information_present & 0x1) << 4) |
+		((color_format & 0x3) << 5);
+	frame[0x2] =
+		(active_format_aspect_ratio & 0xF) |
+		((picture_aspect_ratio & 0x3) << 4) |
+		((colorimetry & 0x3) << 6);
+	frame[0x3] =
+		(non_uniform_picture_scaling & 0x3) |
+		((quantization & 0x3) << 2) |
+		((ex_colorimetry & 0x7) << 4) |
+		((ITC & 0x1) << 7);
+	frame[0x4] = (video_format_identification & 0x7F);
+	frame[0x5] = (pixel_repetition & 0xF);
+	frame[0x6] = (top_bar & 0xFF);
+	frame[0x7] = (top_bar >> 8);
+	frame[0x8] = (bottom_bar & 0xFF);
+	frame[0x9] = (bottom_bar >> 8);
+	frame[0xA] = (left_bar & 0xFF);
+	frame[0xB] = (left_bar >> 8);
+	frame[0xC] = (right_bar & 0xFF);
+	frame[0xD] = (right_bar >> 8);
+
+	evergreen_hdmi_infoframe_checksum(0x82, 0x02, 0x0D, frame);
+	/* Our header values (type, version, length) should be alright; Intel
+	 * uses the same ones. The checksum function also seems to be OK, as
+	 * it works fine for the audio infoframe. However, the calculated
+	 * value is always lower by 2 than what fglrx produces, which breaks
+	 * the display entirely on TVs that strictly check the checksum.
+	 * Hack it manually here to work around the issue. */
+	frame[0x0] += 2;
+
+	WREG32(AFMT_AVI_INFO0 + offset,
+	       frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
+	WREG32(AFMT_AVI_INFO1 + offset,
+	       frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
+	WREG32(AFMT_AVI_INFO2 + offset,
+	       frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
+	WREG32(AFMT_AVI_INFO3 + offset,
+	       frame[0xC] | (frame[0xD] << 8));
+}
+
+/*
+ * update the info frames with the data from the current display mode
+ */
+void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	uint32_t offset;
+
+	if (ASIC_IS_DCE5(rdev))
+		return;
+
+	/* Silent, r600_hdmi_enable will raise WARN for us */
+	if (!dig->afmt->enabled)
+		return;
+	offset = dig->afmt->offset;
+
+	r600_audio_set_clock(encoder, mode->clock);
+
+	WREG32(HDMI_VBI_PACKET_CONTROL + offset,
+	       HDMI_NULL_SEND); /* send null packets when required */
+
+	WREG32(AFMT_AUDIO_CRC_CONTROL + offset, 0x1000);
+
+	WREG32(HDMI_AUDIO_PACKET_CONTROL + offset,
+	       HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */
+	       HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be sufficient for all audio modes and small enough for all hblanks */
+
+	WREG32(AFMT_AUDIO_PACKET_CONTROL + offset,
+	       AFMT_AUDIO_SAMPLE_SEND | /* send audio packets */
+	       AFMT_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
+
+	WREG32(HDMI_ACR_PACKET_CONTROL + offset,
+	       HDMI_ACR_AUTO_SEND | /* allow hw to send ACR packets when required */
+	       HDMI_ACR_SOURCE); /* select SW CTS value */
+
+	WREG32(HDMI_VBI_PACKET_CONTROL + offset,
+	       HDMI_NULL_SEND | /* send null packets when required */
+	       HDMI_GC_SEND | /* send general control packets */
+	       HDMI_GC_CONT); /* send general control packets every frame */
+
+	WREG32(HDMI_INFOFRAME_CONTROL0 + offset,
+	       HDMI_AVI_INFO_SEND | /* enable AVI info frames */
+	       HDMI_AVI_INFO_CONT | /* send AVI info frames every frame/field */
+	       HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
+	       HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */
+
+	WREG32(AFMT_INFOFRAME_CONTROL0 + offset,
+	       AFMT_AUDIO_INFO_UPDATE); /* required for audio info values to be updated */
+
+	WREG32(HDMI_INFOFRAME_CONTROL1 + offset,
+	       HDMI_AVI_INFO_LINE(2) | /* anything other than 0 */
+	       HDMI_AUDIO_INFO_LINE(2)); /* anything other than 0 */
+
+	WREG32(HDMI_GC + offset, 0); /* unset HDMI_GC_AVMUTE */
+
+	evergreen_hdmi_videoinfoframe(encoder, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+				      0, 0, 0, 0, 0, 0);
+
+	evergreen_hdmi_update_ACR(encoder, mode->clock);
+
+	/* it's unknown what these bits do exactly, but they're indeed quite useful for debugging */
+	WREG32(AFMT_RAMP_CONTROL0 + offset, 0x00FFFFFF);
+	WREG32(AFMT_RAMP_CONTROL1 + offset, 0x007FFFFF);
+	WREG32(AFMT_RAMP_CONTROL2 + offset, 0x00000001);
+	WREG32(AFMT_RAMP_CONTROL3 + offset, 0x00000001);
+}
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index 96c10b3991aa..8beac1065025 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -232,6 +232,4 @@
 /* HDMI blocks at 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30 */
 #define EVERGREEN_HDMI_BASE			0x7030
 
-#define EVERGREEN_HDMI_CONFIG_OFFSET		0xf0
-
 #endif
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index b4eefc355f16..79130bfd1d6f 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -112,6 +112,226 @@
 #define	CP_SEM_INCOMPLETE_TIMER_CNTL		0x85C8
 #define	CP_DEBUG				0xC1FC
 
+/* Audio clocks */
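+/* XXX: a guess from how the driver programs these rather than from
+ * documentation: the DTO seems to synthesize the audio reference clock
+ * as a (phase / module) fraction of the source selected below. */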
+#define DCCG_AUDIO_DTO_SOURCE			0x05ac
+# define DCCG_AUDIO_DTO0_SOURCE_SEL(x)		((x) << 0) /* crtc0 - crtc5 */
+# define DCCG_AUDIO_DTO_SEL			(1 << 4) /* 0=dto0 1=dto1 */
+
+#define DCCG_AUDIO_DTO0_PHASE			0x05b0
+#define DCCG_AUDIO_DTO0_MODULE			0x05b4
+#define DCCG_AUDIO_DTO0_LOAD			0x05b8
+#define DCCG_AUDIO_DTO0_CNTL			0x05bc
+
+#define DCCG_AUDIO_DTO1_PHASE			0x05c0
+#define DCCG_AUDIO_DTO1_MODULE			0x05c4
+#define DCCG_AUDIO_DTO1_LOAD			0x05c8
+#define DCCG_AUDIO_DTO1_CNTL			0x05cc
+
+/* DCE 4.0 AFMT */
+#define HDMI_CONTROL				0x7030
+# define HDMI_KEEPOUT_MODE			(1 << 0)
+# define HDMI_PACKET_GEN_VERSION		(1 << 4) /* 0 = r6xx compat */
+# define HDMI_ERROR_ACK				(1 << 8)
+# define HDMI_ERROR_MASK			(1 << 9)
+# define HDMI_DEEP_COLOR_ENABLE			(1 << 24)
+# define HDMI_DEEP_COLOR_DEPTH(x)		(((x) & 3) << 28)
+# define HDMI_24BIT_DEEP_COLOR			0
+# define HDMI_30BIT_DEEP_COLOR			1
+# define HDMI_36BIT_DEEP_COLOR			2
+#define HDMI_STATUS				0x7034
+# define HDMI_ACTIVE_AVMUTE			(1 << 0)
+# define HDMI_AUDIO_PACKET_ERROR		(1 << 16)
+# define HDMI_VBI_PACKET_ERROR			(1 << 20)
+#define HDMI_AUDIO_PACKET_CONTROL		0x7038
+# define HDMI_AUDIO_DELAY_EN(x)			(((x) & 3) << 4)
+# define HDMI_AUDIO_PACKETS_PER_LINE(x)		(((x) & 0x1f) << 16)
+#define HDMI_ACR_PACKET_CONTROL			0x703c
+# define HDMI_ACR_SEND				(1 << 0)
+# define HDMI_ACR_CONT				(1 << 1)
+# define HDMI_ACR_SELECT(x)			(((x) & 3) << 4)
+# define HDMI_ACR_HW				0
+# define HDMI_ACR_32				1
+# define HDMI_ACR_44				2
+# define HDMI_ACR_48				3
+# define HDMI_ACR_SOURCE			(1 << 8) /* 0 - hw; 1 - cts value */
+# define HDMI_ACR_AUTO_SEND			(1 << 12)
+# define HDMI_ACR_N_MULTIPLE(x)			(((x) & 7) << 16)
+# define HDMI_ACR_X1				1
+# define HDMI_ACR_X2				2
+# define HDMI_ACR_X4				4
+# define HDMI_ACR_AUDIO_PRIORITY		(1 << 31)
+#define HDMI_VBI_PACKET_CONTROL			0x7040
+# define HDMI_NULL_SEND				(1 << 0)
+# define HDMI_GC_SEND				(1 << 4)
+# define HDMI_GC_CONT				(1 << 5) /* 0 - once; 1 - every frame */
+#define HDMI_INFOFRAME_CONTROL0			0x7044
+# define HDMI_AVI_INFO_SEND			(1 << 0)
+# define HDMI_AVI_INFO_CONT			(1 << 1)
+# define HDMI_AUDIO_INFO_SEND			(1 << 4)
+# define HDMI_AUDIO_INFO_CONT			(1 << 5)
+# define HDMI_MPEG_INFO_SEND			(1 << 8)
+# define HDMI_MPEG_INFO_CONT			(1 << 9)
+#define HDMI_INFOFRAME_CONTROL1			0x7048
+# define HDMI_AVI_INFO_LINE(x)			(((x) & 0x3f) << 0)
+# define HDMI_AUDIO_INFO_LINE(x)		(((x) & 0x3f) << 8)
+# define HDMI_MPEG_INFO_LINE(x)			(((x) & 0x3f) << 16)
+#define HDMI_GENERIC_PACKET_CONTROL		0x704c
+# define HDMI_GENERIC0_SEND			(1 << 0)
+# define HDMI_GENERIC0_CONT			(1 << 1)
+# define HDMI_GENERIC1_SEND			(1 << 4)
+# define HDMI_GENERIC1_CONT			(1 << 5)
+# define HDMI_GENERIC0_LINE(x)			(((x) & 0x3f) << 16)
+# define HDMI_GENERIC1_LINE(x)			(((x) & 0x3f) << 24)
+#define HDMI_GC					0x7058
+# define HDMI_GC_AVMUTE				(1 << 0)
+# define HDMI_GC_AVMUTE_CONT			(1 << 2)
+#define AFMT_AUDIO_PACKET_CONTROL2		0x705c
+# define AFMT_AUDIO_LAYOUT_OVRD			(1 << 0)
+# define AFMT_AUDIO_LAYOUT_SELECT		(1 << 1)
+# define AFMT_60958_CS_SOURCE			(1 << 4)
+# define AFMT_AUDIO_CHANNEL_ENABLE(x)		(((x) & 0xff) << 8)
+# define AFMT_DP_AUDIO_STREAM_ID(x)		(((x) & 0xff) << 16)
+#define AFMT_AVI_INFO0				0x7084
+# define AFMT_AVI_INFO_CHECKSUM(x)		(((x) & 0xff) << 0)
+# define AFMT_AVI_INFO_S(x)			(((x) & 3) << 8)
+# define AFMT_AVI_INFO_B(x)			(((x) & 3) << 10)
+# define AFMT_AVI_INFO_A(x)			(((x) & 1) << 12)
+# define AFMT_AVI_INFO_Y(x)			(((x) & 3) << 13)
+# define AFMT_AVI_INFO_Y_RGB			0
+# define AFMT_AVI_INFO_Y_YCBCR422		1
+# define AFMT_AVI_INFO_Y_YCBCR444		2
+# define AFMT_AVI_INFO_Y_A_B_S(x)		(((x) & 0xff) << 8)
+# define AFMT_AVI_INFO_R(x)			(((x) & 0xf) << 16)
+# define AFMT_AVI_INFO_M(x)			(((x) & 0x3) << 20)
+# define AFMT_AVI_INFO_C(x)			(((x) & 0x3) << 22)
+# define AFMT_AVI_INFO_C_M_R(x)			(((x) & 0xff) << 16)
+# define AFMT_AVI_INFO_SC(x)			(((x) & 0x3) << 24)
+# define AFMT_AVI_INFO_Q(x)			(((x) & 0x3) << 26)
+# define AFMT_AVI_INFO_EC(x)			(((x) & 0x3) << 28)
+# define AFMT_AVI_INFO_ITC(x)			(((x) & 0x1) << 31)
+# define AFMT_AVI_INFO_ITC_EC_Q_SC(x)		(((x) & 0xff) << 24)
+#define AFMT_AVI_INFO1				0x7088
+# define AFMT_AVI_INFO_VIC(x)			(((x) & 0x7f) << 0) /* don't use avi infoframe v1 */
+# define AFMT_AVI_INFO_PR(x)			(((x) & 0xf) << 8) /* don't use avi infoframe v1 */
+# define AFMT_AVI_INFO_CN(x)			(((x) & 0x3) << 12)
+# define AFMT_AVI_INFO_YQ(x)			(((x) & 0x3) << 14)
+# define AFMT_AVI_INFO_TOP(x)			(((x) & 0xffff) << 16)
+#define AFMT_AVI_INFO2				0x708c
+# define AFMT_AVI_INFO_BOTTOM(x)		(((x) & 0xffff) << 0)
+# define AFMT_AVI_INFO_LEFT(x)			(((x) & 0xffff) << 16)
+#define AFMT_AVI_INFO3				0x7090
+# define AFMT_AVI_INFO_RIGHT(x)			(((x) & 0xffff) << 0)
+# define AFMT_AVI_INFO_VERSION(x)		(((x) & 3) << 24)
+#define AFMT_MPEG_INFO0				0x7094
+# define AFMT_MPEG_INFO_CHECKSUM(x)		(((x) & 0xff) << 0)
+# define AFMT_MPEG_INFO_MB0(x)			(((x) & 0xff) << 8)
+# define AFMT_MPEG_INFO_MB1(x)			(((x) & 0xff) << 16)
+# define AFMT_MPEG_INFO_MB2(x)			(((x) & 0xff) << 24)
+#define AFMT_MPEG_INFO1				0x7098
+# define AFMT_MPEG_INFO_MB3(x)			(((x) & 0xff) << 0)
+# define AFMT_MPEG_INFO_MF(x)			(((x) & 3) << 8)
+# define AFMT_MPEG_INFO_FR(x)			(((x) & 1) << 12)
+#define AFMT_GENERIC0_HDR			0x709c
+#define AFMT_GENERIC0_0				0x70a0
+#define AFMT_GENERIC0_1				0x70a4
+#define AFMT_GENERIC0_2				0x70a8
+#define AFMT_GENERIC0_3				0x70ac
+#define AFMT_GENERIC0_4				0x70b0
+#define AFMT_GENERIC0_5				0x70b4
+#define AFMT_GENERIC0_6				0x70b8
+#define AFMT_GENERIC1_HDR			0x70bc
+#define AFMT_GENERIC1_0				0x70c0
+#define AFMT_GENERIC1_1				0x70c4
+#define AFMT_GENERIC1_2				0x70c8
+#define AFMT_GENERIC1_3				0x70cc
+#define AFMT_GENERIC1_4				0x70d0
+#define AFMT_GENERIC1_5				0x70d4
+#define AFMT_GENERIC1_6				0x70d8
+#define HDMI_ACR_32_0				0x70dc
+# define HDMI_ACR_CTS_32(x)			(((x) & 0xfffff) << 12)
+#define HDMI_ACR_32_1				0x70e0
+# define HDMI_ACR_N_32(x)			(((x) & 0xfffff) << 0)
+#define HDMI_ACR_44_0				0x70e4
+# define HDMI_ACR_CTS_44(x)			(((x) & 0xfffff) << 12)
+#define HDMI_ACR_44_1				0x70e8
+# define HDMI_ACR_N_44(x)			(((x) & 0xfffff) << 0)
+#define HDMI_ACR_48_0				0x70ec
+# define HDMI_ACR_CTS_48(x)			(((x) & 0xfffff) << 12)
+#define HDMI_ACR_48_1				0x70f0
+# define HDMI_ACR_N_48(x)			(((x) & 0xfffff) << 0)
+#define HDMI_ACR_STATUS_0			0x70f4
+#define HDMI_ACR_STATUS_1			0x70f8
+#define AFMT_AUDIO_INFO0			0x70fc
+# define AFMT_AUDIO_INFO_CHECKSUM(x)		(((x) & 0xff) << 0)
+# define AFMT_AUDIO_INFO_CC(x)			(((x) & 7) << 8)
+# define AFMT_AUDIO_INFO_CT(x)			(((x) & 0xf) << 11)
+# define AFMT_AUDIO_INFO_CHECKSUM_OFFSET(x)	(((x) & 0xff) << 16)
+# define AFMT_AUDIO_INFO_CXT(x)			(((x) & 0x1f) << 24)
+#define AFMT_AUDIO_INFO1			0x7100
+# define AFMT_AUDIO_INFO_CA(x)			(((x) & 0xff) << 0)
+# define AFMT_AUDIO_INFO_LSV(x)			(((x) & 0xf) << 11)
+# define AFMT_AUDIO_INFO_DM_INH(x)		(((x) & 1) << 15)
+# define AFMT_AUDIO_INFO_DM_INH_LSV(x)		(((x) & 0xff) << 8)
+# define AFMT_AUDIO_INFO_LFEBPL(x)		(((x) & 3) << 16)
+#define AFMT_60958_0				0x7104
+# define AFMT_60958_CS_A(x)			(((x) & 1) << 0)
+# define AFMT_60958_CS_B(x)			(((x) & 1) << 1)
+# define AFMT_60958_CS_C(x)			(((x) & 1) << 2)
+# define AFMT_60958_CS_D(x)			(((x) & 3) << 3)
+# define AFMT_60958_CS_MODE(x)			(((x) & 3) << 6)
+# define AFMT_60958_CS_CATEGORY_CODE(x)		(((x) & 0xff) << 8)
+# define AFMT_60958_CS_SOURCE_NUMBER(x)		(((x) & 0xf) << 16)
+# define AFMT_60958_CS_CHANNEL_NUMBER_L(x)	(((x) & 0xf) << 20)
+# define AFMT_60958_CS_SAMPLING_FREQUENCY(x)	(((x) & 0xf) << 24)
+# define AFMT_60958_CS_CLOCK_ACCURACY(x)	(((x) & 3) << 28)
+#define AFMT_60958_1				0x7108
+# define AFMT_60958_CS_WORD_LENGTH(x)		(((x) & 0xf) << 0)
+# define AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY(x)	(((x) & 0xf) << 4)
+# define AFMT_60958_CS_VALID_L(x)		(((x) & 1) << 16)
+# define AFMT_60958_CS_VALID_R(x)		(((x) & 1) << 18)
+# define AFMT_60958_CS_CHANNEL_NUMBER_R(x)	(((x) & 0xf) << 20)
+#define AFMT_AUDIO_CRC_CONTROL			0x710c
+# define AFMT_AUDIO_CRC_EN			(1 << 0)
+#define AFMT_RAMP_CONTROL0			0x7110
+# define AFMT_RAMP_MAX_COUNT(x)			(((x) & 0xffffff) << 0)
+# define AFMT_RAMP_DATA_SIGN			(1 << 31)
+#define AFMT_RAMP_CONTROL1			0x7114
+# define AFMT_RAMP_MIN_COUNT(x)			(((x) & 0xffffff) << 0)
+# define AFMT_AUDIO_TEST_CH_DISABLE(x)		(((x) & 0xff) << 24)
+#define AFMT_RAMP_CONTROL2			0x7118
+# define AFMT_RAMP_INC_COUNT(x)			(((x) & 0xffffff) << 0)
+#define AFMT_RAMP_CONTROL3			0x711c
+# define AFMT_RAMP_DEC_COUNT(x)			(((x) & 0xffffff) << 0)
+#define AFMT_60958_2				0x7120
+# define AFMT_60958_CS_CHANNEL_NUMBER_2(x)	(((x) & 0xf) << 0)
+# define AFMT_60958_CS_CHANNEL_NUMBER_3(x)	(((x) & 0xf) << 4)
+# define AFMT_60958_CS_CHANNEL_NUMBER_4(x)	(((x) & 0xf) << 8)
+# define AFMT_60958_CS_CHANNEL_NUMBER_5(x)	(((x) & 0xf) << 12)
+# define AFMT_60958_CS_CHANNEL_NUMBER_6(x)	(((x) & 0xf) << 16)
+# define AFMT_60958_CS_CHANNEL_NUMBER_7(x)	(((x) & 0xf) << 20)
+#define AFMT_STATUS				0x7128
+# define AFMT_AUDIO_ENABLE			(1 << 4)
+# define AFMT_AUDIO_HBR_ENABLE			(1 << 8)
+# define AFMT_AZ_FORMAT_WTRIG			(1 << 28)
+# define AFMT_AZ_FORMAT_WTRIG_INT		(1 << 29)
+# define AFMT_AZ_AUDIO_ENABLE_CHG		(1 << 30)
+#define AFMT_AUDIO_PACKET_CONTROL		0x712c
+# define AFMT_AUDIO_SAMPLE_SEND			(1 << 0)
+# define AFMT_RESET_FIFO_WHEN_AUDIO_DIS		(1 << 11) /* set to 1 */
+# define AFMT_AUDIO_TEST_EN			(1 << 12)
+# define AFMT_AUDIO_CHANNEL_SWAP		(1 << 24)
+# define AFMT_60958_CS_UPDATE			(1 << 26)
+# define AFMT_AZ_AUDIO_ENABLE_CHG_MASK		(1 << 27)
+# define AFMT_AZ_FORMAT_WTRIG_MASK		(1 << 28)
+# define AFMT_AZ_FORMAT_WTRIG_ACK		(1 << 29)
+# define AFMT_AZ_AUDIO_ENABLE_CHG_ACK		(1 << 30)
+#define AFMT_VBI_PACKET_CONTROL			0x7130
+# define AFMT_GENERIC0_UPDATE			(1 << 2)
+#define AFMT_INFOFRAME_CONTROL0			0x7134
+# define AFMT_AUDIO_INFO_SOURCE			(1 << 6) /* 0 - sound block; 1 - afmt regs */
+# define AFMT_AUDIO_INFO_UPDATE			(1 << 7)
+# define AFMT_MPEG_INFO_UPDATE			(1 << 10)
+#define AFMT_GENERIC0_7				0x7138
 
 #define GC_USER_SHADER_PIPE_CONFIG		0x8954
 #define		INACTIVE_QD_PIPES(x)		((x) << 8)
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index a48ca53fcd6a..b01c2dd627b0 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1392,35 +1392,6 @@ int cayman_cp_resume(struct radeon_device *rdev)
 	return 0;
 }
 
-bool cayman_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
-{
-	u32 srbm_status;
-	u32 grbm_status;
-	u32 grbm_status_se0, grbm_status_se1;
-	struct r100_gpu_lockup *lockup = &rdev->config.cayman.lockup;
-	int r;
-
-	srbm_status = RREG32(SRBM_STATUS);
-	grbm_status = RREG32(GRBM_STATUS);
-	grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
-	grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
-	if (!(grbm_status & GUI_ACTIVE)) {
-		r100_gpu_lockup_update(lockup, ring);
-		return false;
-	}
-	/* force CP activities */
-	r = radeon_ring_lock(rdev, ring, 2);
-	if (!r) {
-		/* PACKET2 NOP */
-		radeon_ring_write(ring, 0x80000000);
-		radeon_ring_write(ring, 0x80000000);
-		radeon_ring_unlock_commit(rdev, ring);
-	}
-	/* XXX deal with CP0,1,2 */
-	ring->rptr = RREG32(ring->rptr_reg);
-	return r100_gpu_cp_is_lockup(rdev, lockup, ring);
-}
-
 static int cayman_gpu_soft_reset(struct radeon_device *rdev)
 {
 	struct evergreen_mc_save save;
@@ -1601,12 +1572,9 @@ static int cayman_startup(struct radeon_device *rdev)
 	if (r)
 		return r;
 
-	r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
-	if (r) {
-		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
-		rdev->accel_working = false;
-		return r;
-	}
+	r = radeon_ib_ring_tests(rdev);
+	if (r)
+		return r;
 
 	r = radeon_vm_manager_start(rdev);
 	if (r)
@@ -1661,10 +1629,6 @@ int cayman_init(struct radeon_device *rdev)
 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	int r;
 
-	/* This don't do much */
-	r = radeon_gem_init(rdev);
-	if (r)
-		return r;
 	/* Read BIOS */
 	if (!radeon_get_bios(rdev)) {
 		if (ASIC_IS_AVIVO(rdev))
@@ -1776,7 +1740,6 @@ void cayman_fini(struct radeon_device *rdev)
 	cayman_pcie_gart_fini(rdev);
 	r600_vram_scratch_fini(rdev);
 	radeon_gem_fini(rdev);
-	radeon_semaphore_driver_fini(rdev);
 	radeon_fence_driver_fini(rdev);
 	radeon_bo_fini(rdev);
 	radeon_atombios_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index fe33d35dae8c..fb44e7e49083 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -139,9 +139,9 @@ int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
139 } 139 }
140 140
141 tmp |= tile_flags; 141 tmp |= tile_flags;
142 p->ib->ptr[idx] = (value & 0x3fc00000) | tmp; 142 p->ib.ptr[idx] = (value & 0x3fc00000) | tmp;
143 } else 143 } else
144 p->ib->ptr[idx] = (value & 0xffc00000) | tmp; 144 p->ib.ptr[idx] = (value & 0xffc00000) | tmp;
145 return 0; 145 return 0;
146} 146}
147 147
@@ -156,7 +156,7 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
156 volatile uint32_t *ib; 156 volatile uint32_t *ib;
157 u32 idx_value; 157 u32 idx_value;
158 158
159 ib = p->ib->ptr; 159 ib = p->ib.ptr;
160 track = (struct r100_cs_track *)p->track; 160 track = (struct r100_cs_track *)p->track;
161 c = radeon_get_ib_value(p, idx++) & 0x1F; 161 c = radeon_get_ib_value(p, idx++) & 0x1F;
162 if (c > 16) { 162 if (c > 16) {
@@ -660,7 +660,7 @@ int r100_pci_gart_enable(struct radeon_device *rdev)
660 tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN; 660 tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
661 WREG32(RADEON_AIC_CNTL, tmp); 661 WREG32(RADEON_AIC_CNTL, tmp);
662 r100_pci_gart_tlb_flush(rdev); 662 r100_pci_gart_tlb_flush(rdev);
663 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", 663 DRM_INFO("PCI GART of %uM enabled (table at 0x%016llX).\n",
664 (unsigned)(rdev->mc.gtt_size >> 20), 664 (unsigned)(rdev->mc.gtt_size >> 20),
665 (unsigned long long)rdev->gart.table_addr); 665 (unsigned long long)rdev->gart.table_addr);
666 rdev->gart.ready = true; 666 rdev->gart.ready = true;
@@ -1180,6 +1180,10 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
1180 WREG32(RADEON_CP_RB_WPTR_DELAY, 0); 1180 WREG32(RADEON_CP_RB_WPTR_DELAY, 0);
1181 WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D); 1181 WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D);
1182 WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM); 1182 WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);
1183
1184 /* at this point everything should be setup correctly to enable master */
1185 pci_set_master(rdev->pdev);
1186
1183 radeon_ring_start(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); 1187 radeon_ring_start(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
1184 r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring); 1188 r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
1185 if (r) { 1189 if (r) {
@@ -1271,7 +1275,7 @@ void r100_cs_dump_packet(struct radeon_cs_parser *p,
1271 unsigned i; 1275 unsigned i;
1272 unsigned idx; 1276 unsigned idx;
1273 1277
1274 ib = p->ib->ptr; 1278 ib = p->ib.ptr;
1275 idx = pkt->idx; 1279 idx = pkt->idx;
1276 for (i = 0; i <= (pkt->count + 1); i++, idx++) { 1280 for (i = 0; i <= (pkt->count + 1); i++, idx++) {
1277 DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]); 1281 DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
@@ -1350,7 +1354,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
1350 uint32_t header, h_idx, reg; 1354 uint32_t header, h_idx, reg;
1351 volatile uint32_t *ib; 1355 volatile uint32_t *ib;
1352 1356
1353 ib = p->ib->ptr; 1357 ib = p->ib.ptr;
1354 1358
1355 /* parse the wait until */ 1359 /* parse the wait until */
1356 r = r100_cs_packet_parse(p, &waitreloc, p->idx); 1360 r = r100_cs_packet_parse(p, &waitreloc, p->idx);
@@ -1529,7 +1533,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1529 u32 tile_flags = 0; 1533 u32 tile_flags = 0;
1530 u32 idx_value; 1534 u32 idx_value;
1531 1535
1532 ib = p->ib->ptr; 1536 ib = p->ib.ptr;
1533 track = (struct r100_cs_track *)p->track; 1537 track = (struct r100_cs_track *)p->track;
1534 1538
1535 idx_value = radeon_get_ib_value(p, idx); 1539 idx_value = radeon_get_ib_value(p, idx);
@@ -1885,7 +1889,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
1885 volatile uint32_t *ib; 1889 volatile uint32_t *ib;
1886 int r; 1890 int r;
1887 1891
1888 ib = p->ib->ptr; 1892 ib = p->ib.ptr;
1889 idx = pkt->idx + 1; 1893 idx = pkt->idx + 1;
1890 track = (struct r100_cs_track *)p->track; 1894 track = (struct r100_cs_track *)p->track;
1891 switch (pkt->opcode) { 1895 switch (pkt->opcode) {
@@ -2004,6 +2008,8 @@ int r100_cs_parse(struct radeon_cs_parser *p)
2004 int r; 2008 int r;
2005 2009
2006 track = kzalloc(sizeof(*track), GFP_KERNEL); 2010 track = kzalloc(sizeof(*track), GFP_KERNEL);
2011 if (!track)
2012 return -ENOMEM;
2007 r100_cs_track_clear(p->rdev, track); 2013 r100_cs_track_clear(p->rdev, track);
2008 p->track = track; 2014 p->track = track;
2009 do { 2015 do {
@@ -2155,79 +2161,18 @@ int r100_mc_wait_for_idle(struct radeon_device *rdev)
2155 return -1; 2161 return -1;
2156} 2162}
2157 2163
2158void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_ring *ring)
2159{
2160 lockup->last_cp_rptr = ring->rptr;
2161 lockup->last_jiffies = jiffies;
2162}
2163
2164/**
2165 * r100_gpu_cp_is_lockup() - check whether the CP is locked up by recording information
2166 * @rdev: radeon device structure
2167 * @lockup: r100_gpu_lockup structure holding CP lockup tracking information
2168 * @cp: radeon_cp structure holding CP information
2169 *
2170 * We don't need to initialize the lockup tracking information, as either the
2171 * CP rptr will move to a different value or jiffies will wrap around, which
2172 * will force initialization of the lockup tracking information.
2173 *
2174 * A possible false positive is a call arriving after a long while with
2175 * last_cp_rptr == the current CP rptr; it is unlikely, but it might happen.
2176 * To avoid this, if the elapsed time since the last call is bigger than 2
2177 * seconds we return false and update the tracking information. Because of
2178 * this the caller must call r100_gpu_cp_is_lockup several times in less than
2179 * 2 seconds for a lockup to be reported; the fencing code should be cautious
2180 * about that.
2181 *
2182 * The caller should write to the ring to force the CP to do something, so we
2183 * don't get a false positive when the CP simply has nothing to do.
2184 **/
2185bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_ring *ring)
2186{
2187 unsigned long cjiffies, elapsed;
2188
2189 cjiffies = jiffies;
2190 if (!time_after(cjiffies, lockup->last_jiffies)) {
2191 /* likely a wrap around */
2192 lockup->last_cp_rptr = ring->rptr;
2193 lockup->last_jiffies = jiffies;
2194 return false;
2195 }
2196 if (ring->rptr != lockup->last_cp_rptr) {
2197 /* CP is still working no lockup */
2198 lockup->last_cp_rptr = ring->rptr;
2199 lockup->last_jiffies = jiffies;
2200 return false;
2201 }
2202 elapsed = jiffies_to_msecs(cjiffies - lockup->last_jiffies);
2203 if (elapsed >= 10000) {
2204 dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
2205 return true;
2206 }
2207 /* give a chance to the GPU ... */
2208 return false;
2209}
2210
2211bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) 2164bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
2212{ 2165{
2213 u32 rbbm_status; 2166 u32 rbbm_status;
2214 int r;
2215 2167
2216 rbbm_status = RREG32(R_000E40_RBBM_STATUS); 2168 rbbm_status = RREG32(R_000E40_RBBM_STATUS);
2217 if (!G_000E40_GUI_ACTIVE(rbbm_status)) { 2169 if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
2218 r100_gpu_lockup_update(&rdev->config.r100.lockup, ring); 2170 radeon_ring_lockup_update(ring);
2219 return false; 2171 return false;
2220 } 2172 }
2221 /* force CP activities */ 2173 /* force CP activities */
2222 r = radeon_ring_lock(rdev, ring, 2); 2174 radeon_ring_force_activity(rdev, ring);
2223 if (!r) { 2175 return radeon_ring_test_lockup(rdev, ring);
2224 /* PACKET2 NOP */
2225 radeon_ring_write(ring, 0x80000000);
2226 radeon_ring_write(ring, 0x80000000);
2227 radeon_ring_unlock_commit(rdev, ring);
2228 }
2229 ring->rptr = RREG32(ring->rptr_reg);
2230 return r100_gpu_cp_is_lockup(rdev, &rdev->config.r100.lockup, ring);
2231} 2176}
2232 2177
2233void r100_bm_disable(struct radeon_device *rdev) 2178void r100_bm_disable(struct radeon_device *rdev)
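
Editor's note: the hunk above is the pattern for the whole series: per-ASIC lockup state (r100_gpu_lockup and friends) moves into the ring itself, and every *_gpu_is_lockup() shrinks to calls into shared ring-level helpers. A minimal sketch of what those helpers plausibly look like, assuming struct radeon_ring gained last_rptr/last_activity fields (the real implementations live in radeon_ring.c; field names and message wording here are illustrative):

	void radeon_ring_lockup_update(struct radeon_ring *ring)
	{
		/* remember where the CP was and when we looked */
		ring->last_rptr = ring->rptr;
		ring->last_activity = jiffies;
	}

	/* radeon_ring_force_activity() presumably writes a couple of NOP
	 * packets when rptr == wptr, so an idle-but-healthy CP still shows
	 * forward progress. */

	bool radeon_ring_test_lockup(struct radeon_device *rdev,
				     struct radeon_ring *ring)
	{
		unsigned long elapsed;

		ring->rptr = RREG32(ring->rptr_reg);
		if (ring->rptr != ring->last_rptr) {
			/* the CP consumed something, so it is not locked up */
			radeon_ring_lockup_update(ring);
			return false;
		}
		elapsed = jiffies_to_msecs(jiffies - ring->last_activity);
		if (elapsed >= 10000) {
			dev_err(rdev->dev, "ring %d stalled for more than %lumsec\n",
				ring->idx, elapsed);
			return true;
		}
		/* same rptr, but not for long enough yet: give the GPU a chance */
		return false;
	}
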
@@ -2296,7 +2241,6 @@ int r100_asic_reset(struct radeon_device *rdev)
2296 if (G_000E40_SE_BUSY(status) || G_000E40_RE_BUSY(status) || 2241 if (G_000E40_SE_BUSY(status) || G_000E40_RE_BUSY(status) ||
2297 G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) { 2242 G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) {
2298 dev_err(rdev->dev, "failed to reset GPU\n"); 2243 dev_err(rdev->dev, "failed to reset GPU\n");
2299 rdev->gpu_lockup = true;
2300 ret = -1; 2244 ret = -1;
2301 } else 2245 } else
2302 dev_info(rdev->dev, "GPU reset succeeded\n"); 2246 dev_info(rdev->dev, "GPU reset succeeded\n");
@@ -3742,7 +3686,7 @@ void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
3742 3686
3743int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) 3687int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3744{ 3688{
3745 struct radeon_ib *ib; 3689 struct radeon_ib ib;
3746 uint32_t scratch; 3690 uint32_t scratch;
3747 uint32_t tmp = 0; 3691 uint32_t tmp = 0;
3748 unsigned i; 3692 unsigned i;
@@ -3758,22 +3702,22 @@ int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3758 if (r) { 3702 if (r) {
3759 return r; 3703 return r;
3760 } 3704 }
3761 ib->ptr[0] = PACKET0(scratch, 0); 3705 ib.ptr[0] = PACKET0(scratch, 0);
3762 ib->ptr[1] = 0xDEADBEEF; 3706 ib.ptr[1] = 0xDEADBEEF;
3763 ib->ptr[2] = PACKET2(0); 3707 ib.ptr[2] = PACKET2(0);
3764 ib->ptr[3] = PACKET2(0); 3708 ib.ptr[3] = PACKET2(0);
3765 ib->ptr[4] = PACKET2(0); 3709 ib.ptr[4] = PACKET2(0);
3766 ib->ptr[5] = PACKET2(0); 3710 ib.ptr[5] = PACKET2(0);
3767 ib->ptr[6] = PACKET2(0); 3711 ib.ptr[6] = PACKET2(0);
3768 ib->ptr[7] = PACKET2(0); 3712 ib.ptr[7] = PACKET2(0);
3769 ib->length_dw = 8; 3713 ib.length_dw = 8;
3770 r = radeon_ib_schedule(rdev, ib); 3714 r = radeon_ib_schedule(rdev, &ib);
3771 if (r) { 3715 if (r) {
3772 radeon_scratch_free(rdev, scratch); 3716 radeon_scratch_free(rdev, scratch);
3773 radeon_ib_free(rdev, &ib); 3717 radeon_ib_free(rdev, &ib);
3774 return r; 3718 return r;
3775 } 3719 }
3776 r = radeon_fence_wait(ib->fence, false); 3720 r = radeon_fence_wait(ib.fence, false);
3777 if (r) { 3721 if (r) {
3778 return r; 3722 return r;
3779 } 3723 }
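
Editor's note: note the type change above: struct radeon_ib is now handled by value instead of through a kmalloc'd pointer. The structure is evidently just a small descriptor (ptr, fence, length_dw, ...) whose backing command buffer is sub-allocated elsewhere, so keeping it in the caller's frame removes an allocation and an indirection. The resulting calling convention, condensed from the hunk above (the byte size passed to radeon_ib_get() and the NOP padding are illustrative):

	struct radeon_ib ib;	/* descriptor on the stack, not kmalloc'd */
	int r;

	r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, 64);
	if (r)
		return r;
	ib.ptr[0] = PACKET0(scratch, 0);	/* write into the mapped buffer */
	ib.ptr[1] = 0xDEADBEEF;
	ib.length_dw = 2;	/* the real test pads with PACKET2(0) NOPs */
	r = radeon_ib_schedule(rdev, &ib);
	if (!r)
		r = radeon_fence_wait(ib.fence, false);
	radeon_ib_free(rdev, &ib);		/* releases the sub-allocation */
	return r;
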
@@ -3965,12 +3909,9 @@ static int r100_startup(struct radeon_device *rdev)
3965 if (r) 3909 if (r)
3966 return r; 3910 return r;
3967 3911
3968 r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); 3912 r = radeon_ib_ring_tests(rdev);
3969 if (r) { 3913 if (r)
3970 dev_err(rdev->dev, "failed testing IB (%d).\n", r);
3971 rdev->accel_working = false;
3972 return r; 3914 return r;
3973 }
3974 3915
3975 return 0; 3916 return 0;
3976} 3917}
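
Editor's note: every *_startup() in this series (r100, r300, r420, r520, r600, ...) loses its open-coded GFX-ring IB test in favour of one radeon_ib_ring_tests() call. The helper is new in this pull; a plausible sketch, assuming it walks every ready ring and only declares acceleration dead when the GFX ring fails:

	int radeon_ib_ring_tests(struct radeon_device *rdev)
	{
		unsigned i;
		int r;

		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			struct radeon_ring *ring = &rdev->ring[i];

			if (!ring->ready)
				continue;	/* ring was never brought up */

			r = radeon_ib_test(rdev, i, ring);
			if (r) {
				ring->ready = false;
				if (i == RADEON_RING_TYPE_GFX_INDEX) {
					/* no GFX ring means no acceleration at all */
					rdev->accel_working = false;
					return r;
				}
				/* a secondary ring failing is survivable */
				dev_err(rdev->dev,
					"IB test failed on ring %d (%d)\n", i, r);
			}
		}
		return 0;
	}
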
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index a59cc474d537..a26144d01207 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -154,7 +154,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
154 u32 tile_flags = 0; 154 u32 tile_flags = 0;
155 u32 idx_value; 155 u32 idx_value;
156 156
157 ib = p->ib->ptr; 157 ib = p->ib.ptr;
158 track = (struct r100_cs_track *)p->track; 158 track = (struct r100_cs_track *)p->track;
159 idx_value = radeon_get_ib_value(p, idx); 159 idx_value = radeon_get_ib_value(p, idx);
160 switch (reg) { 160 switch (reg) {
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index fa14383f9ca0..97722a33e513 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -377,28 +377,6 @@ void r300_gpu_init(struct radeon_device *rdev)
377 rdev->num_gb_pipes, rdev->num_z_pipes); 377 rdev->num_gb_pipes, rdev->num_z_pipes);
378} 378}
379 379
380bool r300_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
381{
382 u32 rbbm_status;
383 int r;
384
385 rbbm_status = RREG32(R_000E40_RBBM_STATUS);
386 if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
387 r100_gpu_lockup_update(&rdev->config.r300.lockup, ring);
388 return false;
389 }
390 /* force CP activities */
391 r = radeon_ring_lock(rdev, ring, 2);
392 if (!r) {
393 /* PACKET2 NOP */
394 radeon_ring_write(ring, 0x80000000);
395 radeon_ring_write(ring, 0x80000000);
396 radeon_ring_unlock_commit(rdev, ring);
397 }
398 ring->rptr = RREG32(RADEON_CP_RB_RPTR);
399 return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, ring);
400}
401
402int r300_asic_reset(struct radeon_device *rdev) 380int r300_asic_reset(struct radeon_device *rdev)
403{ 381{
404 struct r100_mc_save save; 382 struct r100_mc_save save;
@@ -449,7 +427,6 @@ int r300_asic_reset(struct radeon_device *rdev)
449 /* Check if GPU is idle */ 427 /* Check if GPU is idle */
450 if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) { 428 if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
451 dev_err(rdev->dev, "failed to reset GPU\n"); 429 dev_err(rdev->dev, "failed to reset GPU\n");
452 rdev->gpu_lockup = true;
453 ret = -1; 430 ret = -1;
454 } else 431 } else
455 dev_info(rdev->dev, "GPU reset succeeded\n"); 432 dev_info(rdev->dev, "GPU reset succeeded\n");
@@ -627,7 +604,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
627 int r; 604 int r;
628 u32 idx_value; 605 u32 idx_value;
629 606
630 ib = p->ib->ptr; 607 ib = p->ib.ptr;
631 track = (struct r100_cs_track *)p->track; 608 track = (struct r100_cs_track *)p->track;
632 idx_value = radeon_get_ib_value(p, idx); 609 idx_value = radeon_get_ib_value(p, idx);
633 610
@@ -1169,7 +1146,7 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
1169 unsigned idx; 1146 unsigned idx;
1170 int r; 1147 int r;
1171 1148
1172 ib = p->ib->ptr; 1149 ib = p->ib.ptr;
1173 idx = pkt->idx + 1; 1150 idx = pkt->idx + 1;
1174 track = (struct r100_cs_track *)p->track; 1151 track = (struct r100_cs_track *)p->track;
1175 switch(pkt->opcode) { 1152 switch(pkt->opcode) {
@@ -1418,12 +1395,9 @@ static int r300_startup(struct radeon_device *rdev)
1418 if (r) 1395 if (r)
1419 return r; 1396 return r;
1420 1397
1421 r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); 1398 r = radeon_ib_ring_tests(rdev);
1422 if (r) { 1399 if (r)
1423 dev_err(rdev->dev, "failed testing IB (%d).\n", r);
1424 rdev->accel_working = false;
1425 return r; 1400 return r;
1426 }
1427 1401
1428 return 0; 1402 return 0;
1429} 1403}
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index f3fcaacfea01..99137be7a300 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -279,12 +279,9 @@ static int r420_startup(struct radeon_device *rdev)
279 if (r) 279 if (r)
280 return r; 280 return r;
281 281
282 r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); 282 r = radeon_ib_ring_tests(rdev);
283 if (r) { 283 if (r)
284 dev_err(rdev->dev, "failed testing IB (%d).\n", r);
285 rdev->accel_working = false;
286 return r; 284 return r;
287 }
288 285
289 return 0; 286 return 0;
290} 287}
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index ebcc15b03c9f..b5cf8375cd25 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -207,12 +207,10 @@ static int r520_startup(struct radeon_device *rdev)
207 if (r) 207 if (r)
208 return r; 208 return r;
209 209
210 r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); 210 r = radeon_ib_ring_tests(rdev);
211 if (r) { 211 if (r)
212 dev_err(rdev->dev, "failed testing IB (%d).\n", r);
213 rdev->accel_working = false;
214 return r; 212 return r;
215 } 213
216 return 0; 214 return 0;
217} 215}
218 216
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index c8187c4b6ae8..f388a1d73b63 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -713,6 +713,14 @@ void r600_hpd_init(struct radeon_device *rdev)
713 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 713 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
714 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 714 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
715 715
716 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
717 connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
718 /* don't try to enable hpd on eDP or LVDS to avoid breaking the
719 * aux dp channel on iMacs; this helps (but doesn't completely fix)
720 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
721 */
722 continue;
723 }
716 if (ASIC_IS_DCE3(rdev)) { 724 if (ASIC_IS_DCE3(rdev)) {
717 u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa); 725 u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
718 if (ASIC_IS_DCE32(rdev)) 726 if (ASIC_IS_DCE32(rdev))
@@ -1223,7 +1231,7 @@ int r600_vram_scratch_init(struct radeon_device *rdev)
1223 if (rdev->vram_scratch.robj == NULL) { 1231 if (rdev->vram_scratch.robj == NULL) {
1224 r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, 1232 r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
1225 PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, 1233 PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
1226 &rdev->vram_scratch.robj); 1234 NULL, &rdev->vram_scratch.robj);
1227 if (r) { 1235 if (r) {
1228 return r; 1236 return r;
1229 } 1237 }
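
Editor's note: radeon_bo_create() grows a new argument here, and every caller touched in this diff passes NULL. Given the prime/dma-buf work elsewhere in this pull, it is presumably the scatter-gather table of an imported buffer; the assumed signature:

	/* sketch of the assumed signature: 'sg' carries the pages of a
	 * dma-buf/prime import and is NULL for ordinary BO allocations */
	int radeon_bo_create(struct radeon_device *rdev, unsigned long size,
			     int byte_align, bool kernel, u32 domain,
			     struct sg_table *sg, struct radeon_bo **bo_ptr);
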
@@ -1350,31 +1358,17 @@ bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
1350 u32 srbm_status; 1358 u32 srbm_status;
1351 u32 grbm_status; 1359 u32 grbm_status;
1352 u32 grbm_status2; 1360 u32 grbm_status2;
1353 struct r100_gpu_lockup *lockup;
1354 int r;
1355
1356 if (rdev->family >= CHIP_RV770)
1357 lockup = &rdev->config.rv770.lockup;
1358 else
1359 lockup = &rdev->config.r600.lockup;
1360 1361
1361 srbm_status = RREG32(R_000E50_SRBM_STATUS); 1362 srbm_status = RREG32(R_000E50_SRBM_STATUS);
1362 grbm_status = RREG32(R_008010_GRBM_STATUS); 1363 grbm_status = RREG32(R_008010_GRBM_STATUS);
1363 grbm_status2 = RREG32(R_008014_GRBM_STATUS2); 1364 grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
1364 if (!G_008010_GUI_ACTIVE(grbm_status)) { 1365 if (!G_008010_GUI_ACTIVE(grbm_status)) {
1365 r100_gpu_lockup_update(lockup, ring); 1366 radeon_ring_lockup_update(ring);
1366 return false; 1367 return false;
1367 } 1368 }
1368 /* force CP activities */ 1369 /* force CP activities */
1369 r = radeon_ring_lock(rdev, ring, 2); 1370 radeon_ring_force_activity(rdev, ring);
1370 if (!r) { 1371 return radeon_ring_test_lockup(rdev, ring);
1371 /* PACKET2 NOP */
1372 radeon_ring_write(ring, 0x80000000);
1373 radeon_ring_write(ring, 0x80000000);
1374 radeon_ring_unlock_commit(rdev, ring);
1375 }
1376 ring->rptr = RREG32(ring->rptr_reg);
1377 return r100_gpu_cp_is_lockup(rdev, lockup, ring);
1378} 1372}
1379 1373
1380int r600_asic_reset(struct radeon_device *rdev) 1374int r600_asic_reset(struct radeon_device *rdev)
@@ -2377,20 +2371,15 @@ int r600_copy_blit(struct radeon_device *rdev,
2377 unsigned num_gpu_pages, 2371 unsigned num_gpu_pages,
2378 struct radeon_fence *fence) 2372 struct radeon_fence *fence)
2379{ 2373{
2374 struct radeon_sa_bo *vb = NULL;
2380 int r; 2375 int r;
2381 2376
2382 mutex_lock(&rdev->r600_blit.mutex); 2377 r = r600_blit_prepare_copy(rdev, num_gpu_pages, &vb);
2383 rdev->r600_blit.vb_ib = NULL;
2384 r = r600_blit_prepare_copy(rdev, num_gpu_pages);
2385 if (r) { 2378 if (r) {
2386 if (rdev->r600_blit.vb_ib)
2387 radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
2388 mutex_unlock(&rdev->r600_blit.mutex);
2389 return r; 2379 return r;
2390 } 2380 }
2391 r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages); 2381 r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages, vb);
2392 r600_blit_done_copy(rdev, fence); 2382 r600_blit_done_copy(rdev, fence, vb);
2393 mutex_unlock(&rdev->r600_blit.mutex);
2394 return 0; 2383 return 0;
2395} 2384}
2396 2385
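
Editor's note: the blit path loses its private vertex-buffer IB and the r600_blit.mutex that serialized it; vertex space now comes from the fence-tracked sub-allocator, which is safe for concurrent users. The pattern, condensed from this hunk and the r600_blit_kms.c changes below:

	struct radeon_sa_bo *vb = NULL;
	int r;

	/* carve (num_loops * 48) + 256 bytes out of the shared ring_tmp_bo
	 * pool, 256-byte aligned; the final 'true' asks the allocator to
	 * block until space is recycled rather than fail */
	r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &vb,
			     (num_loops * 48) + 256, 256, true);
	if (r)
		return r;

	/* the CPU writes vertices at radeon_sa_bo_cpu_addr(vb); the GPU
	 * fetches them from radeon_sa_bo_gpu_addr(vb) */

	/* the slot is recycled automatically once 'fence' signals */
	radeon_sa_bo_free(rdev, &vb, fence);
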
@@ -2494,12 +2483,9 @@ int r600_startup(struct radeon_device *rdev)
2494 if (r) 2483 if (r)
2495 return r; 2484 return r;
2496 2485
2497 r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); 2486 r = radeon_ib_ring_tests(rdev);
2498 if (r) { 2487 if (r)
2499 DRM_ERROR("radeon: failed testing IB (%d).\n", r);
2500 rdev->accel_working = false;
2501 return r; 2488 return r;
2502 }
2503 2489
2504 return 0; 2490 return 0;
2505} 2491}
@@ -2574,10 +2560,6 @@ int r600_init(struct radeon_device *rdev)
2574 if (r600_debugfs_mc_info_init(rdev)) { 2560 if (r600_debugfs_mc_info_init(rdev)) {
2575 DRM_ERROR("Failed to register debugfs file for mc !\n"); 2561 DRM_ERROR("Failed to register debugfs file for mc !\n");
2576 } 2562 }
2577 /* This don't do much */
2578 r = radeon_gem_init(rdev);
2579 if (r)
2580 return r;
2581 /* Read BIOS */ 2563 /* Read BIOS */
2582 if (!radeon_get_bios(rdev)) { 2564 if (!radeon_get_bios(rdev)) {
2583 if (ASIC_IS_AVIVO(rdev)) 2565 if (ASIC_IS_AVIVO(rdev))
@@ -2675,7 +2657,6 @@ void r600_fini(struct radeon_device *rdev)
2675 r600_vram_scratch_fini(rdev); 2657 r600_vram_scratch_fini(rdev);
2676 radeon_agp_fini(rdev); 2658 radeon_agp_fini(rdev);
2677 radeon_gem_fini(rdev); 2659 radeon_gem_fini(rdev);
2678 radeon_semaphore_driver_fini(rdev);
2679 radeon_fence_driver_fini(rdev); 2660 radeon_fence_driver_fini(rdev);
2680 radeon_bo_fini(rdev); 2661 radeon_bo_fini(rdev);
2681 radeon_atombios_fini(rdev); 2662 radeon_atombios_fini(rdev);
@@ -2704,7 +2685,7 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
2704 2685
2705int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) 2686int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
2706{ 2687{
2707 struct radeon_ib *ib; 2688 struct radeon_ib ib;
2708 uint32_t scratch; 2689 uint32_t scratch;
2709 uint32_t tmp = 0; 2690 uint32_t tmp = 0;
2710 unsigned i; 2691 unsigned i;
@@ -2722,18 +2703,18 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
2722 DRM_ERROR("radeon: failed to get ib (%d).\n", r); 2703 DRM_ERROR("radeon: failed to get ib (%d).\n", r);
2723 return r; 2704 return r;
2724 } 2705 }
2725 ib->ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1); 2706 ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
2726 ib->ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); 2707 ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2727 ib->ptr[2] = 0xDEADBEEF; 2708 ib.ptr[2] = 0xDEADBEEF;
2728 ib->length_dw = 3; 2709 ib.length_dw = 3;
2729 r = radeon_ib_schedule(rdev, ib); 2710 r = radeon_ib_schedule(rdev, &ib);
2730 if (r) { 2711 if (r) {
2731 radeon_scratch_free(rdev, scratch); 2712 radeon_scratch_free(rdev, scratch);
2732 radeon_ib_free(rdev, &ib); 2713 radeon_ib_free(rdev, &ib);
2733 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); 2714 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
2734 return r; 2715 return r;
2735 } 2716 }
2736 r = radeon_fence_wait(ib->fence, false); 2717 r = radeon_fence_wait(ib.fence, false);
2737 if (r) { 2718 if (r) {
2738 DRM_ERROR("radeon: fence wait failed (%d).\n", r); 2719 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
2739 return r; 2720 return r;
@@ -2745,7 +2726,7 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
2745 DRM_UDELAY(1); 2726 DRM_UDELAY(1);
2746 } 2727 }
2747 if (i < rdev->usec_timeout) { 2728 if (i < rdev->usec_timeout) {
2748 DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib->fence->ring, i); 2729 DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
2749 } else { 2730 } else {
2750 DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n", 2731 DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
2751 scratch, tmp); 2732 scratch, tmp);
@@ -2788,7 +2769,7 @@ int r600_ih_ring_alloc(struct radeon_device *rdev)
2788 r = radeon_bo_create(rdev, rdev->ih.ring_size, 2769 r = radeon_bo_create(rdev, rdev->ih.ring_size,
2789 PAGE_SIZE, true, 2770 PAGE_SIZE, true,
2790 RADEON_GEM_DOMAIN_GTT, 2771 RADEON_GEM_DOMAIN_GTT,
2791 &rdev->ih.ring_obj); 2772 NULL, &rdev->ih.ring_obj);
2792 if (r) { 2773 if (r) {
2793 DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r); 2774 DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
2794 return r; 2775 return r;
@@ -2968,6 +2949,15 @@ static void r600_disable_interrupt_state(struct radeon_device *rdev)
2968 WREG32(DC_HPD5_INT_CONTROL, tmp); 2949 WREG32(DC_HPD5_INT_CONTROL, tmp);
2969 tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY; 2950 tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2970 WREG32(DC_HPD6_INT_CONTROL, tmp); 2951 WREG32(DC_HPD6_INT_CONTROL, tmp);
2952 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
2953 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
2954 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
2955 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
2956 } else {
2957 tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
2958 WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
2959 tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
2960 WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
2971 } 2961 }
2972 } else { 2962 } else {
2973 WREG32(DACA_AUTODETECT_INT_CONTROL, 0); 2963 WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
@@ -2978,6 +2968,10 @@ static void r600_disable_interrupt_state(struct radeon_device *rdev)
2978 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp); 2968 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
2979 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY; 2969 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
2980 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp); 2970 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
2971 tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
2972 WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
2973 tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
2974 WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
2981 } 2975 }
2982} 2976}
2983 2977
@@ -3047,6 +3041,9 @@ int r600_irq_init(struct radeon_device *rdev)
3047 else 3041 else
3048 r600_disable_interrupt_state(rdev); 3042 r600_disable_interrupt_state(rdev);
3049 3043
3044 /* at this point everything should be setup correctly to enable master */
3045 pci_set_master(rdev->pdev);
3046
3050 /* enable irqs */ 3047 /* enable irqs */
3051 r600_enable_interrupts(rdev); 3048 r600_enable_interrupts(rdev);
3052 3049
@@ -3071,7 +3068,7 @@ int r600_irq_set(struct radeon_device *rdev)
3071 u32 mode_int = 0; 3068 u32 mode_int = 0;
3072 u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0; 3069 u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
3073 u32 grbm_int_cntl = 0; 3070 u32 grbm_int_cntl = 0;
3074 u32 hdmi1, hdmi2; 3071 u32 hdmi0, hdmi1;
3075 u32 d1grph = 0, d2grph = 0; 3072 u32 d1grph = 0, d2grph = 0;
3076 3073
3077 if (!rdev->irq.installed) { 3074 if (!rdev->irq.installed) {
@@ -3086,9 +3083,7 @@ int r600_irq_set(struct radeon_device *rdev)
3086 return 0; 3083 return 0;
3087 } 3084 }
3088 3085
3089 hdmi1 = RREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
3090 if (ASIC_IS_DCE3(rdev)) { 3086 if (ASIC_IS_DCE3(rdev)) {
3091 hdmi2 = RREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
3092 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; 3087 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
3093 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN; 3088 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
3094 hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN; 3089 hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
@@ -3096,12 +3091,18 @@ int r600_irq_set(struct radeon_device *rdev)
3096 if (ASIC_IS_DCE32(rdev)) { 3091 if (ASIC_IS_DCE32(rdev)) {
3097 hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN; 3092 hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
3098 hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN; 3093 hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
3094 hdmi0 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
3095 hdmi1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
3096 } else {
3097 hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3098 hdmi1 = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3099 } 3099 }
3100 } else { 3100 } else {
3101 hdmi2 = RREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
3102 hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN; 3101 hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
3103 hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN; 3102 hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
3104 hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN; 3103 hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
3104 hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3105 hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3105 } 3106 }
3106 3107
3107 if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) { 3108 if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
@@ -3143,13 +3144,13 @@ int r600_irq_set(struct radeon_device *rdev)
3143 DRM_DEBUG("r600_irq_set: hpd 6\n"); 3144 DRM_DEBUG("r600_irq_set: hpd 6\n");
3144 hpd6 |= DC_HPDx_INT_EN; 3145 hpd6 |= DC_HPDx_INT_EN;
3145 } 3146 }
3146 if (rdev->irq.hdmi[0]) { 3147 if (rdev->irq.afmt[0]) {
3147 DRM_DEBUG("r600_irq_set: hdmi 1\n"); 3148 DRM_DEBUG("r600_irq_set: hdmi 0\n");
3148 hdmi1 |= R600_HDMI_INT_EN; 3149 hdmi0 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
3149 } 3150 }
3150 if (rdev->irq.hdmi[1]) { 3151 if (rdev->irq.afmt[1]) {
3151 DRM_DEBUG("r600_irq_set: hdmi 2\n"); 3152 DRM_DEBUG("r600_irq_set: hdmi 1\n");
3152 hdmi2 |= R600_HDMI_INT_EN; 3153 hdmi1 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
3153 } 3154 }
3154 if (rdev->irq.gui_idle) { 3155 if (rdev->irq.gui_idle) {
3155 DRM_DEBUG("gui idle\n"); 3156 DRM_DEBUG("gui idle\n");
@@ -3161,9 +3162,7 @@ int r600_irq_set(struct radeon_device *rdev)
3161 WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph); 3162 WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
3162 WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph); 3163 WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
3163 WREG32(GRBM_INT_CNTL, grbm_int_cntl); 3164 WREG32(GRBM_INT_CNTL, grbm_int_cntl);
3164 WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1);
3165 if (ASIC_IS_DCE3(rdev)) { 3165 if (ASIC_IS_DCE3(rdev)) {
3166 WREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, hdmi2);
3167 WREG32(DC_HPD1_INT_CONTROL, hpd1); 3166 WREG32(DC_HPD1_INT_CONTROL, hpd1);
3168 WREG32(DC_HPD2_INT_CONTROL, hpd2); 3167 WREG32(DC_HPD2_INT_CONTROL, hpd2);
3169 WREG32(DC_HPD3_INT_CONTROL, hpd3); 3168 WREG32(DC_HPD3_INT_CONTROL, hpd3);
@@ -3171,12 +3170,18 @@ int r600_irq_set(struct radeon_device *rdev)
3171 if (ASIC_IS_DCE32(rdev)) { 3170 if (ASIC_IS_DCE32(rdev)) {
3172 WREG32(DC_HPD5_INT_CONTROL, hpd5); 3171 WREG32(DC_HPD5_INT_CONTROL, hpd5);
3173 WREG32(DC_HPD6_INT_CONTROL, hpd6); 3172 WREG32(DC_HPD6_INT_CONTROL, hpd6);
3173 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, hdmi0);
3174 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, hdmi1);
3175 } else {
3176 WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
3177 WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
3174 } 3178 }
3175 } else { 3179 } else {
3176 WREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, hdmi2);
3177 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1); 3180 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
3178 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2); 3181 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
3179 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3); 3182 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
3183 WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
3184 WREG32(HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
3180 } 3185 }
3181 3186
3182 return 0; 3187 return 0;
@@ -3190,10 +3195,19 @@ static void r600_irq_ack(struct radeon_device *rdev)
3190 rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS); 3195 rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
3191 rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE); 3196 rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
3192 rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2); 3197 rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
3198 if (ASIC_IS_DCE32(rdev)) {
3199 rdev->irq.stat_regs.r600.hdmi0_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET0);
3200 rdev->irq.stat_regs.r600.hdmi1_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET1);
3201 } else {
3202 rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
3203 rdev->irq.stat_regs.r600.hdmi1_status = RREG32(DCE3_HDMI1_STATUS);
3204 }
3193 } else { 3205 } else {
3194 rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS); 3206 rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS);
3195 rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE); 3207 rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
3196 rdev->irq.stat_regs.r600.disp_int_cont2 = 0; 3208 rdev->irq.stat_regs.r600.disp_int_cont2 = 0;
3209 rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
3210 rdev->irq.stat_regs.r600.hdmi1_status = RREG32(HDMI1_STATUS);
3197 } 3211 }
3198 rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS); 3212 rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS);
3199 rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS); 3213 rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS);
@@ -3259,17 +3273,32 @@ static void r600_irq_ack(struct radeon_device *rdev)
3259 tmp |= DC_HPDx_INT_ACK; 3273 tmp |= DC_HPDx_INT_ACK;
3260 WREG32(DC_HPD6_INT_CONTROL, tmp); 3274 WREG32(DC_HPD6_INT_CONTROL, tmp);
3261 } 3275 }
3262 } 3276 if (rdev->irq.stat_regs.r600.hdmi0_status & AFMT_AZ_FORMAT_WTRIG) {
3263 if (RREG32(R600_HDMI_BLOCK1 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) { 3277 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0);
3264 WREG32_P(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK); 3278 tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
3265 } 3279 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
3266 if (ASIC_IS_DCE3(rdev)) { 3280 }
3267 if (RREG32(R600_HDMI_BLOCK3 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) { 3281 if (rdev->irq.stat_regs.r600.hdmi1_status & AFMT_AZ_FORMAT_WTRIG) {
3268 WREG32_P(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK); 3282 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1);
3283 tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
3284 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
3269 } 3285 }
3270 } else { 3286 } else {
3271 if (RREG32(R600_HDMI_BLOCK2 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) { 3287 if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
3272 WREG32_P(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK); 3288 tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL);
3289 tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
3290 WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
3291 }
3292 if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
3293 if (ASIC_IS_DCE3(rdev)) {
3294 tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL);
3295 tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
3296 WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
3297 } else {
3298 tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL);
3299 tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
3300 WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
3301 }
3273 } 3302 }
3274 } 3303 }
3275} 3304}
@@ -3345,6 +3374,7 @@ int r600_irq_process(struct radeon_device *rdev)
3345 u32 ring_index; 3374 u32 ring_index;
3346 unsigned long flags; 3375 unsigned long flags;
3347 bool queue_hotplug = false; 3376 bool queue_hotplug = false;
3377 bool queue_hdmi = false;
3348 3378
3349 if (!rdev->ih.enabled || rdev->shutdown) 3379 if (!rdev->ih.enabled || rdev->shutdown)
3350 return IRQ_NONE; 3380 return IRQ_NONE;
@@ -3480,9 +3510,26 @@ restart_ih:
3480 break; 3510 break;
3481 } 3511 }
3482 break; 3512 break;
3483 case 21: /* HDMI */ 3513 case 21: /* hdmi */
3484 DRM_DEBUG("IH: HDMI: 0x%x\n", src_data); 3514 switch (src_data) {
3485 r600_audio_schedule_polling(rdev); 3515 case 4:
3516 if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
3517 rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
3518 queue_hdmi = true;
3519 DRM_DEBUG("IH: HDMI0\n");
3520 }
3521 break;
3522 case 5:
3523 if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
3524 rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
3525 queue_hdmi = true;
3526 DRM_DEBUG("IH: HDMI1\n");
3527 }
3528 break;
3529 default:
3530 DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
3531 break;
3532 }
3486 break; 3533 break;
3487 case 176: /* CP_INT in ring buffer */ 3534 case 176: /* CP_INT in ring buffer */
3488 case 177: /* CP_INT in IB1 */ 3535 case 177: /* CP_INT in IB1 */
@@ -3514,6 +3561,8 @@ restart_ih:
3514 goto restart_ih; 3561 goto restart_ih;
3515 if (queue_hotplug) 3562 if (queue_hotplug)
3516 schedule_work(&rdev->hotplug_work); 3563 schedule_work(&rdev->hotplug_work);
3564 if (queue_hdmi)
3565 schedule_work(&rdev->audio_work);
3517 rdev->ih.rptr = rptr; 3566 rdev->ih.rptr = rptr;
3518 WREG32(IH_RB_RPTR, rdev->ih.rptr); 3567 WREG32(IH_RB_RPTR, rdev->ih.rptr);
3519 spin_unlock_irqrestore(&rdev->ih.lock, flags); 3568 spin_unlock_irqrestore(&rdev->ih.lock, flags);
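
Editor's note: HDMI audio interrupts now get the same top-half/bottom-half split as hotplug: the handler above merely latches which AFMT block fired and defers the register-heavy update to process context via rdev->audio_work. That work item has to be wired up at device init time, presumably in radeon_device.c (the hookup is not part of this hunk):

	/* assumed init-time hookup; r600_audio_update_hdmi() is the worker
	 * reworked in r600_audio.c below */
	INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
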
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index ba66f3093d46..7c4fa77f018f 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -29,7 +29,28 @@
29#include "radeon_asic.h" 29#include "radeon_asic.h"
30#include "atom.h" 30#include "atom.h"
31 31
32#define AUDIO_TIMER_INTERVALL 100 /* 1/10 second should be enough */ 32/*
33 * check if enc_priv stores radeon_encoder_atom_dig
34 */
35static bool radeon_dig_encoder(struct drm_encoder *encoder)
36{
37 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
38 switch (radeon_encoder->encoder_id) {
39 case ENCODER_OBJECT_ID_INTERNAL_LVDS:
40 case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
41 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
42 case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
43 case ENCODER_OBJECT_ID_INTERNAL_DVO1:
44 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
45 case ENCODER_OBJECT_ID_INTERNAL_DDI:
46 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
47 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
48 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
49 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
50 return true;
51 }
52 return false;
53}
33 54
34/* 55/*
35 * check if the chipset is supported 56 * check if the chipset is supported
@@ -42,118 +63,85 @@ static int r600_audio_chipset_supported(struct radeon_device *rdev)
42 || rdev->family == CHIP_RS740; 63 || rdev->family == CHIP_RS740;
43} 64}
44 65
45/* 66struct r600_audio r600_audio_status(struct radeon_device *rdev)
46 * current number of channels
47 */
48int r600_audio_channels(struct radeon_device *rdev)
49{ 67{
50 return (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0x7) + 1; 68 struct r600_audio status;
51} 69 uint32_t value;
52 70
53/* 71 value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL);
54 * current bits per sample
55 */
56int r600_audio_bits_per_sample(struct radeon_device *rdev)
57{
58 uint32_t value = (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0xF0) >> 4;
59 switch (value) {
60 case 0x0: return 8;
61 case 0x1: return 16;
62 case 0x2: return 20;
63 case 0x3: return 24;
64 case 0x4: return 32;
65 }
66 72
67 dev_err(rdev->dev, "Unknown bits per sample 0x%x using 16 instead\n", 73 /* number of channels */
68 (int)value); 74 status.channels = (value & 0x7) + 1;
69 75
70 return 16; 76 /* bits per sample */
71} 77 switch ((value & 0xF0) >> 4) {
72 78 case 0x0:
73/* 79 status.bits_per_sample = 8;
74 * current sampling rate in HZ 80 break;
75 */ 81 case 0x1:
76int r600_audio_rate(struct radeon_device *rdev) 82 status.bits_per_sample = 16;
77{ 83 break;
78 uint32_t value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL); 84 case 0x2:
79 uint32_t result; 85 status.bits_per_sample = 20;
86 break;
87 case 0x3:
88 status.bits_per_sample = 24;
89 break;
90 case 0x4:
91 status.bits_per_sample = 32;
92 break;
93 default:
94 dev_err(rdev->dev, "Unknown bits per sample 0x%x, using 16\n",
95 (int)value);
96 status.bits_per_sample = 16;
97 }
80 98
99 /* current sampling rate in HZ */
81 if (value & 0x4000) 100 if (value & 0x4000)
82 result = 44100; 101 status.rate = 44100;
83 else 102 else
84 result = 48000; 103 status.rate = 48000;
104 status.rate *= ((value >> 11) & 0x7) + 1;
105 status.rate /= ((value >> 8) & 0x7) + 1;
85 106
86 result *= ((value >> 11) & 0x7) + 1; 107 value = RREG32(R600_AUDIO_STATUS_BITS);
87 result /= ((value >> 8) & 0x7) + 1;
88 108
89 return result; 109 /* iec 60958 status bits */
90} 110 status.status_bits = value & 0xff;
91 111
92/* 112 /* iec 60958 category code */
93 * iec 60958 status bits 113 status.category_code = (value >> 8) & 0xff;
94 */
95uint8_t r600_audio_status_bits(struct radeon_device *rdev)
96{
97 return RREG32(R600_AUDIO_STATUS_BITS) & 0xff;
98}
99 114
100/* 115 return status;
101 * iec 60958 category code
102 */
103uint8_t r600_audio_category_code(struct radeon_device *rdev)
104{
105 return (RREG32(R600_AUDIO_STATUS_BITS) >> 8) & 0xff;
106}
107
108/*
109 * schedule next audio update event
110 */
111void r600_audio_schedule_polling(struct radeon_device *rdev)
112{
113 mod_timer(&rdev->audio_timer,
114 jiffies + msecs_to_jiffies(AUDIO_TIMER_INTERVALL));
115} 116}
116 117
117/* 118/*
118 * update all hdmi interfaces with current audio parameters 119 * update all hdmi interfaces with current audio parameters
119 */ 120 */
120static void r600_audio_update_hdmi(unsigned long param) 121void r600_audio_update_hdmi(struct work_struct *work)
121{ 122{
122 struct radeon_device *rdev = (struct radeon_device *)param; 123 struct radeon_device *rdev = container_of(work, struct radeon_device,
124 audio_work);
123 struct drm_device *dev = rdev->ddev; 125 struct drm_device *dev = rdev->ddev;
124 126 struct r600_audio audio_status = r600_audio_status(rdev);
125 int channels = r600_audio_channels(rdev);
126 int rate = r600_audio_rate(rdev);
127 int bps = r600_audio_bits_per_sample(rdev);
128 uint8_t status_bits = r600_audio_status_bits(rdev);
129 uint8_t category_code = r600_audio_category_code(rdev);
130
131 struct drm_encoder *encoder; 127 struct drm_encoder *encoder;
132 int changes = 0, still_going = 0; 128 bool changed = false;
133 129
134 changes |= channels != rdev->audio_channels; 130 if (rdev->audio_status.channels != audio_status.channels ||
135 changes |= rate != rdev->audio_rate; 131 rdev->audio_status.rate != audio_status.rate ||
136 changes |= bps != rdev->audio_bits_per_sample; 132 rdev->audio_status.bits_per_sample != audio_status.bits_per_sample ||
137 changes |= status_bits != rdev->audio_status_bits; 133 rdev->audio_status.status_bits != audio_status.status_bits ||
138 changes |= category_code != rdev->audio_category_code; 134 rdev->audio_status.category_code != audio_status.category_code) {
139 135 rdev->audio_status = audio_status;
140 if (changes) { 136 changed = true;
141 rdev->audio_channels = channels;
142 rdev->audio_rate = rate;
143 rdev->audio_bits_per_sample = bps;
144 rdev->audio_status_bits = status_bits;
145 rdev->audio_category_code = category_code;
146 } 137 }
147 138
148 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 139 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
149 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 140 if (!radeon_dig_encoder(encoder))
150 still_going |= radeon_encoder->audio_polling_active; 141 continue;
151 if (changes || r600_hdmi_buffer_status_changed(encoder)) 142 if (changed || r600_hdmi_buffer_status_changed(encoder))
152 r600_hdmi_update_audio_settings(encoder); 143 r600_hdmi_update_audio_settings(encoder);
153 } 144 }
154
155 if (still_going)
156 r600_audio_schedule_polling(rdev);
157} 145}
158 146
159/* 147/*
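
Editor's note: a worked example of the decode in r600_audio_status() above, for a made-up register value:

	/* value = 0x4011 read from R600_AUDIO_RATE_BPS_CHANNEL:
	 *   channels        = (0x4011 & 0x7) + 1         = 2
	 *   bps code        = (0x4011 & 0xF0) >> 4       = 0x1 -> 16 bits
	 *   base rate       = bit 14 set                 -> 44100 Hz
	 *   rate multiplier = ((0x4011 >> 11) & 0x7) + 1 = 1
	 *   rate divider    = ((0x4011 >> 8) & 0x7) + 1  = 1
	 *   rate            = 44100 * 1 / 1              = 44100 Hz
	 */
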
@@ -177,7 +165,7 @@ static void r600_audio_engine_enable(struct radeon_device *rdev, bool enable)
177} 165}
178 166
179/* 167/*
180 * initialize the audio vars and register the update timer 168 * initialize the audio vars
181 */ 169 */
182int r600_audio_init(struct radeon_device *rdev) 170int r600_audio_init(struct radeon_device *rdev)
183{ 171{
@@ -186,51 +174,16 @@ int r600_audio_init(struct radeon_device *rdev)
186 174
187 r600_audio_engine_enable(rdev, true); 175 r600_audio_engine_enable(rdev, true);
188 176
189 rdev->audio_channels = -1; 177 rdev->audio_status.channels = -1;
190 rdev->audio_rate = -1; 178 rdev->audio_status.rate = -1;
191 rdev->audio_bits_per_sample = -1; 179 rdev->audio_status.bits_per_sample = -1;
192 rdev->audio_status_bits = 0; 180 rdev->audio_status.status_bits = 0;
193 rdev->audio_category_code = 0; 181 rdev->audio_status.category_code = 0;
194
195 setup_timer(
196 &rdev->audio_timer,
197 r600_audio_update_hdmi,
198 (unsigned long)rdev);
199 182
200 return 0; 183 return 0;
201} 184}
202 185
203/* 186/*
204 * enable the polling timer, to check for status changes
205 */
206void r600_audio_enable_polling(struct drm_encoder *encoder)
207{
208 struct drm_device *dev = encoder->dev;
209 struct radeon_device *rdev = dev->dev_private;
210 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
211
212 DRM_DEBUG("r600_audio_enable_polling: %d\n",
213 radeon_encoder->audio_polling_active);
214 if (radeon_encoder->audio_polling_active)
215 return;
216
217 radeon_encoder->audio_polling_active = 1;
218 if (rdev->audio_enabled)
219 mod_timer(&rdev->audio_timer, jiffies + 1);
220}
221
222/*
223 * disable the polling timer, so we get no more status updates
224 */
225void r600_audio_disable_polling(struct drm_encoder *encoder)
226{
227 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
228 DRM_DEBUG("r600_audio_disable_polling: %d\n",
229 radeon_encoder->audio_polling_active);
230 radeon_encoder->audio_polling_active = 0;
231}
232
233/*
234 * attach the audio codec to the clock source of the encoder 187 * attach the audio codec to the clock source of the encoder
235 */ 188 */
236void r600_audio_set_clock(struct drm_encoder *encoder, int clock) 189void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
@@ -297,7 +250,5 @@ void r600_audio_fini(struct radeon_device *rdev)
297 if (!rdev->audio_enabled) 250 if (!rdev->audio_enabled)
298 return; 251 return;
299 252
300 del_timer(&rdev->audio_timer);
301
302 r600_audio_engine_enable(rdev, false); 253 r600_audio_engine_enable(rdev, false);
303} 254}
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index db38f587f27a..03b6e0d3d503 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -513,7 +513,6 @@ int r600_blit_init(struct radeon_device *rdev)
513 rdev->r600_blit.primitives.set_default_state = set_default_state; 513 rdev->r600_blit.primitives.set_default_state = set_default_state;
514 514
515 rdev->r600_blit.ring_size_common = 40; /* shaders + def state */ 515 rdev->r600_blit.ring_size_common = 40; /* shaders + def state */
516 rdev->r600_blit.ring_size_common += 16; /* fence emit for VB IB */
517 rdev->r600_blit.ring_size_common += 5; /* done copy */ 516 rdev->r600_blit.ring_size_common += 5; /* done copy */
518 rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */ 517 rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */
519 518
@@ -528,7 +527,6 @@ int r600_blit_init(struct radeon_device *rdev)
528 if (rdev->r600_blit.shader_obj) 527 if (rdev->r600_blit.shader_obj)
529 goto done; 528 goto done;
530 529
531 mutex_init(&rdev->r600_blit.mutex);
532 rdev->r600_blit.state_offset = 0; 530 rdev->r600_blit.state_offset = 0;
533 531
534 if (rdev->family >= CHIP_RV770) 532 if (rdev->family >= CHIP_RV770)
@@ -554,7 +552,7 @@ int r600_blit_init(struct radeon_device *rdev)
554 obj_size = ALIGN(obj_size, 256); 552 obj_size = ALIGN(obj_size, 256);
555 553
556 r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, 554 r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
557 &rdev->r600_blit.shader_obj); 555 NULL, &rdev->r600_blit.shader_obj);
558 if (r) { 556 if (r) {
559 DRM_ERROR("r600 failed to allocate shader\n"); 557 DRM_ERROR("r600 failed to allocate shader\n");
560 return r; 558 return r;
@@ -621,27 +619,6 @@ void r600_blit_fini(struct radeon_device *rdev)
621 radeon_bo_unref(&rdev->r600_blit.shader_obj); 619 radeon_bo_unref(&rdev->r600_blit.shader_obj);
622} 620}
623 621
624static int r600_vb_ib_get(struct radeon_device *rdev, unsigned size)
625{
626 int r;
627 r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX,
628 &rdev->r600_blit.vb_ib, size);
629 if (r) {
630 DRM_ERROR("failed to get IB for vertex buffer\n");
631 return r;
632 }
633
634 rdev->r600_blit.vb_total = size;
635 rdev->r600_blit.vb_used = 0;
636 return 0;
637}
638
639static void r600_vb_ib_put(struct radeon_device *rdev)
640{
641 radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence);
642 radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
643}
644
645static unsigned r600_blit_create_rect(unsigned num_gpu_pages, 622static unsigned r600_blit_create_rect(unsigned num_gpu_pages,
646 int *width, int *height, int max_dim) 623 int *width, int *height, int max_dim)
647{ 624{
@@ -688,7 +665,8 @@ static unsigned r600_blit_create_rect(unsigned num_gpu_pages,
688} 665}
689 666
690 667
691int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages) 668int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages,
669 struct radeon_sa_bo **vb)
692{ 670{
693 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 671 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
694 int r; 672 int r;
@@ -705,46 +683,54 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages)
705 } 683 }
706 684
707 /* 48 bytes for vertex per loop */ 685 /* 48 bytes for vertex per loop */
708 r = r600_vb_ib_get(rdev, (num_loops*48)+256); 686 r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, vb,
709 if (r) 687 (num_loops*48)+256, 256, true);
688 if (r) {
710 return r; 689 return r;
690 }
711 691
712 /* calculate number of loops correctly */ 692 /* calculate number of loops correctly */
713 ring_size = num_loops * dwords_per_loop; 693 ring_size = num_loops * dwords_per_loop;
714 ring_size += rdev->r600_blit.ring_size_common; 694 ring_size += rdev->r600_blit.ring_size_common;
715 r = radeon_ring_lock(rdev, ring, ring_size); 695 r = radeon_ring_lock(rdev, ring, ring_size);
716 if (r) 696 if (r) {
697 radeon_sa_bo_free(rdev, vb, NULL);
717 return r; 698 return r;
699 }
718 700
719 rdev->r600_blit.primitives.set_default_state(rdev); 701 rdev->r600_blit.primitives.set_default_state(rdev);
720 rdev->r600_blit.primitives.set_shaders(rdev); 702 rdev->r600_blit.primitives.set_shaders(rdev);
721 return 0; 703 return 0;
722} 704}
723 705
724void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence) 706void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence,
707 struct radeon_sa_bo *vb)
725{ 708{
709 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
726 int r; 710 int r;
727 711
728 if (rdev->r600_blit.vb_ib) 712 r = radeon_fence_emit(rdev, fence);
729 r600_vb_ib_put(rdev); 713 if (r) {
730 714 radeon_ring_unlock_undo(rdev, ring);
731 if (fence) 715 return;
732 r = radeon_fence_emit(rdev, fence); 716 }
733 717
734 radeon_ring_unlock_commit(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); 718 radeon_ring_unlock_commit(rdev, ring);
719 radeon_sa_bo_free(rdev, &vb, fence);
735} 720}
736 721
737void r600_kms_blit_copy(struct radeon_device *rdev, 722void r600_kms_blit_copy(struct radeon_device *rdev,
738 u64 src_gpu_addr, u64 dst_gpu_addr, 723 u64 src_gpu_addr, u64 dst_gpu_addr,
739 unsigned num_gpu_pages) 724 unsigned num_gpu_pages,
725 struct radeon_sa_bo *vb)
740{ 726{
741 u64 vb_gpu_addr; 727 u64 vb_gpu_addr;
742 u32 *vb; 728 u32 *vb_cpu_addr;
743 729
744 DRM_DEBUG("emitting copy %16llx %16llx %d %d\n", 730 DRM_DEBUG("emitting copy %16llx %16llx %d\n",
745 src_gpu_addr, dst_gpu_addr, 731 src_gpu_addr, dst_gpu_addr, num_gpu_pages);
746 num_gpu_pages, rdev->r600_blit.vb_used); 732 vb_cpu_addr = (u32 *)radeon_sa_bo_cpu_addr(vb);
747 vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used); 733 vb_gpu_addr = radeon_sa_bo_gpu_addr(vb);
748 734
749 while (num_gpu_pages) { 735 while (num_gpu_pages) {
750 int w, h; 736 int w, h;
@@ -756,39 +742,34 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
756 size_in_bytes = pages_per_loop * RADEON_GPU_PAGE_SIZE; 742 size_in_bytes = pages_per_loop * RADEON_GPU_PAGE_SIZE;
757 DRM_DEBUG("rectangle w=%d h=%d\n", w, h); 743 DRM_DEBUG("rectangle w=%d h=%d\n", w, h);
758 744
759 if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) { 745 vb_cpu_addr[0] = 0;
760 WARN_ON(1); 746 vb_cpu_addr[1] = 0;
761 } 747 vb_cpu_addr[2] = 0;
762 748 vb_cpu_addr[3] = 0;
763 vb[0] = 0;
764 vb[1] = 0;
765 vb[2] = 0;
766 vb[3] = 0;
767 749
768 vb[4] = 0; 750 vb_cpu_addr[4] = 0;
769 vb[5] = i2f(h); 751 vb_cpu_addr[5] = i2f(h);
770 vb[6] = 0; 752 vb_cpu_addr[6] = 0;
771 vb[7] = i2f(h); 753 vb_cpu_addr[7] = i2f(h);
772 754
773 vb[8] = i2f(w); 755 vb_cpu_addr[8] = i2f(w);
774 vb[9] = i2f(h); 756 vb_cpu_addr[9] = i2f(h);
775 vb[10] = i2f(w); 757 vb_cpu_addr[10] = i2f(w);
776 vb[11] = i2f(h); 758 vb_cpu_addr[11] = i2f(h);
777 759
778 rdev->r600_blit.primitives.set_tex_resource(rdev, FMT_8_8_8_8, 760 rdev->r600_blit.primitives.set_tex_resource(rdev, FMT_8_8_8_8,
779 w, h, w, src_gpu_addr, size_in_bytes); 761 w, h, w, src_gpu_addr, size_in_bytes);
780 rdev->r600_blit.primitives.set_render_target(rdev, COLOR_8_8_8_8, 762 rdev->r600_blit.primitives.set_render_target(rdev, COLOR_8_8_8_8,
781 w, h, dst_gpu_addr); 763 w, h, dst_gpu_addr);
782 rdev->r600_blit.primitives.set_scissors(rdev, 0, 0, w, h); 764 rdev->r600_blit.primitives.set_scissors(rdev, 0, 0, w, h);
783 vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used;
784 rdev->r600_blit.primitives.set_vtx_resource(rdev, vb_gpu_addr); 765 rdev->r600_blit.primitives.set_vtx_resource(rdev, vb_gpu_addr);
785 rdev->r600_blit.primitives.draw_auto(rdev); 766 rdev->r600_blit.primitives.draw_auto(rdev);
786 rdev->r600_blit.primitives.cp_set_surface_sync(rdev, 767 rdev->r600_blit.primitives.cp_set_surface_sync(rdev,
787 PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA, 768 PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA,
788 size_in_bytes, dst_gpu_addr); 769 size_in_bytes, dst_gpu_addr);
789 770
790 vb += 12; 771 vb_cpu_addr += 12;
791 rdev->r600_blit.vb_used += 4*12; 772 vb_gpu_addr += 4*12;
792 src_gpu_addr += size_in_bytes; 773 src_gpu_addr += size_in_bytes;
793 dst_gpu_addr += size_in_bytes; 774 dst_gpu_addr += size_in_bytes;
794 num_gpu_pages -= pages_per_loop; 775 num_gpu_pages -= pages_per_loop;
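
Editor's note: the rectangle coordinates above pass through i2f(), defined elsewhere in this file (not part of the hunk): the 3D engine consumes vertex data as IEEE-754 floats, and kernel code cannot touch the FPU, so the conversion is done with integer ops. A sketch of that technique (the in-tree helper may differ in detail):

	#include <linux/bitops.h>	/* fls() */

	static u32 i2f(u32 x)
	{
		u32 msb, exponent, fraction;

		if (!x)
			return 0;	/* +0.0f */
		msb = fls(x) - 1;		/* index of the highest set bit */
		exponent = (127 + msb) << 23;	/* biased exponent field */
		/* shift the bits below the implicit leading 1 into bits 22..0,
		 * truncating anything that does not fit */
		fraction = (msb > 23 ? x >> (msb - 23) : x << (23 - msb)) & 0x7fffff;
		return exponent | fraction;	/* sign bit is always 0 */
	}
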
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index b8e12af304a9..0133f5f09bd6 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -345,7 +345,7 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
345 u32 height, height_align, pitch, pitch_align, depth_align; 345 u32 height, height_align, pitch, pitch_align, depth_align;
346 u64 base_offset, base_align; 346 u64 base_offset, base_align;
347 struct array_mode_checker array_check; 347 struct array_mode_checker array_check;
348 volatile u32 *ib = p->ib->ptr; 348 volatile u32 *ib = p->ib.ptr;
349 unsigned array_mode; 349 unsigned array_mode;
350 u32 format; 350 u32 format;
351 351
@@ -471,7 +471,7 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
471 u64 base_offset, base_align; 471 u64 base_offset, base_align;
472 struct array_mode_checker array_check; 472 struct array_mode_checker array_check;
473 int array_mode; 473 int array_mode;
474 volatile u32 *ib = p->ib->ptr; 474 volatile u32 *ib = p->ib.ptr;
475 475
476 476
477 if (track->db_bo == NULL) { 477 if (track->db_bo == NULL) {
@@ -961,7 +961,7 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
961 uint32_t header, h_idx, reg, wait_reg_mem_info; 961 uint32_t header, h_idx, reg, wait_reg_mem_info;
962 volatile uint32_t *ib; 962 volatile uint32_t *ib;
963 963
964 ib = p->ib->ptr; 964 ib = p->ib.ptr;
965 965
966 /* parse the WAIT_REG_MEM */ 966 /* parse the WAIT_REG_MEM */
967 r = r600_cs_packet_parse(p, &wait_reg_mem, p->idx); 967 r = r600_cs_packet_parse(p, &wait_reg_mem, p->idx);
@@ -1110,7 +1110,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1110 m = 1 << ((reg >> 2) & 31); 1110 m = 1 << ((reg >> 2) & 31);
1111 if (!(r600_reg_safe_bm[i] & m)) 1111 if (!(r600_reg_safe_bm[i] & m))
1112 return 0; 1112 return 0;
1113 ib = p->ib->ptr; 1113 ib = p->ib.ptr;
1114 switch (reg) { 1114 switch (reg) {
1115 /* force following reg to 0 in an attempt to disable out buffer 1115 /* force following reg to 0 in an attempt to disable out buffer
1116 * which will need us to better understand how it works to perform 1116 * which will need us to better understand how it works to perform
@@ -1714,7 +1714,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
1714 u32 idx_value; 1714 u32 idx_value;
1715 1715
1716 track = (struct r600_cs_track *)p->track; 1716 track = (struct r600_cs_track *)p->track;
1717 ib = p->ib->ptr; 1717 ib = p->ib.ptr;
1718 idx = pkt->idx + 1; 1718 idx = pkt->idx + 1;
1719 idx_value = radeon_get_ib_value(p, idx); 1719 idx_value = radeon_get_ib_value(p, idx);
1720 1720
@@ -2249,8 +2249,8 @@ int r600_cs_parse(struct radeon_cs_parser *p)
2249 } 2249 }
2250 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); 2250 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
2251#if 0 2251#if 0
2252 for (r = 0; r < p->ib->length_dw; r++) { 2252 for (r = 0; r < p->ib.length_dw; r++) {
2253 printk(KERN_INFO "%05d 0x%08X\n", r, p->ib->ptr[r]); 2253 printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
2254 mdelay(1); 2254 mdelay(1);
2255 } 2255 }
2256#endif 2256#endif
@@ -2298,7 +2298,6 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
2298{ 2298{
2299 struct radeon_cs_parser parser; 2299 struct radeon_cs_parser parser;
2300 struct radeon_cs_chunk *ib_chunk; 2300 struct radeon_cs_chunk *ib_chunk;
2301 struct radeon_ib fake_ib;
2302 struct r600_cs_track *track; 2301 struct r600_cs_track *track;
2303 int r; 2302 int r;
2304 2303
@@ -2314,9 +2313,8 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
2314 parser.dev = &dev->pdev->dev; 2313 parser.dev = &dev->pdev->dev;
2315 parser.rdev = NULL; 2314 parser.rdev = NULL;
2316 parser.family = family; 2315 parser.family = family;
2317 parser.ib = &fake_ib;
2318 parser.track = track; 2316 parser.track = track;
2319 fake_ib.ptr = ib; 2317 parser.ib.ptr = ib;
2320 r = radeon_cs_parser_init(&parser, data); 2318 r = radeon_cs_parser_init(&parser, data);
2321 if (r) { 2319 if (r) {
2322 DRM_ERROR("Failed to initialize parser !\n"); 2320 DRM_ERROR("Failed to initialize parser !\n");
@@ -2333,8 +2331,8 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
2333 * input memory (cached) and write to the IB (which can be 2331 * input memory (cached) and write to the IB (which can be
2334 * uncached). */ 2332 * uncached). */
2335 ib_chunk = &parser.chunks[parser.chunk_ib_idx]; 2333 ib_chunk = &parser.chunks[parser.chunk_ib_idx];
2336 parser.ib->length_dw = ib_chunk->length_dw; 2334 parser.ib.length_dw = ib_chunk->length_dw;
2337 *l = parser.ib->length_dw; 2335 *l = parser.ib.length_dw;
2338 r = r600_cs_parse(&parser); 2336 r = r600_cs_parse(&parser);
2339 if (r) { 2337 if (r) {
2340 DRM_ERROR("Invalid command stream !\n"); 2338 DRM_ERROR("Invalid command stream !\n");
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 0b5920671450..226379e00ac1 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -27,6 +27,7 @@
27#include "radeon_drm.h" 27#include "radeon_drm.h"
28#include "radeon.h" 28#include "radeon.h"
29#include "radeon_asic.h" 29#include "radeon_asic.h"
30#include "r600d.h"
30#include "atom.h" 31#include "atom.h"
31 32
32/* 33/*
@@ -52,19 +53,7 @@ enum r600_hdmi_iec_status_bits {
52 AUDIO_STATUS_LEVEL = 0x80 53 AUDIO_STATUS_LEVEL = 0x80
53}; 54};
54 55
55struct { 56struct radeon_hdmi_acr r600_hdmi_predefined_acr[] = {
56 uint32_t Clock;
57
58 int N_32kHz;
59 int CTS_32kHz;
60
61 int N_44_1kHz;
62 int CTS_44_1kHz;
63
64 int N_48kHz;
65 int CTS_48kHz;
66
67} r600_hdmi_ACR[] = {
68 /* 32kHz 44.1kHz 48kHz */ 57 /* 32kHz 44.1kHz 48kHz */
69 /* Clock N CTS N CTS N CTS */ 58 /* Clock N CTS N CTS N CTS */
 70 { 25174, 4576, 28125, 7007, 31250, 6864, 28125 }, /* 25.20/1.001 MHz */ 59 { 25174, 4576, 28125, 7007, 31250, 6864, 28125 }, /* 25.20/1.001 MHz */
@@ -83,7 +72,7 @@ struct {
83/* 72/*
84 * calculate CTS value if it's not found in the table 73 * calculate CTS value if it's not found in the table
85 */ 74 */
86static void r600_hdmi_calc_CTS(uint32_t clock, int *CTS, int N, int freq) 75static void r600_hdmi_calc_cts(uint32_t clock, int *CTS, int N, int freq)
87{ 76{
88 if (*CTS == 0) 77 if (*CTS == 0)
89 *CTS = clock * N / (128 * freq) * 1000; 78 *CTS = clock * N / (128 * freq) * 1000;
@@ -91,6 +80,24 @@ static void r600_hdmi_calc_CTS(uint32_t clock, int *CTS, int N, int freq)
91 N, *CTS, freq); 80 N, *CTS, freq);
92} 81}
93 82
83struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock)
84{
85 struct radeon_hdmi_acr res;
86 u8 i;
87
88 for (i = 0; r600_hdmi_predefined_acr[i].clock != clock &&
89 r600_hdmi_predefined_acr[i].clock != 0; i++)
90 ;
91 res = r600_hdmi_predefined_acr[i];
92
93 /* In case some CTS are missing */
94 r600_hdmi_calc_cts(clock, &res.cts_32khz, res.n_32khz, 32000);
95 r600_hdmi_calc_cts(clock, &res.cts_44_1khz, res.n_44_1khz, 44100);
96 r600_hdmi_calc_cts(clock, &res.cts_48khz, res.n_48khz, 48000);
97
98 return res;
99}
100
94/* 101/*
95 * update the N and CTS parameters for a given pixel clock rate 102 * update the N and CTS parameters for a given pixel clock rate
96 */ 103 */
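For reference, the CTS recovery formula above can be checked against the standard HDMI values (worked example, not part of the patch): for 48 kHz audio on a 74.25 MHz pixel clock, clock = 74250 (kHz) and N = 6144, so

	CTS = clock * N / (128 * freq) * 1000
	    = 74250 * 6144 / (128 * 48000) * 1000 = 74250

which satisfies the HDMI ACR relation 128 * freq * CTS = f_TMDS * N.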
@@ -98,30 +105,19 @@ static void r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock)
98{ 105{
99 struct drm_device *dev = encoder->dev; 106 struct drm_device *dev = encoder->dev;
100 struct radeon_device *rdev = dev->dev_private; 107 struct radeon_device *rdev = dev->dev_private;
101 uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset; 108 struct radeon_hdmi_acr acr = r600_hdmi_acr(clock);
102 int CTS; 109 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
103 int N; 110 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
104 int i; 111 uint32_t offset = dig->afmt->offset;
112
113 WREG32(HDMI0_ACR_32_0 + offset, HDMI0_ACR_CTS_32(acr.cts_32khz));
114 WREG32(HDMI0_ACR_32_1 + offset, acr.n_32khz);
105 115
106 for (i = 0; r600_hdmi_ACR[i].Clock != clock && r600_hdmi_ACR[i].Clock != 0; i++); 116 WREG32(HDMI0_ACR_44_0 + offset, HDMI0_ACR_CTS_44(acr.cts_44_1khz));
107 117 WREG32(HDMI0_ACR_44_1 + offset, acr.n_44_1khz);
108 CTS = r600_hdmi_ACR[i].CTS_32kHz; 118
109 N = r600_hdmi_ACR[i].N_32kHz; 119 WREG32(HDMI0_ACR_48_0 + offset, HDMI0_ACR_CTS_48(acr.cts_48khz));
110 r600_hdmi_calc_CTS(clock, &CTS, N, 32000); 120 WREG32(HDMI0_ACR_48_1 + offset, acr.n_48khz);
111 WREG32(offset+R600_HDMI_32kHz_CTS, CTS << 12);
112 WREG32(offset+R600_HDMI_32kHz_N, N);
113
114 CTS = r600_hdmi_ACR[i].CTS_44_1kHz;
115 N = r600_hdmi_ACR[i].N_44_1kHz;
116 r600_hdmi_calc_CTS(clock, &CTS, N, 44100);
117 WREG32(offset+R600_HDMI_44_1kHz_CTS, CTS << 12);
118 WREG32(offset+R600_HDMI_44_1kHz_N, N);
119
120 CTS = r600_hdmi_ACR[i].CTS_48kHz;
121 N = r600_hdmi_ACR[i].N_48kHz;
122 r600_hdmi_calc_CTS(clock, &CTS, N, 48000);
123 WREG32(offset+R600_HDMI_48kHz_CTS, CTS << 12);
124 WREG32(offset+R600_HDMI_48kHz_N, N);
125} 121}
126 122
127/* 123/*
@@ -165,7 +161,9 @@ static void r600_hdmi_videoinfoframe(
165{ 161{
166 struct drm_device *dev = encoder->dev; 162 struct drm_device *dev = encoder->dev;
167 struct radeon_device *rdev = dev->dev_private; 163 struct radeon_device *rdev = dev->dev_private;
168 uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset; 164 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
165 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
166 uint32_t offset = dig->afmt->offset;
169 167
170 uint8_t frame[14]; 168 uint8_t frame[14];
171 169
@@ -204,13 +202,13 @@ static void r600_hdmi_videoinfoframe(
 204 * work around this issue. */ 202 * work around this issue. */
205 frame[0x0] += 2; 203 frame[0x0] += 2;
206 204
207 WREG32(offset+R600_HDMI_VIDEOINFOFRAME_0, 205 WREG32(HDMI0_AVI_INFO0 + offset,
208 frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24)); 206 frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
209 WREG32(offset+R600_HDMI_VIDEOINFOFRAME_1, 207 WREG32(HDMI0_AVI_INFO1 + offset,
210 frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24)); 208 frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
211 WREG32(offset+R600_HDMI_VIDEOINFOFRAME_2, 209 WREG32(HDMI0_AVI_INFO2 + offset,
212 frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24)); 210 frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
213 WREG32(offset+R600_HDMI_VIDEOINFOFRAME_3, 211 WREG32(HDMI0_AVI_INFO3 + offset,
214 frame[0xC] | (frame[0xD] << 8)); 212 frame[0xC] | (frame[0xD] << 8));
215} 213}
216 214
@@ -231,7 +229,9 @@ static void r600_hdmi_audioinfoframe(
231{ 229{
232 struct drm_device *dev = encoder->dev; 230 struct drm_device *dev = encoder->dev;
233 struct radeon_device *rdev = dev->dev_private; 231 struct radeon_device *rdev = dev->dev_private;
234 uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset; 232 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
233 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
234 uint32_t offset = dig->afmt->offset;
235 235
236 uint8_t frame[11]; 236 uint8_t frame[11];
237 237
@@ -249,22 +249,24 @@ static void r600_hdmi_audioinfoframe(
249 249
250 r600_hdmi_infoframe_checksum(0x84, 0x01, 0x0A, frame); 250 r600_hdmi_infoframe_checksum(0x84, 0x01, 0x0A, frame);
251 251
252 WREG32(offset+R600_HDMI_AUDIOINFOFRAME_0, 252 WREG32(HDMI0_AUDIO_INFO0 + offset,
253 frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24)); 253 frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
254 WREG32(offset+R600_HDMI_AUDIOINFOFRAME_1, 254 WREG32(HDMI0_AUDIO_INFO1 + offset,
255 frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x8] << 24)); 255 frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x8] << 24));
256} 256}
257 257
258/* 258/*
259 * test if audio buffer is filled enough to start playing 259 * test if audio buffer is filled enough to start playing
260 */ 260 */
261static int r600_hdmi_is_audio_buffer_filled(struct drm_encoder *encoder) 261static bool r600_hdmi_is_audio_buffer_filled(struct drm_encoder *encoder)
262{ 262{
263 struct drm_device *dev = encoder->dev; 263 struct drm_device *dev = encoder->dev;
264 struct radeon_device *rdev = dev->dev_private; 264 struct radeon_device *rdev = dev->dev_private;
265 uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset; 265 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
266 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
267 uint32_t offset = dig->afmt->offset;
266 268
267 return (RREG32(offset+R600_HDMI_STATUS) & 0x10) != 0; 269 return (RREG32(HDMI0_STATUS + offset) & 0x10) != 0;
268} 270}
269 271
270/* 272/*
@@ -273,14 +275,15 @@ static int r600_hdmi_is_audio_buffer_filled(struct drm_encoder *encoder)
273int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder) 275int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder)
274{ 276{
275 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 277 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
278 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
276 int status, result; 279 int status, result;
277 280
278 if (!radeon_encoder->hdmi_offset) 281 if (!dig->afmt || !dig->afmt->enabled)
279 return 0; 282 return 0;
280 283
281 status = r600_hdmi_is_audio_buffer_filled(encoder); 284 status = r600_hdmi_is_audio_buffer_filled(encoder);
282 result = radeon_encoder->hdmi_buffer_status != status; 285 result = dig->afmt->last_buffer_filled_status != status;
283 radeon_encoder->hdmi_buffer_status = status; 286 dig->afmt->last_buffer_filled_status = status;
284 287
285 return result; 288 return result;
286} 289}
@@ -288,26 +291,23 @@ int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder)
288/* 291/*
289 * write the audio workaround status to the hardware 292 * write the audio workaround status to the hardware
290 */ 293 */
291void r600_hdmi_audio_workaround(struct drm_encoder *encoder) 294static void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
292{ 295{
293 struct drm_device *dev = encoder->dev; 296 struct drm_device *dev = encoder->dev;
294 struct radeon_device *rdev = dev->dev_private; 297 struct radeon_device *rdev = dev->dev_private;
295 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 298 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
296 uint32_t offset = radeon_encoder->hdmi_offset; 299 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
297 300 uint32_t offset = dig->afmt->offset;
298 if (!offset) 301 bool hdmi_audio_workaround = false; /* FIXME */
299 return; 302 u32 value;
300 303
301 if (!radeon_encoder->hdmi_audio_workaround || 304 if (!hdmi_audio_workaround ||
302 r600_hdmi_is_audio_buffer_filled(encoder)) { 305 r600_hdmi_is_audio_buffer_filled(encoder))
303 306 value = 0; /* disable workaround */
304 /* disable audio workaround */ 307 else
305 WREG32_P(offset+R600_HDMI_CNTL, 0x00000001, ~0x00001001); 308 value = HDMI0_AUDIO_TEST_EN; /* enable workaround */
306 309 WREG32_P(HDMI0_AUDIO_PACKET_CONTROL + offset,
307 } else { 310 value, ~HDMI0_AUDIO_TEST_EN);
308 /* enable audio workaround */
309 WREG32_P(offset+R600_HDMI_CNTL, 0x00001001, ~0x00001001);
310 }
311} 311}
312 312
313 313
@@ -318,39 +318,75 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
318{ 318{
319 struct drm_device *dev = encoder->dev; 319 struct drm_device *dev = encoder->dev;
320 struct radeon_device *rdev = dev->dev_private; 320 struct radeon_device *rdev = dev->dev_private;
321 uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset; 321 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
322 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
323 uint32_t offset;
322 324
323 if (ASIC_IS_DCE5(rdev)) 325 if (ASIC_IS_DCE5(rdev))
324 return; 326 return;
325 327
326 if (!offset) 328 /* Silent, r600_hdmi_enable will raise WARN for us */
329 if (!dig->afmt->enabled)
327 return; 330 return;
331 offset = dig->afmt->offset;
328 332
329 r600_audio_set_clock(encoder, mode->clock); 333 r600_audio_set_clock(encoder, mode->clock);
330 334
331 WREG32(offset+R600_HDMI_UNKNOWN_0, 0x1000); 335 WREG32(HDMI0_VBI_PACKET_CONTROL + offset,
332 WREG32(offset+R600_HDMI_UNKNOWN_1, 0x0); 336 HDMI0_NULL_SEND); /* send null packets when required */
333 WREG32(offset+R600_HDMI_UNKNOWN_2, 0x1000);
334 337
335 r600_hdmi_update_ACR(encoder, mode->clock); 338 WREG32(HDMI0_AUDIO_CRC_CONTROL + offset, 0x1000);
339
340 if (ASIC_IS_DCE32(rdev)) {
341 WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset,
342 HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */
 343 HDMI0_AUDIO_PACKETS_PER_LINE(3)); /* should be sufficient for all audio modes and small enough for all hblanks */
344 WREG32(AFMT_AUDIO_PACKET_CONTROL + offset,
345 AFMT_AUDIO_SAMPLE_SEND | /* send audio packets */
346 AFMT_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
347 } else {
348 WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset,
349 HDMI0_AUDIO_SAMPLE_SEND | /* send audio packets */
350 HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */
351 HDMI0_AUDIO_SEND_MAX_PACKETS | /* send NULL packets if no audio is available */
 352 HDMI0_AUDIO_PACKETS_PER_LINE(3) | /* should be sufficient for all audio modes and small enough for all hblanks */
353 HDMI0_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
354 }
355
356 WREG32(HDMI0_ACR_PACKET_CONTROL + offset,
 357 HDMI0_ACR_AUTO_SEND | /* allow hw to send ACR packets when required */
358 HDMI0_ACR_SOURCE); /* select SW CTS value */
359
360 WREG32(HDMI0_VBI_PACKET_CONTROL + offset,
361 HDMI0_NULL_SEND | /* send null packets when required */
362 HDMI0_GC_SEND | /* send general control packets */
363 HDMI0_GC_CONT); /* send general control packets every frame */
336 364
337 WREG32(offset+R600_HDMI_VIDEOCNTL, 0x13); 365 /* TODO: HDMI0_AUDIO_INFO_UPDATE */
366 WREG32(HDMI0_INFOFRAME_CONTROL0 + offset,
367 HDMI0_AVI_INFO_SEND | /* enable AVI info frames */
368 HDMI0_AVI_INFO_CONT | /* send AVI info frames every frame/field */
 369 HDMI0_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be sent until audio is enabled) */
370 HDMI0_AUDIO_INFO_CONT); /* send audio info frames every frame/field */
338 371
339 WREG32(offset+R600_HDMI_VERSION, 0x202); 372 WREG32(HDMI0_INFOFRAME_CONTROL1 + offset,
373 HDMI0_AVI_INFO_LINE(2) | /* anything other than 0 */
374 HDMI0_AUDIO_INFO_LINE(2)); /* anything other than 0 */
375
376 WREG32(HDMI0_GC + offset, 0); /* unset HDMI0_GC_AVMUTE */
340 377
341 r600_hdmi_videoinfoframe(encoder, RGB, 0, 0, 0, 0, 378 r600_hdmi_videoinfoframe(encoder, RGB, 0, 0, 0, 0,
342 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); 379 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
343 380
381 r600_hdmi_update_ACR(encoder, mode->clock);
382
 344 /* it's unknown what these bits do exactly, but it's indeed quite useful for debugging */ 383 /* it's unknown what these bits do exactly, but it's indeed quite useful for debugging */
345 WREG32(offset+R600_HDMI_AUDIO_DEBUG_0, 0x00FFFFFF); 384 WREG32(HDMI0_RAMP_CONTROL0 + offset, 0x00FFFFFF);
346 WREG32(offset+R600_HDMI_AUDIO_DEBUG_1, 0x007FFFFF); 385 WREG32(HDMI0_RAMP_CONTROL1 + offset, 0x007FFFFF);
347 WREG32(offset+R600_HDMI_AUDIO_DEBUG_2, 0x00000001); 386 WREG32(HDMI0_RAMP_CONTROL2 + offset, 0x00000001);
348 WREG32(offset+R600_HDMI_AUDIO_DEBUG_3, 0x00000001); 387 WREG32(HDMI0_RAMP_CONTROL3 + offset, 0x00000001);
349 388
350 r600_hdmi_audio_workaround(encoder); 389 r600_hdmi_audio_workaround(encoder);
351
352 /* audio packets per line, does anyone know how to calc this ? */
353 WREG32_P(offset+R600_HDMI_CNTL, 0x00040000, ~0x001F0000);
354} 390}
355 391
356/* 392/*
@@ -360,145 +396,82 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
360{ 396{
361 struct drm_device *dev = encoder->dev; 397 struct drm_device *dev = encoder->dev;
362 struct radeon_device *rdev = dev->dev_private; 398 struct radeon_device *rdev = dev->dev_private;
363 uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset; 399 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
364 400 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
365 int channels = r600_audio_channels(rdev); 401 struct r600_audio audio = r600_audio_status(rdev);
366 int rate = r600_audio_rate(rdev); 402 uint32_t offset;
367 int bps = r600_audio_bits_per_sample(rdev);
368 uint8_t status_bits = r600_audio_status_bits(rdev);
369 uint8_t category_code = r600_audio_category_code(rdev);
370
371 uint32_t iec; 403 uint32_t iec;
372 404
373 if (!offset) 405 if (!dig->afmt || !dig->afmt->enabled)
374 return; 406 return;
407 offset = dig->afmt->offset;
375 408
376 DRM_DEBUG("%s with %d channels, %d Hz sampling rate, %d bits per sample,\n", 409 DRM_DEBUG("%s with %d channels, %d Hz sampling rate, %d bits per sample,\n",
377 r600_hdmi_is_audio_buffer_filled(encoder) ? "playing" : "stopped", 410 r600_hdmi_is_audio_buffer_filled(encoder) ? "playing" : "stopped",
378 channels, rate, bps); 411 audio.channels, audio.rate, audio.bits_per_sample);
379 DRM_DEBUG("0x%02X IEC60958 status bits and 0x%02X category code\n", 412 DRM_DEBUG("0x%02X IEC60958 status bits and 0x%02X category code\n",
380 (int)status_bits, (int)category_code); 413 (int)audio.status_bits, (int)audio.category_code);
381 414
382 iec = 0; 415 iec = 0;
383 if (status_bits & AUDIO_STATUS_PROFESSIONAL) 416 if (audio.status_bits & AUDIO_STATUS_PROFESSIONAL)
384 iec |= 1 << 0; 417 iec |= 1 << 0;
385 if (status_bits & AUDIO_STATUS_NONAUDIO) 418 if (audio.status_bits & AUDIO_STATUS_NONAUDIO)
386 iec |= 1 << 1; 419 iec |= 1 << 1;
387 if (status_bits & AUDIO_STATUS_COPYRIGHT) 420 if (audio.status_bits & AUDIO_STATUS_COPYRIGHT)
388 iec |= 1 << 2; 421 iec |= 1 << 2;
389 if (status_bits & AUDIO_STATUS_EMPHASIS) 422 if (audio.status_bits & AUDIO_STATUS_EMPHASIS)
390 iec |= 1 << 3; 423 iec |= 1 << 3;
391 424
392 iec |= category_code << 8; 425 iec |= HDMI0_60958_CS_CATEGORY_CODE(audio.category_code);
393 426
394 switch (rate) { 427 switch (audio.rate) {
395 case 32000: iec |= 0x3 << 24; break; 428 case 32000:
396 case 44100: iec |= 0x0 << 24; break; 429 iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x3);
397 case 88200: iec |= 0x8 << 24; break; 430 break;
398 case 176400: iec |= 0xc << 24; break; 431 case 44100:
399 case 48000: iec |= 0x2 << 24; break; 432 iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x0);
400 case 96000: iec |= 0xa << 24; break; 433 break;
401 case 192000: iec |= 0xe << 24; break; 434 case 48000:
435 iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x2);
436 break;
437 case 88200:
438 iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x8);
439 break;
440 case 96000:
441 iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0xa);
442 break;
443 case 176400:
444 iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0xc);
445 break;
446 case 192000:
447 iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0xe);
448 break;
402 } 449 }
403 450
404 WREG32(offset+R600_HDMI_IEC60958_1, iec); 451 WREG32(HDMI0_60958_0 + offset, iec);
405 452
406 iec = 0; 453 iec = 0;
407 switch (bps) { 454 switch (audio.bits_per_sample) {
408 case 16: iec |= 0x2; break; 455 case 16:
409 case 20: iec |= 0x3; break; 456 iec |= HDMI0_60958_CS_WORD_LENGTH(0x2);
410 case 24: iec |= 0xb; break; 457 break;
458 case 20:
459 iec |= HDMI0_60958_CS_WORD_LENGTH(0x3);
460 break;
461 case 24:
462 iec |= HDMI0_60958_CS_WORD_LENGTH(0xb);
463 break;
411 } 464 }
412 if (status_bits & AUDIO_STATUS_V) 465 if (audio.status_bits & AUDIO_STATUS_V)
413 iec |= 0x5 << 16; 466 iec |= 0x5 << 16;
467 WREG32_P(HDMI0_60958_1 + offset, iec, ~0x5000f);
414 468
415 WREG32_P(offset+R600_HDMI_IEC60958_2, iec, ~0x5000f); 469 r600_hdmi_audioinfoframe(encoder, audio.channels - 1, 0, 0, 0, 0, 0, 0,
416 470 0);
417 /* 0x021 or 0x031 sets the audio frame length */
418 WREG32(offset+R600_HDMI_AUDIOCNTL, 0x31);
419 r600_hdmi_audioinfoframe(encoder, channels-1, 0, 0, 0, 0, 0, 0, 0);
420 471
421 r600_hdmi_audio_workaround(encoder); 472 r600_hdmi_audio_workaround(encoder);
422} 473}
423 474
424static int r600_hdmi_find_free_block(struct drm_device *dev)
425{
426 struct radeon_device *rdev = dev->dev_private;
427 struct drm_encoder *encoder;
428 struct radeon_encoder *radeon_encoder;
429 bool free_blocks[3] = { true, true, true };
430
431 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
432 radeon_encoder = to_radeon_encoder(encoder);
433 switch (radeon_encoder->hdmi_offset) {
434 case R600_HDMI_BLOCK1:
435 free_blocks[0] = false;
436 break;
437 case R600_HDMI_BLOCK2:
438 free_blocks[1] = false;
439 break;
440 case R600_HDMI_BLOCK3:
441 free_blocks[2] = false;
442 break;
443 }
444 }
445
446 if (rdev->family == CHIP_RS600 || rdev->family == CHIP_RS690 ||
447 rdev->family == CHIP_RS740) {
448 return free_blocks[0] ? R600_HDMI_BLOCK1 : 0;
449 } else if (rdev->family >= CHIP_R600) {
450 if (free_blocks[0])
451 return R600_HDMI_BLOCK1;
452 else if (free_blocks[1])
453 return R600_HDMI_BLOCK2;
454 }
455 return 0;
456}
457
458static void r600_hdmi_assign_block(struct drm_encoder *encoder)
459{
460 struct drm_device *dev = encoder->dev;
461 struct radeon_device *rdev = dev->dev_private;
462 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
463 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
464
465 u16 eg_offsets[] = {
466 EVERGREEN_CRTC0_REGISTER_OFFSET,
467 EVERGREEN_CRTC1_REGISTER_OFFSET,
468 EVERGREEN_CRTC2_REGISTER_OFFSET,
469 EVERGREEN_CRTC3_REGISTER_OFFSET,
470 EVERGREEN_CRTC4_REGISTER_OFFSET,
471 EVERGREEN_CRTC5_REGISTER_OFFSET,
472 };
473
474 if (!dig) {
475 dev_err(rdev->dev, "Enabling HDMI on non-dig encoder\n");
476 return;
477 }
478
479 if (ASIC_IS_DCE5(rdev)) {
480 /* TODO */
481 } else if (ASIC_IS_DCE4(rdev)) {
482 if (dig->dig_encoder >= ARRAY_SIZE(eg_offsets)) {
483 dev_err(rdev->dev, "Enabling HDMI on unknown dig\n");
484 return;
485 }
486 radeon_encoder->hdmi_offset = EVERGREEN_HDMI_BASE +
487 eg_offsets[dig->dig_encoder];
488 radeon_encoder->hdmi_config_offset = radeon_encoder->hdmi_offset
489 + EVERGREEN_HDMI_CONFIG_OFFSET;
490 } else if (ASIC_IS_DCE3(rdev)) {
491 radeon_encoder->hdmi_offset = dig->dig_encoder ?
492 R600_HDMI_BLOCK3 : R600_HDMI_BLOCK1;
493 if (ASIC_IS_DCE32(rdev))
494 radeon_encoder->hdmi_config_offset = dig->dig_encoder ?
495 R600_HDMI_CONFIG2 : R600_HDMI_CONFIG1;
496 } else if (rdev->family >= CHIP_R600 || rdev->family == CHIP_RS600 ||
497 rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
498 radeon_encoder->hdmi_offset = r600_hdmi_find_free_block(dev);
499 }
500}
501
502/* 475/*
503 * enable the HDMI engine 476 * enable the HDMI engine
504 */ 477 */
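Worked example for the channel-status assembly in the r600_hdmi_update_audio_settings() hunk above (not part of the patch): 48 kHz, 16-bit consumer PCM with category code 0 yields

	HDMI0_60958_0: iec = HDMI0_60958_CS_SAMPLING_FREQUENCY(0x2) = 0x02000000
	HDMI0_60958_1: iec = HDMI0_60958_CS_WORD_LENGTH(0x2)        = 0x00000002

before the validity bits (0x5 << 16) are OR'd into the second word when AUDIO_STATUS_V is set.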
@@ -507,64 +480,57 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
507 struct drm_device *dev = encoder->dev; 480 struct drm_device *dev = encoder->dev;
508 struct radeon_device *rdev = dev->dev_private; 481 struct radeon_device *rdev = dev->dev_private;
509 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 482 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
483 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
510 uint32_t offset; 484 uint32_t offset;
485 u32 hdmi;
511 486
512 if (ASIC_IS_DCE5(rdev)) 487 if (ASIC_IS_DCE5(rdev))
513 return; 488 return;
514 489
515 if (!radeon_encoder->hdmi_offset) { 490 /* Silent, r600_hdmi_enable will raise WARN for us */
516 r600_hdmi_assign_block(encoder); 491 if (dig->afmt->enabled)
517 if (!radeon_encoder->hdmi_offset) { 492 return;
518 dev_warn(rdev->dev, "Could not find HDMI block for " 493 offset = dig->afmt->offset;
519 "0x%x encoder\n", radeon_encoder->encoder_id);
520 return;
521 }
522 }
523 494
524 offset = radeon_encoder->hdmi_offset; 495 /* Older chipsets require setting HDMI and routing manually */
525 if (ASIC_IS_DCE5(rdev)) { 496 if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
526 /* TODO */ 497 hdmi = HDMI0_ERROR_ACK | HDMI0_ENABLE;
527 } else if (ASIC_IS_DCE4(rdev)) {
528 WREG32_P(radeon_encoder->hdmi_config_offset + 0xc, 0x1, ~0x1);
529 } else if (ASIC_IS_DCE32(rdev)) {
530 WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0x1, ~0x1);
531 } else if (ASIC_IS_DCE3(rdev)) {
532 /* TODO */
533 } else if (rdev->family >= CHIP_R600) {
534 switch (radeon_encoder->encoder_id) { 498 switch (radeon_encoder->encoder_id) {
535 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: 499 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
536 WREG32_P(AVIVO_TMDSA_CNTL, AVIVO_TMDSA_CNTL_HDMI_EN, 500 WREG32_P(AVIVO_TMDSA_CNTL, AVIVO_TMDSA_CNTL_HDMI_EN,
537 ~AVIVO_TMDSA_CNTL_HDMI_EN); 501 ~AVIVO_TMDSA_CNTL_HDMI_EN);
538 WREG32(offset + R600_HDMI_ENABLE, 0x101); 502 hdmi |= HDMI0_STREAM(HDMI0_STREAM_TMDSA);
539 break; 503 break;
540 case ENCODER_OBJECT_ID_INTERNAL_LVTM1: 504 case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
541 WREG32_P(AVIVO_LVTMA_CNTL, AVIVO_LVTMA_CNTL_HDMI_EN, 505 WREG32_P(AVIVO_LVTMA_CNTL, AVIVO_LVTMA_CNTL_HDMI_EN,
542 ~AVIVO_LVTMA_CNTL_HDMI_EN); 506 ~AVIVO_LVTMA_CNTL_HDMI_EN);
543 WREG32(offset + R600_HDMI_ENABLE, 0x105); 507 hdmi |= HDMI0_STREAM(HDMI0_STREAM_LVTMA);
508 break;
509 case ENCODER_OBJECT_ID_INTERNAL_DDI:
510 WREG32_P(DDIA_CNTL, DDIA_HDMI_EN, ~DDIA_HDMI_EN);
511 hdmi |= HDMI0_STREAM(HDMI0_STREAM_DDIA);
512 break;
513 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
514 hdmi |= HDMI0_STREAM(HDMI0_STREAM_DVOA);
544 break; 515 break;
545 default: 516 default:
546 dev_err(rdev->dev, "Unknown HDMI output type\n"); 517 dev_err(rdev->dev, "Invalid encoder for HDMI: 0x%X\n",
518 radeon_encoder->encoder_id);
547 break; 519 break;
548 } 520 }
521 WREG32(HDMI0_CONTROL + offset, hdmi);
549 } 522 }
550 523
551 if (rdev->irq.installed 524 if (rdev->irq.installed) {
552 && rdev->family != CHIP_RS600
553 && rdev->family != CHIP_RS690
554 && rdev->family != CHIP_RS740
555 && !ASIC_IS_DCE4(rdev)) {
556 /* if irq is available use it */ 525 /* if irq is available use it */
557 rdev->irq.hdmi[offset == R600_HDMI_BLOCK1 ? 0 : 1] = true; 526 rdev->irq.afmt[dig->afmt->id] = true;
558 radeon_irq_set(rdev); 527 radeon_irq_set(rdev);
559
560 r600_audio_disable_polling(encoder);
561 } else {
562 /* if not fallback to polling */
563 r600_audio_enable_polling(encoder);
564 } 528 }
565 529
530 dig->afmt->enabled = true;
531
566 DRM_DEBUG("Enabling HDMI interface @ 0x%04X for encoder 0x%x\n", 532 DRM_DEBUG("Enabling HDMI interface @ 0x%04X for encoder 0x%x\n",
567 radeon_encoder->hdmi_offset, radeon_encoder->encoder_id); 533 offset, radeon_encoder->encoder_id);
568} 534}
569 535
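For a TMDSA-routed encoder the single register write in the hunk above expands to (illustrative):

	WREG32(HDMI0_CONTROL + offset,
	       HDMI0_ERROR_ACK | HDMI0_ENABLE | HDMI0_STREAM(HDMI0_STREAM_TMDSA));

i.e. bit 0 (enable), bits 3:2 = 0 (TMDSA source) and bit 8 (error ack) at 0x7400 + offset. The old magic values decode to exactly these bits: 0x101 is ENABLE | ERROR_ACK with TMDSA routing, and 0x105 additionally sets HDMI0_STREAM(1) = 0x4 for LVTMA.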
570/* 536/*
@@ -575,51 +541,51 @@ void r600_hdmi_disable(struct drm_encoder *encoder)
575 struct drm_device *dev = encoder->dev; 541 struct drm_device *dev = encoder->dev;
576 struct radeon_device *rdev = dev->dev_private; 542 struct radeon_device *rdev = dev->dev_private;
577 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 543 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
544 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
578 uint32_t offset; 545 uint32_t offset;
579 546
580 if (ASIC_IS_DCE5(rdev)) 547 if (ASIC_IS_DCE5(rdev))
581 return; 548 return;
582 549
583 offset = radeon_encoder->hdmi_offset; 550 /* Called for ATOM_ENCODER_MODE_HDMI only */
584 if (!offset) { 551 if (!dig || !dig->afmt) {
585 dev_err(rdev->dev, "Disabling not enabled HDMI\n"); 552 WARN_ON(1);
586 return; 553 return;
587 } 554 }
555 if (!dig->afmt->enabled)
556 return;
557 offset = dig->afmt->offset;
588 558
589 DRM_DEBUG("Disabling HDMI interface @ 0x%04X for encoder 0x%x\n", 559 DRM_DEBUG("Disabling HDMI interface @ 0x%04X for encoder 0x%x\n",
590 offset, radeon_encoder->encoder_id); 560 offset, radeon_encoder->encoder_id);
591 561
592 /* disable irq */ 562 /* disable irq */
593 rdev->irq.hdmi[offset == R600_HDMI_BLOCK1 ? 0 : 1] = false; 563 rdev->irq.afmt[dig->afmt->id] = false;
594 radeon_irq_set(rdev); 564 radeon_irq_set(rdev);
595 565
596 /* disable polling */ 566 /* Older chipsets not handled by AtomBIOS */
597 r600_audio_disable_polling(encoder); 567 if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
598
599 if (ASIC_IS_DCE5(rdev)) {
600 /* TODO */
601 } else if (ASIC_IS_DCE4(rdev)) {
602 WREG32_P(radeon_encoder->hdmi_config_offset + 0xc, 0, ~0x1);
603 } else if (ASIC_IS_DCE32(rdev)) {
604 WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0, ~0x1);
605 } else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
606 switch (radeon_encoder->encoder_id) { 568 switch (radeon_encoder->encoder_id) {
607 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: 569 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
608 WREG32_P(AVIVO_TMDSA_CNTL, 0, 570 WREG32_P(AVIVO_TMDSA_CNTL, 0,
609 ~AVIVO_TMDSA_CNTL_HDMI_EN); 571 ~AVIVO_TMDSA_CNTL_HDMI_EN);
610 WREG32(offset + R600_HDMI_ENABLE, 0);
611 break; 572 break;
612 case ENCODER_OBJECT_ID_INTERNAL_LVTM1: 573 case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
613 WREG32_P(AVIVO_LVTMA_CNTL, 0, 574 WREG32_P(AVIVO_LVTMA_CNTL, 0,
614 ~AVIVO_LVTMA_CNTL_HDMI_EN); 575 ~AVIVO_LVTMA_CNTL_HDMI_EN);
615 WREG32(offset + R600_HDMI_ENABLE, 0); 576 break;
577 case ENCODER_OBJECT_ID_INTERNAL_DDI:
578 WREG32_P(DDIA_CNTL, 0, ~DDIA_HDMI_EN);
579 break;
580 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
616 break; 581 break;
617 default: 582 default:
618 dev_err(rdev->dev, "Unknown HDMI output type\n"); 583 dev_err(rdev->dev, "Invalid encoder for HDMI: 0x%X\n",
584 radeon_encoder->encoder_id);
619 break; 585 break;
620 } 586 }
587 WREG32(HDMI0_CONTROL + offset, HDMI0_ERROR_ACK);
621 } 588 }
622 589
623 radeon_encoder->hdmi_offset = 0; 590 dig->afmt->enabled = false;
624 radeon_encoder->hdmi_config_offset = 0;
625} 591}
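Across r600_hdmi.c the per-encoder hdmi_offset/hdmi_config_offset bookkeeping (including the find_free_block/assign_block helpers removed above) is replaced by the shared AFMT block hanging off the DIG encoder. Every entry point now uses the same guard pattern (sketch, identifiers as in the patch):

	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;

	if (!dig->afmt || !dig->afmt->enabled)
		return;				/* HDMI/audio engine not active here */
	offset = dig->afmt->offset;		/* register base of this AFMT instance */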
diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h
index f869897c7456..2b960cb5c18a 100644
--- a/drivers/gpu/drm/radeon/r600_reg.h
+++ b/drivers/gpu/drm/radeon/r600_reg.h
@@ -156,45 +156,10 @@
156#define R600_AUDIO_PIN_WIDGET_CNTL 0x73d4 156#define R600_AUDIO_PIN_WIDGET_CNTL 0x73d4
157#define R600_AUDIO_STATUS_BITS 0x73d8 157#define R600_AUDIO_STATUS_BITS 0x73d8
158 158
159/* HDMI base register addresses */ 159#define DCE2_HDMI_OFFSET0 (0x7400 - 0x7400)
160#define R600_HDMI_BLOCK1 0x7400 160#define DCE2_HDMI_OFFSET1 (0x7700 - 0x7400)
161#define R600_HDMI_BLOCK2 0x7700 161/* DCE3.2 second instance starts at 0x7800 */
162#define R600_HDMI_BLOCK3 0x7800 162#define DCE3_HDMI_OFFSET0 (0x7400 - 0x7400)
163 163#define DCE3_HDMI_OFFSET1 (0x7800 - 0x7400)
164/* HDMI registers */
165#define R600_HDMI_ENABLE 0x00
166#define R600_HDMI_STATUS 0x04
167# define R600_HDMI_INT_PENDING (1 << 29)
168#define R600_HDMI_CNTL 0x08
169# define R600_HDMI_INT_EN (1 << 28)
170# define R600_HDMI_INT_ACK (1 << 29)
171#define R600_HDMI_UNKNOWN_0 0x0C
172#define R600_HDMI_AUDIOCNTL 0x10
173#define R600_HDMI_VIDEOCNTL 0x14
174#define R600_HDMI_VERSION 0x18
175#define R600_HDMI_UNKNOWN_1 0x28
176#define R600_HDMI_VIDEOINFOFRAME_0 0x54
177#define R600_HDMI_VIDEOINFOFRAME_1 0x58
178#define R600_HDMI_VIDEOINFOFRAME_2 0x5c
179#define R600_HDMI_VIDEOINFOFRAME_3 0x60
180#define R600_HDMI_32kHz_CTS 0xac
181#define R600_HDMI_32kHz_N 0xb0
182#define R600_HDMI_44_1kHz_CTS 0xb4
183#define R600_HDMI_44_1kHz_N 0xb8
184#define R600_HDMI_48kHz_CTS 0xbc
185#define R600_HDMI_48kHz_N 0xc0
186#define R600_HDMI_AUDIOINFOFRAME_0 0xcc
187#define R600_HDMI_AUDIOINFOFRAME_1 0xd0
188#define R600_HDMI_IEC60958_1 0xd4
189#define R600_HDMI_IEC60958_2 0xd8
190#define R600_HDMI_UNKNOWN_2 0xdc
191#define R600_HDMI_AUDIO_DEBUG_0 0xe0
192#define R600_HDMI_AUDIO_DEBUG_1 0xe4
193#define R600_HDMI_AUDIO_DEBUG_2 0xe8
194#define R600_HDMI_AUDIO_DEBUG_3 0xec
195
196/* HDMI additional config base register addresses */
197#define R600_HDMI_CONFIG1 0x7600
198#define R600_HDMI_CONFIG2 0x7a00
199 164
200#endif 165#endif
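A minimal sketch of how these relative offsets are meant to be consumed (hypothetical helper, not part of the patch; the real lookup lives in the AFMT setup code):

	static u32 r600_hdmi_instance_offset(bool dce3, int instance)
	{
		if (instance == 0)
			return DCE2_HDMI_OFFSET0;	/* 0 for both register eras */
		return dce3 ? DCE3_HDMI_OFFSET1		/* 0x7800 - 0x7400 */
			    : DCE2_HDMI_OFFSET1;	/* 0x7700 - 0x7400 */
	}

The result is added to the absolute HDMI0_* register names defined in r600d.h below.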
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 59f9c993cc31..15bd3b216243 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -824,6 +824,239 @@
824# define TARGET_LINK_SPEED_MASK (0xf << 0) 824# define TARGET_LINK_SPEED_MASK (0xf << 0)
825# define SELECTABLE_DEEMPHASIS (1 << 6) 825# define SELECTABLE_DEEMPHASIS (1 << 6)
826 826
827/* Audio clocks */
828#define DCCG_AUDIO_DTO0_PHASE 0x0514
829#define DCCG_AUDIO_DTO0_MODULE 0x0518
830#define DCCG_AUDIO_DTO0_LOAD 0x051c
831# define DTO_LOAD (1 << 31)
832#define DCCG_AUDIO_DTO0_CNTL 0x0520
833
834#define DCCG_AUDIO_DTO1_PHASE 0x0524
835#define DCCG_AUDIO_DTO1_MODULE 0x0528
836#define DCCG_AUDIO_DTO1_LOAD 0x052c
837#define DCCG_AUDIO_DTO1_CNTL 0x0530
838
839#define DCCG_AUDIO_DTO_SELECT 0x0534
840
841/* digital blocks */
842#define TMDSA_CNTL 0x7880
843# define TMDSA_HDMI_EN (1 << 2)
844#define LVTMA_CNTL 0x7a80
845# define LVTMA_HDMI_EN (1 << 2)
846#define DDIA_CNTL 0x7200
847# define DDIA_HDMI_EN (1 << 2)
848#define DIG0_CNTL 0x75a0
849# define DIG_MODE(x) (((x) & 7) << 8)
850# define DIG_MODE_DP 0
851# define DIG_MODE_LVDS 1
852# define DIG_MODE_TMDS_DVI 2
853# define DIG_MODE_TMDS_HDMI 3
854# define DIG_MODE_SDVO 4
855#define DIG1_CNTL 0x79a0
856
 857/* rs6xx/rs740 and r6xx share the same HDMI blocks; however, rs6xx has only one
858 * instance of the blocks while r6xx has 2. DCE 3.0 cards are slightly
859 * different due to the new DIG blocks, but also have 2 instances.
860 * DCE 3.0 HDMI blocks are part of each DIG encoder.
861 */
862
863/* rs6xx/rs740/r6xx/dce3 */
864#define HDMI0_CONTROL 0x7400
865/* rs6xx/rs740/r6xx */
866# define HDMI0_ENABLE (1 << 0)
867# define HDMI0_STREAM(x) (((x) & 3) << 2)
868# define HDMI0_STREAM_TMDSA 0
869# define HDMI0_STREAM_LVTMA 1
870# define HDMI0_STREAM_DVOA 2
871# define HDMI0_STREAM_DDIA 3
872/* rs6xx/r6xx/dce3 */
873# define HDMI0_ERROR_ACK (1 << 8)
874# define HDMI0_ERROR_MASK (1 << 9)
875#define HDMI0_STATUS 0x7404
876# define HDMI0_ACTIVE_AVMUTE (1 << 0)
877# define HDMI0_AUDIO_ENABLE (1 << 4)
878# define HDMI0_AZ_FORMAT_WTRIG (1 << 28)
879# define HDMI0_AZ_FORMAT_WTRIG_INT (1 << 29)
880#define HDMI0_AUDIO_PACKET_CONTROL 0x7408
881# define HDMI0_AUDIO_SAMPLE_SEND (1 << 0)
882# define HDMI0_AUDIO_DELAY_EN(x) (((x) & 3) << 4)
883# define HDMI0_AUDIO_SEND_MAX_PACKETS (1 << 8)
884# define HDMI0_AUDIO_TEST_EN (1 << 12)
885# define HDMI0_AUDIO_PACKETS_PER_LINE(x) (((x) & 0x1f) << 16)
886# define HDMI0_AUDIO_CHANNEL_SWAP (1 << 24)
887# define HDMI0_60958_CS_UPDATE (1 << 26)
888# define HDMI0_AZ_FORMAT_WTRIG_MASK (1 << 28)
889# define HDMI0_AZ_FORMAT_WTRIG_ACK (1 << 29)
890#define HDMI0_AUDIO_CRC_CONTROL 0x740c
891# define HDMI0_AUDIO_CRC_EN (1 << 0)
892#define HDMI0_VBI_PACKET_CONTROL 0x7410
893# define HDMI0_NULL_SEND (1 << 0)
894# define HDMI0_GC_SEND (1 << 4)
895# define HDMI0_GC_CONT (1 << 5) /* 0 - once; 1 - every frame */
896#define HDMI0_INFOFRAME_CONTROL0 0x7414
897# define HDMI0_AVI_INFO_SEND (1 << 0)
898# define HDMI0_AVI_INFO_CONT (1 << 1)
899# define HDMI0_AUDIO_INFO_SEND (1 << 4)
900# define HDMI0_AUDIO_INFO_CONT (1 << 5)
 901# define HDMI0_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hdmi regs */
902# define HDMI0_AUDIO_INFO_UPDATE (1 << 7)
903# define HDMI0_MPEG_INFO_SEND (1 << 8)
904# define HDMI0_MPEG_INFO_CONT (1 << 9)
905# define HDMI0_MPEG_INFO_UPDATE (1 << 10)
906#define HDMI0_INFOFRAME_CONTROL1 0x7418
907# define HDMI0_AVI_INFO_LINE(x) (((x) & 0x3f) << 0)
908# define HDMI0_AUDIO_INFO_LINE(x) (((x) & 0x3f) << 8)
909# define HDMI0_MPEG_INFO_LINE(x) (((x) & 0x3f) << 16)
910#define HDMI0_GENERIC_PACKET_CONTROL 0x741c
911# define HDMI0_GENERIC0_SEND (1 << 0)
912# define HDMI0_GENERIC0_CONT (1 << 1)
913# define HDMI0_GENERIC0_UPDATE (1 << 2)
914# define HDMI0_GENERIC1_SEND (1 << 4)
915# define HDMI0_GENERIC1_CONT (1 << 5)
916# define HDMI0_GENERIC0_LINE(x) (((x) & 0x3f) << 16)
917# define HDMI0_GENERIC1_LINE(x) (((x) & 0x3f) << 24)
918#define HDMI0_GC 0x7428
919# define HDMI0_GC_AVMUTE (1 << 0)
920#define HDMI0_AVI_INFO0 0x7454
921# define HDMI0_AVI_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
922# define HDMI0_AVI_INFO_S(x) (((x) & 3) << 8)
923# define HDMI0_AVI_INFO_B(x) (((x) & 3) << 10)
924# define HDMI0_AVI_INFO_A(x) (((x) & 1) << 12)
925# define HDMI0_AVI_INFO_Y(x) (((x) & 3) << 13)
926# define HDMI0_AVI_INFO_Y_RGB 0
927# define HDMI0_AVI_INFO_Y_YCBCR422 1
928# define HDMI0_AVI_INFO_Y_YCBCR444 2
929# define HDMI0_AVI_INFO_Y_A_B_S(x) (((x) & 0xff) << 8)
930# define HDMI0_AVI_INFO_R(x) (((x) & 0xf) << 16)
931# define HDMI0_AVI_INFO_M(x) (((x) & 0x3) << 20)
932# define HDMI0_AVI_INFO_C(x) (((x) & 0x3) << 22)
933# define HDMI0_AVI_INFO_C_M_R(x) (((x) & 0xff) << 16)
934# define HDMI0_AVI_INFO_SC(x) (((x) & 0x3) << 24)
935# define HDMI0_AVI_INFO_ITC_EC_Q_SC(x) (((x) & 0xff) << 24)
936#define HDMI0_AVI_INFO1 0x7458
937# define HDMI0_AVI_INFO_VIC(x) (((x) & 0x7f) << 0) /* don't use avi infoframe v1 */
938# define HDMI0_AVI_INFO_PR(x) (((x) & 0xf) << 8) /* don't use avi infoframe v1 */
939# define HDMI0_AVI_INFO_TOP(x) (((x) & 0xffff) << 16)
940#define HDMI0_AVI_INFO2 0x745c
941# define HDMI0_AVI_INFO_BOTTOM(x) (((x) & 0xffff) << 0)
942# define HDMI0_AVI_INFO_LEFT(x) (((x) & 0xffff) << 16)
943#define HDMI0_AVI_INFO3 0x7460
944# define HDMI0_AVI_INFO_RIGHT(x) (((x) & 0xffff) << 0)
945# define HDMI0_AVI_INFO_VERSION(x) (((x) & 3) << 24)
946#define HDMI0_MPEG_INFO0 0x7464
947# define HDMI0_MPEG_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
948# define HDMI0_MPEG_INFO_MB0(x) (((x) & 0xff) << 8)
949# define HDMI0_MPEG_INFO_MB1(x) (((x) & 0xff) << 16)
950# define HDMI0_MPEG_INFO_MB2(x) (((x) & 0xff) << 24)
951#define HDMI0_MPEG_INFO1 0x7468
952# define HDMI0_MPEG_INFO_MB3(x) (((x) & 0xff) << 0)
953# define HDMI0_MPEG_INFO_MF(x) (((x) & 3) << 8)
954# define HDMI0_MPEG_INFO_FR(x) (((x) & 1) << 12)
955#define HDMI0_GENERIC0_HDR 0x746c
956#define HDMI0_GENERIC0_0 0x7470
957#define HDMI0_GENERIC0_1 0x7474
958#define HDMI0_GENERIC0_2 0x7478
959#define HDMI0_GENERIC0_3 0x747c
960#define HDMI0_GENERIC0_4 0x7480
961#define HDMI0_GENERIC0_5 0x7484
962#define HDMI0_GENERIC0_6 0x7488
963#define HDMI0_GENERIC1_HDR 0x748c
964#define HDMI0_GENERIC1_0 0x7490
965#define HDMI0_GENERIC1_1 0x7494
966#define HDMI0_GENERIC1_2 0x7498
967#define HDMI0_GENERIC1_3 0x749c
968#define HDMI0_GENERIC1_4 0x74a0
969#define HDMI0_GENERIC1_5 0x74a4
970#define HDMI0_GENERIC1_6 0x74a8
971#define HDMI0_ACR_32_0 0x74ac
972# define HDMI0_ACR_CTS_32(x) (((x) & 0xfffff) << 12)
973#define HDMI0_ACR_32_1 0x74b0
974# define HDMI0_ACR_N_32(x) (((x) & 0xfffff) << 0)
975#define HDMI0_ACR_44_0 0x74b4
976# define HDMI0_ACR_CTS_44(x) (((x) & 0xfffff) << 12)
977#define HDMI0_ACR_44_1 0x74b8
978# define HDMI0_ACR_N_44(x) (((x) & 0xfffff) << 0)
979#define HDMI0_ACR_48_0 0x74bc
980# define HDMI0_ACR_CTS_48(x) (((x) & 0xfffff) << 12)
981#define HDMI0_ACR_48_1 0x74c0
982# define HDMI0_ACR_N_48(x) (((x) & 0xfffff) << 0)
983#define HDMI0_ACR_STATUS_0 0x74c4
984#define HDMI0_ACR_STATUS_1 0x74c8
985#define HDMI0_AUDIO_INFO0 0x74cc
986# define HDMI0_AUDIO_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
987# define HDMI0_AUDIO_INFO_CC(x) (((x) & 7) << 8)
988#define HDMI0_AUDIO_INFO1 0x74d0
989# define HDMI0_AUDIO_INFO_CA(x) (((x) & 0xff) << 0)
990# define HDMI0_AUDIO_INFO_LSV(x) (((x) & 0xf) << 11)
991# define HDMI0_AUDIO_INFO_DM_INH(x) (((x) & 1) << 15)
992# define HDMI0_AUDIO_INFO_DM_INH_LSV(x) (((x) & 0xff) << 8)
993#define HDMI0_60958_0 0x74d4
994# define HDMI0_60958_CS_A(x) (((x) & 1) << 0)
995# define HDMI0_60958_CS_B(x) (((x) & 1) << 1)
996# define HDMI0_60958_CS_C(x) (((x) & 1) << 2)
997# define HDMI0_60958_CS_D(x) (((x) & 3) << 3)
998# define HDMI0_60958_CS_MODE(x) (((x) & 3) << 6)
999# define HDMI0_60958_CS_CATEGORY_CODE(x) (((x) & 0xff) << 8)
1000# define HDMI0_60958_CS_SOURCE_NUMBER(x) (((x) & 0xf) << 16)
1001# define HDMI0_60958_CS_CHANNEL_NUMBER_L(x) (((x) & 0xf) << 20)
1002# define HDMI0_60958_CS_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 24)
1003# define HDMI0_60958_CS_CLOCK_ACCURACY(x) (((x) & 3) << 28)
1004#define HDMI0_60958_1 0x74d8
1005# define HDMI0_60958_CS_WORD_LENGTH(x) (((x) & 0xf) << 0)
1006# define HDMI0_60958_CS_ORIGINAL_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 4)
1007# define HDMI0_60958_CS_VALID_L(x) (((x) & 1) << 16)
1008# define HDMI0_60958_CS_VALID_R(x) (((x) & 1) << 18)
1009# define HDMI0_60958_CS_CHANNEL_NUMBER_R(x) (((x) & 0xf) << 20)
1010#define HDMI0_ACR_PACKET_CONTROL 0x74dc
1011# define HDMI0_ACR_SEND (1 << 0)
1012# define HDMI0_ACR_CONT (1 << 1)
1013# define HDMI0_ACR_SELECT(x) (((x) & 3) << 4)
1014# define HDMI0_ACR_HW 0
1015# define HDMI0_ACR_32 1
1016# define HDMI0_ACR_44 2
1017# define HDMI0_ACR_48 3
1018# define HDMI0_ACR_SOURCE (1 << 8) /* 0 - hw; 1 - cts value */
1019# define HDMI0_ACR_AUTO_SEND (1 << 12)
1020#define HDMI0_RAMP_CONTROL0 0x74e0
1021# define HDMI0_RAMP_MAX_COUNT(x) (((x) & 0xffffff) << 0)
1022#define HDMI0_RAMP_CONTROL1 0x74e4
1023# define HDMI0_RAMP_MIN_COUNT(x) (((x) & 0xffffff) << 0)
1024#define HDMI0_RAMP_CONTROL2 0x74e8
1025# define HDMI0_RAMP_INC_COUNT(x) (((x) & 0xffffff) << 0)
1026#define HDMI0_RAMP_CONTROL3 0x74ec
1027# define HDMI0_RAMP_DEC_COUNT(x) (((x) & 0xffffff) << 0)
1028/* HDMI0_60958_2 is r7xx only */
1029#define HDMI0_60958_2 0x74f0
1030# define HDMI0_60958_CS_CHANNEL_NUMBER_2(x) (((x) & 0xf) << 0)
1031# define HDMI0_60958_CS_CHANNEL_NUMBER_3(x) (((x) & 0xf) << 4)
1032# define HDMI0_60958_CS_CHANNEL_NUMBER_4(x) (((x) & 0xf) << 8)
1033# define HDMI0_60958_CS_CHANNEL_NUMBER_5(x) (((x) & 0xf) << 12)
1034# define HDMI0_60958_CS_CHANNEL_NUMBER_6(x) (((x) & 0xf) << 16)
1035# define HDMI0_60958_CS_CHANNEL_NUMBER_7(x) (((x) & 0xf) << 20)
1036/* r6xx only; second instance starts at 0x7700 */
1037#define HDMI1_CONTROL 0x7700
1038#define HDMI1_STATUS 0x7704
1039#define HDMI1_AUDIO_PACKET_CONTROL 0x7708
1040/* DCE3; second instance starts at 0x7800 NOT 0x7700 */
1041#define DCE3_HDMI1_CONTROL 0x7800
1042#define DCE3_HDMI1_STATUS 0x7804
1043#define DCE3_HDMI1_AUDIO_PACKET_CONTROL 0x7808
1044/* DCE3.2 (for interrupts) */
1045#define AFMT_STATUS 0x7600
1046# define AFMT_AUDIO_ENABLE (1 << 4)
1047# define AFMT_AZ_FORMAT_WTRIG (1 << 28)
1048# define AFMT_AZ_FORMAT_WTRIG_INT (1 << 29)
1049# define AFMT_AZ_AUDIO_ENABLE_CHG (1 << 30)
1050#define AFMT_AUDIO_PACKET_CONTROL 0x7604
1051# define AFMT_AUDIO_SAMPLE_SEND (1 << 0)
1052# define AFMT_AUDIO_TEST_EN (1 << 12)
1053# define AFMT_AUDIO_CHANNEL_SWAP (1 << 24)
1054# define AFMT_60958_CS_UPDATE (1 << 26)
1055# define AFMT_AZ_AUDIO_ENABLE_CHG_MASK (1 << 27)
1056# define AFMT_AZ_FORMAT_WTRIG_MASK (1 << 28)
1057# define AFMT_AZ_FORMAT_WTRIG_ACK (1 << 29)
1058# define AFMT_AZ_AUDIO_ENABLE_CHG_ACK (1 << 30)
1059
827/* 1060/*
828 * PM4 1061 * PM4
829 */ 1062 */
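The second-instance names above line up with the HDMI0_* registers plus the relative offsets now defined in r600_reg.h (sanity check, not in the patch):

	HDMI0_CONTROL + DCE2_HDMI_OFFSET1 = 0x7400 + 0x300 = 0x7700 = HDMI1_CONTROL
	HDMI0_CONTROL + DCE3_HDMI_OFFSET1 = 0x7400 + 0x400 = 0x7800 = DCE3_HDMI1_CONTROL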
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 138b95216d8d..1dc3a4aba020 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -94,33 +94,38 @@ extern int radeon_disp_priority;
94extern int radeon_hw_i2c; 94extern int radeon_hw_i2c;
95extern int radeon_pcie_gen2; 95extern int radeon_pcie_gen2;
96extern int radeon_msi; 96extern int radeon_msi;
97extern int radeon_lockup_timeout;
97 98
98/* 99/*
99 * Copy from radeon_drv.h so we don't have to include both and have conflicting 100 * Copy from radeon_drv.h so we don't have to include both and have conflicting
 100 * symbols. 101 * symbols.
101 */ 102 */
102#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */ 103#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */
103#define RADEON_FENCE_JIFFIES_TIMEOUT (HZ / 2) 104#define RADEON_FENCE_JIFFIES_TIMEOUT (HZ / 2)
104/* RADEON_IB_POOL_SIZE must be a power of 2 */ 105/* RADEON_IB_POOL_SIZE must be a power of 2 */
105#define RADEON_IB_POOL_SIZE 16 106#define RADEON_IB_POOL_SIZE 16
106#define RADEON_DEBUGFS_MAX_COMPONENTS 32 107#define RADEON_DEBUGFS_MAX_COMPONENTS 32
107#define RADEONFB_CONN_LIMIT 4 108#define RADEONFB_CONN_LIMIT 4
108#define RADEON_BIOS_NUM_SCRATCH 8 109#define RADEON_BIOS_NUM_SCRATCH 8
109 110
110/* max number of rings */ 111/* max number of rings */
111#define RADEON_NUM_RINGS 3 112#define RADEON_NUM_RINGS 3
113
 114/* fence seqs are set to this number when signaled */
115#define RADEON_FENCE_SIGNALED_SEQ 0LL
116#define RADEON_FENCE_NOTEMITED_SEQ (~0LL)
112 117
113/* internal ring indices */ 118/* internal ring indices */
114/* r1xx+ has gfx CP ring */ 119/* r1xx+ has gfx CP ring */
115#define RADEON_RING_TYPE_GFX_INDEX 0 120#define RADEON_RING_TYPE_GFX_INDEX 0
116 121
117/* cayman has 2 compute CP rings */ 122/* cayman has 2 compute CP rings */
118#define CAYMAN_RING_TYPE_CP1_INDEX 1 123#define CAYMAN_RING_TYPE_CP1_INDEX 1
119#define CAYMAN_RING_TYPE_CP2_INDEX 2 124#define CAYMAN_RING_TYPE_CP2_INDEX 2
120 125
 121/* hardcode these limits for now */ 126/* hardcode these limits for now */
122#define RADEON_VA_RESERVED_SIZE (8 << 20) 127#define RADEON_VA_RESERVED_SIZE (8 << 20)
123#define RADEON_IB_VM_MAX_SIZE (64 << 10) 128#define RADEON_IB_VM_MAX_SIZE (64 << 10)
124 129
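With the fence rework above, sequence numbers become plain 64-bit values protected by the ring emission lock: a fence is born at RADEON_FENCE_NOTEMITED_SEQ (~0LL), receives a real seq on emission, and is forced to RADEON_FENCE_SIGNALED_SEQ (0) once signaled. A sketch of the resulting signaled test (illustrative; the real radeon_fence_signaled also polls the hardware via radeon_fence_process):

	static bool fence_seq_done(struct radeon_fence *fence)
	{
		if (fence->seq == RADEON_FENCE_SIGNALED_SEQ)
			return true;
		return fence->seq <=
			atomic64_read(&fence->rdev->fence_drv[fence->ring].last_seq);
	}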
125/* 130/*
126 * Errata workarounds. 131 * Errata workarounds.
@@ -253,28 +258,20 @@ struct radeon_fence_driver {
253 uint32_t scratch_reg; 258 uint32_t scratch_reg;
254 uint64_t gpu_addr; 259 uint64_t gpu_addr;
255 volatile uint32_t *cpu_addr; 260 volatile uint32_t *cpu_addr;
256 atomic_t seq; 261 /* seq is protected by ring emission lock */
257 uint32_t last_seq; 262 uint64_t seq;
258 unsigned long last_jiffies; 263 atomic64_t last_seq;
259 unsigned long last_timeout; 264 unsigned long last_activity;
260 wait_queue_head_t queue;
261 struct list_head created;
262 struct list_head emitted;
263 struct list_head signaled;
264 bool initialized; 265 bool initialized;
265}; 266};
266 267
267struct radeon_fence { 268struct radeon_fence {
268 struct radeon_device *rdev; 269 struct radeon_device *rdev;
269 struct kref kref; 270 struct kref kref;
270 struct list_head list;
271 /* protected by radeon_fence.lock */ 271 /* protected by radeon_fence.lock */
272 uint32_t seq; 272 uint64_t seq;
273 bool emitted;
274 bool signaled;
275 /* RB, DMA, etc. */ 273 /* RB, DMA, etc. */
276 int ring; 274 unsigned ring;
277 struct radeon_semaphore *semaphore;
278}; 275};
279 276
280int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring); 277int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
@@ -285,11 +282,14 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence);
285void radeon_fence_process(struct radeon_device *rdev, int ring); 282void radeon_fence_process(struct radeon_device *rdev, int ring);
286bool radeon_fence_signaled(struct radeon_fence *fence); 283bool radeon_fence_signaled(struct radeon_fence *fence);
287int radeon_fence_wait(struct radeon_fence *fence, bool interruptible); 284int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
288int radeon_fence_wait_next(struct radeon_device *rdev, int ring); 285int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring);
289int radeon_fence_wait_last(struct radeon_device *rdev, int ring); 286int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
287int radeon_fence_wait_any(struct radeon_device *rdev,
288 struct radeon_fence **fences,
289 bool intr);
290struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence); 290struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
291void radeon_fence_unref(struct radeon_fence **fence); 291void radeon_fence_unref(struct radeon_fence **fence);
292int radeon_fence_count_emitted(struct radeon_device *rdev, int ring); 292unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring);
293 293
294/* 294/*
295 * Tiling registers 295 * Tiling registers
@@ -382,8 +382,11 @@ struct radeon_bo_list {
382 * alignment). 382 * alignment).
383 */ 383 */
384struct radeon_sa_manager { 384struct radeon_sa_manager {
385 spinlock_t lock;
385 struct radeon_bo *bo; 386 struct radeon_bo *bo;
386 struct list_head sa_bo; 387 struct list_head *hole;
388 struct list_head flist[RADEON_NUM_RINGS];
389 struct list_head olist;
387 unsigned size; 390 unsigned size;
388 uint64_t gpu_addr; 391 uint64_t gpu_addr;
389 void *cpu_ptr; 392 void *cpu_ptr;
@@ -394,10 +397,12 @@ struct radeon_sa_bo;
394 397
395/* sub-allocation buffer */ 398/* sub-allocation buffer */
396struct radeon_sa_bo { 399struct radeon_sa_bo {
397 struct list_head list; 400 struct list_head olist;
401 struct list_head flist;
398 struct radeon_sa_manager *manager; 402 struct radeon_sa_manager *manager;
399 unsigned offset; 403 unsigned soffset;
400 unsigned size; 404 unsigned eoffset;
405 struct radeon_fence *fence;
401}; 406};
402 407
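Reading the new sub-allocator fields (interpretation, not spelled out in the patch text):

	/* olist    - every radeon_sa_bo in offset order
	 * hole     - list entry behind the current free gap, where the next
	 *            allocation attempt starts
	 * flist[i] - blocks whose protecting fence on ring i has not yet
	 *            signaled, reclaimed per ring */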
403/* 408/*
@@ -428,42 +433,26 @@ int radeon_mode_dumb_destroy(struct drm_file *file_priv,
428/* 433/*
429 * Semaphores. 434 * Semaphores.
430 */ 435 */
431struct radeon_ring;
432
433#define RADEON_SEMAPHORE_BO_SIZE 256
434
435struct radeon_semaphore_driver {
436 rwlock_t lock;
437 struct list_head bo;
438};
439
440struct radeon_semaphore_bo;
441
442/* everything here is constant */ 436/* everything here is constant */
443struct radeon_semaphore { 437struct radeon_semaphore {
444 struct list_head list; 438 struct radeon_sa_bo *sa_bo;
439 signed waiters;
445 uint64_t gpu_addr; 440 uint64_t gpu_addr;
446 uint32_t *cpu_ptr;
447 struct radeon_semaphore_bo *bo;
448}; 441};
449 442
450struct radeon_semaphore_bo {
451 struct list_head list;
452 struct radeon_ib *ib;
453 struct list_head free;
454 struct radeon_semaphore semaphores[RADEON_SEMAPHORE_BO_SIZE/8];
455 unsigned nused;
456};
457
458void radeon_semaphore_driver_fini(struct radeon_device *rdev);
459int radeon_semaphore_create(struct radeon_device *rdev, 443int radeon_semaphore_create(struct radeon_device *rdev,
460 struct radeon_semaphore **semaphore); 444 struct radeon_semaphore **semaphore);
461void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring, 445void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
462 struct radeon_semaphore *semaphore); 446 struct radeon_semaphore *semaphore);
463void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring, 447void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
464 struct radeon_semaphore *semaphore); 448 struct radeon_semaphore *semaphore);
449int radeon_semaphore_sync_rings(struct radeon_device *rdev,
450 struct radeon_semaphore *semaphore,
451 bool sync_to[RADEON_NUM_RINGS],
452 int dst_ring);
465void radeon_semaphore_free(struct radeon_device *rdev, 453void radeon_semaphore_free(struct radeon_device *rdev,
466 struct radeon_semaphore *semaphore); 454 struct radeon_semaphore *semaphore,
455 struct radeon_fence *fence);
467 456
468/* 457/*
469 * GART structures, functions & helpers 458 * GART structures, functions & helpers
@@ -560,6 +549,7 @@ struct radeon_unpin_work {
560 549
561struct r500_irq_stat_regs { 550struct r500_irq_stat_regs {
562 u32 disp_int; 551 u32 disp_int;
552 u32 hdmi0_status;
563}; 553};
564 554
565struct r600_irq_stat_regs { 555struct r600_irq_stat_regs {
@@ -568,6 +558,8 @@ struct r600_irq_stat_regs {
568 u32 disp_int_cont2; 558 u32 disp_int_cont2;
569 u32 d1grph_int; 559 u32 d1grph_int;
570 u32 d2grph_int; 560 u32 d2grph_int;
561 u32 hdmi0_status;
562 u32 hdmi1_status;
571}; 563};
572 564
573struct evergreen_irq_stat_regs { 565struct evergreen_irq_stat_regs {
@@ -583,6 +575,12 @@ struct evergreen_irq_stat_regs {
583 u32 d4grph_int; 575 u32 d4grph_int;
584 u32 d5grph_int; 576 u32 d5grph_int;
585 u32 d6grph_int; 577 u32 d6grph_int;
578 u32 afmt_status1;
579 u32 afmt_status2;
580 u32 afmt_status3;
581 u32 afmt_status4;
582 u32 afmt_status5;
583 u32 afmt_status6;
586}; 584};
587 585
588union radeon_irq_stat_regs { 586union radeon_irq_stat_regs {
@@ -593,7 +591,7 @@ union radeon_irq_stat_regs {
593 591
594#define RADEON_MAX_HPD_PINS 6 592#define RADEON_MAX_HPD_PINS 6
595#define RADEON_MAX_CRTCS 6 593#define RADEON_MAX_CRTCS 6
596#define RADEON_MAX_HDMI_BLOCKS 2 594#define RADEON_MAX_AFMT_BLOCKS 6
597 595
598struct radeon_irq { 596struct radeon_irq {
599 bool installed; 597 bool installed;
@@ -605,7 +603,7 @@ struct radeon_irq {
605 bool gui_idle; 603 bool gui_idle;
606 bool gui_idle_acked; 604 bool gui_idle_acked;
607 wait_queue_head_t idle_queue; 605 wait_queue_head_t idle_queue;
608 bool hdmi[RADEON_MAX_HDMI_BLOCKS]; 606 bool afmt[RADEON_MAX_AFMT_BLOCKS];
609 spinlock_t sw_lock; 607 spinlock_t sw_lock;
610 int sw_refcount[RADEON_NUM_RINGS]; 608 int sw_refcount[RADEON_NUM_RINGS];
611 union radeon_irq_stat_regs stat_regs; 609 union radeon_irq_stat_regs stat_regs;
@@ -625,26 +623,14 @@ void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);
625 */ 623 */
626 624
627struct radeon_ib { 625struct radeon_ib {
628 struct radeon_sa_bo sa_bo; 626 struct radeon_sa_bo *sa_bo;
629 unsigned idx; 627 uint32_t length_dw;
630 uint32_t length_dw; 628 uint64_t gpu_addr;
631 uint64_t gpu_addr; 629 uint32_t *ptr;
632 uint32_t *ptr; 630 struct radeon_fence *fence;
633 struct radeon_fence *fence; 631 unsigned vm_id;
634 unsigned vm_id; 632 bool is_const_ib;
635 bool is_const_ib; 633 struct radeon_semaphore *semaphore;
636};
637
638/*
639 * locking -
640 * mutex protects scheduled_ibs, ready, alloc_bm
641 */
642struct radeon_ib_pool {
643 struct radeon_mutex mutex;
644 struct radeon_sa_manager sa_manager;
645 struct radeon_ib ibs[RADEON_IB_POOL_SIZE];
646 bool ready;
647 unsigned head_id;
648}; 634};
649 635
650struct radeon_ring { 636struct radeon_ring {
@@ -659,10 +645,11 @@ struct radeon_ring {
659 unsigned ring_size; 645 unsigned ring_size;
660 unsigned ring_free_dw; 646 unsigned ring_free_dw;
661 int count_dw; 647 int count_dw;
648 unsigned long last_activity;
649 unsigned last_rptr;
662 uint64_t gpu_addr; 650 uint64_t gpu_addr;
663 uint32_t align_mask; 651 uint32_t align_mask;
664 uint32_t ptr_mask; 652 uint32_t ptr_mask;
665 struct mutex mutex;
666 bool ready; 653 bool ready;
667 u32 ptr_reg_shift; 654 u32 ptr_reg_shift;
668 u32 ptr_reg_mask; 655 u32 ptr_reg_mask;
@@ -679,7 +666,7 @@ struct radeon_vm {
679 unsigned last_pfn; 666 unsigned last_pfn;
680 u64 pt_gpu_addr; 667 u64 pt_gpu_addr;
681 u64 *pt; 668 u64 *pt;
682 struct radeon_sa_bo sa_bo; 669 struct radeon_sa_bo *sa_bo;
683 struct mutex mutex; 670 struct mutex mutex;
684 /* last fence for cs using this vm */ 671 /* last fence for cs using this vm */
685 struct radeon_fence *fence; 672 struct radeon_fence *fence;
@@ -756,7 +743,6 @@ struct r600_blit_cp_primitives {
756}; 743};
757 744
758struct r600_blit { 745struct r600_blit {
759 struct mutex mutex;
760 struct radeon_bo *shader_obj; 746 struct radeon_bo *shader_obj;
761 struct r600_blit_cp_primitives primitives; 747 struct r600_blit_cp_primitives primitives;
762 int max_dim; 748 int max_dim;
@@ -766,8 +752,6 @@ struct r600_blit {
766 u32 vs_offset, ps_offset; 752 u32 vs_offset, ps_offset;
767 u32 state_offset; 753 u32 state_offset;
768 u32 state_len; 754 u32 state_len;
769 u32 vb_used, vb_total;
770 struct radeon_ib *vb_ib;
771}; 755};
772 756
773void r600_blit_suspend(struct radeon_device *rdev); 757void r600_blit_suspend(struct radeon_device *rdev);
@@ -785,14 +769,14 @@ struct si_rlc {
785}; 769};
786 770
787int radeon_ib_get(struct radeon_device *rdev, int ring, 771int radeon_ib_get(struct radeon_device *rdev, int ring,
788 struct radeon_ib **ib, unsigned size); 772 struct radeon_ib *ib, unsigned size);
789void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib); 773void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);
790bool radeon_ib_try_free(struct radeon_device *rdev, struct radeon_ib *ib);
791int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib); 774int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib);
792int radeon_ib_pool_init(struct radeon_device *rdev); 775int radeon_ib_pool_init(struct radeon_device *rdev);
793void radeon_ib_pool_fini(struct radeon_device *rdev); 776void radeon_ib_pool_fini(struct radeon_device *rdev);
794int radeon_ib_pool_start(struct radeon_device *rdev); 777int radeon_ib_pool_start(struct radeon_device *rdev);
795int radeon_ib_pool_suspend(struct radeon_device *rdev); 778int radeon_ib_pool_suspend(struct radeon_device *rdev);
779int radeon_ib_ring_tests(struct radeon_device *rdev);
796/* Ring access between begin & end cannot sleep */ 780/* Ring access between begin & end cannot sleep */
797int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *cp); 781int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *cp);
798void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp); 782void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp);
@@ -800,8 +784,12 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsign
800int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw); 784int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
801void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp); 785void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp);
802void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp); 786void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp);
787void radeon_ring_undo(struct radeon_ring *ring);
803void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp); 788void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp);
804int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp); 789int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
790void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring);
791void radeon_ring_lockup_update(struct radeon_ring *ring);
792bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
805int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size, 793int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size,
806 unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg, 794 unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
807 u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop); 795 u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop);
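With the double pointer gone, a radeon_ib is now embedded in its user and radeon_ib_get()/radeon_ib_free() initialize and tear down that embedded copy in place; radeon_ib_ring_tests() is new. A hedged sketch of the resulting calling pattern:

/* Hedged sketch of the embedded-IB pattern implied by the prototypes
 * above; the ring index, size and packet contents are illustrative.
 */
static int example_submit(struct radeon_device *rdev)
{
	struct radeon_ib ib;	/* caller-owned, no pool object anymore */
	int r;

	r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, 64);
	if (r)
		return r;
	ib.ptr[0] = 0;		/* build packets here */
	ib.length_dw = 1;
	r = radeon_ib_schedule(rdev, &ib);
	radeon_ib_free(rdev, &ib);	/* releases SA space + fence ref */
	return r;
}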
@@ -850,8 +838,8 @@ struct radeon_cs_parser {
 	int chunk_relocs_idx;
 	int chunk_flags_idx;
 	int chunk_const_ib_idx;
-	struct radeon_ib *ib;
-	struct radeon_ib *const_ib;
+	struct radeon_ib ib;
+	struct radeon_ib const_ib;
 	void *track;
 	unsigned family;
 	int parser_error;
@@ -1105,6 +1093,14 @@ int radeon_pm_get_type_index(struct radeon_device *rdev,
1105 enum radeon_pm_state_type ps_type, 1093 enum radeon_pm_state_type ps_type,
1106 int instance); 1094 int instance);
1107 1095
1096struct r600_audio {
1097 int channels;
1098 int rate;
1099 int bits_per_sample;
1100 u8 status_bits;
1101 u8 category_code;
1102};
1103
1108/* 1104/*
1109 * Benchmarking 1105 * Benchmarking
1110 */ 1106 */
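struct r600_audio gathers what used to be loose fields on radeon_device; the later prototype change (struct r600_audio r600_audio_status(rdev)) suggests the hardware state is snapshotted wholesale and diffed against the cached copy. A hedged sketch:

/* Hedged sketch: compare a fresh hardware snapshot against the cached
 * copy in rdev->audio_status to decide whether the HDMI audio
 * infoframes need a rebuild. The fields compared are illustrative.
 */
static bool example_audio_changed(struct radeon_device *rdev)
{
	struct r600_audio now = r600_audio_status(rdev);

	return now.channels != rdev->audio_status.channels ||
	       now.rate != rdev->audio_status.rate ||
	       now.bits_per_sample != rdev->audio_status.bits_per_sample;
}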
@@ -1144,7 +1140,6 @@ struct radeon_asic {
1144 int (*resume)(struct radeon_device *rdev); 1140 int (*resume)(struct radeon_device *rdev);
1145 int (*suspend)(struct radeon_device *rdev); 1141 int (*suspend)(struct radeon_device *rdev);
1146 void (*vga_set_state)(struct radeon_device *rdev, bool state); 1142 void (*vga_set_state)(struct radeon_device *rdev, bool state);
1147 bool (*gpu_is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
1148 int (*asic_reset)(struct radeon_device *rdev); 1143 int (*asic_reset)(struct radeon_device *rdev);
1149 /* ioctl hw specific callback. Some hw might want to perform special 1144 /* ioctl hw specific callback. Some hw might want to perform special
1150 * operation on specific ioctl. For instance on wait idle some hw 1145 * operation on specific ioctl. For instance on wait idle some hw
@@ -1173,6 +1168,7 @@ struct radeon_asic {
1173 void (*ring_start)(struct radeon_device *rdev, struct radeon_ring *cp); 1168 void (*ring_start)(struct radeon_device *rdev, struct radeon_ring *cp);
1174 int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp); 1169 int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
1175 int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp); 1170 int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp);
1171 bool (*is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
1176 } ring[RADEON_NUM_RINGS]; 1172 } ring[RADEON_NUM_RINGS];
1177 /* irqs */ 1173 /* irqs */
1178 struct { 1174 struct {
@@ -1251,16 +1247,10 @@ struct radeon_asic {
1251/* 1247/*
1252 * Asic structures 1248 * Asic structures
1253 */ 1249 */
1254struct r100_gpu_lockup {
1255 unsigned long last_jiffies;
1256 u32 last_cp_rptr;
1257};
1258
1259struct r100_asic { 1250struct r100_asic {
1260 const unsigned *reg_safe_bm; 1251 const unsigned *reg_safe_bm;
1261 unsigned reg_safe_bm_size; 1252 unsigned reg_safe_bm_size;
1262 u32 hdp_cntl; 1253 u32 hdp_cntl;
1263 struct r100_gpu_lockup lockup;
1264}; 1254};
1265 1255
1266struct r300_asic { 1256struct r300_asic {
@@ -1268,7 +1258,6 @@ struct r300_asic {
1268 unsigned reg_safe_bm_size; 1258 unsigned reg_safe_bm_size;
1269 u32 resync_scratch; 1259 u32 resync_scratch;
1270 u32 hdp_cntl; 1260 u32 hdp_cntl;
1271 struct r100_gpu_lockup lockup;
1272}; 1261};
1273 1262
1274struct r600_asic { 1263struct r600_asic {
@@ -1290,7 +1279,6 @@ struct r600_asic {
1290 unsigned tiling_group_size; 1279 unsigned tiling_group_size;
1291 unsigned tile_config; 1280 unsigned tile_config;
1292 unsigned backend_map; 1281 unsigned backend_map;
1293 struct r100_gpu_lockup lockup;
1294}; 1282};
1295 1283
1296struct rv770_asic { 1284struct rv770_asic {
@@ -1316,7 +1304,6 @@ struct rv770_asic {
1316 unsigned tiling_group_size; 1304 unsigned tiling_group_size;
1317 unsigned tile_config; 1305 unsigned tile_config;
1318 unsigned backend_map; 1306 unsigned backend_map;
1319 struct r100_gpu_lockup lockup;
1320}; 1307};
1321 1308
1322struct evergreen_asic { 1309struct evergreen_asic {
@@ -1343,7 +1330,6 @@ struct evergreen_asic {
1343 unsigned tiling_group_size; 1330 unsigned tiling_group_size;
1344 unsigned tile_config; 1331 unsigned tile_config;
1345 unsigned backend_map; 1332 unsigned backend_map;
1346 struct r100_gpu_lockup lockup;
1347}; 1333};
1348 1334
1349struct cayman_asic { 1335struct cayman_asic {
@@ -1382,7 +1368,6 @@ struct cayman_asic {
1382 unsigned multi_gpu_tile_size; 1368 unsigned multi_gpu_tile_size;
1383 1369
1384 unsigned tile_config; 1370 unsigned tile_config;
1385 struct r100_gpu_lockup lockup;
1386}; 1371};
1387 1372
1388struct si_asic { 1373struct si_asic {
@@ -1413,7 +1398,6 @@ struct si_asic {
1413 unsigned multi_gpu_tile_size; 1398 unsigned multi_gpu_tile_size;
1414 1399
1415 unsigned tile_config; 1400 unsigned tile_config;
1416 struct r100_gpu_lockup lockup;
1417}; 1401};
1418 1402
1419union radeon_asic_config { 1403union radeon_asic_config {
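Every per-family asic struct below sheds its embedded r100_gpu_lockup; with the radeon_ring_force_activity()/radeon_ring_lockup_update()/radeon_ring_test_lockup() prototypes above, stall tracking evidently moves into struct radeon_ring itself. A hedged sketch of a per-ring check:

/* Hedged sketch of the per-ring detection flow suggested by the new
 * prototypes: nudge the ring, then ask whether its read pointer has
 * stalled. Not the driver's actual loop.
 */
static bool example_check_ring(struct radeon_device *rdev,
			       struct radeon_ring *ring)
{
	radeon_ring_force_activity(rdev, ring);	/* emit a harmless packet */
	return radeon_ring_test_lockup(rdev, ring);
}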
@@ -1516,11 +1500,12 @@ struct radeon_device {
 	struct radeon_mode_info mode_info;
 	struct radeon_scratch scratch;
 	struct radeon_mman mman;
-	rwlock_t fence_lock;
 	struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS];
-	struct radeon_semaphore_driver semaphore_drv;
+	wait_queue_head_t fence_queue;
+	struct mutex ring_lock;
 	struct radeon_ring ring[RADEON_NUM_RINGS];
-	struct radeon_ib_pool ib_pool;
+	bool ib_pool_ready;
+	struct radeon_sa_manager ring_tmp_bo;
 	struct radeon_irq irq;
 	struct radeon_asic *asic;
 	struct radeon_gem gem;
@@ -1529,7 +1514,6 @@ struct radeon_device {
1529 struct radeon_mutex cs_mutex; 1514 struct radeon_mutex cs_mutex;
1530 struct radeon_wb wb; 1515 struct radeon_wb wb;
1531 struct radeon_dummy_page dummy_page; 1516 struct radeon_dummy_page dummy_page;
1532 bool gpu_lockup;
1533 bool shutdown; 1517 bool shutdown;
1534 bool suspend; 1518 bool suspend;
1535 bool need_dma32; 1519 bool need_dma32;
@@ -1546,19 +1530,12 @@ struct radeon_device {
 	struct r600_ih ih; /* r6/700 interrupt ring */
 	struct si_rlc rlc;
 	struct work_struct hotplug_work;
+	struct work_struct audio_work;
 	int num_crtc; /* number of crtcs */
 	struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
 	struct mutex vram_mutex;
-
-	/* audio stuff */
-	bool audio_enabled;
-	struct timer_list audio_timer;
-	int audio_channels;
-	int audio_rate;
-	int audio_bits_per_sample;
-	uint8_t audio_status_bits;
-	uint8_t audio_category_code;
-
+	bool audio_enabled;
+	struct r600_audio audio_status; /* audio stuff */
 	struct notifier_block acpi_nb;
 	/* only one userspace can use Hyperz features or CMASK at a time */
 	struct drm_file *hyperz_filp;
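On the device side, the fence rwlock and semaphore driver give way to one wait queue shared by all rings plus a ring mutex, and the old IB pool becomes an SA manager (ring_tmp_bo) gated by ib_pool_ready. A hedged sketch of a wait built on the shared queue:

/* Hedged sketch of a fence wait once all rings share one wait queue:
 * sleepers block on rdev->fence_queue and every fence IRQ wakes them
 * to re-check. radeon_fence_signaled() is the real helper; the
 * timeout value is arbitrary.
 */
static int example_wait(struct radeon_device *rdev,
			struct radeon_fence *fence)
{
	long t = wait_event_timeout(rdev->fence_queue,
				    radeon_fence_signaled(fence),
				    msecs_to_jiffies(500));
	return t > 0 ? 0 : -EDEADLK;	/* caller may then try a GPU reset */
}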
@@ -1730,7 +1707,6 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
1730#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev)) 1707#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
1731#define radeon_cs_parse(rdev, r, p) (rdev)->asic->ring[(r)].cs_parse((p)) 1708#define radeon_cs_parse(rdev, r, p) (rdev)->asic->ring[(r)].cs_parse((p))
1732#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state)) 1709#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
1733#define radeon_gpu_is_lockup(rdev, cp) (rdev)->asic->gpu_is_lockup((rdev), (cp))
1734#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev)) 1710#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
1735#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev)) 1711#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
1736#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart.set_page((rdev), (i), (p)) 1712#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart.set_page((rdev), (i), (p))
@@ -1739,6 +1715,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
1739#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)].ib_test((rdev), (cp)) 1715#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)].ib_test((rdev), (cp))
1740#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)].ib_execute((rdev), (ib)) 1716#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)].ib_execute((rdev), (ib))
1741#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)].ib_parse((rdev), (ib)) 1717#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)].ib_parse((rdev), (ib))
1718#define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)].is_lockup((rdev), (cp))
1742#define radeon_irq_set(rdev) (rdev)->asic->irq.set((rdev)) 1719#define radeon_irq_set(rdev) (rdev)->asic->irq.set((rdev))
1743#define radeon_irq_process(rdev) (rdev)->asic->irq.process((rdev)) 1720#define radeon_irq_process(rdev) (rdev)->asic->irq.process((rdev))
1744#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc)) 1721#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc))
@@ -1828,6 +1805,8 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev,
1828 struct radeon_vm *vm, 1805 struct radeon_vm *vm,
1829 struct radeon_bo *bo); 1806 struct radeon_bo *bo);
1830 1807
1808/* audio */
1809void r600_audio_update_hdmi(struct work_struct *work);
1831 1810
1832/* 1811/*
1833 * R600 vram scratch functions 1812 * R600 vram scratch functions
@@ -1848,10 +1827,32 @@ int r600_fmt_get_nblocksy(u32 format, u32 h);
1848/* 1827/*
1849 * r600 functions used by radeon_encoder.c 1828 * r600 functions used by radeon_encoder.c
1850 */ 1829 */
1830struct radeon_hdmi_acr {
1831 u32 clock;
1832
1833 int n_32khz;
1834 int cts_32khz;
1835
1836 int n_44_1khz;
1837 int cts_44_1khz;
1838
1839 int n_48khz;
1840 int cts_48khz;
1841
1842};
1843
1844extern struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock);
1845
1851extern void r600_hdmi_enable(struct drm_encoder *encoder); 1846extern void r600_hdmi_enable(struct drm_encoder *encoder);
1852extern void r600_hdmi_disable(struct drm_encoder *encoder); 1847extern void r600_hdmi_disable(struct drm_encoder *encoder);
1853extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode); 1848extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
1854 1849
1850/*
1851 * evergreen functions used by radeon_encoder.c
1852 */
1853
1854extern void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
1855
1855extern int ni_init_microcode(struct radeon_device *rdev); 1856extern int ni_init_microcode(struct radeon_device *rdev);
1856extern int ni_mc_load_microcode(struct radeon_device *rdev); 1857extern int ni_mc_load_microcode(struct radeon_device *rdev);
1857 1858
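The ACR table above pairs a TMDS clock with precomputed N/CTS values for the three base audio rates, looked up via r600_hdmi_acr(). For clocks outside such a table, the HDMI spec relation can regenerate CTS; a hedged sketch:

/* Hedged sketch of the HDMI ACR relation 128 * f_audio = f_TMDS * N / CTS,
 * rearranged to CTS = f_TMDS * N / (128 * f_audio). Assumes the clock is
 * in kHz like drm_display_mode::clock; needs <linux/math64.h>.
 * e.g. 25175 kHz, N = 7007, 44100 Hz gives CTS of about 31250.
 */
static u32 example_hdmi_cts(u32 clock_khz, u32 n, u32 rate_hz)
{
	return div_u64((u64)clock_khz * 1000 * n, 128 * rate_hz);
}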
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index be4dc2ff0e40..f533df5f7d50 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -134,7 +134,6 @@ static struct radeon_asic r100_asic = {
134 .suspend = &r100_suspend, 134 .suspend = &r100_suspend,
135 .resume = &r100_resume, 135 .resume = &r100_resume,
136 .vga_set_state = &r100_vga_set_state, 136 .vga_set_state = &r100_vga_set_state,
137 .gpu_is_lockup = &r100_gpu_is_lockup,
138 .asic_reset = &r100_asic_reset, 137 .asic_reset = &r100_asic_reset,
139 .ioctl_wait_idle = NULL, 138 .ioctl_wait_idle = NULL,
140 .gui_idle = &r100_gui_idle, 139 .gui_idle = &r100_gui_idle,
@@ -152,6 +151,7 @@ static struct radeon_asic r100_asic = {
152 .ring_start = &r100_ring_start, 151 .ring_start = &r100_ring_start,
153 .ring_test = &r100_ring_test, 152 .ring_test = &r100_ring_test,
154 .ib_test = &r100_ib_test, 153 .ib_test = &r100_ib_test,
154 .is_lockup = &r100_gpu_is_lockup,
155 } 155 }
156 }, 156 },
157 .irq = { 157 .irq = {
@@ -208,7 +208,6 @@ static struct radeon_asic r200_asic = {
208 .suspend = &r100_suspend, 208 .suspend = &r100_suspend,
209 .resume = &r100_resume, 209 .resume = &r100_resume,
210 .vga_set_state = &r100_vga_set_state, 210 .vga_set_state = &r100_vga_set_state,
211 .gpu_is_lockup = &r100_gpu_is_lockup,
212 .asic_reset = &r100_asic_reset, 211 .asic_reset = &r100_asic_reset,
213 .ioctl_wait_idle = NULL, 212 .ioctl_wait_idle = NULL,
214 .gui_idle = &r100_gui_idle, 213 .gui_idle = &r100_gui_idle,
@@ -226,6 +225,7 @@ static struct radeon_asic r200_asic = {
226 .ring_start = &r100_ring_start, 225 .ring_start = &r100_ring_start,
227 .ring_test = &r100_ring_test, 226 .ring_test = &r100_ring_test,
228 .ib_test = &r100_ib_test, 227 .ib_test = &r100_ib_test,
228 .is_lockup = &r100_gpu_is_lockup,
229 } 229 }
230 }, 230 },
231 .irq = { 231 .irq = {
@@ -282,7 +282,6 @@ static struct radeon_asic r300_asic = {
282 .suspend = &r300_suspend, 282 .suspend = &r300_suspend,
283 .resume = &r300_resume, 283 .resume = &r300_resume,
284 .vga_set_state = &r100_vga_set_state, 284 .vga_set_state = &r100_vga_set_state,
285 .gpu_is_lockup = &r300_gpu_is_lockup,
286 .asic_reset = &r300_asic_reset, 285 .asic_reset = &r300_asic_reset,
287 .ioctl_wait_idle = NULL, 286 .ioctl_wait_idle = NULL,
288 .gui_idle = &r100_gui_idle, 287 .gui_idle = &r100_gui_idle,
@@ -300,6 +299,7 @@ static struct radeon_asic r300_asic = {
300 .ring_start = &r300_ring_start, 299 .ring_start = &r300_ring_start,
301 .ring_test = &r100_ring_test, 300 .ring_test = &r100_ring_test,
302 .ib_test = &r100_ib_test, 301 .ib_test = &r100_ib_test,
302 .is_lockup = &r100_gpu_is_lockup,
303 } 303 }
304 }, 304 },
305 .irq = { 305 .irq = {
@@ -356,7 +356,6 @@ static struct radeon_asic r300_asic_pcie = {
356 .suspend = &r300_suspend, 356 .suspend = &r300_suspend,
357 .resume = &r300_resume, 357 .resume = &r300_resume,
358 .vga_set_state = &r100_vga_set_state, 358 .vga_set_state = &r100_vga_set_state,
359 .gpu_is_lockup = &r300_gpu_is_lockup,
360 .asic_reset = &r300_asic_reset, 359 .asic_reset = &r300_asic_reset,
361 .ioctl_wait_idle = NULL, 360 .ioctl_wait_idle = NULL,
362 .gui_idle = &r100_gui_idle, 361 .gui_idle = &r100_gui_idle,
@@ -374,6 +373,7 @@ static struct radeon_asic r300_asic_pcie = {
374 .ring_start = &r300_ring_start, 373 .ring_start = &r300_ring_start,
375 .ring_test = &r100_ring_test, 374 .ring_test = &r100_ring_test,
376 .ib_test = &r100_ib_test, 375 .ib_test = &r100_ib_test,
376 .is_lockup = &r100_gpu_is_lockup,
377 } 377 }
378 }, 378 },
379 .irq = { 379 .irq = {
@@ -430,7 +430,6 @@ static struct radeon_asic r420_asic = {
430 .suspend = &r420_suspend, 430 .suspend = &r420_suspend,
431 .resume = &r420_resume, 431 .resume = &r420_resume,
432 .vga_set_state = &r100_vga_set_state, 432 .vga_set_state = &r100_vga_set_state,
433 .gpu_is_lockup = &r300_gpu_is_lockup,
434 .asic_reset = &r300_asic_reset, 433 .asic_reset = &r300_asic_reset,
435 .ioctl_wait_idle = NULL, 434 .ioctl_wait_idle = NULL,
436 .gui_idle = &r100_gui_idle, 435 .gui_idle = &r100_gui_idle,
@@ -448,6 +447,7 @@ static struct radeon_asic r420_asic = {
448 .ring_start = &r300_ring_start, 447 .ring_start = &r300_ring_start,
449 .ring_test = &r100_ring_test, 448 .ring_test = &r100_ring_test,
450 .ib_test = &r100_ib_test, 449 .ib_test = &r100_ib_test,
450 .is_lockup = &r100_gpu_is_lockup,
451 } 451 }
452 }, 452 },
453 .irq = { 453 .irq = {
@@ -504,7 +504,6 @@ static struct radeon_asic rs400_asic = {
504 .suspend = &rs400_suspend, 504 .suspend = &rs400_suspend,
505 .resume = &rs400_resume, 505 .resume = &rs400_resume,
506 .vga_set_state = &r100_vga_set_state, 506 .vga_set_state = &r100_vga_set_state,
507 .gpu_is_lockup = &r300_gpu_is_lockup,
508 .asic_reset = &r300_asic_reset, 507 .asic_reset = &r300_asic_reset,
509 .ioctl_wait_idle = NULL, 508 .ioctl_wait_idle = NULL,
510 .gui_idle = &r100_gui_idle, 509 .gui_idle = &r100_gui_idle,
@@ -522,6 +521,7 @@ static struct radeon_asic rs400_asic = {
522 .ring_start = &r300_ring_start, 521 .ring_start = &r300_ring_start,
523 .ring_test = &r100_ring_test, 522 .ring_test = &r100_ring_test,
524 .ib_test = &r100_ib_test, 523 .ib_test = &r100_ib_test,
524 .is_lockup = &r100_gpu_is_lockup,
525 } 525 }
526 }, 526 },
527 .irq = { 527 .irq = {
@@ -578,7 +578,6 @@ static struct radeon_asic rs600_asic = {
578 .suspend = &rs600_suspend, 578 .suspend = &rs600_suspend,
579 .resume = &rs600_resume, 579 .resume = &rs600_resume,
580 .vga_set_state = &r100_vga_set_state, 580 .vga_set_state = &r100_vga_set_state,
581 .gpu_is_lockup = &r300_gpu_is_lockup,
582 .asic_reset = &rs600_asic_reset, 581 .asic_reset = &rs600_asic_reset,
583 .ioctl_wait_idle = NULL, 582 .ioctl_wait_idle = NULL,
584 .gui_idle = &r100_gui_idle, 583 .gui_idle = &r100_gui_idle,
@@ -596,6 +595,7 @@ static struct radeon_asic rs600_asic = {
596 .ring_start = &r300_ring_start, 595 .ring_start = &r300_ring_start,
597 .ring_test = &r100_ring_test, 596 .ring_test = &r100_ring_test,
598 .ib_test = &r100_ib_test, 597 .ib_test = &r100_ib_test,
598 .is_lockup = &r100_gpu_is_lockup,
599 } 599 }
600 }, 600 },
601 .irq = { 601 .irq = {
@@ -652,7 +652,6 @@ static struct radeon_asic rs690_asic = {
652 .suspend = &rs690_suspend, 652 .suspend = &rs690_suspend,
653 .resume = &rs690_resume, 653 .resume = &rs690_resume,
654 .vga_set_state = &r100_vga_set_state, 654 .vga_set_state = &r100_vga_set_state,
655 .gpu_is_lockup = &r300_gpu_is_lockup,
656 .asic_reset = &rs600_asic_reset, 655 .asic_reset = &rs600_asic_reset,
657 .ioctl_wait_idle = NULL, 656 .ioctl_wait_idle = NULL,
658 .gui_idle = &r100_gui_idle, 657 .gui_idle = &r100_gui_idle,
@@ -670,6 +669,7 @@ static struct radeon_asic rs690_asic = {
670 .ring_start = &r300_ring_start, 669 .ring_start = &r300_ring_start,
671 .ring_test = &r100_ring_test, 670 .ring_test = &r100_ring_test,
672 .ib_test = &r100_ib_test, 671 .ib_test = &r100_ib_test,
672 .is_lockup = &r100_gpu_is_lockup,
673 } 673 }
674 }, 674 },
675 .irq = { 675 .irq = {
@@ -726,7 +726,6 @@ static struct radeon_asic rv515_asic = {
726 .suspend = &rv515_suspend, 726 .suspend = &rv515_suspend,
727 .resume = &rv515_resume, 727 .resume = &rv515_resume,
728 .vga_set_state = &r100_vga_set_state, 728 .vga_set_state = &r100_vga_set_state,
729 .gpu_is_lockup = &r300_gpu_is_lockup,
730 .asic_reset = &rs600_asic_reset, 729 .asic_reset = &rs600_asic_reset,
731 .ioctl_wait_idle = NULL, 730 .ioctl_wait_idle = NULL,
732 .gui_idle = &r100_gui_idle, 731 .gui_idle = &r100_gui_idle,
@@ -744,6 +743,7 @@ static struct radeon_asic rv515_asic = {
744 .ring_start = &rv515_ring_start, 743 .ring_start = &rv515_ring_start,
745 .ring_test = &r100_ring_test, 744 .ring_test = &r100_ring_test,
746 .ib_test = &r100_ib_test, 745 .ib_test = &r100_ib_test,
746 .is_lockup = &r100_gpu_is_lockup,
747 } 747 }
748 }, 748 },
749 .irq = { 749 .irq = {
@@ -800,7 +800,6 @@ static struct radeon_asic r520_asic = {
800 .suspend = &rv515_suspend, 800 .suspend = &rv515_suspend,
801 .resume = &r520_resume, 801 .resume = &r520_resume,
802 .vga_set_state = &r100_vga_set_state, 802 .vga_set_state = &r100_vga_set_state,
803 .gpu_is_lockup = &r300_gpu_is_lockup,
804 .asic_reset = &rs600_asic_reset, 803 .asic_reset = &rs600_asic_reset,
805 .ioctl_wait_idle = NULL, 804 .ioctl_wait_idle = NULL,
806 .gui_idle = &r100_gui_idle, 805 .gui_idle = &r100_gui_idle,
@@ -818,6 +817,7 @@ static struct radeon_asic r520_asic = {
818 .ring_start = &rv515_ring_start, 817 .ring_start = &rv515_ring_start,
819 .ring_test = &r100_ring_test, 818 .ring_test = &r100_ring_test,
820 .ib_test = &r100_ib_test, 819 .ib_test = &r100_ib_test,
820 .is_lockup = &r100_gpu_is_lockup,
821 } 821 }
822 }, 822 },
823 .irq = { 823 .irq = {
@@ -874,7 +874,6 @@ static struct radeon_asic r600_asic = {
874 .suspend = &r600_suspend, 874 .suspend = &r600_suspend,
875 .resume = &r600_resume, 875 .resume = &r600_resume,
876 .vga_set_state = &r600_vga_set_state, 876 .vga_set_state = &r600_vga_set_state,
877 .gpu_is_lockup = &r600_gpu_is_lockup,
878 .asic_reset = &r600_asic_reset, 877 .asic_reset = &r600_asic_reset,
879 .ioctl_wait_idle = r600_ioctl_wait_idle, 878 .ioctl_wait_idle = r600_ioctl_wait_idle,
880 .gui_idle = &r600_gui_idle, 879 .gui_idle = &r600_gui_idle,
@@ -891,6 +890,7 @@ static struct radeon_asic r600_asic = {
891 .cs_parse = &r600_cs_parse, 890 .cs_parse = &r600_cs_parse,
892 .ring_test = &r600_ring_test, 891 .ring_test = &r600_ring_test,
893 .ib_test = &r600_ib_test, 892 .ib_test = &r600_ib_test,
893 .is_lockup = &r600_gpu_is_lockup,
894 } 894 }
895 }, 895 },
896 .irq = { 896 .irq = {
@@ -946,7 +946,6 @@ static struct radeon_asic rs780_asic = {
946 .fini = &r600_fini, 946 .fini = &r600_fini,
947 .suspend = &r600_suspend, 947 .suspend = &r600_suspend,
948 .resume = &r600_resume, 948 .resume = &r600_resume,
949 .gpu_is_lockup = &r600_gpu_is_lockup,
950 .vga_set_state = &r600_vga_set_state, 949 .vga_set_state = &r600_vga_set_state,
951 .asic_reset = &r600_asic_reset, 950 .asic_reset = &r600_asic_reset,
952 .ioctl_wait_idle = r600_ioctl_wait_idle, 951 .ioctl_wait_idle = r600_ioctl_wait_idle,
@@ -964,6 +963,7 @@ static struct radeon_asic rs780_asic = {
964 .cs_parse = &r600_cs_parse, 963 .cs_parse = &r600_cs_parse,
965 .ring_test = &r600_ring_test, 964 .ring_test = &r600_ring_test,
966 .ib_test = &r600_ib_test, 965 .ib_test = &r600_ib_test,
966 .is_lockup = &r600_gpu_is_lockup,
967 } 967 }
968 }, 968 },
969 .irq = { 969 .irq = {
@@ -1020,7 +1020,6 @@ static struct radeon_asic rv770_asic = {
1020 .suspend = &rv770_suspend, 1020 .suspend = &rv770_suspend,
1021 .resume = &rv770_resume, 1021 .resume = &rv770_resume,
1022 .asic_reset = &r600_asic_reset, 1022 .asic_reset = &r600_asic_reset,
1023 .gpu_is_lockup = &r600_gpu_is_lockup,
1024 .vga_set_state = &r600_vga_set_state, 1023 .vga_set_state = &r600_vga_set_state,
1025 .ioctl_wait_idle = r600_ioctl_wait_idle, 1024 .ioctl_wait_idle = r600_ioctl_wait_idle,
1026 .gui_idle = &r600_gui_idle, 1025 .gui_idle = &r600_gui_idle,
@@ -1037,6 +1036,7 @@ static struct radeon_asic rv770_asic = {
1037 .cs_parse = &r600_cs_parse, 1036 .cs_parse = &r600_cs_parse,
1038 .ring_test = &r600_ring_test, 1037 .ring_test = &r600_ring_test,
1039 .ib_test = &r600_ib_test, 1038 .ib_test = &r600_ib_test,
1039 .is_lockup = &r600_gpu_is_lockup,
1040 } 1040 }
1041 }, 1041 },
1042 .irq = { 1042 .irq = {
@@ -1092,7 +1092,6 @@ static struct radeon_asic evergreen_asic = {
1092 .fini = &evergreen_fini, 1092 .fini = &evergreen_fini,
1093 .suspend = &evergreen_suspend, 1093 .suspend = &evergreen_suspend,
1094 .resume = &evergreen_resume, 1094 .resume = &evergreen_resume,
1095 .gpu_is_lockup = &evergreen_gpu_is_lockup,
1096 .asic_reset = &evergreen_asic_reset, 1095 .asic_reset = &evergreen_asic_reset,
1097 .vga_set_state = &r600_vga_set_state, 1096 .vga_set_state = &r600_vga_set_state,
1098 .ioctl_wait_idle = r600_ioctl_wait_idle, 1097 .ioctl_wait_idle = r600_ioctl_wait_idle,
@@ -1110,6 +1109,7 @@ static struct radeon_asic evergreen_asic = {
1110 .cs_parse = &evergreen_cs_parse, 1109 .cs_parse = &evergreen_cs_parse,
1111 .ring_test = &r600_ring_test, 1110 .ring_test = &r600_ring_test,
1112 .ib_test = &r600_ib_test, 1111 .ib_test = &r600_ib_test,
1112 .is_lockup = &evergreen_gpu_is_lockup,
1113 } 1113 }
1114 }, 1114 },
1115 .irq = { 1115 .irq = {
@@ -1165,7 +1165,6 @@ static struct radeon_asic sumo_asic = {
1165 .fini = &evergreen_fini, 1165 .fini = &evergreen_fini,
1166 .suspend = &evergreen_suspend, 1166 .suspend = &evergreen_suspend,
1167 .resume = &evergreen_resume, 1167 .resume = &evergreen_resume,
1168 .gpu_is_lockup = &evergreen_gpu_is_lockup,
1169 .asic_reset = &evergreen_asic_reset, 1168 .asic_reset = &evergreen_asic_reset,
1170 .vga_set_state = &r600_vga_set_state, 1169 .vga_set_state = &r600_vga_set_state,
1171 .ioctl_wait_idle = r600_ioctl_wait_idle, 1170 .ioctl_wait_idle = r600_ioctl_wait_idle,
@@ -1183,6 +1182,7 @@ static struct radeon_asic sumo_asic = {
1183 .cs_parse = &evergreen_cs_parse, 1182 .cs_parse = &evergreen_cs_parse,
1184 .ring_test = &r600_ring_test, 1183 .ring_test = &r600_ring_test,
1185 .ib_test = &r600_ib_test, 1184 .ib_test = &r600_ib_test,
1185 .is_lockup = &evergreen_gpu_is_lockup,
1186 }, 1186 },
1187 }, 1187 },
1188 .irq = { 1188 .irq = {
@@ -1238,7 +1238,6 @@ static struct radeon_asic btc_asic = {
1238 .fini = &evergreen_fini, 1238 .fini = &evergreen_fini,
1239 .suspend = &evergreen_suspend, 1239 .suspend = &evergreen_suspend,
1240 .resume = &evergreen_resume, 1240 .resume = &evergreen_resume,
1241 .gpu_is_lockup = &evergreen_gpu_is_lockup,
1242 .asic_reset = &evergreen_asic_reset, 1241 .asic_reset = &evergreen_asic_reset,
1243 .vga_set_state = &r600_vga_set_state, 1242 .vga_set_state = &r600_vga_set_state,
1244 .ioctl_wait_idle = r600_ioctl_wait_idle, 1243 .ioctl_wait_idle = r600_ioctl_wait_idle,
@@ -1256,6 +1255,7 @@ static struct radeon_asic btc_asic = {
1256 .cs_parse = &evergreen_cs_parse, 1255 .cs_parse = &evergreen_cs_parse,
1257 .ring_test = &r600_ring_test, 1256 .ring_test = &r600_ring_test,
1258 .ib_test = &r600_ib_test, 1257 .ib_test = &r600_ib_test,
1258 .is_lockup = &evergreen_gpu_is_lockup,
1259 } 1259 }
1260 }, 1260 },
1261 .irq = { 1261 .irq = {
@@ -1321,7 +1321,6 @@ static struct radeon_asic cayman_asic = {
1321 .fini = &cayman_fini, 1321 .fini = &cayman_fini,
1322 .suspend = &cayman_suspend, 1322 .suspend = &cayman_suspend,
1323 .resume = &cayman_resume, 1323 .resume = &cayman_resume,
1324 .gpu_is_lockup = &cayman_gpu_is_lockup,
1325 .asic_reset = &cayman_asic_reset, 1324 .asic_reset = &cayman_asic_reset,
1326 .vga_set_state = &r600_vga_set_state, 1325 .vga_set_state = &r600_vga_set_state,
1327 .ioctl_wait_idle = r600_ioctl_wait_idle, 1326 .ioctl_wait_idle = r600_ioctl_wait_idle,
@@ -1340,6 +1339,7 @@ static struct radeon_asic cayman_asic = {
1340 .cs_parse = &evergreen_cs_parse, 1339 .cs_parse = &evergreen_cs_parse,
1341 .ring_test = &r600_ring_test, 1340 .ring_test = &r600_ring_test,
1342 .ib_test = &r600_ib_test, 1341 .ib_test = &r600_ib_test,
1342 .is_lockup = &evergreen_gpu_is_lockup,
1343 }, 1343 },
1344 [CAYMAN_RING_TYPE_CP1_INDEX] = { 1344 [CAYMAN_RING_TYPE_CP1_INDEX] = {
1345 .ib_execute = &cayman_ring_ib_execute, 1345 .ib_execute = &cayman_ring_ib_execute,
@@ -1349,6 +1349,7 @@ static struct radeon_asic cayman_asic = {
1349 .cs_parse = &evergreen_cs_parse, 1349 .cs_parse = &evergreen_cs_parse,
1350 .ring_test = &r600_ring_test, 1350 .ring_test = &r600_ring_test,
1351 .ib_test = &r600_ib_test, 1351 .ib_test = &r600_ib_test,
1352 .is_lockup = &evergreen_gpu_is_lockup,
1352 }, 1353 },
1353 [CAYMAN_RING_TYPE_CP2_INDEX] = { 1354 [CAYMAN_RING_TYPE_CP2_INDEX] = {
1354 .ib_execute = &cayman_ring_ib_execute, 1355 .ib_execute = &cayman_ring_ib_execute,
@@ -1358,6 +1359,7 @@ static struct radeon_asic cayman_asic = {
1358 .cs_parse = &evergreen_cs_parse, 1359 .cs_parse = &evergreen_cs_parse,
1359 .ring_test = &r600_ring_test, 1360 .ring_test = &r600_ring_test,
1360 .ib_test = &r600_ib_test, 1361 .ib_test = &r600_ib_test,
1362 .is_lockup = &evergreen_gpu_is_lockup,
1361 } 1363 }
1362 }, 1364 },
1363 .irq = { 1365 .irq = {
@@ -1413,7 +1415,6 @@ static struct radeon_asic trinity_asic = {
1413 .fini = &cayman_fini, 1415 .fini = &cayman_fini,
1414 .suspend = &cayman_suspend, 1416 .suspend = &cayman_suspend,
1415 .resume = &cayman_resume, 1417 .resume = &cayman_resume,
1416 .gpu_is_lockup = &cayman_gpu_is_lockup,
1417 .asic_reset = &cayman_asic_reset, 1418 .asic_reset = &cayman_asic_reset,
1418 .vga_set_state = &r600_vga_set_state, 1419 .vga_set_state = &r600_vga_set_state,
1419 .ioctl_wait_idle = r600_ioctl_wait_idle, 1420 .ioctl_wait_idle = r600_ioctl_wait_idle,
@@ -1432,6 +1433,7 @@ static struct radeon_asic trinity_asic = {
1432 .cs_parse = &evergreen_cs_parse, 1433 .cs_parse = &evergreen_cs_parse,
1433 .ring_test = &r600_ring_test, 1434 .ring_test = &r600_ring_test,
1434 .ib_test = &r600_ib_test, 1435 .ib_test = &r600_ib_test,
1436 .is_lockup = &evergreen_gpu_is_lockup,
1435 }, 1437 },
1436 [CAYMAN_RING_TYPE_CP1_INDEX] = { 1438 [CAYMAN_RING_TYPE_CP1_INDEX] = {
1437 .ib_execute = &cayman_ring_ib_execute, 1439 .ib_execute = &cayman_ring_ib_execute,
@@ -1441,6 +1443,7 @@ static struct radeon_asic trinity_asic = {
1441 .cs_parse = &evergreen_cs_parse, 1443 .cs_parse = &evergreen_cs_parse,
1442 .ring_test = &r600_ring_test, 1444 .ring_test = &r600_ring_test,
1443 .ib_test = &r600_ib_test, 1445 .ib_test = &r600_ib_test,
1446 .is_lockup = &evergreen_gpu_is_lockup,
1444 }, 1447 },
1445 [CAYMAN_RING_TYPE_CP2_INDEX] = { 1448 [CAYMAN_RING_TYPE_CP2_INDEX] = {
1446 .ib_execute = &cayman_ring_ib_execute, 1449 .ib_execute = &cayman_ring_ib_execute,
@@ -1450,6 +1453,7 @@ static struct radeon_asic trinity_asic = {
1450 .cs_parse = &evergreen_cs_parse, 1453 .cs_parse = &evergreen_cs_parse,
1451 .ring_test = &r600_ring_test, 1454 .ring_test = &r600_ring_test,
1452 .ib_test = &r600_ib_test, 1455 .ib_test = &r600_ib_test,
1456 .is_lockup = &evergreen_gpu_is_lockup,
1453 } 1457 }
1454 }, 1458 },
1455 .irq = { 1459 .irq = {
@@ -1515,7 +1519,6 @@ static struct radeon_asic si_asic = {
1515 .fini = &si_fini, 1519 .fini = &si_fini,
1516 .suspend = &si_suspend, 1520 .suspend = &si_suspend,
1517 .resume = &si_resume, 1521 .resume = &si_resume,
1518 .gpu_is_lockup = &si_gpu_is_lockup,
1519 .asic_reset = &si_asic_reset, 1522 .asic_reset = &si_asic_reset,
1520 .vga_set_state = &r600_vga_set_state, 1523 .vga_set_state = &r600_vga_set_state,
1521 .ioctl_wait_idle = r600_ioctl_wait_idle, 1524 .ioctl_wait_idle = r600_ioctl_wait_idle,
@@ -1534,6 +1537,7 @@ static struct radeon_asic si_asic = {
1534 .cs_parse = NULL, 1537 .cs_parse = NULL,
1535 .ring_test = &r600_ring_test, 1538 .ring_test = &r600_ring_test,
1536 .ib_test = &r600_ib_test, 1539 .ib_test = &r600_ib_test,
1540 .is_lockup = &si_gpu_is_lockup,
1537 }, 1541 },
1538 [CAYMAN_RING_TYPE_CP1_INDEX] = { 1542 [CAYMAN_RING_TYPE_CP1_INDEX] = {
1539 .ib_execute = &si_ring_ib_execute, 1543 .ib_execute = &si_ring_ib_execute,
@@ -1543,6 +1547,7 @@ static struct radeon_asic si_asic = {
1543 .cs_parse = NULL, 1547 .cs_parse = NULL,
1544 .ring_test = &r600_ring_test, 1548 .ring_test = &r600_ring_test,
1545 .ib_test = &r600_ib_test, 1549 .ib_test = &r600_ib_test,
1550 .is_lockup = &si_gpu_is_lockup,
1546 }, 1551 },
1547 [CAYMAN_RING_TYPE_CP2_INDEX] = { 1552 [CAYMAN_RING_TYPE_CP2_INDEX] = {
1548 .ib_execute = &si_ring_ib_execute, 1553 .ib_execute = &si_ring_ib_execute,
@@ -1552,6 +1557,7 @@ static struct radeon_asic si_asic = {
1552 .cs_parse = NULL, 1557 .cs_parse = NULL,
1553 .ring_test = &r600_ring_test, 1558 .ring_test = &r600_ring_test,
1554 .ib_test = &r600_ib_test, 1559 .ib_test = &r600_ib_test,
1560 .is_lockup = &si_gpu_is_lockup,
1555 } 1561 }
1556 }, 1562 },
1557 .irq = { 1563 .irq = {
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 3d9f9f1d8f90..e76a941ef14e 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -103,11 +103,6 @@ int r100_pci_gart_enable(struct radeon_device *rdev);
103void r100_pci_gart_disable(struct radeon_device *rdev); 103void r100_pci_gart_disable(struct radeon_device *rdev);
104int r100_debugfs_mc_info_init(struct radeon_device *rdev); 104int r100_debugfs_mc_info_init(struct radeon_device *rdev);
105int r100_gui_wait_for_idle(struct radeon_device *rdev); 105int r100_gui_wait_for_idle(struct radeon_device *rdev);
106void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup,
107 struct radeon_ring *cp);
108bool r100_gpu_cp_is_lockup(struct radeon_device *rdev,
109 struct r100_gpu_lockup *lockup,
110 struct radeon_ring *cp);
111void r100_ib_fini(struct radeon_device *rdev); 106void r100_ib_fini(struct radeon_device *rdev);
112int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring); 107int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
113void r100_irq_disable(struct radeon_device *rdev); 108void r100_irq_disable(struct radeon_device *rdev);
@@ -159,7 +154,6 @@ extern int r300_init(struct radeon_device *rdev);
159extern void r300_fini(struct radeon_device *rdev); 154extern void r300_fini(struct radeon_device *rdev);
160extern int r300_suspend(struct radeon_device *rdev); 155extern int r300_suspend(struct radeon_device *rdev);
161extern int r300_resume(struct radeon_device *rdev); 156extern int r300_resume(struct radeon_device *rdev);
162extern bool r300_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
163extern int r300_asic_reset(struct radeon_device *rdev); 157extern int r300_asic_reset(struct radeon_device *rdev);
164extern void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring); 158extern void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
165extern void r300_fence_ring_emit(struct radeon_device *rdev, 159extern void r300_fence_ring_emit(struct radeon_device *rdev,
@@ -362,26 +356,20 @@ void r600_disable_interrupts(struct radeon_device *rdev);
 void r600_rlc_stop(struct radeon_device *rdev);
 /* r600 audio */
 int r600_audio_init(struct radeon_device *rdev);
-int r600_audio_tmds_index(struct drm_encoder *encoder);
 void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
-int r600_audio_channels(struct radeon_device *rdev);
-int r600_audio_bits_per_sample(struct radeon_device *rdev);
-int r600_audio_rate(struct radeon_device *rdev);
-uint8_t r600_audio_status_bits(struct radeon_device *rdev);
-uint8_t r600_audio_category_code(struct radeon_device *rdev);
-void r600_audio_schedule_polling(struct radeon_device *rdev);
-void r600_audio_enable_polling(struct drm_encoder *encoder);
-void r600_audio_disable_polling(struct drm_encoder *encoder);
+struct r600_audio r600_audio_status(struct radeon_device *rdev);
 void r600_audio_fini(struct radeon_device *rdev);
-void r600_hdmi_init(struct drm_encoder *encoder);
 int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
 void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
 /* r600 blit */
-int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages);
-void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence);
+int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages,
+			   struct radeon_sa_bo **vb);
+void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence,
+			 struct radeon_sa_bo *vb);
 void r600_kms_blit_copy(struct radeon_device *rdev,
 			u64 src_gpu_addr, u64 dst_gpu_addr,
-			unsigned num_gpu_pages);
+			unsigned num_gpu_pages,
+			struct radeon_sa_bo *vb);
 int r600_mc_wait_for_idle(struct radeon_device *rdev);
 
 /*
@@ -446,7 +434,6 @@ int cayman_init(struct radeon_device *rdev);
446void cayman_fini(struct radeon_device *rdev); 434void cayman_fini(struct radeon_device *rdev);
447int cayman_suspend(struct radeon_device *rdev); 435int cayman_suspend(struct radeon_device *rdev);
448int cayman_resume(struct radeon_device *rdev); 436int cayman_resume(struct radeon_device *rdev);
449bool cayman_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
450int cayman_asic_reset(struct radeon_device *rdev); 437int cayman_asic_reset(struct radeon_device *rdev);
451void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); 438void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
452int cayman_vm_init(struct radeon_device *rdev); 439int cayman_vm_init(struct radeon_device *rdev);
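r600_blit_prepare_copy() now hands back the sub-allocated vertex buffer, which must be threaded through the copy and released against the fence. A hedged sketch of the sequence (error handling and fence creation elided):

/* Hedged sketch of the reworked blit flow implied by the prototypes
 * above: prepare returns the SA-allocated vertex buffer, the copy
 * fills it, and done_copy ties its release to the fence.
 */
static int example_blit(struct radeon_device *rdev, u64 src, u64 dst,
			unsigned pages, struct radeon_fence *fence)
{
	struct radeon_sa_bo *vb;
	int r;

	r = r600_blit_prepare_copy(rdev, pages, &vb);
	if (r)
		return r;
	r600_kms_blit_copy(rdev, src, dst, pages, vb);
	r600_blit_done_copy(rdev, fence, vb);
	return 0;
}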
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index fef7b722b05d..364f5b1a04b9 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -103,7 +103,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
 	int time;
 
 	n = RADEON_BENCHMARK_ITERATIONS;
-	r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, &sobj);
+	r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, NULL, &sobj);
 	if (r) {
 		goto out_cleanup;
 	}
@@ -115,7 +115,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
 	if (r) {
 		goto out_cleanup;
 	}
-	r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, &dobj);
+	r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, NULL, &dobj);
 	if (r) {
 		goto out_cleanup;
 	}
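Both allocations here grow a NULL argument before the bo pointer. Given the prime/dma-buf plumbing elsewhere in this merge, the new radeon_bo_create() parameter is presumably a scatter/gather table for imported buffers; a hedged sketch of an ordinary allocation:

/* Hedged: NULL = no sg_table, i.e. a normal (non-imported) buffer. */
static int example_alloc_vram(struct radeon_device *rdev, unsigned size,
			      struct radeon_bo **bo)
{
	return radeon_bo_create(rdev, size, PAGE_SIZE, true,
				RADEON_GEM_DOMAIN_VRAM, NULL, bo);
}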
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 2cad9fde92fc..576f4f6919f2 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -1561,6 +1561,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1561 (rdev->pdev->subsystem_device == 0x4150)) { 1561 (rdev->pdev->subsystem_device == 0x4150)) {
1562 /* Mac G5 tower 9600 */ 1562 /* Mac G5 tower 9600 */
1563 rdev->mode_info.connector_table = CT_MAC_G5_9600; 1563 rdev->mode_info.connector_table = CT_MAC_G5_9600;
1564 } else if ((rdev->pdev->device == 0x4c66) &&
1565 (rdev->pdev->subsystem_vendor == 0x1002) &&
1566 (rdev->pdev->subsystem_device == 0x4c66)) {
1567 /* SAM440ep RV250 embedded board */
1568 rdev->mode_info.connector_table = CT_SAM440EP;
1564 } else 1569 } else
1565#endif /* CONFIG_PPC_PMAC */ 1570#endif /* CONFIG_PPC_PMAC */
1566#ifdef CONFIG_PPC64 1571#ifdef CONFIG_PPC64
@@ -2134,6 +2139,67 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
2134 CONNECTOR_OBJECT_ID_SVIDEO, 2139 CONNECTOR_OBJECT_ID_SVIDEO,
2135 &hpd); 2140 &hpd);
2136 break; 2141 break;
2142 case CT_SAM440EP:
2143 DRM_INFO("Connector Table: %d (SAM440ep embedded board)\n",
2144 rdev->mode_info.connector_table);
2145 /* LVDS */
2146 ddc_i2c = combios_setup_i2c_bus(rdev, DDC_NONE_DETECTED, 0, 0);
2147 hpd.hpd = RADEON_HPD_NONE;
2148 radeon_add_legacy_encoder(dev,
2149 radeon_get_encoder_enum(dev,
2150 ATOM_DEVICE_LCD1_SUPPORT,
2151 0),
2152 ATOM_DEVICE_LCD1_SUPPORT);
2153 radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
2154 DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
2155 CONNECTOR_OBJECT_ID_LVDS,
2156 &hpd);
2157 /* DVI-I - secondary dac, int tmds */
2158 ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
2159 hpd.hpd = RADEON_HPD_1; /* ??? */
2160 radeon_add_legacy_encoder(dev,
2161 radeon_get_encoder_enum(dev,
2162 ATOM_DEVICE_DFP1_SUPPORT,
2163 0),
2164 ATOM_DEVICE_DFP1_SUPPORT);
2165 radeon_add_legacy_encoder(dev,
2166 radeon_get_encoder_enum(dev,
2167 ATOM_DEVICE_CRT2_SUPPORT,
2168 2),
2169 ATOM_DEVICE_CRT2_SUPPORT);
2170 radeon_add_legacy_connector(dev, 1,
2171 ATOM_DEVICE_DFP1_SUPPORT |
2172 ATOM_DEVICE_CRT2_SUPPORT,
2173 DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
2174 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
2175 &hpd);
2176 /* VGA - primary dac */
2177 ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
2178 hpd.hpd = RADEON_HPD_NONE;
2179 radeon_add_legacy_encoder(dev,
2180 radeon_get_encoder_enum(dev,
2181 ATOM_DEVICE_CRT1_SUPPORT,
2182 1),
2183 ATOM_DEVICE_CRT1_SUPPORT);
2184 radeon_add_legacy_connector(dev, 2,
2185 ATOM_DEVICE_CRT1_SUPPORT,
2186 DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
2187 CONNECTOR_OBJECT_ID_VGA,
2188 &hpd);
2189 /* TV - TV DAC */
2190 ddc_i2c.valid = false;
2191 hpd.hpd = RADEON_HPD_NONE;
2192 radeon_add_legacy_encoder(dev,
2193 radeon_get_encoder_enum(dev,
2194 ATOM_DEVICE_TV1_SUPPORT,
2195 2),
2196 ATOM_DEVICE_TV1_SUPPORT);
2197 radeon_add_legacy_connector(dev, 3, ATOM_DEVICE_TV1_SUPPORT,
2198 DRM_MODE_CONNECTOR_SVIDEO,
2199 &ddc_i2c,
2200 CONNECTOR_OBJECT_ID_SVIDEO,
2201 &hpd);
2202 break;
2137 default: 2203 default:
2138 DRM_INFO("Connector table: %d (invalid)\n", 2204 DRM_INFO("Connector table: %d (invalid)\n",
2139 rdev->mode_info.connector_table); 2205 rdev->mode_info.connector_table);
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 3c2e7a000a2a..2914c5761cfc 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -84,6 +84,62 @@ static void radeon_property_change_mode(struct drm_encoder *encoder)
84 crtc->x, crtc->y, crtc->fb); 84 crtc->x, crtc->y, crtc->fb);
85 } 85 }
86} 86}
87
88int radeon_get_monitor_bpc(struct drm_connector *connector)
89{
90 struct drm_device *dev = connector->dev;
91 struct radeon_device *rdev = dev->dev_private;
92 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
93 struct radeon_connector_atom_dig *dig_connector;
94 int bpc = 8;
95
96 switch (connector->connector_type) {
97 case DRM_MODE_CONNECTOR_DVII:
98 case DRM_MODE_CONNECTOR_HDMIB:
99 if (radeon_connector->use_digital) {
100 if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
101 if (connector->display_info.bpc)
102 bpc = connector->display_info.bpc;
103 }
104 }
105 break;
106 case DRM_MODE_CONNECTOR_DVID:
107 case DRM_MODE_CONNECTOR_HDMIA:
108 if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
109 if (connector->display_info.bpc)
110 bpc = connector->display_info.bpc;
111 }
112 break;
113 case DRM_MODE_CONNECTOR_DisplayPort:
114 dig_connector = radeon_connector->con_priv;
115 if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
116 (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) ||
117 drm_detect_hdmi_monitor(radeon_connector->edid)) {
118 if (connector->display_info.bpc)
119 bpc = connector->display_info.bpc;
120 }
121 break;
122 case DRM_MODE_CONNECTOR_eDP:
123 case DRM_MODE_CONNECTOR_LVDS:
124 if (connector->display_info.bpc)
125 bpc = connector->display_info.bpc;
126 else if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
127 struct drm_connector_helper_funcs *connector_funcs =
128 connector->helper_private;
129 struct drm_encoder *encoder = connector_funcs->best_encoder(connector);
130 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
131 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
132
133 if (dig->lcd_misc & ATOM_PANEL_MISC_V13_6BIT_PER_COLOR)
134 bpc = 6;
135 else if (dig->lcd_misc & ATOM_PANEL_MISC_V13_8BIT_PER_COLOR)
136 bpc = 8;
137 }
138 break;
139 }
140 return bpc;
141}
142
87static void 143static void
88radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_connector_status status) 144radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_connector_status status)
89{ 145{
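radeon_get_monitor_bpc() gives encoder code one place to ask how deep the sink can go: EDID-driven for DVI/HDMI/DP, panel-strap driven (6 vs 8 bpc) for LVDS/eDP on DCE4.1/DCE5. A hedged usage sketch:

/* Hedged sketch: enable a deep-color path only when the monitor
 * reports more than 8 bits per color; the policy is illustrative,
 * not the driver's.
 */
static bool example_wants_deep_color(struct drm_connector *connector)
{
	return radeon_get_monitor_bpc(connector) > 8;
}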
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 5cac83278338..c7d64a739033 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -118,44 +118,33 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
 static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
 {
 	bool sync_to_ring[RADEON_NUM_RINGS] = { };
+	bool need_sync = false;
 	int i, r;
 
 	for (i = 0; i < p->nrelocs; i++) {
+		struct radeon_fence *fence;
+
 		if (!p->relocs[i].robj || !p->relocs[i].robj->tbo.sync_obj)
 			continue;
 
-		if (!(p->relocs[i].flags & RADEON_RELOC_DONT_SYNC)) {
-			struct radeon_fence *fence = p->relocs[i].robj->tbo.sync_obj;
-			if (!radeon_fence_signaled(fence)) {
-				sync_to_ring[fence->ring] = true;
-			}
+		fence = p->relocs[i].robj->tbo.sync_obj;
+		if (fence->ring != p->ring && !radeon_fence_signaled(fence)) {
+			sync_to_ring[fence->ring] = true;
+			need_sync = true;
 		}
 	}
 
-	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
-		/* no need to sync to our own or unused rings */
-		if (i == p->ring || !sync_to_ring[i] || !p->rdev->ring[i].ready)
-			continue;
-
-		if (!p->ib->fence->semaphore) {
-			r = radeon_semaphore_create(p->rdev, &p->ib->fence->semaphore);
-			if (r)
-				return r;
-		}
-
-		r = radeon_ring_lock(p->rdev, &p->rdev->ring[i], 3);
-		if (r)
-			return r;
-		radeon_semaphore_emit_signal(p->rdev, i, p->ib->fence->semaphore);
-		radeon_ring_unlock_commit(p->rdev, &p->rdev->ring[i]);
-
-		r = radeon_ring_lock(p->rdev, &p->rdev->ring[p->ring], 3);
-		if (r)
-			return r;
-		radeon_semaphore_emit_wait(p->rdev, p->ring, p->ib->fence->semaphore);
-		radeon_ring_unlock_commit(p->rdev, &p->rdev->ring[p->ring]);
-	}
-	return 0;
+	if (!need_sync) {
+		return 0;
+	}
+
+	r = radeon_semaphore_create(p->rdev, &p->ib.semaphore);
+	if (r) {
+		return r;
+	}
+
+	return radeon_semaphore_sync_rings(p->rdev, p->ib.semaphore,
+					   sync_to_ring, p->ring);
 }
 
 int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
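The per-reloc loop now only records which foreign rings still owe work; the removed lock/signal/commit dance is assumed to move into radeon_semaphore_sync_rings(). A hedged sketch of the signal/wait pairing such a helper would emit:

/* Hedged sketch of what radeon_semaphore_sync_rings() presumably does
 * for each ring flagged in sync_to[]: signal on the producer ring,
 * wait on the consumer, so dst_ring stalls until the foreign work
 * drains. Ring locking and error paths omitted.
 */
static void example_sync(struct radeon_device *rdev,
			 struct radeon_semaphore *sem,
			 bool *sync_to, int dst_ring)
{
	int i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (i == dst_ring || !sync_to[i])
			continue;
		radeon_semaphore_emit_signal(rdev, i, sem);
		radeon_semaphore_emit_wait(rdev, dst_ring, sem);
	}
}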
@@ -172,6 +161,10 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
172 /* get chunks */ 161 /* get chunks */
173 INIT_LIST_HEAD(&p->validated); 162 INIT_LIST_HEAD(&p->validated);
174 p->idx = 0; 163 p->idx = 0;
164 p->ib.sa_bo = NULL;
165 p->ib.semaphore = NULL;
166 p->const_ib.sa_bo = NULL;
167 p->const_ib.semaphore = NULL;
175 p->chunk_ib_idx = -1; 168 p->chunk_ib_idx = -1;
176 p->chunk_relocs_idx = -1; 169 p->chunk_relocs_idx = -1;
177 p->chunk_flags_idx = -1; 170 p->chunk_flags_idx = -1;
@@ -278,11 +271,16 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
 				  p->chunks[p->chunk_ib_idx].length_dw);
 			return -EINVAL;
 		}
-		p->chunks[p->chunk_ib_idx].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
-		p->chunks[p->chunk_ib_idx].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
-		if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL ||
-		    p->chunks[p->chunk_ib_idx].kpage[1] == NULL)
-			return -ENOMEM;
+		if ((p->rdev->flags & RADEON_IS_AGP)) {
+			p->chunks[p->chunk_ib_idx].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
+			p->chunks[p->chunk_ib_idx].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
+			if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL ||
+			    p->chunks[p->chunk_ib_idx].kpage[1] == NULL) {
+				kfree(p->chunks[i].kpage[0]);
+				kfree(p->chunks[i].kpage[1]);
+				return -ENOMEM;
+			}
+		}
 		p->chunks[p->chunk_ib_idx].kpage_idx[0] = -1;
 		p->chunks[p->chunk_ib_idx].kpage_idx[1] = -1;
 		p->chunks[p->chunk_ib_idx].last_copied_page = -1;
@@ -305,10 +303,9 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
 {
 	unsigned i;
 
-
-	if (!error && parser->ib)
+	if (!error)
 		ttm_eu_fence_buffer_objects(&parser->validated,
-					    parser->ib->fence);
+					    parser->ib.fence);
 	else
 		ttm_eu_backoff_reservation(&parser->validated);
 
@@ -323,12 +320,15 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
 	kfree(parser->relocs_ptr);
 	for (i = 0; i < parser->nchunks; i++) {
 		kfree(parser->chunks[i].kdata);
-		kfree(parser->chunks[i].kpage[0]);
-		kfree(parser->chunks[i].kpage[1]);
+		if ((parser->rdev->flags & RADEON_IS_AGP)) {
+			kfree(parser->chunks[i].kpage[0]);
+			kfree(parser->chunks[i].kpage[1]);
+		}
 	}
 	kfree(parser->chunks);
 	kfree(parser->chunks_array);
 	radeon_ib_free(parser->rdev, &parser->ib);
+	radeon_ib_free(parser->rdev, &parser->const_ib);
 }
 
 static int radeon_cs_ib_chunk(struct radeon_device *rdev,
@@ -354,7 +354,7 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
 		DRM_ERROR("Failed to get ib !\n");
 		return r;
 	}
-	parser->ib->length_dw = ib_chunk->length_dw;
+	parser->ib.length_dw = ib_chunk->length_dw;
 	r = radeon_cs_parse(rdev, parser->ring, parser);
 	if (r || parser->parser_error) {
 		DRM_ERROR("Invalid command stream !\n");
@@ -369,8 +369,8 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
 	if (r) {
 		DRM_ERROR("Failed to synchronize rings !\n");
 	}
-	parser->ib->vm_id = 0;
-	r = radeon_ib_schedule(rdev, parser->ib);
+	parser->ib.vm_id = 0;
+	r = radeon_ib_schedule(rdev, &parser->ib);
 	if (r) {
 		DRM_ERROR("Failed to schedule IB !\n");
 	}
@@ -421,14 +421,14 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
 		DRM_ERROR("Failed to get const ib !\n");
 		return r;
 	}
-	parser->const_ib->is_const_ib = true;
-	parser->const_ib->length_dw = ib_chunk->length_dw;
+	parser->const_ib.is_const_ib = true;
+	parser->const_ib.length_dw = ib_chunk->length_dw;
 	/* Copy the packet into the IB */
-	if (DRM_COPY_FROM_USER(parser->const_ib->ptr, ib_chunk->user_ptr,
+	if (DRM_COPY_FROM_USER(parser->const_ib.ptr, ib_chunk->user_ptr,
 			       ib_chunk->length_dw * 4)) {
 		return -EFAULT;
 	}
-	r = radeon_ring_ib_parse(rdev, parser->ring, parser->const_ib);
+	r = radeon_ring_ib_parse(rdev, parser->ring, &parser->const_ib);
 	if (r) {
 		return r;
 	}
@@ -445,13 +445,13 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
 		DRM_ERROR("Failed to get ib !\n");
 		return r;
 	}
-	parser->ib->length_dw = ib_chunk->length_dw;
+	parser->ib.length_dw = ib_chunk->length_dw;
 	/* Copy the packet into the IB */
-	if (DRM_COPY_FROM_USER(parser->ib->ptr, ib_chunk->user_ptr,
+	if (DRM_COPY_FROM_USER(parser->ib.ptr, ib_chunk->user_ptr,
 			       ib_chunk->length_dw * 4)) {
 		return -EFAULT;
 	}
-	r = radeon_ring_ib_parse(rdev, parser->ring, parser->ib);
+	r = radeon_ring_ib_parse(rdev, parser->ring, &parser->ib);
 	if (r) {
 		return r;
 	}
@@ -472,34 +472,44 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
 
 	if ((rdev->family >= CHIP_TAHITI) &&
 	    (parser->chunk_const_ib_idx != -1)) {
-		parser->const_ib->vm_id = vm->id;
+		parser->const_ib.vm_id = vm->id;
 		/* ib pool is bind at 0 in virtual address space to gpu_addr is the
 		 * offset inside the pool bo
 		 */
-		parser->const_ib->gpu_addr = parser->const_ib->sa_bo.offset;
-		r = radeon_ib_schedule(rdev, parser->const_ib);
+		parser->const_ib.gpu_addr = parser->const_ib.sa_bo->soffset;
+		r = radeon_ib_schedule(rdev, &parser->const_ib);
 		if (r)
 			goto out;
 	}
 
-	parser->ib->vm_id = vm->id;
+	parser->ib.vm_id = vm->id;
 	/* ib pool is bind at 0 in virtual address space to gpu_addr is the
 	 * offset inside the pool bo
 	 */
-	parser->ib->gpu_addr = parser->ib->sa_bo.offset;
-	parser->ib->is_const_ib = false;
-	r = radeon_ib_schedule(rdev, parser->ib);
+	parser->ib.gpu_addr = parser->ib.sa_bo->soffset;
+	parser->ib.is_const_ib = false;
+	r = radeon_ib_schedule(rdev, &parser->ib);
 out:
 	if (!r) {
 		if (vm->fence) {
 			radeon_fence_unref(&vm->fence);
 		}
-		vm->fence = radeon_fence_ref(parser->ib->fence);
+		vm->fence = radeon_fence_ref(parser->ib.fence);
 	}
 	mutex_unlock(&fpriv->vm.mutex);
 	return r;
 }
 
+static int radeon_cs_handle_lockup(struct radeon_device *rdev, int r)
+{
+	if (r == -EDEADLK) {
+		r = radeon_gpu_reset(rdev);
+		if (!r)
+			r = -EAGAIN;
+	}
+	return r;
+}
+
 int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 {
 	struct radeon_device *rdev = dev->dev_private;
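radeon_cs_handle_lockup() gives the CS ioctl a uniform exit path: a detected lockup (-EDEADLK) triggers a reset, and a successful reset is reported as -EAGAIN so the submission can simply be replayed. A hedged userspace-side sketch:

/* Hedged sketch (assumes <sys/ioctl.h> and <errno.h>): EAGAIN after a
 * successful GPU reset means "resubmit the same stream". libdrm's
 * drmIoctl() already loops on EINTR/EAGAIN, so the retry usually
 * happens there; this manual loop is only illustrative.
 */
static int example_submit_cs(int fd, struct drm_radeon_cs *cs)
{
	int r;

	do {
		r = ioctl(fd, DRM_IOCTL_RADEON_CS, cs);
	} while (r == -1 && (errno == EINTR || errno == EAGAIN));
	return r == -1 ? -errno : 0;
}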
@@ -521,6 +531,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
521 if (r) { 531 if (r) {
522 DRM_ERROR("Failed to initialize parser !\n"); 532 DRM_ERROR("Failed to initialize parser !\n");
523 radeon_cs_parser_fini(&parser, r); 533 radeon_cs_parser_fini(&parser, r);
534 r = radeon_cs_handle_lockup(rdev, r);
524 radeon_mutex_unlock(&rdev->cs_mutex); 535 radeon_mutex_unlock(&rdev->cs_mutex);
525 return r; 536 return r;
526 } 537 }
@@ -529,6 +540,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
529 if (r != -ERESTARTSYS) 540 if (r != -ERESTARTSYS)
530 DRM_ERROR("Failed to parse relocation %d!\n", r); 541 DRM_ERROR("Failed to parse relocation %d!\n", r);
531 radeon_cs_parser_fini(&parser, r); 542 radeon_cs_parser_fini(&parser, r);
543 r = radeon_cs_handle_lockup(rdev, r);
532 radeon_mutex_unlock(&rdev->cs_mutex); 544 radeon_mutex_unlock(&rdev->cs_mutex);
533 return r; 545 return r;
534 } 546 }
@@ -542,6 +554,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
542 } 554 }
543out: 555out:
544 radeon_cs_parser_fini(&parser, r); 556 radeon_cs_parser_fini(&parser, r);
557 r = radeon_cs_handle_lockup(rdev, r);
545 radeon_mutex_unlock(&rdev->cs_mutex); 558 radeon_mutex_unlock(&rdev->cs_mutex);
546 return r; 559 return r;
547} 560}
@@ -559,7 +572,7 @@ int radeon_cs_finish_pages(struct radeon_cs_parser *p)
 			size = PAGE_SIZE;
 		}
 
-		if (DRM_COPY_FROM_USER(p->ib->ptr + (i * (PAGE_SIZE/4)),
+		if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
 				       ibc->user_ptr + (i * PAGE_SIZE),
 				       size))
 			return -EFAULT;
@@ -573,9 +586,10 @@ int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
 	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
 	int i;
 	int size = PAGE_SIZE;
+	bool copy1 = (p->rdev->flags & RADEON_IS_AGP) ? false : true;
 
 	for (i = ibc->last_copied_page + 1; i < pg_idx; i++) {
-		if (DRM_COPY_FROM_USER(p->ib->ptr + (i * (PAGE_SIZE/4)),
+		if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
 				       ibc->user_ptr + (i * PAGE_SIZE),
 				       PAGE_SIZE)) {
 			p->parser_error = -EFAULT;
@@ -583,14 +597,16 @@ int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
583 } 597 }
584 } 598 }
585 599
586 new_page = ibc->kpage_idx[0] < ibc->kpage_idx[1] ? 0 : 1;
587
588 if (pg_idx == ibc->last_page_index) { 600 if (pg_idx == ibc->last_page_index) {
589 size = (ibc->length_dw * 4) % PAGE_SIZE; 601 size = (ibc->length_dw * 4) % PAGE_SIZE;
590 if (size == 0) 602 if (size == 0)
591 size = PAGE_SIZE; 603 size = PAGE_SIZE;
592 } 604 }
593 605
606 new_page = ibc->kpage_idx[0] < ibc->kpage_idx[1] ? 0 : 1;
607 if (copy1)
608 ibc->kpage[new_page] = p->ib.ptr + (pg_idx * (PAGE_SIZE / 4));
609
594 if (DRM_COPY_FROM_USER(ibc->kpage[new_page], 610 if (DRM_COPY_FROM_USER(ibc->kpage[new_page],
595 ibc->user_ptr + (pg_idx * PAGE_SIZE), 611 ibc->user_ptr + (pg_idx * PAGE_SIZE),
596 size)) { 612 size)) {
@@ -598,8 +614,9 @@ int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
598 return 0; 614 return 0;
599 } 615 }
600 616
601 /* copy to IB here */ 617 /* copy to IB for the non-single-copy case */
602 memcpy((void *)(p->ib->ptr+(pg_idx*(PAGE_SIZE/4))), ibc->kpage[new_page], size); 618 if (!copy1)
619 memcpy((void *)(p->ib.ptr+(pg_idx*(PAGE_SIZE/4))), ibc->kpage[new_page], size);
603 620
604 ibc->last_copied_page = pg_idx; 621 ibc->last_copied_page = pg_idx;
605 ibc->kpage_idx[new_page] = pg_idx; 622 ibc->kpage_idx[new_page] = pg_idx;
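The copy1 flag introduced in this hunk saves one memcpy on non-AGP systems: there, kpage[new_page] is pointed straight into the indirect buffer, so the single DRM_COPY_FROM_USER already lands the dwords in place, while the AGP path keeps staging through a bounce page and copying into the IB afterwards. A reduced model of the two paths (plain-pointer sketch; PAGE_SIZE hard-coded, copy_in() standing in for the user-copy primitive):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define PG 4096

/* Stand-in for DRM_COPY_FROM_USER; always succeeds in this model. */
static int copy_in(void *dst, const void *src, size_t n)
{
        memcpy(dst, src, n);
        return 0;
}

/* One copy when the bounce page aliases the IB (non-AGP), two copies
 * when the IB lives in AGP memory and must be written as a last step. */
static int update_page(uint32_t *ib, void *kpage, bool copy1,
                       const void *user, size_t pg_idx, size_t size)
{
        void *dst = copy1 ? (void *)(ib + pg_idx * (PG / 4)) : kpage;

        if (copy_in(dst, user, size))
                return -1;
        if (!copy1)
                memcpy(ib + pg_idx * (PG / 4), kpage, size);
        return 0;
}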
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 5992502a3448..066c98b888a5 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -193,7 +193,7 @@ int radeon_wb_init(struct radeon_device *rdev)
193 193
194 if (rdev->wb.wb_obj == NULL) { 194 if (rdev->wb.wb_obj == NULL) {
195 r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true, 195 r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
196 RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj); 196 RADEON_GEM_DOMAIN_GTT, NULL, &rdev->wb.wb_obj);
197 if (r) { 197 if (r) {
198 dev_warn(rdev->dev, "(%d) create WB bo failed\n", r); 198 dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
199 return r; 199 return r;
@@ -225,9 +225,9 @@ int radeon_wb_init(struct radeon_device *rdev)
225 /* disable event_write fences */ 225 /* disable event_write fences */
226 rdev->wb.use_event = false; 226 rdev->wb.use_event = false;
227 /* disabled via module param */ 227 /* disabled via module param */
228 if (radeon_no_wb == 1) 228 if (radeon_no_wb == 1) {
229 rdev->wb.enabled = false; 229 rdev->wb.enabled = false;
230 else { 230 } else {
231 if (rdev->flags & RADEON_IS_AGP) { 231 if (rdev->flags & RADEON_IS_AGP) {
232 /* often unreliable on AGP */ 232 /* often unreliable on AGP */
233 rdev->wb.enabled = false; 233 rdev->wb.enabled = false;
@@ -237,8 +237,9 @@ int radeon_wb_init(struct radeon_device *rdev)
237 } else { 237 } else {
238 rdev->wb.enabled = true; 238 rdev->wb.enabled = true;
239 /* event_write fences are only available on r600+ */ 239 /* event_write fences are only available on r600+ */
240 if (rdev->family >= CHIP_R600) 240 if (rdev->family >= CHIP_R600) {
241 rdev->wb.use_event = true; 241 rdev->wb.use_event = true;
242 }
242 } 243 }
243 } 244 }
244 /* always use writeback/events on NI, APUs */ 245 /* always use writeback/events on NI, APUs */
@@ -696,6 +697,11 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
696 return can_switch; 697 return can_switch;
697} 698}
698 699
700static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
701 .set_gpu_state = radeon_switcheroo_set_state,
702 .reprobe = NULL,
703 .can_switch = radeon_switcheroo_can_switch,
704};
699 705
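This is the driver-side half of the switcheroo API change mentioned in the pull request: instead of passing each callback to vga_switcheroo_register_client() positionally, drivers now hand over one const ops table, so new hooks such as .reprobe can be added without touching every caller. The shape of the change, condensed from this diff:

/* old style (removed further down in radeon_device_init):
 *   vga_switcheroo_register_client(pdev, set_state, NULL, can_switch);
 * new style: one shared, read-only ops table per driver. */
static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
        .set_gpu_state = radeon_switcheroo_set_state,
        .reprobe       = NULL,  /* nothing to re-probe on a radeon switch */
        .can_switch    = radeon_switcheroo_can_switch,
};

vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops);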
700int radeon_device_init(struct radeon_device *rdev, 706int radeon_device_init(struct radeon_device *rdev,
701 struct drm_device *ddev, 707 struct drm_device *ddev,
@@ -714,7 +720,6 @@ int radeon_device_init(struct radeon_device *rdev,
714 rdev->is_atom_bios = false; 720 rdev->is_atom_bios = false;
715 rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT; 721 rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
716 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; 722 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
717 rdev->gpu_lockup = false;
718 rdev->accel_working = false; 723 rdev->accel_working = false;
719 724
720 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n", 725 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
@@ -724,21 +729,18 @@ int radeon_device_init(struct radeon_device *rdev,
724 /* mutex initialization are all done here so we 729 /* mutex initialization are all done here so we
725 * can recall function without having locking issues */ 730 * can recall function without having locking issues */
726 radeon_mutex_init(&rdev->cs_mutex); 731 radeon_mutex_init(&rdev->cs_mutex);
727 radeon_mutex_init(&rdev->ib_pool.mutex); 732 mutex_init(&rdev->ring_lock);
728 for (i = 0; i < RADEON_NUM_RINGS; ++i)
729 mutex_init(&rdev->ring[i].mutex);
730 mutex_init(&rdev->dc_hw_i2c_mutex); 733 mutex_init(&rdev->dc_hw_i2c_mutex);
731 if (rdev->family >= CHIP_R600) 734 if (rdev->family >= CHIP_R600)
732 spin_lock_init(&rdev->ih.lock); 735 spin_lock_init(&rdev->ih.lock);
733 mutex_init(&rdev->gem.mutex); 736 mutex_init(&rdev->gem.mutex);
734 mutex_init(&rdev->pm.mutex); 737 mutex_init(&rdev->pm.mutex);
735 mutex_init(&rdev->vram_mutex); 738 mutex_init(&rdev->vram_mutex);
736 rwlock_init(&rdev->fence_lock);
737 rwlock_init(&rdev->semaphore_drv.lock);
738 INIT_LIST_HEAD(&rdev->gem.objects);
739 init_waitqueue_head(&rdev->irq.vblank_queue); 739 init_waitqueue_head(&rdev->irq.vblank_queue);
740 init_waitqueue_head(&rdev->irq.idle_queue); 740 init_waitqueue_head(&rdev->irq.idle_queue);
741 INIT_LIST_HEAD(&rdev->semaphore_drv.bo); 741 r = radeon_gem_init(rdev);
742 if (r)
743 return r;
742 /* initialize vm here */ 744 /* initialize vm here */
743 rdev->vm_manager.use_bitmap = 1; 745 rdev->vm_manager.use_bitmap = 1;
744 rdev->vm_manager.max_pfn = 1 << 20; 746 rdev->vm_manager.max_pfn = 1 << 20;
@@ -814,10 +816,7 @@ int radeon_device_init(struct radeon_device *rdev,
814 /* this will fail for cards that aren't VGA class devices, just 816 /* this will fail for cards that aren't VGA class devices, just
815 * ignore it */ 817 * ignore it */
816 vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode); 818 vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
817 vga_switcheroo_register_client(rdev->pdev, 819 vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops);
818 radeon_switcheroo_set_state,
819 NULL,
820 radeon_switcheroo_can_switch);
821 820
822 r = radeon_init(rdev); 821 r = radeon_init(rdev);
823 if (r) 822 if (r)
@@ -914,9 +913,12 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
914 } 913 }
915 /* evict vram memory */ 914 /* evict vram memory */
916 radeon_bo_evict_vram(rdev); 915 radeon_bo_evict_vram(rdev);
916
917 mutex_lock(&rdev->ring_lock);
917 /* wait for gpu to finish processing current batch */ 918 /* wait for gpu to finish processing current batch */
918 for (i = 0; i < RADEON_NUM_RINGS; i++) 919 for (i = 0; i < RADEON_NUM_RINGS; i++)
919 radeon_fence_wait_last(rdev, i); 920 radeon_fence_wait_empty_locked(rdev, i);
921 mutex_unlock(&rdev->ring_lock);
920 922
921 radeon_save_bios_scratch_regs(rdev); 923 radeon_save_bios_scratch_regs(rdev);
922 924
@@ -955,7 +957,6 @@ int radeon_resume_kms(struct drm_device *dev)
955 console_unlock(); 957 console_unlock();
956 return -1; 958 return -1;
957 } 959 }
958 pci_set_master(dev->pdev);
959 /* resume AGP if in use */ 960 /* resume AGP if in use */
960 radeon_agp_resume(rdev); 961 radeon_agp_resume(rdev);
961 radeon_resume(rdev); 962 radeon_resume(rdev);
@@ -988,9 +989,6 @@ int radeon_gpu_reset(struct radeon_device *rdev)
988 int r; 989 int r;
989 int resched; 990 int resched;
990 991
991 /* Prevent CS ioctl from interfering */
992 radeon_mutex_lock(&rdev->cs_mutex);
993
994 radeon_save_bios_scratch_regs(rdev); 992 radeon_save_bios_scratch_regs(rdev);
995 /* block TTM */ 993 /* block TTM */
996 resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); 994 resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
@@ -1005,8 +1003,6 @@ int radeon_gpu_reset(struct radeon_device *rdev)
1005 ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); 1003 ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
1006 } 1004 }
1007 1005
1008 radeon_mutex_unlock(&rdev->cs_mutex);
1009
1010 if (r) { 1006 if (r) {
1011 /* bad news, how to tell it to userspace ? */ 1007 /* bad news, how to tell it to userspace ? */
1012 dev_info(rdev->dev, "GPU reset failed\n"); 1008 dev_info(rdev->dev, "GPU reset failed\n");
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 0a1d4bd65edc..64a008d14493 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -573,24 +573,6 @@ static const char *encoder_names[37] = {
573 "INTERNAL_VCE" 573 "INTERNAL_VCE"
574}; 574};
575 575
576static const char *connector_names[15] = {
577 "Unknown",
578 "VGA",
579 "DVI-I",
580 "DVI-D",
581 "DVI-A",
582 "Composite",
583 "S-video",
584 "LVDS",
585 "Component",
586 "DIN",
587 "DisplayPort",
588 "HDMI-A",
589 "HDMI-B",
590 "TV",
591 "eDP",
592};
593
594static const char *hpd_names[6] = { 576static const char *hpd_names[6] = {
595 "HPD1", 577 "HPD1",
596 "HPD2", 578 "HPD2",
@@ -613,7 +595,7 @@ static void radeon_print_display_setup(struct drm_device *dev)
613 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 595 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
614 radeon_connector = to_radeon_connector(connector); 596 radeon_connector = to_radeon_connector(connector);
615 DRM_INFO("Connector %d:\n", i); 597 DRM_INFO("Connector %d:\n", i);
616 DRM_INFO(" %s\n", connector_names[connector->connector_type]); 598 DRM_INFO(" %s\n", drm_get_connector_name(connector));
617 if (radeon_connector->hpd.hpd != RADEON_HPD_NONE) 599 if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
618 DRM_INFO(" %s\n", hpd_names[radeon_connector->hpd.hpd]); 600 DRM_INFO(" %s\n", hpd_names[radeon_connector->hpd.hpd]);
619 if (radeon_connector->ddc_bus) { 601 if (radeon_connector->ddc_bus) {
@@ -1243,6 +1225,93 @@ void radeon_update_display_priority(struct radeon_device *rdev)
1243 1225
1244} 1226}
1245 1227
1228/*
1229 * Allocate hdmi structs and determine register offsets
1230 */
1231static void radeon_afmt_init(struct radeon_device *rdev)
1232{
1233 int i;
1234
1235 for (i = 0; i < RADEON_MAX_AFMT_BLOCKS; i++)
1236 rdev->mode_info.afmt[i] = NULL;
1237
1238 if (ASIC_IS_DCE6(rdev)) {
1239 /* todo */
1240 } else if (ASIC_IS_DCE4(rdev)) {
1241 /* DCE4/5 has 6 audio blocks tied to DIG encoders */
1242 /* DCE4.1 has 2 audio blocks tied to DIG encoders */
1243 rdev->mode_info.afmt[0] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
1244 if (rdev->mode_info.afmt[0]) {
1245 rdev->mode_info.afmt[0]->offset = EVERGREEN_CRTC0_REGISTER_OFFSET;
1246 rdev->mode_info.afmt[0]->id = 0;
1247 }
1248 rdev->mode_info.afmt[1] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
1249 if (rdev->mode_info.afmt[1]) {
1250 rdev->mode_info.afmt[1]->offset = EVERGREEN_CRTC1_REGISTER_OFFSET;
1251 rdev->mode_info.afmt[1]->id = 1;
1252 }
1253 if (!ASIC_IS_DCE41(rdev)) {
1254 rdev->mode_info.afmt[2] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
1255 if (rdev->mode_info.afmt[2]) {
1256 rdev->mode_info.afmt[2]->offset = EVERGREEN_CRTC2_REGISTER_OFFSET;
1257 rdev->mode_info.afmt[2]->id = 2;
1258 }
1259 rdev->mode_info.afmt[3] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
1260 if (rdev->mode_info.afmt[3]) {
1261 rdev->mode_info.afmt[3]->offset = EVERGREEN_CRTC3_REGISTER_OFFSET;
1262 rdev->mode_info.afmt[3]->id = 3;
1263 }
1264 rdev->mode_info.afmt[4] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
1265 if (rdev->mode_info.afmt[4]) {
1266 rdev->mode_info.afmt[4]->offset = EVERGREEN_CRTC4_REGISTER_OFFSET;
1267 rdev->mode_info.afmt[4]->id = 4;
1268 }
1269 rdev->mode_info.afmt[5] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
1270 if (rdev->mode_info.afmt[5]) {
1271 rdev->mode_info.afmt[5]->offset = EVERGREEN_CRTC5_REGISTER_OFFSET;
1272 rdev->mode_info.afmt[5]->id = 5;
1273 }
1274 }
1275 } else if (ASIC_IS_DCE3(rdev)) {
1276 /* DCE3.x has 2 audio blocks tied to DIG encoders */
1277 rdev->mode_info.afmt[0] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
1278 if (rdev->mode_info.afmt[0]) {
1279 rdev->mode_info.afmt[0]->offset = DCE3_HDMI_OFFSET0;
1280 rdev->mode_info.afmt[0]->id = 0;
1281 }
1282 rdev->mode_info.afmt[1] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
1283 if (rdev->mode_info.afmt[1]) {
1284 rdev->mode_info.afmt[1]->offset = DCE3_HDMI_OFFSET1;
1285 rdev->mode_info.afmt[1]->id = 1;
1286 }
1287 } else if (ASIC_IS_DCE2(rdev)) {
1288 /* DCE2 has at least 1 routable audio block */
1289 rdev->mode_info.afmt[0] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
1290 if (rdev->mode_info.afmt[0]) {
1291 rdev->mode_info.afmt[0]->offset = DCE2_HDMI_OFFSET0;
1292 rdev->mode_info.afmt[0]->id = 0;
1293 }
1294 /* r6xx has 2 routable audio blocks */
1295 if (rdev->family >= CHIP_R600) {
1296 rdev->mode_info.afmt[1] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
1297 if (rdev->mode_info.afmt[1]) {
1298 rdev->mode_info.afmt[1]->offset = DCE2_HDMI_OFFSET1;
1299 rdev->mode_info.afmt[1]->id = 1;
1300 }
1301 }
1302 }
1303}
1304
1305static void radeon_afmt_fini(struct radeon_device *rdev)
1306{
1307 int i;
1308
1309 for (i = 0; i < RADEON_MAX_AFMT_BLOCKS; i++) {
1310 kfree(rdev->mode_info.afmt[i]);
1311 rdev->mode_info.afmt[i] = NULL;
1312 }
1313}
1314
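radeon_afmt_init() above spells out one kzalloc block per audio/FMT engine, which keeps the per-ASIC differences obvious at the cost of repetition. For the DCE4/5 branch the same result could come from a table-driven loop; a sketch only, preserving the original behaviour where a failed kzalloc simply leaves that slot NULL:

static const u32 dce4_afmt_offsets[] = {
        EVERGREEN_CRTC0_REGISTER_OFFSET, EVERGREEN_CRTC1_REGISTER_OFFSET,
        EVERGREEN_CRTC2_REGISTER_OFFSET, EVERGREEN_CRTC3_REGISTER_OFFSET,
        EVERGREEN_CRTC4_REGISTER_OFFSET, EVERGREEN_CRTC5_REGISTER_OFFSET,
};

int i, n = ASIC_IS_DCE41(rdev) ? 2 : 6;  /* DCE4.1 wires up only two blocks */

for (i = 0; i < n; i++) {
        rdev->mode_info.afmt[i] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
        if (rdev->mode_info.afmt[i]) {
                rdev->mode_info.afmt[i]->offset = dce4_afmt_offsets[i];
                rdev->mode_info.afmt[i]->id = i;
        }
}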
1246int radeon_modeset_init(struct radeon_device *rdev) 1315int radeon_modeset_init(struct radeon_device *rdev)
1247{ 1316{
1248 int i; 1317 int i;
@@ -1251,7 +1320,7 @@ int radeon_modeset_init(struct radeon_device *rdev)
1251 drm_mode_config_init(rdev->ddev); 1320 drm_mode_config_init(rdev->ddev);
1252 rdev->mode_info.mode_config_initialized = true; 1321 rdev->mode_info.mode_config_initialized = true;
1253 1322
1254 rdev->ddev->mode_config.funcs = (void *)&radeon_mode_funcs; 1323 rdev->ddev->mode_config.funcs = &radeon_mode_funcs;
1255 1324
1256 if (ASIC_IS_DCE5(rdev)) { 1325 if (ASIC_IS_DCE5(rdev)) {
1257 rdev->ddev->mode_config.max_width = 16384; 1326 rdev->ddev->mode_config.max_width = 16384;
@@ -1303,6 +1372,9 @@ int radeon_modeset_init(struct radeon_device *rdev)
1303 /* initialize hpd */ 1372 /* initialize hpd */
1304 radeon_hpd_init(rdev); 1373 radeon_hpd_init(rdev);
1305 1374
1375 /* setup afmt */
1376 radeon_afmt_init(rdev);
1377
1306 /* Initialize power management */ 1378 /* Initialize power management */
1307 radeon_pm_init(rdev); 1379 radeon_pm_init(rdev);
1308 1380
@@ -1319,6 +1391,7 @@ void radeon_modeset_fini(struct radeon_device *rdev)
1319 radeon_pm_fini(rdev); 1391 radeon_pm_fini(rdev);
1320 1392
1321 if (rdev->mode_info.mode_config_initialized) { 1393 if (rdev->mode_info.mode_config_initialized) {
1394 radeon_afmt_fini(rdev);
1322 drm_kms_helper_poll_fini(rdev->ddev); 1395 drm_kms_helper_poll_fini(rdev->ddev);
1323 radeon_hpd_fini(rdev); 1396 radeon_hpd_fini(rdev);
1324 drm_mode_config_cleanup(rdev->ddev); 1397 drm_mode_config_cleanup(rdev->ddev);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index ef7bb3f6ecae..f0bb2b543b13 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -105,6 +105,11 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
105int radeon_mode_dumb_destroy(struct drm_file *file_priv, 105int radeon_mode_dumb_destroy(struct drm_file *file_priv,
106 struct drm_device *dev, 106 struct drm_device *dev,
107 uint32_t handle); 107 uint32_t handle);
108struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
109 struct drm_gem_object *obj,
110 int flags);
111struct drm_gem_object *radeon_gem_prime_import(struct drm_device *dev,
112 struct dma_buf *dma_buf);
108 113
109#if defined(CONFIG_DEBUG_FS) 114#if defined(CONFIG_DEBUG_FS)
110int radeon_debugfs_init(struct drm_minor *minor); 115int radeon_debugfs_init(struct drm_minor *minor);
@@ -128,6 +133,7 @@ int radeon_disp_priority = 0;
128int radeon_hw_i2c = 0; 133int radeon_hw_i2c = 0;
129int radeon_pcie_gen2 = 0; 134int radeon_pcie_gen2 = 0;
130int radeon_msi = -1; 135int radeon_msi = -1;
136int radeon_lockup_timeout = 10000;
131 137
132MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); 138MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
133module_param_named(no_wb, radeon_no_wb, int, 0444); 139module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -177,6 +183,9 @@ module_param_named(pcie_gen2, radeon_pcie_gen2, int, 0444);
177MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)"); 183MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");
178module_param_named(msi, radeon_msi, int, 0444); 184module_param_named(msi, radeon_msi, int, 0444);
179 185
186MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default 10000 = 10 seconds, 0 = disable)");
187module_param_named(lockup_timeout, radeon_lockup_timeout, int, 0444);
188
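Since the parameter is registered with mode 0444 it is read-only once the module is loaded; the timeout is chosen at load time, e.g. modprobe radeon lockup_timeout=5000, or radeon.lockup_timeout=5000 on the kernel command line for a built-in driver. Per the MODULE_PARM_DESC above, 0 disables the ring-activity-based lockup detection that the fence rework below relies on.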
180static int radeon_suspend(struct drm_device *dev, pm_message_t state) 189static int radeon_suspend(struct drm_device *dev, pm_message_t state)
181{ 190{
182 drm_radeon_private_t *dev_priv = dev->dev_private; 191 drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -329,7 +338,8 @@ static const struct file_operations radeon_driver_kms_fops = {
329static struct drm_driver kms_driver = { 338static struct drm_driver kms_driver = {
330 .driver_features = 339 .driver_features =
331 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | 340 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
332 DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED | DRIVER_GEM, 341 DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED | DRIVER_GEM |
342 DRIVER_PRIME,
333 .dev_priv_size = 0, 343 .dev_priv_size = 0,
334 .load = radeon_driver_load_kms, 344 .load = radeon_driver_load_kms,
335 .firstopen = radeon_driver_firstopen_kms, 345 .firstopen = radeon_driver_firstopen_kms,
@@ -364,6 +374,12 @@ static struct drm_driver kms_driver = {
364 .dumb_map_offset = radeon_mode_dumb_mmap, 374 .dumb_map_offset = radeon_mode_dumb_mmap,
365 .dumb_destroy = radeon_mode_dumb_destroy, 375 .dumb_destroy = radeon_mode_dumb_destroy,
366 .fops = &radeon_driver_kms_fops, 376 .fops = &radeon_driver_kms_fops,
377
378 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
379 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
380 .gem_prime_export = radeon_gem_prime_export,
381 .gem_prime_import = radeon_gem_prime_import,
382
367 .name = DRIVER_NAME, 383 .name = DRIVER_NAME,
368 .desc = DRIVER_DESC, 384 .desc = DRIVER_DESC,
369 .date = DRIVER_DATE, 385 .date = DRIVER_DATE,
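With DRIVER_PRIME set and these four hooks in place, a radeon buffer can leave the device as a dma-buf file descriptor and re-enter another DRM device as a GEM handle. Roughly what that looks like from userspace, using the libdrm helpers of the same vintage (a sketch, not radeon-specific):

#include <stdint.h>
#include <xf86drm.h>    /* drmPrimeHandleToFD(), drmPrimeFDToHandle() */

/* Export a GEM handle from one DRM fd as a dma-buf, import it on another;
 * the kernel side lands in the gem_prime_export/import hooks above. */
static int share_bo(int export_fd, uint32_t handle,
                    int import_fd, uint32_t *imported_handle)
{
        int dmabuf_fd, r;

        r = drmPrimeHandleToFD(export_fd, handle, DRM_CLOEXEC, &dmabuf_fd);
        if (r)
                return r;
        return drmPrimeFDToHandle(import_fd, dmabuf_fd, imported_handle);
}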
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 4bd36a354fbe..11f5f402d22c 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -63,98 +63,82 @@ static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
63 63
64int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence) 64int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
65{ 65{
66 unsigned long irq_flags; 66 /* we are protected by the ring emission mutex */
67 67 if (fence->seq && fence->seq < RADEON_FENCE_NOTEMITED_SEQ) {
68 write_lock_irqsave(&rdev->fence_lock, irq_flags);
69 if (fence->emitted) {
70 write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
71 return 0; 68 return 0;
72 } 69 }
73 fence->seq = atomic_add_return(1, &rdev->fence_drv[fence->ring].seq); 70 fence->seq = ++rdev->fence_drv[fence->ring].seq;
74 if (!rdev->ring[fence->ring].ready) 71 radeon_fence_ring_emit(rdev, fence->ring, fence);
75 /* FIXME: cp is not running assume everything is done right
76 * away
77 */
78 radeon_fence_write(rdev, fence->seq, fence->ring);
79 else
80 radeon_fence_ring_emit(rdev, fence->ring, fence);
81
82 trace_radeon_fence_emit(rdev->ddev, fence->seq); 72 trace_radeon_fence_emit(rdev->ddev, fence->seq);
83 fence->emitted = true;
84 list_move_tail(&fence->list, &rdev->fence_drv[fence->ring].emitted);
85 write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
86 return 0; 73 return 0;
87} 74}
88 75
89static bool radeon_fence_poll_locked(struct radeon_device *rdev, int ring) 76void radeon_fence_process(struct radeon_device *rdev, int ring)
90{ 77{
91 struct radeon_fence *fence; 78 uint64_t seq, last_seq;
92 struct list_head *i, *n; 79 unsigned count_loop = 0;
93 uint32_t seq;
94 bool wake = false; 80 bool wake = false;
95 unsigned long cjiffies;
96 81
97 seq = radeon_fence_read(rdev, ring); 82 /* Note there is a scenario here for an infinite loop, but it's
98 if (seq != rdev->fence_drv[ring].last_seq) { 83 * very unlikely to happen. For it to happen, the current polling
99 rdev->fence_drv[ring].last_seq = seq; 84 * process needs to be interrupted by another process, and that other
100 rdev->fence_drv[ring].last_jiffies = jiffies; 85 * process needs to update the last_seq between the atomic read and
101 rdev->fence_drv[ring].last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT; 86 * xchg of the current process.
102 } else { 87 *
103 cjiffies = jiffies; 88 * Moreover, for this to turn into an infinite loop there need to be
104 if (time_after(cjiffies, rdev->fence_drv[ring].last_jiffies)) { 89 * continuously new fences signaled, i.e. radeon_fence_read needs
105 cjiffies -= rdev->fence_drv[ring].last_jiffies; 90 * to return a different value each time for both the currently
106 if (time_after(rdev->fence_drv[ring].last_timeout, cjiffies)) { 91 * polling process and the other process that xchgs the last_seq
107 /* update the timeout */ 92 * between the atomic read and xchg of the current process. And the
108 rdev->fence_drv[ring].last_timeout -= cjiffies; 93 * value the other process sets as last seq must be higher than
109 } else { 94 * the seq value we just read. Which means that the current process
110 /* the 500ms timeout is elapsed we should test 95 * needs to be interrupted after radeon_fence_read and before the
111 * for GPU lockup 96 * atomic xchg.
112 */ 97 *
113 rdev->fence_drv[ring].last_timeout = 1; 98 * To be even more safe we count the number of times we loop and
114 } 99 * bail out after 10 loops, accepting the fact that we might
115 } else { 100 * have temporarily set the last_seq not to the true real last
116 /* wrap around update last jiffies, we will just wait 101 * seq but to an older one.
117 * a little longer 102 */
118 */ 103 last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
119 rdev->fence_drv[ring].last_jiffies = cjiffies; 104 do {
105 seq = radeon_fence_read(rdev, ring);
106 seq |= last_seq & 0xffffffff00000000LL;
107 if (seq < last_seq) {
108 seq += 0x100000000LL;
120 } 109 }
121 return false; 110
122 } 111 if (seq == last_seq) {
123 n = NULL;
124 list_for_each(i, &rdev->fence_drv[ring].emitted) {
125 fence = list_entry(i, struct radeon_fence, list);
126 if (fence->seq == seq) {
127 n = i;
128 break; 112 break;
129 } 113 }
130 } 114 /* If we loop over, we don't want to return without
131 /* all fence previous to this one are considered as signaled */ 115 * checking if a fence is signaled, as it means that the
132 if (n) { 116 * seq we just read is different from the previous one.
133 i = n; 117 */
134 do {
135 n = i->prev;
136 list_move_tail(i, &rdev->fence_drv[ring].signaled);
137 fence = list_entry(i, struct radeon_fence, list);
138 fence->signaled = true;
139 i = n;
140 } while (i != &rdev->fence_drv[ring].emitted);
141 wake = true; 118 wake = true;
119 last_seq = seq;
120 if ((count_loop++) > 10) {
121 /* We looped over too many times, leave with the
122 * fact that we might have set an older fence
123 * seq than the current real last seq as signaled
124 * by the hw.
125 */
126 break;
127 }
128 } while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);
129
130 if (wake) {
131 rdev->fence_drv[ring].last_activity = jiffies;
132 wake_up_all(&rdev->fence_queue);
142 } 133 }
143 return wake;
144} 134}
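The loop above is also where the new 64-bit fence seq meets the 32-bit hardware: the scratch/writeback slot only ever holds the low word, so the driver grafts the high word of the last known sequence onto the value it read and bumps it by 2^32 whenever the result would move backwards. The arithmetic in isolation (standalone model, names invented for illustration):

#include <assert.h>
#include <stdint.h>

/* Rebuild a monotonically increasing 64-bit sequence from the 32-bit
 * value the GPU wrote, given the last 64-bit sequence we trusted. */
static uint64_t extend_seq(uint32_t hw_seq, uint64_t last_seq)
{
        uint64_t seq = (uint64_t)hw_seq | (last_seq & 0xffffffff00000000ULL);

        if (seq < last_seq)             /* low word wrapped past zero */
                seq += 0x100000000ULL;
        return seq;
}

int main(void)
{
        assert(extend_seq(0xfffffffe, 0xfffffffeULL) == 0xfffffffeULL);
        /* hardware wrapped from 0xffffffff to 2: carry into bit 32 */
        assert(extend_seq(2, 0xfffffffeULL) == 0x100000002ULL);
        return 0;
}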
145 135
146static void radeon_fence_destroy(struct kref *kref) 136static void radeon_fence_destroy(struct kref *kref)
147{ 137{
148 unsigned long irq_flags; 138 struct radeon_fence *fence;
149 struct radeon_fence *fence;
150 139
151 fence = container_of(kref, struct radeon_fence, kref); 140 fence = container_of(kref, struct radeon_fence, kref);
152 write_lock_irqsave(&fence->rdev->fence_lock, irq_flags); 141 fence->seq = RADEON_FENCE_NOTEMITED_SEQ;
153 list_del(&fence->list);
154 fence->emitted = false;
155 write_unlock_irqrestore(&fence->rdev->fence_lock, irq_flags);
156 if (fence->semaphore)
157 radeon_semaphore_free(fence->rdev, fence->semaphore);
158 kfree(fence); 142 kfree(fence);
159} 143}
160 144
@@ -162,171 +146,342 @@ int radeon_fence_create(struct radeon_device *rdev,
162 struct radeon_fence **fence, 146 struct radeon_fence **fence,
163 int ring) 147 int ring)
164{ 148{
165 unsigned long irq_flags;
166
167 *fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL); 149 *fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
168 if ((*fence) == NULL) { 150 if ((*fence) == NULL) {
169 return -ENOMEM; 151 return -ENOMEM;
170 } 152 }
171 kref_init(&((*fence)->kref)); 153 kref_init(&((*fence)->kref));
172 (*fence)->rdev = rdev; 154 (*fence)->rdev = rdev;
173 (*fence)->emitted = false; 155 (*fence)->seq = RADEON_FENCE_NOTEMITED_SEQ;
174 (*fence)->signaled = false;
175 (*fence)->seq = 0;
176 (*fence)->ring = ring; 156 (*fence)->ring = ring;
177 (*fence)->semaphore = NULL;
178 INIT_LIST_HEAD(&(*fence)->list);
179
180 write_lock_irqsave(&rdev->fence_lock, irq_flags);
181 list_add_tail(&(*fence)->list, &rdev->fence_drv[ring].created);
182 write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
183 return 0; 157 return 0;
184} 158}
185 159
186bool radeon_fence_signaled(struct radeon_fence *fence) 160static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
161 u64 seq, unsigned ring)
187{ 162{
188 unsigned long irq_flags; 163 if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
189 bool signaled = false;
190
191 if (!fence)
192 return true; 164 return true;
193 165 }
194 if (fence->rdev->gpu_lockup) 166 /* poll new last sequence at least once */
167 radeon_fence_process(rdev, ring);
168 if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
195 return true; 169 return true;
170 }
171 return false;
172}
196 173
197 write_lock_irqsave(&fence->rdev->fence_lock, irq_flags); 174bool radeon_fence_signaled(struct radeon_fence *fence)
198 signaled = fence->signaled; 175{
199 /* if we are shuting down report all fence as signaled */ 176 if (!fence) {
200 if (fence->rdev->shutdown) { 177 return true;
201 signaled = true;
202 } 178 }
203 if (!fence->emitted) { 179 if (fence->seq == RADEON_FENCE_NOTEMITED_SEQ) {
204 WARN(1, "Querying an unemitted fence : %p !\n", fence); 180 WARN(1, "Querying an unemitted fence : %p !\n", fence);
205 signaled = true; 181 return true;
206 } 182 }
207 if (!signaled) { 183 if (fence->seq == RADEON_FENCE_SIGNALED_SEQ) {
208 radeon_fence_poll_locked(fence->rdev, fence->ring); 184 return true;
209 signaled = fence->signaled; 185 }
186 if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
187 fence->seq = RADEON_FENCE_SIGNALED_SEQ;
188 return true;
210 } 189 }
211 write_unlock_irqrestore(&fence->rdev->fence_lock, irq_flags); 190 return false;
212 return signaled; 191}
192
193static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
194 unsigned ring, bool intr, bool lock_ring)
195{
196 unsigned long timeout, last_activity;
197 uint64_t seq;
198 unsigned i;
199 bool signaled;
200 int r;
201
202 while (target_seq > atomic64_read(&rdev->fence_drv[ring].last_seq)) {
203 if (!rdev->ring[ring].ready) {
204 return -EBUSY;
205 }
206
207 timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
208 if (time_after(rdev->fence_drv[ring].last_activity, timeout)) {
209 /* the normal case, timeout is somewhere before last_activity */
210 timeout = rdev->fence_drv[ring].last_activity - timeout;
211 } else {
212 /* either jiffies wrapped around, or no fence was signaled in the last 500ms;
213 * either way we will just wait for the minimum amount and then check for a lockup
214 */
215 timeout = 1;
216 }
217 seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
218 /* Save current last activity value, used to check for GPU lockups */
219 last_activity = rdev->fence_drv[ring].last_activity;
220
221 trace_radeon_fence_wait_begin(rdev->ddev, seq);
222 radeon_irq_kms_sw_irq_get(rdev, ring);
223 if (intr) {
224 r = wait_event_interruptible_timeout(rdev->fence_queue,
225 (signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
226 timeout);
227 } else {
228 r = wait_event_timeout(rdev->fence_queue,
229 (signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
230 timeout);
231 }
232 radeon_irq_kms_sw_irq_put(rdev, ring);
233 if (unlikely(r < 0)) {
234 return r;
235 }
236 trace_radeon_fence_wait_end(rdev->ddev, seq);
237
238 if (unlikely(!signaled)) {
239 /* we were interrupted for some reason and fence
240 * isn't signaled yet, resume waiting */
241 if (r) {
242 continue;
243 }
244
245 /* check if sequence value has changed since last_activity */
246 if (seq != atomic64_read(&rdev->fence_drv[ring].last_seq)) {
247 continue;
248 }
249
250 if (lock_ring) {
251 mutex_lock(&rdev->ring_lock);
252 }
253
254 /* test if somebody else has already decided that this is a lockup */
255 if (last_activity != rdev->fence_drv[ring].last_activity) {
256 if (lock_ring) {
257 mutex_unlock(&rdev->ring_lock);
258 }
259 continue;
260 }
261
262 if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
263 /* good news we believe it's a lockup */
264 dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx last fence id 0x%016llx)\n",
265 target_seq, seq);
266
267 /* change last activity so nobody else thinks there is a lockup */
268 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
269 rdev->fence_drv[i].last_activity = jiffies;
270 }
271
272 /* mark the ring as not ready any more */
273 rdev->ring[ring].ready = false;
274 if (lock_ring) {
275 mutex_unlock(&rdev->ring_lock);
276 }
277 return -EDEADLK;
278 }
279
280 if (lock_ring) {
281 mutex_unlock(&rdev->ring_lock);
282 }
283 }
284 }
285 return 0;
213} 286}
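Two details of the wait loop above matter for the rest of the series. First, the sleep interval is derived from last_activity rather than a fixed budget: sleep for whatever remains of the RADEON_FENCE_JIFFIES_TIMEOUT window, or a single jiffy if the window has already elapsed (or jiffies wrapped), and only then run the lockup check. Second, -EDEADLK is returned only after radeon_ring_is_lockup() agrees and the ring has been marked not ready, which is exactly what radeon_cs_handle_lockup() and radeon_gem_handle_lockup() later turn into a reset. The interval computation in isolation (model only; time_after()'s wrap handling reduced to a plain compare):

#include <stdint.h>

/* Remaining sleep budget before the next lockup check: the unexpired
 * part of the activity window, else the minimum of one tick. */
static uint64_t wait_budget(uint64_t now, uint64_t last_activity,
                            uint64_t window)
{
        uint64_t horizon = now - window;

        if (last_activity > horizon)
                return last_activity - horizon;
        return 1;
}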
214 287
215int radeon_fence_wait(struct radeon_fence *fence, bool intr) 288int radeon_fence_wait(struct radeon_fence *fence, bool intr)
216{ 289{
217 struct radeon_device *rdev;
218 unsigned long irq_flags, timeout;
219 u32 seq;
220 int r; 290 int r;
221 291
222 if (fence == NULL) { 292 if (fence == NULL) {
223 WARN(1, "Querying an invalid fence : %p !\n", fence); 293 WARN(1, "Querying an invalid fence : %p !\n", fence);
224 return 0; 294 return -EINVAL;
225 } 295 }
226 rdev = fence->rdev; 296
227 if (radeon_fence_signaled(fence)) { 297 r = radeon_fence_wait_seq(fence->rdev, fence->seq,
228 return 0; 298 fence->ring, intr, true);
299 if (r) {
300 return r;
229 } 301 }
230 timeout = rdev->fence_drv[fence->ring].last_timeout; 302 fence->seq = RADEON_FENCE_SIGNALED_SEQ;
231retry: 303 return 0;
232 /* save current sequence used to check for GPU lockup */ 304}
233 seq = rdev->fence_drv[fence->ring].last_seq; 305
234 trace_radeon_fence_wait_begin(rdev->ddev, seq); 306bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
235 if (intr) { 307{
236 radeon_irq_kms_sw_irq_get(rdev, fence->ring); 308 unsigned i;
237 r = wait_event_interruptible_timeout(rdev->fence_drv[fence->ring].queue, 309
238 radeon_fence_signaled(fence), timeout); 310 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
239 radeon_irq_kms_sw_irq_put(rdev, fence->ring); 311 if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i)) {
240 if (unlikely(r < 0)) { 312 return true;
241 return r;
242 } 313 }
243 } else {
244 radeon_irq_kms_sw_irq_get(rdev, fence->ring);
245 r = wait_event_timeout(rdev->fence_drv[fence->ring].queue,
246 radeon_fence_signaled(fence), timeout);
247 radeon_irq_kms_sw_irq_put(rdev, fence->ring);
248 } 314 }
249 trace_radeon_fence_wait_end(rdev->ddev, seq); 315 return false;
250 if (unlikely(!radeon_fence_signaled(fence))) { 316}
251 /* we were interrupted for some reason and fence isn't 317
252 * isn't signaled yet, resume wait 318static int radeon_fence_wait_any_seq(struct radeon_device *rdev,
253 */ 319 u64 *target_seq, bool intr)
254 if (r) { 320{
255 timeout = r; 321 unsigned long timeout, last_activity, tmp;
256 goto retry; 322 unsigned i, ring = RADEON_NUM_RINGS;
323 bool signaled;
324 int r;
325
326 for (i = 0, last_activity = 0; i < RADEON_NUM_RINGS; ++i) {
327 if (!target_seq[i]) {
328 continue;
329 }
330
331 /* use the most recent one as indicator */
332 if (time_after(rdev->fence_drv[i].last_activity, last_activity)) {
333 last_activity = rdev->fence_drv[i].last_activity;
257 } 334 }
258 /* don't protect read access to rdev->fence_drv[t].last_seq 335
259 * if we experiencing a lockup the value doesn't change 336 /* For lockup detection just pick the lowest ring we are
337 * actively waiting for
260 */ 338 */
261 if (seq == rdev->fence_drv[fence->ring].last_seq && 339 if (i < ring) {
262 radeon_gpu_is_lockup(rdev, &rdev->ring[fence->ring])) { 340 ring = i;
263 /* good news we believe it's a lockup */ 341 }
264 printk(KERN_WARNING "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n", 342 }
265 fence->seq, seq); 343
266 /* FIXME: what should we do ? marking everyone 344 /* nothing to wait for ? */
267 * as signaled for now 345 if (ring == RADEON_NUM_RINGS) {
346 return 0;
347 }
348
349 while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
350 timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
351 if (time_after(last_activity, timeout)) {
352 /* the normal case, timeout is somewhere before last_activity */
353 timeout = last_activity - timeout;
354 } else {
355 /* either jiffies wrapped around, or no fence was signaled in the last 500ms;
356 * either way we will just wait for the minimum amount and then check for a lockup
268 */ 357 */
269 rdev->gpu_lockup = true; 358 timeout = 1;
270 r = radeon_gpu_reset(rdev); 359 }
271 if (r) 360
272 return r; 361 trace_radeon_fence_wait_begin(rdev->ddev, target_seq[ring]);
273 radeon_fence_write(rdev, fence->seq, fence->ring); 362 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
274 rdev->gpu_lockup = false; 363 if (target_seq[i]) {
364 radeon_irq_kms_sw_irq_get(rdev, i);
365 }
366 }
367 if (intr) {
368 r = wait_event_interruptible_timeout(rdev->fence_queue,
369 (signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
370 timeout);
371 } else {
372 r = wait_event_timeout(rdev->fence_queue,
373 (signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
374 timeout);
375 }
376 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
377 if (target_seq[i]) {
378 radeon_irq_kms_sw_irq_put(rdev, i);
379 }
380 }
381 if (unlikely(r < 0)) {
382 return r;
383 }
384 trace_radeon_fence_wait_end(rdev->ddev, target_seq[ring]);
385
386 if (unlikely(!signaled)) {
387 /* we were interrupted for some reason and fence
388 * isn't signaled yet, resume waiting */
389 if (r) {
390 continue;
391 }
392
393 mutex_lock(&rdev->ring_lock);
394 for (i = 0, tmp = 0; i < RADEON_NUM_RINGS; ++i) {
395 if (time_after(rdev->fence_drv[i].last_activity, tmp)) {
396 tmp = rdev->fence_drv[i].last_activity;
397 }
398 }
399 /* test if somebody else has already decided that this is a lockup */
400 if (last_activity != tmp) {
401 last_activity = tmp;
402 mutex_unlock(&rdev->ring_lock);
403 continue;
404 }
405
406 if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
407 /* good news we believe it's a lockup */
408 dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx)\n",
409 target_seq[ring]);
410
411 /* change last activity so nobody else thinks there is a lockup */
412 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
413 rdev->fence_drv[i].last_activity = jiffies;
414 }
415
416 /* mark the ring as not ready any more */
417 rdev->ring[ring].ready = false;
418 mutex_unlock(&rdev->ring_lock);
419 return -EDEADLK;
420 }
421 mutex_unlock(&rdev->ring_lock);
275 } 422 }
276 timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
277 write_lock_irqsave(&rdev->fence_lock, irq_flags);
278 rdev->fence_drv[fence->ring].last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
279 rdev->fence_drv[fence->ring].last_jiffies = jiffies;
280 write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
281 goto retry;
282 } 423 }
283 return 0; 424 return 0;
284} 425}
285 426
286int radeon_fence_wait_next(struct radeon_device *rdev, int ring) 427int radeon_fence_wait_any(struct radeon_device *rdev,
428 struct radeon_fence **fences,
429 bool intr)
287{ 430{
288 unsigned long irq_flags; 431 uint64_t seq[RADEON_NUM_RINGS];
289 struct radeon_fence *fence; 432 unsigned i;
290 int r; 433 int r;
291 434
292 if (rdev->gpu_lockup) { 435 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
293 return 0; 436 seq[i] = 0;
437
438 if (!fences[i]) {
439 continue;
440 }
441
442 if (fences[i]->seq == RADEON_FENCE_SIGNALED_SEQ) {
443 /* something was already signaled */
444 return 0;
445 }
446
447 if (fences[i]->seq < RADEON_FENCE_NOTEMITED_SEQ) {
448 seq[i] = fences[i]->seq;
449 }
294 } 450 }
295 write_lock_irqsave(&rdev->fence_lock, irq_flags); 451
296 if (list_empty(&rdev->fence_drv[ring].emitted)) { 452 r = radeon_fence_wait_any_seq(rdev, seq, intr);
297 write_unlock_irqrestore(&rdev->fence_lock, irq_flags); 453 if (r) {
298 return 0; 454 return r;
299 } 455 }
300 fence = list_entry(rdev->fence_drv[ring].emitted.next, 456 return 0;
301 struct radeon_fence, list);
302 radeon_fence_ref(fence);
303 write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
304 r = radeon_fence_wait(fence, false);
305 radeon_fence_unref(&fence);
306 return r;
307} 457}
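radeon_fence_wait_any() flattens the optional fences into a per-ring seq vector and returns as soon as any of them retires; judging by the radeon_sa_bo_new()/radeon_sa_bo_free() signature changes further down this diff, the reworked suballocator is its main consumer, since a block may be freed by whichever ring finishes first. A hypothetical caller, ring indices as in the radeon.h of this series:

struct radeon_fence *fences[RADEON_NUM_RINGS] = { NULL };
int r;

fences[RADEON_RING_TYPE_GFX_INDEX] = gfx_fence;   /* whichever of these   */
fences[CAYMAN_RING_TYPE_CP1_INDEX] = cp1_fence;   /* retires first wins   */
r = radeon_fence_wait_any(rdev, fences, true);    /* true = interruptible */
/* r == -EDEADLK here means a lockup was detected while waiting */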
308 458
309int radeon_fence_wait_last(struct radeon_device *rdev, int ring) 459int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
310{ 460{
311 unsigned long irq_flags; 461 uint64_t seq;
312 struct radeon_fence *fence; 462
313 int r; 463 /* We are not protected by ring lock when reading current seq but
314 464 * it's ok as the worst case is we return too early while we could have
315 if (rdev->gpu_lockup) { 465 * waited.
316 return 0; 466 */
467 seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
468 if (seq >= rdev->fence_drv[ring].seq) {
469 /* nothing to wait for, last_seq is
470 already the last emitted fence */
471 return -ENOENT;
317 } 472 }
318 write_lock_irqsave(&rdev->fence_lock, irq_flags); 473 return radeon_fence_wait_seq(rdev, seq, ring, false, false);
319 if (list_empty(&rdev->fence_drv[ring].emitted)) { 474}
320 write_unlock_irqrestore(&rdev->fence_lock, irq_flags); 475
321 return 0; 476int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
322 } 477{
323 fence = list_entry(rdev->fence_drv[ring].emitted.prev, 478 /* We are not protected by ring lock when reading current seq
324 struct radeon_fence, list); 479 * but it's ok as wait empty is called from a place where no more
325 radeon_fence_ref(fence); 480 * activity can be scheduled so there won't be concurrent access
326 write_unlock_irqrestore(&rdev->fence_lock, irq_flags); 481 * to seq value.
327 r = radeon_fence_wait(fence, false); 482 */
328 radeon_fence_unref(&fence); 483 return radeon_fence_wait_seq(rdev, rdev->fence_drv[ring].seq,
329 return r; 484 ring, false, false);
330} 485}
331 486
332struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence) 487struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
@@ -345,49 +500,27 @@ void radeon_fence_unref(struct radeon_fence **fence)
345 } 500 }
346} 501}
347 502
348void radeon_fence_process(struct radeon_device *rdev, int ring) 503unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
349{
350 unsigned long irq_flags;
351 bool wake;
352
353 write_lock_irqsave(&rdev->fence_lock, irq_flags);
354 wake = radeon_fence_poll_locked(rdev, ring);
355 write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
356 if (wake) {
357 wake_up_all(&rdev->fence_drv[ring].queue);
358 }
359}
360
361int radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
362{ 504{
363 unsigned long irq_flags; 505 uint64_t emitted;
364 int not_processed = 0; 506
365 507 /* We are not protected by ring lock when reading the last sequence
366 read_lock_irqsave(&rdev->fence_lock, irq_flags); 508 * but it's ok to report slightly wrong fence count here.
367 if (!rdev->fence_drv[ring].initialized) { 509 */
368 read_unlock_irqrestore(&rdev->fence_lock, irq_flags); 510 radeon_fence_process(rdev, ring);
369 return 0; 511 emitted = rdev->fence_drv[ring].seq - atomic64_read(&rdev->fence_drv[ring].last_seq);
512 /* to avoid a 32-bit wrap around */
513 if (emitted > 0x10000000) {
514 emitted = 0x10000000;
370 } 515 }
371 516 return (unsigned)emitted;
372 if (!list_empty(&rdev->fence_drv[ring].emitted)) {
373 struct list_head *ptr;
374 list_for_each(ptr, &rdev->fence_drv[ring].emitted) {
375 /* count up to 3, that's enough info */
376 if (++not_processed >= 3)
377 break;
378 }
379 }
380 read_unlock_irqrestore(&rdev->fence_lock, irq_flags);
381 return not_processed;
382} 517}
383 518
384int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring) 519int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
385{ 520{
386 unsigned long irq_flags;
387 uint64_t index; 521 uint64_t index;
388 int r; 522 int r;
389 523
390 write_lock_irqsave(&rdev->fence_lock, irq_flags);
391 radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg); 524 radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
392 if (rdev->wb.use_event) { 525 if (rdev->wb.use_event) {
393 rdev->fence_drv[ring].scratch_reg = 0; 526 rdev->fence_drv[ring].scratch_reg = 0;
@@ -396,7 +529,6 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
396 r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg); 529 r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
397 if (r) { 530 if (r) {
398 dev_err(rdev->dev, "fence failed to get scratch register\n"); 531 dev_err(rdev->dev, "fence failed to get scratch register\n");
399 write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
400 return r; 532 return r;
401 } 533 }
402 index = RADEON_WB_SCRATCH_OFFSET + 534 index = RADEON_WB_SCRATCH_OFFSET +
@@ -405,11 +537,10 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
405 } 537 }
406 rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4]; 538 rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
407 rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index; 539 rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
408 radeon_fence_write(rdev, atomic_read(&rdev->fence_drv[ring].seq), ring); 540 radeon_fence_write(rdev, rdev->fence_drv[ring].seq, ring);
409 rdev->fence_drv[ring].initialized = true; 541 rdev->fence_drv[ring].initialized = true;
410 DRM_INFO("fence driver on ring %d use gpu addr 0x%08Lx and cpu addr 0x%p\n", 542 dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx and cpu addr 0x%p\n",
411 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr); 543 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
412 write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
413 return 0; 544 return 0;
414} 545}
415 546
@@ -418,24 +549,20 @@ static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
418 rdev->fence_drv[ring].scratch_reg = -1; 549 rdev->fence_drv[ring].scratch_reg = -1;
419 rdev->fence_drv[ring].cpu_addr = NULL; 550 rdev->fence_drv[ring].cpu_addr = NULL;
420 rdev->fence_drv[ring].gpu_addr = 0; 551 rdev->fence_drv[ring].gpu_addr = 0;
421 atomic_set(&rdev->fence_drv[ring].seq, 0); 552 rdev->fence_drv[ring].seq = 0;
422 INIT_LIST_HEAD(&rdev->fence_drv[ring].created); 553 atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
423 INIT_LIST_HEAD(&rdev->fence_drv[ring].emitted); 554 rdev->fence_drv[ring].last_activity = jiffies;
424 INIT_LIST_HEAD(&rdev->fence_drv[ring].signaled);
425 init_waitqueue_head(&rdev->fence_drv[ring].queue);
426 rdev->fence_drv[ring].initialized = false; 555 rdev->fence_drv[ring].initialized = false;
427} 556}
428 557
429int radeon_fence_driver_init(struct radeon_device *rdev) 558int radeon_fence_driver_init(struct radeon_device *rdev)
430{ 559{
431 unsigned long irq_flags;
432 int ring; 560 int ring;
433 561
434 write_lock_irqsave(&rdev->fence_lock, irq_flags); 562 init_waitqueue_head(&rdev->fence_queue);
435 for (ring = 0; ring < RADEON_NUM_RINGS; ring++) { 563 for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
436 radeon_fence_driver_init_ring(rdev, ring); 564 radeon_fence_driver_init_ring(rdev, ring);
437 } 565 }
438 write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
439 if (radeon_debugfs_fence_init(rdev)) { 566 if (radeon_debugfs_fence_init(rdev)) {
440 dev_err(rdev->dev, "fence debugfs file creation failed\n"); 567 dev_err(rdev->dev, "fence debugfs file creation failed\n");
441 } 568 }
@@ -444,19 +571,18 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
444 571
445void radeon_fence_driver_fini(struct radeon_device *rdev) 572void radeon_fence_driver_fini(struct radeon_device *rdev)
446{ 573{
447 unsigned long irq_flags;
448 int ring; 574 int ring;
449 575
576 mutex_lock(&rdev->ring_lock);
450 for (ring = 0; ring < RADEON_NUM_RINGS; ring++) { 577 for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
451 if (!rdev->fence_drv[ring].initialized) 578 if (!rdev->fence_drv[ring].initialized)
452 continue; 579 continue;
453 radeon_fence_wait_last(rdev, ring); 580 radeon_fence_wait_empty_locked(rdev, ring);
454 wake_up_all(&rdev->fence_drv[ring].queue); 581 wake_up_all(&rdev->fence_queue);
455 write_lock_irqsave(&rdev->fence_lock, irq_flags);
456 radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg); 582 radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
457 write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
458 rdev->fence_drv[ring].initialized = false; 583 rdev->fence_drv[ring].initialized = false;
459 } 584 }
585 mutex_unlock(&rdev->ring_lock);
460} 586}
461 587
462 588
@@ -469,7 +595,6 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
469 struct drm_info_node *node = (struct drm_info_node *)m->private; 595 struct drm_info_node *node = (struct drm_info_node *)m->private;
470 struct drm_device *dev = node->minor->dev; 596 struct drm_device *dev = node->minor->dev;
471 struct radeon_device *rdev = dev->dev_private; 597 struct radeon_device *rdev = dev->dev_private;
472 struct radeon_fence *fence;
473 int i; 598 int i;
474 599
475 for (i = 0; i < RADEON_NUM_RINGS; ++i) { 600 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
@@ -477,14 +602,10 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
477 continue; 602 continue;
478 603
479 seq_printf(m, "--- ring %d ---\n", i); 604 seq_printf(m, "--- ring %d ---\n", i);
480 seq_printf(m, "Last signaled fence 0x%08X\n", 605 seq_printf(m, "Last signaled fence 0x%016llx\n",
481 radeon_fence_read(rdev, i)); 606 (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
482 if (!list_empty(&rdev->fence_drv[i].emitted)) { 607 seq_printf(m, "Last emitted 0x%016llx\n",
483 fence = list_entry(rdev->fence_drv[i].emitted.prev, 608 rdev->fence_drv[i].seq);
484 struct radeon_fence, list);
485 seq_printf(m, "Last emitted fence %p with 0x%08X\n",
486 fence, fence->seq);
487 }
488 } 609 }
489 return 0; 610 return 0;
490} 611}
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 456a77cf4b7f..79db56e6c2ac 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -80,7 +80,7 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
80 if (rdev->gart.robj == NULL) { 80 if (rdev->gart.robj == NULL) {
81 r = radeon_bo_create(rdev, rdev->gart.table_size, 81 r = radeon_bo_create(rdev, rdev->gart.table_size,
82 PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, 82 PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
83 &rdev->gart.robj); 83 NULL, &rdev->gart.robj);
84 if (r) { 84 if (r) {
85 return r; 85 return r;
86 } 86 }
@@ -326,7 +326,7 @@ static void radeon_vm_unbind_locked(struct radeon_device *rdev,
326 rdev->vm_manager.use_bitmap &= ~(1 << vm->id); 326 rdev->vm_manager.use_bitmap &= ~(1 << vm->id);
327 list_del_init(&vm->list); 327 list_del_init(&vm->list);
328 vm->id = -1; 328 vm->id = -1;
329 radeon_sa_bo_free(rdev, &vm->sa_bo); 329 radeon_sa_bo_free(rdev, &vm->sa_bo, NULL);
330 vm->pt = NULL; 330 vm->pt = NULL;
331 331
332 list_for_each_entry(bo_va, &vm->va, vm_list) { 332 list_for_each_entry(bo_va, &vm->va, vm_list) {
@@ -395,7 +395,7 @@ int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm)
395retry: 395retry:
396 r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, &vm->sa_bo, 396 r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, &vm->sa_bo,
397 RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8), 397 RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8),
398 RADEON_GPU_PAGE_SIZE); 398 RADEON_GPU_PAGE_SIZE, false);
399 if (r) { 399 if (r) {
400 if (list_empty(&rdev->vm_manager.lru_vm)) { 400 if (list_empty(&rdev->vm_manager.lru_vm)) {
401 return r; 401 return r;
@@ -404,10 +404,8 @@ retry:
404 radeon_vm_unbind(rdev, vm_evict); 404 radeon_vm_unbind(rdev, vm_evict);
405 goto retry; 405 goto retry;
406 } 406 }
407 vm->pt = rdev->vm_manager.sa_manager.cpu_ptr; 407 vm->pt = radeon_sa_bo_cpu_addr(vm->sa_bo);
408 vm->pt += (vm->sa_bo.offset >> 3); 408 vm->pt_gpu_addr = radeon_sa_bo_gpu_addr(vm->sa_bo);
409 vm->pt_gpu_addr = rdev->vm_manager.sa_manager.gpu_addr;
410 vm->pt_gpu_addr += vm->sa_bo.offset;
411 memset(vm->pt, 0, RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8)); 409 memset(vm->pt, 0, RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8));
412 410
413retry_id: 411retry_id:
@@ -428,14 +426,14 @@ retry_id:
428 /* do hw bind */ 426 /* do hw bind */
429 r = rdev->vm_manager.funcs->bind(rdev, vm, id); 427 r = rdev->vm_manager.funcs->bind(rdev, vm, id);
430 if (r) { 428 if (r) {
431 radeon_sa_bo_free(rdev, &vm->sa_bo); 429 radeon_sa_bo_free(rdev, &vm->sa_bo, NULL);
432 return r; 430 return r;
433 } 431 }
434 rdev->vm_manager.use_bitmap |= 1 << id; 432 rdev->vm_manager.use_bitmap |= 1 << id;
435 vm->id = id; 433 vm->id = id;
436 list_add_tail(&vm->list, &rdev->vm_manager.lru_vm); 434 list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
437 return radeon_vm_bo_update_pte(rdev, vm, rdev->ib_pool.sa_manager.bo, 435 return radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo,
438 &rdev->ib_pool.sa_manager.bo->tbo.mem); 436 &rdev->ring_tmp_bo.bo->tbo.mem);
439} 437}
440 438
441/* object have to be reserved */ 439/* object have to be reserved */
@@ -633,7 +631,7 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
633 /* map the ib pool buffer at 0 in virtual address space, set 631 /* map the ib pool buffer at 0 in virtual address space, set
634 * read only 632 * read only
635 */ 633 */
636 r = radeon_vm_bo_add(rdev, vm, rdev->ib_pool.sa_manager.bo, 0, 634 r = radeon_vm_bo_add(rdev, vm, rdev->ring_tmp_bo.bo, 0,
637 RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_SNOOPED); 635 RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_SNOOPED);
638 return r; 636 return r;
639} 637}
@@ -650,12 +648,12 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
650 radeon_mutex_unlock(&rdev->cs_mutex); 648 radeon_mutex_unlock(&rdev->cs_mutex);
651 649
652 /* remove all bo */ 650 /* remove all bo */
653 r = radeon_bo_reserve(rdev->ib_pool.sa_manager.bo, false); 651 r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
654 if (!r) { 652 if (!r) {
655 bo_va = radeon_bo_va(rdev->ib_pool.sa_manager.bo, vm); 653 bo_va = radeon_bo_va(rdev->ring_tmp_bo.bo, vm);
656 list_del_init(&bo_va->bo_list); 654 list_del_init(&bo_va->bo_list);
657 list_del_init(&bo_va->vm_list); 655 list_del_init(&bo_va->vm_list);
658 radeon_bo_unreserve(rdev->ib_pool.sa_manager.bo); 656 radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
659 kfree(bo_va); 657 kfree(bo_va);
660 } 658 }
661 if (!list_empty(&vm->va)) { 659 if (!list_empty(&vm->va)) {
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 0519b05968b5..f28bd4b7ef98 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -42,6 +42,8 @@ void radeon_gem_object_free(struct drm_gem_object *gobj)
42 struct radeon_bo *robj = gem_to_radeon_bo(gobj); 42 struct radeon_bo *robj = gem_to_radeon_bo(gobj);
43 43
44 if (robj) { 44 if (robj) {
45 if (robj->gem_base.import_attach)
46 drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
45 radeon_bo_unref(&robj); 47 radeon_bo_unref(&robj);
46 } 48 }
47} 49}
@@ -59,7 +61,7 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
59 if (alignment < PAGE_SIZE) { 61 if (alignment < PAGE_SIZE) {
60 alignment = PAGE_SIZE; 62 alignment = PAGE_SIZE;
61 } 63 }
62 r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, &robj); 64 r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, NULL, &robj);
63 if (r) { 65 if (r) {
64 if (r != -ERESTARTSYS) 66 if (r != -ERESTARTSYS)
65 DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n", 67 DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
@@ -154,6 +156,17 @@ void radeon_gem_object_close(struct drm_gem_object *obj,
154 radeon_bo_unreserve(rbo); 156 radeon_bo_unreserve(rbo);
155} 157}
156 158
159static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
160{
161 if (r == -EDEADLK) {
162 radeon_mutex_lock(&rdev->cs_mutex);
163 r = radeon_gpu_reset(rdev);
164 if (!r)
165 r = -EAGAIN;
166 radeon_mutex_unlock(&rdev->cs_mutex);
167 }
168 return r;
169}
157 170
158/* 171/*
159 * GEM ioctls. 172 * GEM ioctls.
@@ -210,12 +223,14 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
210 args->initial_domain, false, 223 args->initial_domain, false,
211 false, &gobj); 224 false, &gobj);
212 if (r) { 225 if (r) {
226 r = radeon_gem_handle_lockup(rdev, r);
213 return r; 227 return r;
214 } 228 }
215 r = drm_gem_handle_create(filp, gobj, &handle); 229 r = drm_gem_handle_create(filp, gobj, &handle);
216 /* drop reference from allocate - handle holds it now */ 230 /* drop reference from allocate - handle holds it now */
217 drm_gem_object_unreference_unlocked(gobj); 231 drm_gem_object_unreference_unlocked(gobj);
218 if (r) { 232 if (r) {
233 r = radeon_gem_handle_lockup(rdev, r);
219 return r; 234 return r;
220 } 235 }
221 args->handle = handle; 236 args->handle = handle;
@@ -245,6 +260,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
245 r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain); 260 r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
246 261
247 drm_gem_object_unreference_unlocked(gobj); 262 drm_gem_object_unreference_unlocked(gobj);
263 r = radeon_gem_handle_lockup(robj->rdev, r);
248 return r; 264 return r;
249} 265}
250 266
@@ -301,6 +317,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
301 break; 317 break;
302 } 318 }
303 drm_gem_object_unreference_unlocked(gobj); 319 drm_gem_object_unreference_unlocked(gobj);
320 r = radeon_gem_handle_lockup(robj->rdev, r);
304 return r; 321 return r;
305} 322}
306 323
@@ -322,6 +339,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
322 if (robj->rdev->asic->ioctl_wait_idle) 339 if (robj->rdev->asic->ioctl_wait_idle)
323 robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj); 340 robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
324 drm_gem_object_unreference_unlocked(gobj); 341 drm_gem_object_unreference_unlocked(gobj);
342 r = radeon_gem_handle_lockup(robj->rdev, r);
325 return r; 343 return r;
326} 344}
327 345
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 65060b77c805..5df58d1aba06 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -73,6 +73,7 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
73 for (i = 0; i < RADEON_MAX_CRTCS; i++) { 73 for (i = 0; i < RADEON_MAX_CRTCS; i++) {
74 rdev->irq.crtc_vblank_int[i] = false; 74 rdev->irq.crtc_vblank_int[i] = false;
75 rdev->irq.pflip[i] = false; 75 rdev->irq.pflip[i] = false;
76 rdev->irq.afmt[i] = false;
76 } 77 }
77 radeon_irq_set(rdev); 78 radeon_irq_set(rdev);
78 /* Clear bits */ 79 /* Clear bits */
@@ -108,6 +109,7 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
108 for (i = 0; i < RADEON_MAX_CRTCS; i++) { 109 for (i = 0; i < RADEON_MAX_CRTCS; i++) {
109 rdev->irq.crtc_vblank_int[i] = false; 110 rdev->irq.crtc_vblank_int[i] = false;
110 rdev->irq.pflip[i] = false; 111 rdev->irq.pflip[i] = false;
112 rdev->irq.afmt[i] = false;
111 } 113 }
112 radeon_irq_set(rdev); 114 radeon_irq_set(rdev);
113} 115}
@@ -170,6 +172,7 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
170 int r = 0; 172 int r = 0;
171 173
172 INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func); 174 INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
175 INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
173 176
174 spin_lock_init(&rdev->irq.sw_lock); 177 spin_lock_init(&rdev->irq.sw_lock);
175 for (i = 0; i < rdev->num_crtc; i++) 178 for (i = 0; i < rdev->num_crtc; i++)
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 3c2628b14d56..f1016a5820d1 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -57,8 +57,6 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
57 } 57 }
58 dev->dev_private = (void *)rdev; 58 dev->dev_private = (void *)rdev;
59 59
60 pci_set_master(dev->pdev);
61
62 /* update BUS flag */ 60 /* update BUS flag */
63 if (drm_pci_device_is_agp(dev)) { 61 if (drm_pci_device_is_agp(dev)) {
64 flags |= RADEON_IS_AGP; 62 flags |= RADEON_IS_AGP;
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 42db254f6bb0..a0c82229e8f0 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -369,6 +369,7 @@ void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
369 goto error; 369 goto error;
370 } 370 }
371 371
372 memset(&props, 0, sizeof(props));
372 props.max_brightness = MAX_RADEON_LEVEL; 373 props.max_brightness = MAX_RADEON_LEVEL;
373 props.type = BACKLIGHT_RAW; 374 props.type = BACKLIGHT_RAW;
374 bd = backlight_device_register("radeon_bl", &drm_connector->kdev, 375 bd = backlight_device_register("radeon_bl", &drm_connector->kdev,
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index f7eb5d8b9fd3..5b10ffd7bb2f 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -210,6 +210,7 @@ enum radeon_connector_table {
210 CT_RN50_POWER, 210 CT_RN50_POWER,
211 CT_MAC_X800, 211 CT_MAC_X800,
212 CT_MAC_G5_9600, 212 CT_MAC_G5_9600,
213 CT_SAM440EP
213}; 214};
214 215
215enum radeon_dvo_chip { 216enum radeon_dvo_chip {
@@ -219,12 +220,20 @@ enum radeon_dvo_chip {
219 220
220struct radeon_fbdev; 221struct radeon_fbdev;
221 222
223struct radeon_afmt {
224 bool enabled;
225 int offset;
226 bool last_buffer_filled_status;
227 int id;
228};
229
222struct radeon_mode_info { 230struct radeon_mode_info {
223 struct atom_context *atom_context; 231 struct atom_context *atom_context;
224 struct card_info *atom_card_info; 232 struct card_info *atom_card_info;
225 enum radeon_connector_table connector_table; 233 enum radeon_connector_table connector_table;
226 bool mode_config_initialized; 234 bool mode_config_initialized;
227 struct radeon_crtc *crtcs[6]; 235 struct radeon_crtc *crtcs[6];
236 struct radeon_afmt *afmt[6];
228 /* DVI-I properties */ 237 /* DVI-I properties */
229 struct drm_property *coherent_mode_property; 238 struct drm_property *coherent_mode_property;
230 /* DAC enable load detect */ 239 /* DAC enable load detect */
@@ -363,6 +372,7 @@ struct radeon_encoder_atom_dig {
363 int dpms_mode; 372 int dpms_mode;
364 uint8_t backlight_level; 373 uint8_t backlight_level;
365 int panel_mode; 374 int panel_mode;
375 struct radeon_afmt *afmt;
366}; 376};
367 377
368struct radeon_encoder_atom_dac { 378struct radeon_encoder_atom_dac {
@@ -384,10 +394,6 @@ struct radeon_encoder {
384 struct drm_display_mode native_mode; 394 struct drm_display_mode native_mode;
385 void *enc_priv; 395 void *enc_priv;
386 int audio_polling_active; 396 int audio_polling_active;
387 int hdmi_offset;
388 int hdmi_config_offset;
389 int hdmi_audio_workaround;
390 int hdmi_buffer_status;
391 bool is_ext_encoder; 397 bool is_ext_encoder;
392 u16 caps; 398 u16 caps;
393}; 399};
@@ -476,6 +482,7 @@ extern u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder);
476extern u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector); 482extern u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector);
477extern bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector); 483extern bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector);
478extern bool radeon_connector_is_dp12_capable(struct drm_connector *connector); 484extern bool radeon_connector_is_dp12_capable(struct drm_connector *connector);
485extern int radeon_get_monitor_bpc(struct drm_connector *connector);
479 486
480extern void radeon_connector_hotplug(struct drm_connector *connector); 487extern void radeon_connector_hotplug(struct drm_connector *connector);
481extern int radeon_dp_mode_valid_helper(struct drm_connector *connector, 488extern int radeon_dp_mode_valid_helper(struct drm_connector *connector,
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index df6a4dbd93f8..830f1a7b486f 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -104,7 +104,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
104 104
105int radeon_bo_create(struct radeon_device *rdev, 105int radeon_bo_create(struct radeon_device *rdev,
106 unsigned long size, int byte_align, bool kernel, u32 domain, 106 unsigned long size, int byte_align, bool kernel, u32 domain,
107 struct radeon_bo **bo_ptr) 107 struct sg_table *sg, struct radeon_bo **bo_ptr)
108{ 108{
109 struct radeon_bo *bo; 109 struct radeon_bo *bo;
110 enum ttm_bo_type type; 110 enum ttm_bo_type type;
@@ -120,6 +120,8 @@ int radeon_bo_create(struct radeon_device *rdev,
120 } 120 }
121 if (kernel) { 121 if (kernel) {
122 type = ttm_bo_type_kernel; 122 type = ttm_bo_type_kernel;
123 } else if (sg) {
124 type = ttm_bo_type_sg;
123 } else { 125 } else {
124 type = ttm_bo_type_device; 126 type = ttm_bo_type_device;
125 } 127 }
@@ -155,7 +157,7 @@ retry:
155 mutex_lock(&rdev->vram_mutex); 157 mutex_lock(&rdev->vram_mutex);
156 r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type, 158 r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
157 &bo->placement, page_align, 0, !kernel, NULL, 159 &bo->placement, page_align, 0, !kernel, NULL,
158 acc_size, &radeon_ttm_bo_destroy); 160 acc_size, sg, &radeon_ttm_bo_destroy);
159 mutex_unlock(&rdev->vram_mutex); 161 mutex_unlock(&rdev->vram_mutex);
160 if (unlikely(r != 0)) { 162 if (unlikely(r != 0)) {
161 if (r != -ERESTARTSYS) { 163 if (r != -ERESTARTSYS) {
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index f9104be88d7c..17fb99f177cf 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -111,9 +111,10 @@ extern int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
111 bool no_wait); 111 bool no_wait);
112 112
113extern int radeon_bo_create(struct radeon_device *rdev, 113extern int radeon_bo_create(struct radeon_device *rdev,
114 unsigned long size, int byte_align, 114 unsigned long size, int byte_align,
115 bool kernel, u32 domain, 115 bool kernel, u32 domain,
116 struct radeon_bo **bo_ptr); 116 struct sg_table *sg,
117 struct radeon_bo **bo_ptr);
117extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr); 118extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
118extern void radeon_bo_kunmap(struct radeon_bo *bo); 119extern void radeon_bo_kunmap(struct radeon_bo *bo);
119extern void radeon_bo_unref(struct radeon_bo **bo); 120extern void radeon_bo_unref(struct radeon_bo **bo);
@@ -146,6 +147,17 @@ extern struct radeon_bo_va *radeon_bo_va(struct radeon_bo *rbo,
146/* 147/*
147 * sub allocation 148 * sub allocation
148 */ 149 */
150
151static inline uint64_t radeon_sa_bo_gpu_addr(struct radeon_sa_bo *sa_bo)
152{
153 return sa_bo->manager->gpu_addr + sa_bo->soffset;
154}
155
156static inline void * radeon_sa_bo_cpu_addr(struct radeon_sa_bo *sa_bo)
157{
158 return sa_bo->manager->cpu_ptr + sa_bo->soffset;
159}
160
149extern int radeon_sa_bo_manager_init(struct radeon_device *rdev, 161extern int radeon_sa_bo_manager_init(struct radeon_device *rdev,
150 struct radeon_sa_manager *sa_manager, 162 struct radeon_sa_manager *sa_manager,
151 unsigned size, u32 domain); 163 unsigned size, u32 domain);
@@ -157,9 +169,15 @@ extern int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
157 struct radeon_sa_manager *sa_manager); 169 struct radeon_sa_manager *sa_manager);
158extern int radeon_sa_bo_new(struct radeon_device *rdev, 170extern int radeon_sa_bo_new(struct radeon_device *rdev,
159 struct radeon_sa_manager *sa_manager, 171 struct radeon_sa_manager *sa_manager,
160 struct radeon_sa_bo *sa_bo, 172 struct radeon_sa_bo **sa_bo,
161 unsigned size, unsigned align); 173 unsigned size, unsigned align, bool block);
162extern void radeon_sa_bo_free(struct radeon_device *rdev, 174extern void radeon_sa_bo_free(struct radeon_device *rdev,
163 struct radeon_sa_bo *sa_bo); 175 struct radeon_sa_bo **sa_bo,
176 struct radeon_fence *fence);
177#if defined(CONFIG_DEBUG_FS)
178extern void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
179 struct seq_file *m);
180#endif
181
164 182
165#endif 183#endif
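
The two inline helpers added above collapse sub-allocation addressing into "manager base plus the allocation's start offset (soffset)". A self-contained, non-kernel check of that arithmetic, using made-up numbers:

/* Stand-alone sketch (not kernel code) of the offset math behind
 * radeon_sa_bo_gpu_addr()/radeon_sa_bo_cpu_addr(). */
#include <assert.h>
#include <stdint.h>

struct sa_manager { uint64_t gpu_addr; char *cpu_ptr; };
struct sa_bo      { struct sa_manager *manager; unsigned soffset; };

static uint64_t sa_bo_gpu_addr(struct sa_bo *bo)
{
	return bo->manager->gpu_addr + bo->soffset;
}

static char *sa_bo_cpu_addr(struct sa_bo *bo)
{
	return bo->manager->cpu_ptr + bo->soffset;
}

int main(void)
{
	char backing[0x4000];
	struct sa_manager mgr = { .gpu_addr = 0x100000, .cpu_ptr = backing };
	struct sa_bo bo = { .manager = &mgr, .soffset = 0x2000 };

	assert(sa_bo_gpu_addr(&bo) == 0x102000);          /* base + 8 KiB */
	assert(sa_bo_cpu_addr(&bo) == backing + 0x2000);  /* same offset, CPU side */
	return 0;
}
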
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index caa55d68f319..08825548ee69 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -252,10 +252,7 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
252 252
253 mutex_lock(&rdev->ddev->struct_mutex); 253 mutex_lock(&rdev->ddev->struct_mutex);
254 mutex_lock(&rdev->vram_mutex); 254 mutex_lock(&rdev->vram_mutex);
255 for (i = 0; i < RADEON_NUM_RINGS; ++i) { 255 mutex_lock(&rdev->ring_lock);
256 if (rdev->ring[i].ring_obj)
257 mutex_lock(&rdev->ring[i].mutex);
258 }
259 256
260 /* gui idle int has issues on older chips it seems */ 257 /* gui idle int has issues on older chips it seems */
261 if (rdev->family >= CHIP_R600) { 258 if (rdev->family >= CHIP_R600) {
@@ -273,13 +270,7 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
273 } else { 270 } else {
274 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 271 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
275 if (ring->ready) { 272 if (ring->ready) {
276 struct radeon_fence *fence; 273 radeon_fence_wait_empty_locked(rdev, RADEON_RING_TYPE_GFX_INDEX);
277 radeon_ring_alloc(rdev, ring, 64);
278 radeon_fence_create(rdev, &fence, radeon_ring_index(rdev, ring));
279 radeon_fence_emit(rdev, fence);
280 radeon_ring_commit(rdev, ring);
281 radeon_fence_wait(fence, false);
282 radeon_fence_unref(&fence);
283 } 274 }
284 } 275 }
285 radeon_unmap_vram_bos(rdev); 276 radeon_unmap_vram_bos(rdev);
@@ -311,10 +302,7 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
311 302
312 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; 303 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
313 304
314 for (i = 0; i < RADEON_NUM_RINGS; ++i) { 305 mutex_unlock(&rdev->ring_lock);
315 if (rdev->ring[i].ring_obj)
316 mutex_unlock(&rdev->ring[i].mutex);
317 }
318 mutex_unlock(&rdev->vram_mutex); 306 mutex_unlock(&rdev->vram_mutex);
319 mutex_unlock(&rdev->ddev->struct_mutex); 307 mutex_unlock(&rdev->ddev->struct_mutex);
320} 308}
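
The radeon_pm.c hunks above swap the old per-ring mutexes for the new single rdev->ring_lock and replace the hand-rolled allocate-fence-and-wait sequence with radeon_fence_wait_empty_locked(). Condensed to its essence, the reclock-time quiescing pattern is now the following (kernel context assumed; error handling elided):

	mutex_lock(&rdev->ring_lock);	/* blocks all new ring submissions */
	if (rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready)
		radeon_fence_wait_empty_locked(rdev, RADEON_RING_TYPE_GFX_INDEX);
	/* ... reprogram clocks/voltages while the GPU is provably idle ... */
	mutex_unlock(&rdev->ring_lock);

One lock both serializes submitters and lets the power-management path drain the ring, which is what made the per-ring mutex array (and the dummy fence it needed) redundant.
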
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
new file mode 100644
index 000000000000..b8f835d8ecb4
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -0,0 +1,176 @@
1/*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * based on nouveau_prime.c
23 *
24 * Authors: Alex Deucher
25 */
26#include "drmP.h"
27#include "drm.h"
28
29#include "radeon.h"
30#include "radeon_drm.h"
31
32#include <linux/dma-buf.h>
33
34static struct sg_table *radeon_gem_map_dma_buf(struct dma_buf_attachment *attachment,
35 enum dma_data_direction dir)
36{
37 struct radeon_bo *bo = attachment->dmabuf->priv;
38 struct drm_device *dev = bo->rdev->ddev;
39 int npages = bo->tbo.num_pages;
40 struct sg_table *sg;
41 int nents;
42
43 mutex_lock(&dev->struct_mutex);
44 sg = drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
45 nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
46 mutex_unlock(&dev->struct_mutex);
47 return sg;
48}
49
50static void radeon_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
51 struct sg_table *sg, enum dma_data_direction dir)
52{
53 dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
54 sg_free_table(sg);
55 kfree(sg);
56}
57
58static void radeon_gem_dmabuf_release(struct dma_buf *dma_buf)
59{
60 struct radeon_bo *bo = dma_buf->priv;
61
62 if (bo->gem_base.export_dma_buf == dma_buf) {
63 DRM_ERROR("unreference dmabuf %p\n", &bo->gem_base);
64 bo->gem_base.export_dma_buf = NULL;
65 drm_gem_object_unreference_unlocked(&bo->gem_base);
66 }
67}
68
69static void *radeon_gem_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
70{
71 return NULL;
72}
73
74static void radeon_gem_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
75{
76
77}
78static void *radeon_gem_kmap(struct dma_buf *dma_buf, unsigned long page_num)
79{
80 return NULL;
81}
82
83static void radeon_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
84{
85
86}
87
 88static const struct dma_buf_ops radeon_dmabuf_ops = {
89 .map_dma_buf = radeon_gem_map_dma_buf,
90 .unmap_dma_buf = radeon_gem_unmap_dma_buf,
91 .release = radeon_gem_dmabuf_release,
92 .kmap = radeon_gem_kmap,
93 .kmap_atomic = radeon_gem_kmap_atomic,
94 .kunmap = radeon_gem_kunmap,
95 .kunmap_atomic = radeon_gem_kunmap_atomic,
96};
97
98static int radeon_prime_create(struct drm_device *dev,
99 size_t size,
100 struct sg_table *sg,
101 struct radeon_bo **pbo)
102{
103 struct radeon_device *rdev = dev->dev_private;
104 struct radeon_bo *bo;
105 int ret;
106
107 ret = radeon_bo_create(rdev, size, PAGE_SIZE, false,
108 RADEON_GEM_DOMAIN_GTT, sg, pbo);
109 if (ret)
110 return ret;
111 bo = *pbo;
112 bo->gem_base.driver_private = bo;
113
114 mutex_lock(&rdev->gem.mutex);
115 list_add_tail(&bo->list, &rdev->gem.objects);
116 mutex_unlock(&rdev->gem.mutex);
117
118 return 0;
119}
120
121struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
122 struct drm_gem_object *obj,
123 int flags)
124{
125 struct radeon_bo *bo = gem_to_radeon_bo(obj);
126 int ret = 0;
127
128 /* pin buffer into GTT */
129 ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
130 if (ret)
131 return ERR_PTR(ret);
132
133 return dma_buf_export(bo, &radeon_dmabuf_ops, obj->size, flags);
134}
135
136struct drm_gem_object *radeon_gem_prime_import(struct drm_device *dev,
137 struct dma_buf *dma_buf)
138{
139 struct dma_buf_attachment *attach;
140 struct sg_table *sg;
141 struct radeon_bo *bo;
142 int ret;
143
144 if (dma_buf->ops == &radeon_dmabuf_ops) {
145 bo = dma_buf->priv;
146 if (bo->gem_base.dev == dev) {
147 drm_gem_object_reference(&bo->gem_base);
148 return &bo->gem_base;
149 }
150 }
151
152 /* need to attach */
153 attach = dma_buf_attach(dma_buf, dev->dev);
154 if (IS_ERR(attach))
155 return ERR_CAST(attach);
156
157 sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
158 if (IS_ERR(sg)) {
159 ret = PTR_ERR(sg);
160 goto fail_detach;
161 }
162
163 ret = radeon_prime_create(dev, dma_buf->size, sg, &bo);
164 if (ret)
165 goto fail_unmap;
166
167 bo->gem_base.import_attach = attach;
168
169 return &bo->gem_base;
170
171fail_unmap:
172 dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
173fail_detach:
174 dma_buf_detach(dma_buf, attach);
175 return ERR_PTR(ret);
176}
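
radeon_prime.c only supplies the driver-specific halves of PRIME sharing; the generic handle<->fd plumbing lives in the new drm prime helpers. The wiring into struct drm_driver happens elsewhere in this series (radeon_drv.c is not shown in this section), so the sketch below reflects the usual hook-up of this era and should be read as an assumption, not a quote of the patch:

/* Sketch of the expected drm_driver wiring (exact field list is an
 * assumption based on the helpers this series introduces). */
static struct drm_driver kms_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME, /* | ... */
	/* generic helpers translate GEM handles to/from dma-buf fds... */
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	/* ...and call back into the driver-specific functions above */
	.gem_prime_export = radeon_gem_prime_export,
	.gem_prime_import = radeon_gem_prime_import,
	/* ... */
};
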
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index cc33b3d7c33b..493a7be75306 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -24,6 +24,7 @@
24 * Authors: Dave Airlie 24 * Authors: Dave Airlie
25 * Alex Deucher 25 * Alex Deucher
26 * Jerome Glisse 26 * Jerome Glisse
27 * Christian König
27 */ 28 */
28#include <linux/seq_file.h> 29#include <linux/seq_file.h>
29#include <linux/slab.h> 30#include <linux/slab.h>
@@ -33,8 +34,10 @@
33#include "radeon.h" 34#include "radeon.h"
34#include "atom.h" 35#include "atom.h"
35 36
36int radeon_debugfs_ib_init(struct radeon_device *rdev); 37/*
37int radeon_debugfs_ring_init(struct radeon_device *rdev); 38 * IB.
39 */
40int radeon_debugfs_sa_init(struct radeon_device *rdev);
38 41
39u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx) 42u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
40{ 43{
@@ -61,123 +64,37 @@ u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
61 return idx_value; 64 return idx_value;
62} 65}
63 66
64void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
65{
66#if DRM_DEBUG_CODE
67 if (ring->count_dw <= 0) {
68 DRM_ERROR("radeon: writting more dword to ring than expected !\n");
69 }
70#endif
71 ring->ring[ring->wptr++] = v;
72 ring->wptr &= ring->ptr_mask;
73 ring->count_dw--;
74 ring->ring_free_dw--;
75}
76
77/*
78 * IB.
79 */
80bool radeon_ib_try_free(struct radeon_device *rdev, struct radeon_ib *ib)
81{
82 bool done = false;
83
84 /* only free ib which have been emited */
85 if (ib->fence && ib->fence->emitted) {
86 if (radeon_fence_signaled(ib->fence)) {
87 radeon_fence_unref(&ib->fence);
88 radeon_sa_bo_free(rdev, &ib->sa_bo);
89 done = true;
90 }
91 }
92 return done;
93}
94
95int radeon_ib_get(struct radeon_device *rdev, int ring, 67int radeon_ib_get(struct radeon_device *rdev, int ring,
96 struct radeon_ib **ib, unsigned size) 68 struct radeon_ib *ib, unsigned size)
97{ 69{
98 struct radeon_fence *fence; 70 int r;
99 unsigned cretry = 0;
100 int r = 0, i, idx;
101
102 *ib = NULL;
103 /* align size on 256 bytes */
104 size = ALIGN(size, 256);
105 71
106 r = radeon_fence_create(rdev, &fence, ring); 72 r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256, true);
107 if (r) { 73 if (r) {
108 dev_err(rdev->dev, "failed to create fence for new IB\n"); 74 dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
109 return r; 75 return r;
110 } 76 }
111 77 r = radeon_fence_create(rdev, &ib->fence, ring);
112 radeon_mutex_lock(&rdev->ib_pool.mutex); 78 if (r) {
113 idx = rdev->ib_pool.head_id; 79 dev_err(rdev->dev, "failed to create fence for new IB (%d)\n", r);
114retry: 80 radeon_sa_bo_free(rdev, &ib->sa_bo, NULL);
115 if (cretry > 5) { 81 return r;
116 dev_err(rdev->dev, "failed to get an ib after 5 retry\n");
117 radeon_mutex_unlock(&rdev->ib_pool.mutex);
118 radeon_fence_unref(&fence);
119 return -ENOMEM;
120 }
121 cretry++;
122 for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
123 radeon_ib_try_free(rdev, &rdev->ib_pool.ibs[idx]);
124 if (rdev->ib_pool.ibs[idx].fence == NULL) {
125 r = radeon_sa_bo_new(rdev, &rdev->ib_pool.sa_manager,
126 &rdev->ib_pool.ibs[idx].sa_bo,
127 size, 256);
128 if (!r) {
129 *ib = &rdev->ib_pool.ibs[idx];
130 (*ib)->ptr = rdev->ib_pool.sa_manager.cpu_ptr;
131 (*ib)->ptr += ((*ib)->sa_bo.offset >> 2);
132 (*ib)->gpu_addr = rdev->ib_pool.sa_manager.gpu_addr;
133 (*ib)->gpu_addr += (*ib)->sa_bo.offset;
134 (*ib)->fence = fence;
135 (*ib)->vm_id = 0;
136 (*ib)->is_const_ib = false;
137 /* ib are most likely to be allocated in a ring fashion
138 * thus rdev->ib_pool.head_id should be the id of the
139 * oldest ib
140 */
141 rdev->ib_pool.head_id = (1 + idx);
142 rdev->ib_pool.head_id &= (RADEON_IB_POOL_SIZE - 1);
143 radeon_mutex_unlock(&rdev->ib_pool.mutex);
144 return 0;
145 }
146 }
147 idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
148 }
149 /* this should be rare event, ie all ib scheduled none signaled yet.
150 */
151 for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
152 if (rdev->ib_pool.ibs[idx].fence && rdev->ib_pool.ibs[idx].fence->emitted) {
153 r = radeon_fence_wait(rdev->ib_pool.ibs[idx].fence, false);
154 if (!r) {
155 goto retry;
156 }
157 /* an error happened */
158 break;
159 }
160 idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
161 } 82 }
162 radeon_mutex_unlock(&rdev->ib_pool.mutex); 83
163 radeon_fence_unref(&fence); 84 ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo);
164 return r; 85 ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
86 ib->vm_id = 0;
87 ib->is_const_ib = false;
88 ib->semaphore = NULL;
89
90 return 0;
165} 91}
166 92
167void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib) 93void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
168{ 94{
169 struct radeon_ib *tmp = *ib; 95 radeon_semaphore_free(rdev, ib->semaphore, ib->fence);
170 96 radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence);
171 *ib = NULL; 97 radeon_fence_unref(&ib->fence);
172 if (tmp == NULL) {
173 return;
174 }
175 radeon_mutex_lock(&rdev->ib_pool.mutex);
176 if (tmp->fence && !tmp->fence->emitted) {
177 radeon_sa_bo_free(rdev, &tmp->sa_bo);
178 radeon_fence_unref(&tmp->fence);
179 }
180 radeon_mutex_unlock(&rdev->ib_pool.mutex);
181} 98}
182 99
183int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib) 100int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
@@ -187,14 +104,14 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
187 104
188 if (!ib->length_dw || !ring->ready) { 105 if (!ib->length_dw || !ring->ready) {
 189 /* TODO: Nothing in the ib we should report. */ 106 /* TODO: Nothing in the ib we should report. */
190 DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx); 107 dev_err(rdev->dev, "couldn't schedule ib\n");
191 return -EINVAL; 108 return -EINVAL;
192 } 109 }
193 110
194 /* 64 dwords should be enough for fence too */ 111 /* 64 dwords should be enough for fence too */
195 r = radeon_ring_lock(rdev, ring, 64); 112 r = radeon_ring_lock(rdev, ring, 64);
196 if (r) { 113 if (r) {
197 DRM_ERROR("radeon: scheduling IB failed (%d).\n", r); 114 dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
198 return r; 115 return r;
199 } 116 }
200 radeon_ring_ib_execute(rdev, ib->fence->ring, ib); 117 radeon_ring_ib_execute(rdev, ib->fence->ring, ib);
@@ -205,74 +122,90 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
205 122
206int radeon_ib_pool_init(struct radeon_device *rdev) 123int radeon_ib_pool_init(struct radeon_device *rdev)
207{ 124{
208 struct radeon_sa_manager tmp; 125 int r;
209 int i, r;
210 126
211 r = radeon_sa_bo_manager_init(rdev, &tmp, 127 if (rdev->ib_pool_ready) {
128 return 0;
129 }
130 r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
212 RADEON_IB_POOL_SIZE*64*1024, 131 RADEON_IB_POOL_SIZE*64*1024,
213 RADEON_GEM_DOMAIN_GTT); 132 RADEON_GEM_DOMAIN_GTT);
214 if (r) { 133 if (r) {
215 return r; 134 return r;
216 } 135 }
217 136 rdev->ib_pool_ready = true;
218 radeon_mutex_lock(&rdev->ib_pool.mutex); 137 if (radeon_debugfs_sa_init(rdev)) {
219 if (rdev->ib_pool.ready) { 138 dev_err(rdev->dev, "failed to register debugfs file for SA\n");
220 radeon_mutex_unlock(&rdev->ib_pool.mutex);
221 radeon_sa_bo_manager_fini(rdev, &tmp);
222 return 0;
223 }
224
225 rdev->ib_pool.sa_manager = tmp;
226 INIT_LIST_HEAD(&rdev->ib_pool.sa_manager.sa_bo);
227 for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
228 rdev->ib_pool.ibs[i].fence = NULL;
229 rdev->ib_pool.ibs[i].idx = i;
230 rdev->ib_pool.ibs[i].length_dw = 0;
231 INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].sa_bo.list);
232 }
233 rdev->ib_pool.head_id = 0;
234 rdev->ib_pool.ready = true;
235 DRM_INFO("radeon: ib pool ready.\n");
236
237 if (radeon_debugfs_ib_init(rdev)) {
238 DRM_ERROR("Failed to register debugfs file for IB !\n");
239 }
240 if (radeon_debugfs_ring_init(rdev)) {
241 DRM_ERROR("Failed to register debugfs file for rings !\n");
242 } 139 }
243 radeon_mutex_unlock(&rdev->ib_pool.mutex);
244 return 0; 140 return 0;
245} 141}
246 142
247void radeon_ib_pool_fini(struct radeon_device *rdev) 143void radeon_ib_pool_fini(struct radeon_device *rdev)
248{ 144{
249 unsigned i; 145 if (rdev->ib_pool_ready) {
250 146 radeon_sa_bo_manager_fini(rdev, &rdev->ring_tmp_bo);
251 radeon_mutex_lock(&rdev->ib_pool.mutex); 147 rdev->ib_pool_ready = false;
252 if (rdev->ib_pool.ready) {
253 for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
254 radeon_sa_bo_free(rdev, &rdev->ib_pool.ibs[i].sa_bo);
255 radeon_fence_unref(&rdev->ib_pool.ibs[i].fence);
256 }
257 radeon_sa_bo_manager_fini(rdev, &rdev->ib_pool.sa_manager);
258 rdev->ib_pool.ready = false;
259 } 148 }
260 radeon_mutex_unlock(&rdev->ib_pool.mutex);
261} 149}
262 150
263int radeon_ib_pool_start(struct radeon_device *rdev) 151int radeon_ib_pool_start(struct radeon_device *rdev)
264{ 152{
265 return radeon_sa_bo_manager_start(rdev, &rdev->ib_pool.sa_manager); 153 return radeon_sa_bo_manager_start(rdev, &rdev->ring_tmp_bo);
266} 154}
267 155
268int radeon_ib_pool_suspend(struct radeon_device *rdev) 156int radeon_ib_pool_suspend(struct radeon_device *rdev)
269{ 157{
270 return radeon_sa_bo_manager_suspend(rdev, &rdev->ib_pool.sa_manager); 158 return radeon_sa_bo_manager_suspend(rdev, &rdev->ring_tmp_bo);
159}
160
161int radeon_ib_ring_tests(struct radeon_device *rdev)
162{
163 unsigned i;
164 int r;
165
166 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
167 struct radeon_ring *ring = &rdev->ring[i];
168
169 if (!ring->ready)
170 continue;
171
172 r = radeon_ib_test(rdev, i, ring);
173 if (r) {
174 ring->ready = false;
175
176 if (i == RADEON_RING_TYPE_GFX_INDEX) {
177 /* oh, oh, that's really bad */
178 DRM_ERROR("radeon: failed testing IB on GFX ring (%d).\n", r);
179 rdev->accel_working = false;
180 return r;
181
182 } else {
183 /* still not good, but we can live with it */
184 DRM_ERROR("radeon: failed testing IB on ring %d (%d).\n", i, r);
185 }
186 }
187 }
188 return 0;
271} 189}
272 190
273/* 191/*
274 * Ring. 192 * Ring.
275 */ 193 */
194int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring);
195
196void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
197{
198#if DRM_DEBUG_CODE
199 if (ring->count_dw <= 0) {
 200 DRM_ERROR("radeon: writing more dwords to ring than expected!\n");
201 }
202#endif
203 ring->ring[ring->wptr++] = v;
204 ring->wptr &= ring->ptr_mask;
205 ring->count_dw--;
206 ring->ring_free_dw--;
207}
208
276int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *ring) 209int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *ring)
277{ 210{
278 /* r1xx-r5xx only has CP ring */ 211 /* r1xx-r5xx only has CP ring */
@@ -319,7 +252,7 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsi
319 if (ndw < ring->ring_free_dw) { 252 if (ndw < ring->ring_free_dw) {
320 break; 253 break;
321 } 254 }
322 r = radeon_fence_wait_next(rdev, radeon_ring_index(rdev, ring)); 255 r = radeon_fence_wait_next_locked(rdev, radeon_ring_index(rdev, ring));
323 if (r) 256 if (r)
324 return r; 257 return r;
325 } 258 }
@@ -332,10 +265,10 @@ int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsig
332{ 265{
333 int r; 266 int r;
334 267
335 mutex_lock(&ring->mutex); 268 mutex_lock(&rdev->ring_lock);
336 r = radeon_ring_alloc(rdev, ring, ndw); 269 r = radeon_ring_alloc(rdev, ring, ndw);
337 if (r) { 270 if (r) {
338 mutex_unlock(&ring->mutex); 271 mutex_unlock(&rdev->ring_lock);
339 return r; 272 return r;
340 } 273 }
341 return 0; 274 return 0;
@@ -360,13 +293,85 @@ void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
360void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring) 293void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring)
361{ 294{
362 radeon_ring_commit(rdev, ring); 295 radeon_ring_commit(rdev, ring);
363 mutex_unlock(&ring->mutex); 296 mutex_unlock(&rdev->ring_lock);
364} 297}
365 298
366void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring) 299void radeon_ring_undo(struct radeon_ring *ring)
367{ 300{
368 ring->wptr = ring->wptr_old; 301 ring->wptr = ring->wptr_old;
369 mutex_unlock(&ring->mutex); 302}
303
304void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
305{
306 radeon_ring_undo(ring);
307 mutex_unlock(&rdev->ring_lock);
308}
309
310void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring)
311{
312 int r;
313
314 radeon_ring_free_size(rdev, ring);
315 if (ring->rptr == ring->wptr) {
316 r = radeon_ring_alloc(rdev, ring, 1);
317 if (!r) {
318 radeon_ring_write(ring, ring->nop);
319 radeon_ring_commit(rdev, ring);
320 }
321 }
322}
323
324void radeon_ring_lockup_update(struct radeon_ring *ring)
325{
326 ring->last_rptr = ring->rptr;
327 ring->last_activity = jiffies;
328}
329
 330/**
 331 * radeon_ring_test_lockup() - check if a ring is locked up by recording information
 332 * @rdev: radeon device structure
 333 * @ring: radeon_ring structure holding ring information
 334 *
 335 * We don't need to initialize the lockup tracking information here, as we
 336 * will either see the CP rptr at a different value or a jiffies wrap
 337 * around; either forces initialization of the lockup tracking information.
 338 *
 339 * A possible false positive is if we get called after a while and
 340 * last_cp_rptr == the current CP rptr; even if it's unlikely, it might
 341 * happen. To avoid this, if the elapsed time since the last call is bigger
 342 * than 2 seconds we return false and update the tracking information.
 343 * Because of this the caller must call radeon_ring_test_lockup several
 344 * times in less than 2 seconds for a lockup to be reported; the fencing
 345 * code should be cautious about that.
 346 *
 347 * The caller should write to the ring to force the CP to do something, so
 348 * we don't get a false positive when the CP simply has nothing to do.
 349 **/
350bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
351{
352 unsigned long cjiffies, elapsed;
353 uint32_t rptr;
354
355 cjiffies = jiffies;
356 if (!time_after(cjiffies, ring->last_activity)) {
357 /* likely a wrap around */
358 radeon_ring_lockup_update(ring);
359 return false;
360 }
361 rptr = RREG32(ring->rptr_reg);
362 ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
363 if (ring->rptr != ring->last_rptr) {
364 /* CP is still working no lockup */
365 radeon_ring_lockup_update(ring);
366 return false;
367 }
368 elapsed = jiffies_to_msecs(cjiffies - ring->last_activity);
369 if (radeon_lockup_timeout && elapsed >= radeon_lockup_timeout) {
370 dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
371 return true;
372 }
373 /* give a chance to the GPU ... */
374 return false;
370} 375}
371 376
372int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size, 377int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
@@ -385,8 +390,8 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig
385 /* Allocate ring buffer */ 390 /* Allocate ring buffer */
386 if (ring->ring_obj == NULL) { 391 if (ring->ring_obj == NULL) {
387 r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true, 392 r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
388 RADEON_GEM_DOMAIN_GTT, 393 RADEON_GEM_DOMAIN_GTT,
389 &ring->ring_obj); 394 NULL, &ring->ring_obj);
390 if (r) { 395 if (r) {
391 dev_err(rdev->dev, "(%d) ring create failed\n", r); 396 dev_err(rdev->dev, "(%d) ring create failed\n", r);
392 return r; 397 return r;
@@ -411,6 +416,9 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig
411 } 416 }
412 ring->ptr_mask = (ring->ring_size / 4) - 1; 417 ring->ptr_mask = (ring->ring_size / 4) - 1;
413 ring->ring_free_dw = ring->ring_size / 4; 418 ring->ring_free_dw = ring->ring_size / 4;
419 if (radeon_debugfs_ring_init(rdev, ring)) {
420 DRM_ERROR("Failed to register debugfs file for rings !\n");
421 }
414 return 0; 422 return 0;
415} 423}
416 424
@@ -419,11 +427,12 @@ void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring)
419 int r; 427 int r;
420 struct radeon_bo *ring_obj; 428 struct radeon_bo *ring_obj;
421 429
422 mutex_lock(&ring->mutex); 430 mutex_lock(&rdev->ring_lock);
423 ring_obj = ring->ring_obj; 431 ring_obj = ring->ring_obj;
432 ring->ready = false;
424 ring->ring = NULL; 433 ring->ring = NULL;
425 ring->ring_obj = NULL; 434 ring->ring_obj = NULL;
426 mutex_unlock(&ring->mutex); 435 mutex_unlock(&rdev->ring_lock);
427 436
428 if (ring_obj) { 437 if (ring_obj) {
429 r = radeon_bo_reserve(ring_obj, false); 438 r = radeon_bo_reserve(ring_obj, false);
@@ -476,59 +485,48 @@ static struct drm_info_list radeon_debugfs_ring_info_list[] = {
476 {"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp2_index}, 485 {"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp2_index},
477}; 486};
478 487
479static int radeon_debugfs_ib_info(struct seq_file *m, void *data) 488static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
480{ 489{
481 struct drm_info_node *node = (struct drm_info_node *) m->private; 490 struct drm_info_node *node = (struct drm_info_node *) m->private;
482 struct drm_device *dev = node->minor->dev; 491 struct drm_device *dev = node->minor->dev;
483 struct radeon_device *rdev = dev->dev_private; 492 struct radeon_device *rdev = dev->dev_private;
484 struct radeon_ib *ib = &rdev->ib_pool.ibs[*((unsigned*)node->info_ent->data)];
485 unsigned i;
486 493
487 if (ib == NULL) { 494 radeon_sa_bo_dump_debug_info(&rdev->ring_tmp_bo, m);
488 return 0; 495
489 }
490 seq_printf(m, "IB %04u\n", ib->idx);
491 seq_printf(m, "IB fence %p\n", ib->fence);
492 seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
493 for (i = 0; i < ib->length_dw; i++) {
494 seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
495 }
496 return 0; 496 return 0;
497
497} 498}
498 499
499static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE]; 500static struct drm_info_list radeon_debugfs_sa_list[] = {
500static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32]; 501 {"radeon_sa_info", &radeon_debugfs_sa_info, 0, NULL},
501static unsigned radeon_debugfs_ib_idx[RADEON_IB_POOL_SIZE]; 502};
503
502#endif 504#endif
503 505
504int radeon_debugfs_ring_init(struct radeon_device *rdev) 506int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring)
505{ 507{
506#if defined(CONFIG_DEBUG_FS) 508#if defined(CONFIG_DEBUG_FS)
507 if (rdev->family >= CHIP_CAYMAN) 509 unsigned i;
508 return radeon_debugfs_add_files(rdev, radeon_debugfs_ring_info_list, 510 for (i = 0; i < ARRAY_SIZE(radeon_debugfs_ring_info_list); ++i) {
509 ARRAY_SIZE(radeon_debugfs_ring_info_list)); 511 struct drm_info_list *info = &radeon_debugfs_ring_info_list[i];
510 else 512 int ridx = *(int*)radeon_debugfs_ring_info_list[i].data;
511 return radeon_debugfs_add_files(rdev, radeon_debugfs_ring_info_list, 1); 513 unsigned r;
512#else 514
513 return 0; 515 if (&rdev->ring[ridx] != ring)
516 continue;
517
518 r = radeon_debugfs_add_files(rdev, info, 1);
519 if (r)
520 return r;
521 }
514#endif 522#endif
523 return 0;
515} 524}
516 525
517int radeon_debugfs_ib_init(struct radeon_device *rdev) 526int radeon_debugfs_sa_init(struct radeon_device *rdev)
518{ 527{
519#if defined(CONFIG_DEBUG_FS) 528#if defined(CONFIG_DEBUG_FS)
520 unsigned i; 529 return radeon_debugfs_add_files(rdev, radeon_debugfs_sa_list, 1);
521
522 for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
523 sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
524 radeon_debugfs_ib_idx[i] = i;
525 radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
526 radeon_debugfs_ib_list[i].show = &radeon_debugfs_ib_info;
527 radeon_debugfs_ib_list[i].driver_features = 0;
528 radeon_debugfs_ib_list[i].data = &radeon_debugfs_ib_idx[i];
529 }
530 return radeon_debugfs_add_files(rdev, radeon_debugfs_ib_list,
531 RADEON_IB_POOL_SIZE);
532#else 530#else
533 return 0; 531 return 0;
534#endif 532#endif
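
Beyond the IB pool rework, the ring.c hunks introduce a cooperating pair of lockup helpers: radeon_ring_force_activity() guarantees the CP has work queued, and radeon_ring_test_lockup() reports a lockup only when the rptr stays frozen for longer than radeon_lockup_timeout across repeated calls. Below is a hedged sketch of how a caller (e.g. the fence-wait path) is expected to drive them; poll_for_lockup() is a hypothetical name used only for illustration.

/* Hedged sketch: the two helper calls are real (see the hunks above),
 * the wrapper is illustrative. */
static bool poll_for_lockup(struct radeon_device *rdev,
			    struct radeon_ring *ring)
{
	/* make sure the CP has something to chew on, so a stable rptr
	 * really means "stalled" and not merely "idle" */
	radeon_ring_force_activity(rdev, ring);

	/* true only once the rptr has not moved for longer than
	 * radeon_lockup_timeout msec across successive calls */
	return radeon_ring_test_lockup(rdev, ring);
}
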
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
index 4cce47e7dc0d..32059b745728 100644
--- a/drivers/gpu/drm/radeon/radeon_sa.c
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -27,23 +27,45 @@
27 * Authors: 27 * Authors:
28 * Jerome Glisse <glisse@freedesktop.org> 28 * Jerome Glisse <glisse@freedesktop.org>
29 */ 29 */
 30/* Algorithm:
 31 *
 32 * We store the last allocated bo in "hole"; we always try to allocate
 33 * after the last allocated bo. The principle is that in a linear GPU
 34 * ring progression, what comes after "last" is the oldest bo we allocated
 35 * and thus the first one that should no longer be in use by the GPU.
 36 *
 37 * If that's not the case, we skip over the bo after "last" to the closest
 38 * done bo, if one exists. If none exists and we are not asked to block,
 39 * we report failure to allocate.
 40 *
 41 * If we are asked to block, we wait on the oldest fence of each ring;
 42 * we just wait for any of those fences to complete.
 43 */
30#include "drmP.h" 44#include "drmP.h"
31#include "drm.h" 45#include "drm.h"
32#include "radeon.h" 46#include "radeon.h"
33 47
48static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo);
49static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager);
50
34int radeon_sa_bo_manager_init(struct radeon_device *rdev, 51int radeon_sa_bo_manager_init(struct radeon_device *rdev,
35 struct radeon_sa_manager *sa_manager, 52 struct radeon_sa_manager *sa_manager,
36 unsigned size, u32 domain) 53 unsigned size, u32 domain)
37{ 54{
38 int r; 55 int i, r;
39 56
57 spin_lock_init(&sa_manager->lock);
40 sa_manager->bo = NULL; 58 sa_manager->bo = NULL;
41 sa_manager->size = size; 59 sa_manager->size = size;
42 sa_manager->domain = domain; 60 sa_manager->domain = domain;
43 INIT_LIST_HEAD(&sa_manager->sa_bo); 61 sa_manager->hole = &sa_manager->olist;
62 INIT_LIST_HEAD(&sa_manager->olist);
63 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
64 INIT_LIST_HEAD(&sa_manager->flist[i]);
65 }
44 66
45 r = radeon_bo_create(rdev, size, RADEON_GPU_PAGE_SIZE, true, 67 r = radeon_bo_create(rdev, size, RADEON_GPU_PAGE_SIZE, true,
46 RADEON_GEM_DOMAIN_CPU, &sa_manager->bo); 68 RADEON_GEM_DOMAIN_CPU, NULL, &sa_manager->bo);
47 if (r) { 69 if (r) {
48 dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r); 70 dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
49 return r; 71 return r;
@@ -57,11 +79,15 @@ void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
57{ 79{
58 struct radeon_sa_bo *sa_bo, *tmp; 80 struct radeon_sa_bo *sa_bo, *tmp;
59 81
60 if (!list_empty(&sa_manager->sa_bo)) { 82 if (!list_empty(&sa_manager->olist)) {
 61 dev_err(rdev->dev, "sa_manager is not empty, clearing anyway\n"); 83 sa_manager->hole = &sa_manager->olist;
84 radeon_sa_bo_try_free(sa_manager);
85 if (!list_empty(&sa_manager->olist)) {
86 dev_err(rdev->dev, "sa_manager is not empty, clearing anyway\n");
87 }
62 } 88 }
63 list_for_each_entry_safe(sa_bo, tmp, &sa_manager->sa_bo, list) { 89 list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) {
64 list_del_init(&sa_bo->list); 90 radeon_sa_bo_remove_locked(sa_bo);
65 } 91 }
66 radeon_bo_unref(&sa_manager->bo); 92 radeon_bo_unref(&sa_manager->bo);
67 sa_manager->size = 0; 93 sa_manager->size = 0;
@@ -113,77 +139,248 @@ int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
113 return r; 139 return r;
114} 140}
115 141
116/* 142static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo)
117 * Principe is simple, we keep a list of sub allocation in offset 143{
118 * order (first entry has offset == 0, last entry has the highest 144 struct radeon_sa_manager *sa_manager = sa_bo->manager;
119 * offset). 145 if (sa_manager->hole == &sa_bo->olist) {
120 * 146 sa_manager->hole = sa_bo->olist.prev;
121 * When allocating new object we first check if there is room at 147 }
122 * the end total_size - (last_object_offset + last_object_size) >= 148 list_del_init(&sa_bo->olist);
123 * alloc_size. If so we allocate new object there. 149 list_del_init(&sa_bo->flist);
124 * 150 radeon_fence_unref(&sa_bo->fence);
125 * When there is not enough room at the end, we start waiting for 151 kfree(sa_bo);
126 * each sub object until we reach object_offset+object_size >= 152}
127 * alloc_size, this object then become the sub object we return. 153
128 * 154static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager)
129 * Alignment can't be bigger than page size 155{
130 */ 156 struct radeon_sa_bo *sa_bo, *tmp;
157
158 if (sa_manager->hole->next == &sa_manager->olist)
159 return;
160
161 sa_bo = list_entry(sa_manager->hole->next, struct radeon_sa_bo, olist);
162 list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
163 if (sa_bo->fence == NULL || !radeon_fence_signaled(sa_bo->fence)) {
164 return;
165 }
166 radeon_sa_bo_remove_locked(sa_bo);
167 }
168}
169
170static inline unsigned radeon_sa_bo_hole_soffset(struct radeon_sa_manager *sa_manager)
171{
172 struct list_head *hole = sa_manager->hole;
173
174 if (hole != &sa_manager->olist) {
175 return list_entry(hole, struct radeon_sa_bo, olist)->eoffset;
176 }
177 return 0;
178}
179
180static inline unsigned radeon_sa_bo_hole_eoffset(struct radeon_sa_manager *sa_manager)
181{
182 struct list_head *hole = sa_manager->hole;
183
184 if (hole->next != &sa_manager->olist) {
185 return list_entry(hole->next, struct radeon_sa_bo, olist)->soffset;
186 }
187 return sa_manager->size;
188}
189
190static bool radeon_sa_bo_try_alloc(struct radeon_sa_manager *sa_manager,
191 struct radeon_sa_bo *sa_bo,
192 unsigned size, unsigned align)
193{
194 unsigned soffset, eoffset, wasted;
195
196 soffset = radeon_sa_bo_hole_soffset(sa_manager);
197 eoffset = radeon_sa_bo_hole_eoffset(sa_manager);
198 wasted = (align - (soffset % align)) % align;
199
200 if ((eoffset - soffset) >= (size + wasted)) {
201 soffset += wasted;
202
203 sa_bo->manager = sa_manager;
204 sa_bo->soffset = soffset;
205 sa_bo->eoffset = soffset + size;
206 list_add(&sa_bo->olist, sa_manager->hole);
207 INIT_LIST_HEAD(&sa_bo->flist);
208 sa_manager->hole = &sa_bo->olist;
209 return true;
210 }
211 return false;
212}
213
214static bool radeon_sa_bo_next_hole(struct radeon_sa_manager *sa_manager,
215 struct radeon_fence **fences,
216 unsigned *tries)
217{
218 struct radeon_sa_bo *best_bo = NULL;
219 unsigned i, soffset, best, tmp;
220
221 /* if hole points to the end of the buffer */
222 if (sa_manager->hole->next == &sa_manager->olist) {
 223 /* try again from the beginning */
224 sa_manager->hole = &sa_manager->olist;
225 return true;
226 }
227
228 soffset = radeon_sa_bo_hole_soffset(sa_manager);
229 /* to handle wrap around we add sa_manager->size */
230 best = sa_manager->size * 2;
 231 /* go over all fence lists and try to find the sa_bo
 232 * closest to the current last
233 */
234 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
235 struct radeon_sa_bo *sa_bo;
236
237 if (list_empty(&sa_manager->flist[i])) {
238 continue;
239 }
240
241 sa_bo = list_first_entry(&sa_manager->flist[i],
242 struct radeon_sa_bo, flist);
243
244 if (!radeon_fence_signaled(sa_bo->fence)) {
245 fences[i] = sa_bo->fence;
246 continue;
247 }
248
249 /* limit the number of tries each ring gets */
250 if (tries[i] > 2) {
251 continue;
252 }
253
254 tmp = sa_bo->soffset;
255 if (tmp < soffset) {
256 /* wrap around, pretend it's after */
257 tmp += sa_manager->size;
258 }
259 tmp -= soffset;
260 if (tmp < best) {
261 /* this sa bo is the closest one */
262 best = tmp;
263 best_bo = sa_bo;
264 }
265 }
266
267 if (best_bo) {
268 ++tries[best_bo->fence->ring];
269 sa_manager->hole = best_bo->olist.prev;
270
 271 /* we know that this one is signaled,
 272 so it's safe to remove it */
273 radeon_sa_bo_remove_locked(best_bo);
274 return true;
275 }
276 return false;
277}
278
131int radeon_sa_bo_new(struct radeon_device *rdev, 279int radeon_sa_bo_new(struct radeon_device *rdev,
132 struct radeon_sa_manager *sa_manager, 280 struct radeon_sa_manager *sa_manager,
133 struct radeon_sa_bo *sa_bo, 281 struct radeon_sa_bo **sa_bo,
134 unsigned size, unsigned align) 282 unsigned size, unsigned align, bool block)
135{ 283{
136 struct radeon_sa_bo *tmp; 284 struct radeon_fence *fences[RADEON_NUM_RINGS];
137 struct list_head *head; 285 unsigned tries[RADEON_NUM_RINGS];
138 unsigned offset = 0, wasted = 0; 286 int i, r = -ENOMEM;
139 287
140 BUG_ON(align > RADEON_GPU_PAGE_SIZE); 288 BUG_ON(align > RADEON_GPU_PAGE_SIZE);
141 BUG_ON(size > sa_manager->size); 289 BUG_ON(size > sa_manager->size);
142 290
143 /* no one ? */ 291 *sa_bo = kmalloc(sizeof(struct radeon_sa_bo), GFP_KERNEL);
144 head = sa_manager->sa_bo.prev; 292 if ((*sa_bo) == NULL) {
145 if (list_empty(&sa_manager->sa_bo)) { 293 return -ENOMEM;
146 goto out;
147 } 294 }
295 (*sa_bo)->manager = sa_manager;
296 (*sa_bo)->fence = NULL;
297 INIT_LIST_HEAD(&(*sa_bo)->olist);
298 INIT_LIST_HEAD(&(*sa_bo)->flist);
148 299
149 /* look for a hole big enough */ 300 spin_lock(&sa_manager->lock);
150 offset = 0; 301 do {
151 list_for_each_entry(tmp, &sa_manager->sa_bo, list) { 302 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
152 /* room before this object ? */ 303 fences[i] = NULL;
153 if ((tmp->offset - offset) >= size) { 304 tries[i] = 0;
154 head = tmp->list.prev;
155 goto out;
156 } 305 }
157 offset = tmp->offset + tmp->size; 306
158 wasted = offset % align; 307 do {
159 if (wasted) { 308 radeon_sa_bo_try_free(sa_manager);
160 wasted = align - wasted; 309
310 if (radeon_sa_bo_try_alloc(sa_manager, *sa_bo,
311 size, align)) {
312 spin_unlock(&sa_manager->lock);
313 return 0;
314 }
315
316 /* see if we can skip over some allocations */
317 } while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
318
319 if (block) {
320 spin_unlock(&sa_manager->lock);
321 r = radeon_fence_wait_any(rdev, fences, false);
322 spin_lock(&sa_manager->lock);
323 if (r) {
324 /* if we have nothing to wait for we
325 are practically out of memory */
326 if (r == -ENOENT) {
327 r = -ENOMEM;
328 }
329 goto out_err;
330 }
161 } 331 }
162 offset += wasted; 332 } while (block);
163 } 333
164 /* room at the end ? */ 334out_err:
165 head = sa_manager->sa_bo.prev; 335 spin_unlock(&sa_manager->lock);
166 tmp = list_entry(head, struct radeon_sa_bo, list); 336 kfree(*sa_bo);
167 offset = tmp->offset + tmp->size; 337 *sa_bo = NULL;
168 wasted = offset % align; 338 return r;
169 if (wasted) { 339}
170 wasted = align - wasted; 340
171 } 341void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo,
172 offset += wasted; 342 struct radeon_fence *fence)
173 if ((sa_manager->size - offset) < size) { 343{
174 /* failed to find somethings big enough */ 344 struct radeon_sa_manager *sa_manager;
175 return -ENOMEM; 345
346 if (sa_bo == NULL || *sa_bo == NULL) {
347 return;
176 } 348 }
177 349
178out: 350 sa_manager = (*sa_bo)->manager;
179 sa_bo->manager = sa_manager; 351 spin_lock(&sa_manager->lock);
180 sa_bo->offset = offset; 352 if (fence && fence->seq && fence->seq < RADEON_FENCE_NOTEMITED_SEQ) {
181 sa_bo->size = size; 353 (*sa_bo)->fence = radeon_fence_ref(fence);
182 list_add(&sa_bo->list, head); 354 list_add_tail(&(*sa_bo)->flist,
183 return 0; 355 &sa_manager->flist[fence->ring]);
356 } else {
357 radeon_sa_bo_remove_locked(*sa_bo);
358 }
359 spin_unlock(&sa_manager->lock);
360 *sa_bo = NULL;
184} 361}
185 362
186void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo *sa_bo) 363#if defined(CONFIG_DEBUG_FS)
364void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
365 struct seq_file *m)
187{ 366{
188 list_del_init(&sa_bo->list); 367 struct radeon_sa_bo *i;
368
369 spin_lock(&sa_manager->lock);
370 list_for_each_entry(i, &sa_manager->olist, olist) {
371 if (&i->olist == sa_manager->hole) {
372 seq_printf(m, ">");
373 } else {
374 seq_printf(m, " ");
375 }
376 seq_printf(m, "[0x%08x 0x%08x] size %8d",
377 i->soffset, i->eoffset, i->eoffset - i->soffset);
378 if (i->fence) {
379 seq_printf(m, " protected by 0x%016llx on ring %d",
380 i->fence->seq, i->fence->ring);
381 }
382 seq_printf(m, "\n");
383 }
384 spin_unlock(&sa_manager->lock);
189} 385}
386#endif
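
The only arithmetic in radeon_sa_bo_try_alloc() worth pausing on is the alignment term: wasted = (align - (soffset % align)) % align rounds the hole's start offset up to the next multiple of align, and the trailing % align makes an already-aligned offset waste zero bytes. A stand-alone, non-kernel check with concrete numbers:

/* Self-contained check of the alignment arithmetic used by the
 * allocator above. */
#include <assert.h>

static unsigned align_waste(unsigned soffset, unsigned align)
{
	return (align - (soffset % align)) % align;
}

int main(void)
{
	assert(align_waste(100, 64) == 28);  /* 100 + 28 == 128, 64-aligned */
	assert(align_waste(128, 64) == 0);   /* already aligned: no waste */
	assert(align_waste(1, 256) == 255);  /* worst case wastes align - 1 */
	return 0;
}
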
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
index 61dd4e3c9209..e2ace5dce117 100644
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -31,148 +31,107 @@
31#include "drm.h" 31#include "drm.h"
32#include "radeon.h" 32#include "radeon.h"
33 33
34static int radeon_semaphore_add_bo(struct radeon_device *rdev)
35{
36 struct radeon_semaphore_bo *bo;
37 unsigned long irq_flags;
38 uint64_t gpu_addr;
39 uint32_t *cpu_ptr;
40 int r, i;
41
42
43 bo = kmalloc(sizeof(struct radeon_semaphore_bo), GFP_KERNEL);
44 if (bo == NULL) {
45 return -ENOMEM;
46 }
47 INIT_LIST_HEAD(&bo->free);
48 INIT_LIST_HEAD(&bo->list);
49 bo->nused = 0;
50
51 r = radeon_ib_get(rdev, 0, &bo->ib, RADEON_SEMAPHORE_BO_SIZE);
52 if (r) {
53 dev_err(rdev->dev, "failed to get a bo after 5 retry\n");
54 kfree(bo);
55 return r;
56 }
57 gpu_addr = rdev->ib_pool.sa_manager.gpu_addr;
58 gpu_addr += bo->ib->sa_bo.offset;
59 cpu_ptr = rdev->ib_pool.sa_manager.cpu_ptr;
60 cpu_ptr += (bo->ib->sa_bo.offset >> 2);
61 for (i = 0; i < (RADEON_SEMAPHORE_BO_SIZE/8); i++) {
62 bo->semaphores[i].gpu_addr = gpu_addr;
63 bo->semaphores[i].cpu_ptr = cpu_ptr;
64 bo->semaphores[i].bo = bo;
65 list_add_tail(&bo->semaphores[i].list, &bo->free);
66 gpu_addr += 8;
67 cpu_ptr += 2;
68 }
69 write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
70 list_add_tail(&bo->list, &rdev->semaphore_drv.bo);
71 write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
72 return 0;
73}
74
75static void radeon_semaphore_del_bo_locked(struct radeon_device *rdev,
76 struct radeon_semaphore_bo *bo)
77{
78 radeon_sa_bo_free(rdev, &bo->ib->sa_bo);
79 radeon_fence_unref(&bo->ib->fence);
80 list_del(&bo->list);
81 kfree(bo);
82}
83
84void radeon_semaphore_shrink_locked(struct radeon_device *rdev)
85{
86 struct radeon_semaphore_bo *bo, *n;
87
88 if (list_empty(&rdev->semaphore_drv.bo)) {
89 return;
90 }
91 /* only shrink if first bo has free semaphore */
92 bo = list_first_entry(&rdev->semaphore_drv.bo, struct radeon_semaphore_bo, list);
93 if (list_empty(&bo->free)) {
94 return;
95 }
96 list_for_each_entry_safe_continue(bo, n, &rdev->semaphore_drv.bo, list) {
97 if (bo->nused)
98 continue;
99 radeon_semaphore_del_bo_locked(rdev, bo);
100 }
101}
102 34
103int radeon_semaphore_create(struct radeon_device *rdev, 35int radeon_semaphore_create(struct radeon_device *rdev,
104 struct radeon_semaphore **semaphore) 36 struct radeon_semaphore **semaphore)
105{ 37{
106 struct radeon_semaphore_bo *bo;
107 unsigned long irq_flags;
108 bool do_retry = true;
109 int r; 38 int r;
110 39
111retry: 40 *semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL);
112 *semaphore = NULL;
113 write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
114 list_for_each_entry(bo, &rdev->semaphore_drv.bo, list) {
115 if (list_empty(&bo->free))
116 continue;
117 *semaphore = list_first_entry(&bo->free, struct radeon_semaphore, list);
118 (*semaphore)->cpu_ptr[0] = 0;
119 (*semaphore)->cpu_ptr[1] = 0;
120 list_del(&(*semaphore)->list);
121 bo->nused++;
122 break;
123 }
124 write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
125
126 if (*semaphore == NULL) { 41 if (*semaphore == NULL) {
127 if (do_retry) {
128 do_retry = false;
129 r = radeon_semaphore_add_bo(rdev);
130 if (r)
131 return r;
132 goto retry;
133 }
134 return -ENOMEM; 42 return -ENOMEM;
135 } 43 }
136 44 r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo,
45 &(*semaphore)->sa_bo, 8, 8, true);
46 if (r) {
47 kfree(*semaphore);
48 *semaphore = NULL;
49 return r;
50 }
51 (*semaphore)->waiters = 0;
52 (*semaphore)->gpu_addr = radeon_sa_bo_gpu_addr((*semaphore)->sa_bo);
53 *((uint64_t*)radeon_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0;
137 return 0; 54 return 0;
138} 55}
139 56
140void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring, 57void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
141 struct radeon_semaphore *semaphore) 58 struct radeon_semaphore *semaphore)
142{ 59{
60 --semaphore->waiters;
143 radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, false); 61 radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, false);
144} 62}
145 63
146void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring, 64void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
147 struct radeon_semaphore *semaphore) 65 struct radeon_semaphore *semaphore)
148{ 66{
67 ++semaphore->waiters;
149 radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, true); 68 radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, true);
150} 69}
151 70
152void radeon_semaphore_free(struct radeon_device *rdev, 71int radeon_semaphore_sync_rings(struct radeon_device *rdev,
153 struct radeon_semaphore *semaphore) 72 struct radeon_semaphore *semaphore,
73 bool sync_to[RADEON_NUM_RINGS],
74 int dst_ring)
154{ 75{
155 unsigned long irq_flags; 76 int i = 0, r;
77
78 mutex_lock(&rdev->ring_lock);
79 r = radeon_ring_alloc(rdev, &rdev->ring[dst_ring], RADEON_NUM_RINGS * 8);
80 if (r) {
81 goto error;
82 }
83
84 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
85 /* no need to sync to our own or unused rings */
86 if (!sync_to[i] || i == dst_ring)
87 continue;
156 88
157 write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags); 89 /* prevent GPU deadlocks */
158 semaphore->bo->nused--; 90 if (!rdev->ring[i].ready) {
159 list_add_tail(&semaphore->list, &semaphore->bo->free); 91 dev_err(rdev->dev, "Trying to sync to a disabled ring!");
160 radeon_semaphore_shrink_locked(rdev); 92 r = -EINVAL;
161 write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags); 93 goto error;
94 }
95
96 r = radeon_ring_alloc(rdev, &rdev->ring[i], 8);
97 if (r) {
98 goto error;
99 }
100
101 radeon_semaphore_emit_signal(rdev, i, semaphore);
102 radeon_semaphore_emit_wait(rdev, dst_ring, semaphore);
103
104 radeon_ring_commit(rdev, &rdev->ring[i]);
105 }
106
107 radeon_ring_commit(rdev, &rdev->ring[dst_ring]);
108 mutex_unlock(&rdev->ring_lock);
109
110 return 0;
111
112error:
113 /* unlock all locks taken so far */
114 for (--i; i >= 0; --i) {
115 if (sync_to[i] || i == dst_ring) {
116 radeon_ring_undo(&rdev->ring[i]);
117 }
118 }
119 radeon_ring_undo(&rdev->ring[dst_ring]);
120 mutex_unlock(&rdev->ring_lock);
121 return r;
162} 122}
163 123
164void radeon_semaphore_driver_fini(struct radeon_device *rdev) 124void radeon_semaphore_free(struct radeon_device *rdev,
125 struct radeon_semaphore *semaphore,
126 struct radeon_fence *fence)
165{ 127{
166 struct radeon_semaphore_bo *bo, *n; 128 if (semaphore == NULL) {
167 unsigned long irq_flags; 129 return;
168 130 }
169 write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags); 131 if (semaphore->waiters > 0) {
170 /* we force to free everything */ 132 dev_err(rdev->dev, "semaphore %p has more waiters than signalers,"
171 list_for_each_entry_safe(bo, n, &rdev->semaphore_drv.bo, list) { 133 " hardware lockup imminent!\n", semaphore);
172 if (!list_empty(&bo->free)) {
173 dev_err(rdev->dev, "still in use semaphore\n");
174 }
175 radeon_semaphore_del_bo_locked(rdev, bo);
176 } 134 }
177 write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags); 135 radeon_sa_bo_free(rdev, &semaphore->sa_bo, fence);
136 kfree(semaphore);
178} 137}
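
The rewritten semaphore code drops the dedicated semaphore bo pool in favour of 8-byte sub-allocations from the shared ring_tmp_bo manager, and radeon_semaphore_free() now takes the fence that protects the GPU's last use of the semaphore. The implied lifecycle, condensed (kernel context assumed; src_ring, dst_ring and fence are placeholders for the caller's own state):

	struct radeon_semaphore *sem;
	bool sync_to[RADEON_NUM_RINGS] = { false };
	int r;

	r = radeon_semaphore_create(rdev, &sem);
	if (r)
		return r;

	sync_to[src_ring] = true;	/* ring whose work we must wait on */
	r = radeon_semaphore_sync_rings(rdev, sem, sync_to, dst_ring);
	/* ... emit the dst_ring work and a fence covering it ... */

	/* the fence keeps the 8-byte sub-allocation alive until the GPU
	 * has provably consumed both the signal and the wait */
	radeon_semaphore_free(rdev, sem, fence);

The waiters counter incremented in emit_wait() and decremented in emit_signal() is what lets radeon_semaphore_free() warn when a semaphore is freed with more waits than signals emitted.
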
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index dc5dcf483aa3..efff929ea49d 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -59,7 +59,7 @@ void radeon_test_moves(struct radeon_device *rdev)
59 } 59 }
60 60
61 r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, 61 r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
62 &vram_obj); 62 NULL, &vram_obj);
63 if (r) { 63 if (r) {
64 DRM_ERROR("Failed to create VRAM object\n"); 64 DRM_ERROR("Failed to create VRAM object\n");
65 goto out_cleanup; 65 goto out_cleanup;
@@ -78,7 +78,7 @@ void radeon_test_moves(struct radeon_device *rdev)
 		void **vram_start, **vram_end;
 
 		r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
-				     RADEON_GEM_DOMAIN_GTT, gtt_obj + i);
+				     RADEON_GEM_DOMAIN_GTT, NULL, gtt_obj + i);
 		if (r) {
 			DRM_ERROR("Failed to create GTT object %d\n", i);
 			goto out_cleanup;
@@ -317,7 +317,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
 
 out_cleanup:
 	if (semaphore)
-		radeon_semaphore_free(rdev, semaphore);
+		radeon_semaphore_free(rdev, semaphore, NULL);
 
 	if (fence1)
 		radeon_fence_unref(&fence1);
@@ -437,7 +437,7 @@ void radeon_test_ring_sync2(struct radeon_device *rdev,
 
 out_cleanup:
 	if (semaphore)
-		radeon_semaphore_free(rdev, semaphore);
+		radeon_semaphore_free(rdev, semaphore, NULL);
 
 	if (fenceA)
 		radeon_fence_unref(&fenceA);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index f493c6403af5..c94a2257761f 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -222,8 +222,9 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
 {
 	struct radeon_device *rdev;
 	uint64_t old_start, new_start;
-	struct radeon_fence *fence;
-	int r, i;
+	struct radeon_fence *fence, *old_fence;
+	struct radeon_semaphore *sem = NULL;
+	int r;
 
 	rdev = radeon_get_rdev(bo->bdev);
 	r = radeon_fence_create(rdev, &fence, radeon_copy_ring_index(rdev));
@@ -242,6 +243,7 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
 		break;
 	default:
 		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
+		radeon_fence_unref(&fence);
 		return -EINVAL;
 	}
 	switch (new_mem->mem_type) {
@@ -253,42 +255,36 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
 		break;
 	default:
 		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
+		radeon_fence_unref(&fence);
 		return -EINVAL;
 	}
 	if (!rdev->ring[radeon_copy_ring_index(rdev)].ready) {
 		DRM_ERROR("Trying to move memory with ring turned off.\n");
+		radeon_fence_unref(&fence);
 		return -EINVAL;
 	}
 
 	BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
 
 	/* sync other rings */
-	if (rdev->family >= CHIP_R600) {
-		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
-			/* no need to sync to our own or unused rings */
-			if (i == radeon_copy_ring_index(rdev) || !rdev->ring[i].ready)
-				continue;
+	old_fence = bo->sync_obj;
+	if (old_fence && old_fence->ring != fence->ring
+	    && !radeon_fence_signaled(old_fence)) {
+		bool sync_to_ring[RADEON_NUM_RINGS] = { };
+		sync_to_ring[old_fence->ring] = true;
 
-			if (!fence->semaphore) {
-				r = radeon_semaphore_create(rdev, &fence->semaphore);
-				/* FIXME: handle semaphore error */
-				if (r)
-					continue;
-			}
+		r = radeon_semaphore_create(rdev, &sem);
+		if (r) {
+			radeon_fence_unref(&fence);
+			return r;
+		}
 
-			r = radeon_ring_lock(rdev, &rdev->ring[i], 3);
-			/* FIXME: handle ring lock error */
-			if (r)
-				continue;
-			radeon_semaphore_emit_signal(rdev, i, fence->semaphore);
-			radeon_ring_unlock_commit(rdev, &rdev->ring[i]);
-
-			r = radeon_ring_lock(rdev, &rdev->ring[radeon_copy_ring_index(rdev)], 3);
-			/* FIXME: handle ring lock error */
-			if (r)
-				continue;
-			radeon_semaphore_emit_wait(rdev, radeon_copy_ring_index(rdev), fence->semaphore);
-			radeon_ring_unlock_commit(rdev, &rdev->ring[radeon_copy_ring_index(rdev)]);
+		r = radeon_semaphore_sync_rings(rdev, sem,
+						sync_to_ring, fence->ring);
+		if (r) {
+			radeon_semaphore_free(rdev, sem, NULL);
+			radeon_fence_unref(&fence);
+			return r;
 		}
 	}
 
@@ -298,6 +294,7 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
 	/* FIXME: handle copy error */
 	r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
 				      evict, no_wait_reserve, no_wait_gpu, new_mem);
+	radeon_semaphore_free(rdev, sem, fence);
 	radeon_fence_unref(&fence);
 	return r;
 }
@@ -614,10 +611,18 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
 	struct radeon_ttm_tt *gtt = (void *)ttm;
 	unsigned i;
 	int r;
+	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
 
 	if (ttm->state != tt_unpopulated)
 		return 0;
 
+	if (slave && ttm->sg) {
+		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
+						 gtt->ttm.dma_address, ttm->num_pages);
+		ttm->state = tt_unbound;
+		return 0;
+	}
+
 	rdev = radeon_get_rdev(ttm->bdev);
 #if __OS_HAS_AGP
 	if (rdev->flags & RADEON_IS_AGP) {
@@ -658,6 +663,10 @@ static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
 	struct radeon_device *rdev;
 	struct radeon_ttm_tt *gtt = (void *)ttm;
 	unsigned i;
+	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
+
+	if (slave)
+		return;
 
 	rdev = radeon_get_rdev(ttm->bdev);
 #if __OS_HAS_AGP
@@ -729,8 +738,8 @@ int radeon_ttm_init(struct radeon_device *rdev)
 		return r;
 	}
 	r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
 			     RADEON_GEM_DOMAIN_VRAM,
-			     &rdev->stollen_vga_memory);
+			     NULL, &rdev->stollen_vga_memory);
 	if (r) {
 		return r;
 	}
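
The NULL threaded through every radeon_bo_create() call in this series sits between the domain and the returned BO pointer; judging from the TTM and prime changes elsewhere in this pull, it is an optional sg_table used when the BO is backed by an imported dma-buf. A sketch of the two cases (the parameter's meaning is inferred, not shown in this hunk):

	/* ordinary allocation: no scatter-gather table */
	r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_VRAM, NULL, &bo);

	/* prime import: pages come from the exporter's sg_table
	 * (obtained via dma_buf_map_attachment()), so GTT only */
	r = radeon_bo_create(rdev, size, PAGE_SIZE, false,
			     RADEON_GEM_DOMAIN_GTT, sg, &bo);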
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 4cf381b3a6d8..a464eb5e2df2 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -430,12 +430,9 @@ static int rs400_startup(struct radeon_device *rdev)
 	if (r)
 		return r;
 
-	r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
-	if (r) {
-		dev_err(rdev->dev, "failed testing IB (%d).\n", r);
-		rdev->accel_working = false;
+	r = radeon_ib_ring_tests(rdev);
+	if (r)
 		return r;
-	}
 
 	return 0;
 }
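
Note the pattern repeated below in rs600.c, rs690.c, rv515.c and rv770.c: the per-ASIC IB-test boilerplate collapses into one radeon_ib_ring_tests() call. The helper's body is not part of this section; presumably it is close to the following sketch (everything beyond the names visible in the diff is a guess):

	/* Sketch only: run the IB test on every ready ring, mark failures. */
	int radeon_ib_ring_tests(struct radeon_device *rdev)
	{
		unsigned i;
		int r;

		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			struct radeon_ring *ring = &rdev->ring[i];

			if (!ring->ready)
				continue;

			r = radeon_ib_test(rdev, i, ring);
			if (r) {
				ring->ready = false;
				if (i == RADEON_RING_TYPE_GFX_INDEX) {
					/* without gfx IBs nothing can be accelerated */
					rdev->accel_working = false;
					return r;
				}
			}
		}
		return 0;
	}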
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index d25cf869d08d..25f9eef12c42 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -396,7 +396,6 @@ int rs600_asic_reset(struct radeon_device *rdev)
 	/* Check if GPU is idle */
 	if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
 		dev_err(rdev->dev, "failed to reset GPU\n");
-		rdev->gpu_lockup = true;
 		ret = -1;
 	} else
 		dev_info(rdev->dev, "GPU reset succeed\n");
@@ -553,6 +552,12 @@ int rs600_irq_set(struct radeon_device *rdev)
 		~S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
 	u32 hpd2 = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL) &
 		~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
+	u32 hdmi0;
+	if (ASIC_IS_DCE2(rdev))
+		hdmi0 = RREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL) &
+			~S_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(1);
+	else
+		hdmi0 = 0;
 
 	if (!rdev->irq.installed) {
 		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -579,10 +584,15 @@ int rs600_irq_set(struct radeon_device *rdev)
 	if (rdev->irq.hpd[1]) {
 		hpd2 |= S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
 	}
+	if (rdev->irq.afmt[0]) {
+		hdmi0 |= S_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(1);
+	}
 	WREG32(R_000040_GEN_INT_CNTL, tmp);
 	WREG32(R_006540_DxMODE_INT_MASK, mode_int);
 	WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
 	WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
+	if (ASIC_IS_DCE2(rdev))
+		WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
 	return 0;
 }
 
@@ -622,6 +632,17 @@ static inline u32 rs600_irq_ack(struct radeon_device *rdev)
 		rdev->irq.stat_regs.r500.disp_int = 0;
 	}
 
+	if (ASIC_IS_DCE2(rdev)) {
+		rdev->irq.stat_regs.r500.hdmi0_status = RREG32(R_007404_HDMI0_STATUS) &
+			S_007404_HDMI0_AZ_FORMAT_WTRIG(1);
+		if (G_007404_HDMI0_AZ_FORMAT_WTRIG(rdev->irq.stat_regs.r500.hdmi0_status)) {
+			tmp = RREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL);
+			tmp |= S_007408_HDMI0_AZ_FORMAT_WTRIG_ACK(1);
+			WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, tmp);
+		}
+	} else
+		rdev->irq.stat_regs.r500.hdmi0_status = 0;
+
 	if (irqs) {
 		WREG32(R_000044_GEN_INT_STATUS, irqs);
 	}
@@ -630,6 +651,9 @@ static inline u32 rs600_irq_ack(struct radeon_device *rdev)
 
 void rs600_irq_disable(struct radeon_device *rdev)
 {
+	u32 hdmi0 = RREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL) &
+		~S_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(1);
+	WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
 	WREG32(R_000040_GEN_INT_CNTL, 0);
 	WREG32(R_006540_DxMODE_INT_MASK, 0);
 	/* Wait and acknowledge irq */
@@ -641,15 +665,20 @@ int rs600_irq_process(struct radeon_device *rdev)
 {
 	u32 status, msi_rearm;
 	bool queue_hotplug = false;
+	bool queue_hdmi = false;
 
 	/* reset gui idle ack. the status bit is broken */
 	rdev->irq.gui_idle_acked = false;
 
 	status = rs600_irq_ack(rdev);
-	if (!status && !rdev->irq.stat_regs.r500.disp_int) {
+	if (!status &&
+	    !rdev->irq.stat_regs.r500.disp_int &&
+	    !rdev->irq.stat_regs.r500.hdmi0_status) {
 		return IRQ_NONE;
 	}
-	while (status || rdev->irq.stat_regs.r500.disp_int) {
+	while (status ||
+	       rdev->irq.stat_regs.r500.disp_int ||
+	       rdev->irq.stat_regs.r500.hdmi0_status) {
 		/* SW interrupt */
 		if (G_000044_SW_INT(status)) {
 			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
@@ -687,12 +716,18 @@ int rs600_irq_process(struct radeon_device *rdev)
 			queue_hotplug = true;
 			DRM_DEBUG("HPD2\n");
 		}
+		if (G_007404_HDMI0_AZ_FORMAT_WTRIG(rdev->irq.stat_regs.r500.hdmi0_status)) {
+			queue_hdmi = true;
+			DRM_DEBUG("HDMI0\n");
+		}
 		status = rs600_irq_ack(rdev);
 	}
 	/* reset gui idle ack. the status bit is broken */
 	rdev->irq.gui_idle_acked = false;
 	if (queue_hotplug)
 		schedule_work(&rdev->hotplug_work);
+	if (queue_hdmi)
+		schedule_work(&rdev->audio_work);
 	if (rdev->msi_enabled) {
 		switch (rdev->family) {
 		case CHIP_RS600:
@@ -883,12 +918,9 @@ static int rs600_startup(struct radeon_device *rdev)
 	if (r)
 		return r;
 
-	r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
-	if (r) {
-		dev_err(rdev->dev, "failed testing IB (%d).\n", r);
-		rdev->accel_working = false;
+	r = radeon_ib_ring_tests(rdev);
+	if (r)
 		return r;
-	}
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/radeon/rs600d.h b/drivers/gpu/drm/radeon/rs600d.h
index a27c13ac47c3..f1f89414dc63 100644
--- a/drivers/gpu/drm/radeon/rs600d.h
+++ b/drivers/gpu/drm/radeon/rs600d.h
@@ -485,6 +485,20 @@
 #define S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(x) (((x) & 0x1) << 16)
 #define G_007D18_DC_HOT_PLUG_DETECT2_INT_EN(x) (((x) >> 16) & 0x1)
 #define C_007D18_DC_HOT_PLUG_DETECT2_INT_EN 0xFFFEFFFF
+#define R_007404_HDMI0_STATUS 0x007404
+#define S_007404_HDMI0_AZ_FORMAT_WTRIG(x) (((x) & 0x1) << 28)
+#define G_007404_HDMI0_AZ_FORMAT_WTRIG(x) (((x) >> 28) & 0x1)
+#define C_007404_HDMI0_AZ_FORMAT_WTRIG 0xEFFFFFFF
+#define S_007404_HDMI0_AZ_FORMAT_WTRIG_INT(x) (((x) & 0x1) << 29)
+#define G_007404_HDMI0_AZ_FORMAT_WTRIG_INT(x) (((x) >> 29) & 0x1)
+#define C_007404_HDMI0_AZ_FORMAT_WTRIG_INT 0xDFFFFFFF
+#define R_007408_HDMI0_AUDIO_PACKET_CONTROL 0x007408
+#define S_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(x) (((x) & 0x1) << 28)
+#define G_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(x) (((x) >> 28) & 0x1)
+#define C_007408_HDMI0_AZ_FORMAT_WTRIG_MASK 0xEFFFFFFF
+#define S_007408_HDMI0_AZ_FORMAT_WTRIG_ACK(x) (((x) & 0x1) << 29)
+#define G_007408_HDMI0_AZ_FORMAT_WTRIG_ACK(x) (((x) >> 29) & 0x1)
+#define C_007408_HDMI0_AZ_FORMAT_WTRIG_ACK 0xDFFFFFFF
 
 /* MC registers */
 #define R_000000_MC_STATUS 0x000000
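
These follow the long-standing radeon register-header convention: for a field FOO in register R_xxxxxx, S_xxxxxx_FOO(v) shifts a value into place, G_xxxxxx_FOO(r) extracts it, and C_xxxxxx_FOO is the complement mask that clears it. Acknowledging the audio-format write trigger, as rs600_irq_ack() above does, looks like:

	u32 tmp = RREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL);
	tmp |= S_007408_HDMI0_AZ_FORMAT_WTRIG_ACK(1); /* set bit 29 */
	WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, tmp);

	/* test a field with G_, clear one with C_ */
	if (G_007404_HDMI0_AZ_FORMAT_WTRIG(RREG32(R_007404_HDMI0_STATUS)))
		tmp &= C_007408_HDMI0_AZ_FORMAT_WTRIG_MASK;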
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index f2c3b9d75f18..3277ddecfe9f 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -647,12 +647,9 @@ static int rs690_startup(struct radeon_device *rdev)
 	if (r)
 		return r;
 
-	r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
-	if (r) {
-		dev_err(rdev->dev, "failed testing IB (%d).\n", r);
-		rdev->accel_working = false;
+	r = radeon_ib_ring_tests(rdev);
+	if (r)
 		return r;
-	}
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index d8d78fe17946..7f08cedb5333 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -412,12 +412,10 @@ static int rv515_startup(struct radeon_device *rdev)
 	if (r)
 		return r;
 
-	r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
-	if (r) {
-		dev_err(rdev->dev, "failed testing IB (%d).\n", r);
-		rdev->accel_working = false;
+	r = radeon_ib_ring_tests(rdev);
+	if (r)
 		return r;
-	}
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index cdab1aeaed6e..c2f473bc13b8 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -1114,12 +1114,9 @@ static int rv770_startup(struct radeon_device *rdev)
 	if (r)
 		return r;
 
-	r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
-	if (r) {
-		dev_err(rdev->dev, "IB test failed (%d).\n", r);
-		rdev->accel_working = false;
+	r = radeon_ib_ring_tests(rdev);
+	if (r)
 		return r;
-	}
 
 	return 0;
 }
@@ -1178,10 +1175,6 @@ int rv770_init(struct radeon_device *rdev)
 {
 	int r;
 
-	/* This don't do much */
-	r = radeon_gem_init(rdev);
-	if (r)
-		return r;
 	/* Read BIOS */
 	if (!radeon_get_bios(rdev)) {
 		if (ASIC_IS_AVIVO(rdev))
@@ -1281,7 +1274,6 @@ void rv770_fini(struct radeon_device *rdev)
 	rv770_pcie_gart_fini(rdev);
 	r600_vram_scratch_fini(rdev);
 	radeon_gem_fini(rdev);
-	radeon_semaphore_driver_fini(rdev);
 	radeon_fence_driver_fini(rdev);
 	radeon_agp_fini(rdev);
 	radeon_bo_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index 79fa588e9ed5..9c549f702f2f 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -353,6 +353,197 @@
 
 #define SRBM_STATUS 0x0E50
 
+/* DCE 3.2 HDMI */
+#define HDMI_CONTROL 0x7400
+#       define HDMI_KEEPOUT_MODE (1 << 0)
+#       define HDMI_PACKET_GEN_VERSION (1 << 4) /* 0 = r6xx compat */
+#       define HDMI_ERROR_ACK (1 << 8)
+#       define HDMI_ERROR_MASK (1 << 9)
+#define HDMI_STATUS 0x7404
+#       define HDMI_ACTIVE_AVMUTE (1 << 0)
+#       define HDMI_AUDIO_PACKET_ERROR (1 << 16)
+#       define HDMI_VBI_PACKET_ERROR (1 << 20)
+#define HDMI_AUDIO_PACKET_CONTROL 0x7408
+#       define HDMI_AUDIO_DELAY_EN(x) (((x) & 3) << 4)
+#       define HDMI_AUDIO_PACKETS_PER_LINE(x) (((x) & 0x1f) << 16)
+#define HDMI_ACR_PACKET_CONTROL 0x740c
+#       define HDMI_ACR_SEND (1 << 0)
+#       define HDMI_ACR_CONT (1 << 1)
+#       define HDMI_ACR_SELECT(x) (((x) & 3) << 4)
+#       define HDMI_ACR_HW 0
+#       define HDMI_ACR_32 1
+#       define HDMI_ACR_44 2
+#       define HDMI_ACR_48 3
+#       define HDMI_ACR_SOURCE (1 << 8) /* 0 - hw; 1 - cts value */
+#       define HDMI_ACR_AUTO_SEND (1 << 12)
+#define HDMI_VBI_PACKET_CONTROL 0x7410
+#       define HDMI_NULL_SEND (1 << 0)
+#       define HDMI_GC_SEND (1 << 4)
+#       define HDMI_GC_CONT (1 << 5) /* 0 - once; 1 - every frame */
+#define HDMI_INFOFRAME_CONTROL0 0x7414
+#       define HDMI_AVI_INFO_SEND (1 << 0)
+#       define HDMI_AVI_INFO_CONT (1 << 1)
+#       define HDMI_AUDIO_INFO_SEND (1 << 4)
+#       define HDMI_AUDIO_INFO_CONT (1 << 5)
+#       define HDMI_MPEG_INFO_SEND (1 << 8)
+#       define HDMI_MPEG_INFO_CONT (1 << 9)
+#define HDMI_INFOFRAME_CONTROL1 0x7418
+#       define HDMI_AVI_INFO_LINE(x) (((x) & 0x3f) << 0)
+#       define HDMI_AUDIO_INFO_LINE(x) (((x) & 0x3f) << 8)
+#       define HDMI_MPEG_INFO_LINE(x) (((x) & 0x3f) << 16)
+#define HDMI_GENERIC_PACKET_CONTROL 0x741c
+#       define HDMI_GENERIC0_SEND (1 << 0)
+#       define HDMI_GENERIC0_CONT (1 << 1)
+#       define HDMI_GENERIC1_SEND (1 << 4)
+#       define HDMI_GENERIC1_CONT (1 << 5)
+#       define HDMI_GENERIC0_LINE(x) (((x) & 0x3f) << 16)
+#       define HDMI_GENERIC1_LINE(x) (((x) & 0x3f) << 24)
+#define HDMI_GC 0x7428
+#       define HDMI_GC_AVMUTE (1 << 0)
+#define AFMT_AUDIO_PACKET_CONTROL2 0x742c
+#       define AFMT_AUDIO_LAYOUT_OVRD (1 << 0)
+#       define AFMT_AUDIO_LAYOUT_SELECT (1 << 1)
+#       define AFMT_60958_CS_SOURCE (1 << 4)
+#       define AFMT_AUDIO_CHANNEL_ENABLE(x) (((x) & 0xff) << 8)
+#       define AFMT_DP_AUDIO_STREAM_ID(x) (((x) & 0xff) << 16)
+#define AFMT_AVI_INFO0 0x7454
+#       define AFMT_AVI_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
+#       define AFMT_AVI_INFO_S(x) (((x) & 3) << 8)
+#       define AFMT_AVI_INFO_B(x) (((x) & 3) << 10)
+#       define AFMT_AVI_INFO_A(x) (((x) & 1) << 12)
+#       define AFMT_AVI_INFO_Y(x) (((x) & 3) << 13)
+#       define AFMT_AVI_INFO_Y_RGB 0
+#       define AFMT_AVI_INFO_Y_YCBCR422 1
+#       define AFMT_AVI_INFO_Y_YCBCR444 2
+#       define AFMT_AVI_INFO_Y_A_B_S(x) (((x) & 0xff) << 8)
+#       define AFMT_AVI_INFO_R(x) (((x) & 0xf) << 16)
+#       define AFMT_AVI_INFO_M(x) (((x) & 0x3) << 20)
+#       define AFMT_AVI_INFO_C(x) (((x) & 0x3) << 22)
+#       define AFMT_AVI_INFO_C_M_R(x) (((x) & 0xff) << 16)
+#       define AFMT_AVI_INFO_SC(x) (((x) & 0x3) << 24)
+#       define AFMT_AVI_INFO_Q(x) (((x) & 0x3) << 26)
+#       define AFMT_AVI_INFO_EC(x) (((x) & 0x3) << 28)
+#       define AFMT_AVI_INFO_ITC(x) (((x) & 0x1) << 31)
+#       define AFMT_AVI_INFO_ITC_EC_Q_SC(x) (((x) & 0xff) << 24)
+#define AFMT_AVI_INFO1 0x7458
+#       define AFMT_AVI_INFO_VIC(x) (((x) & 0x7f) << 0) /* don't use avi infoframe v1 */
+#       define AFMT_AVI_INFO_PR(x) (((x) & 0xf) << 8) /* don't use avi infoframe v1 */
+#       define AFMT_AVI_INFO_TOP(x) (((x) & 0xffff) << 16)
+#define AFMT_AVI_INFO2 0x745c
+#       define AFMT_AVI_INFO_BOTTOM(x) (((x) & 0xffff) << 0)
+#       define AFMT_AVI_INFO_LEFT(x) (((x) & 0xffff) << 16)
+#define AFMT_AVI_INFO3 0x7460
+#       define AFMT_AVI_INFO_RIGHT(x) (((x) & 0xffff) << 0)
+#       define AFMT_AVI_INFO_VERSION(x) (((x) & 3) << 24)
+#define AFMT_MPEG_INFO0 0x7464
+#       define AFMT_MPEG_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
+#       define AFMT_MPEG_INFO_MB0(x) (((x) & 0xff) << 8)
+#       define AFMT_MPEG_INFO_MB1(x) (((x) & 0xff) << 16)
+#       define AFMT_MPEG_INFO_MB2(x) (((x) & 0xff) << 24)
+#define AFMT_MPEG_INFO1 0x7468
+#       define AFMT_MPEG_INFO_MB3(x) (((x) & 0xff) << 0)
+#       define AFMT_MPEG_INFO_MF(x) (((x) & 3) << 8)
+#       define AFMT_MPEG_INFO_FR(x) (((x) & 1) << 12)
+#define AFMT_GENERIC0_HDR 0x746c
+#define AFMT_GENERIC0_0 0x7470
+#define AFMT_GENERIC0_1 0x7474
+#define AFMT_GENERIC0_2 0x7478
+#define AFMT_GENERIC0_3 0x747c
+#define AFMT_GENERIC0_4 0x7480
+#define AFMT_GENERIC0_5 0x7484
+#define AFMT_GENERIC0_6 0x7488
+#define AFMT_GENERIC1_HDR 0x748c
+#define AFMT_GENERIC1_0 0x7490
+#define AFMT_GENERIC1_1 0x7494
+#define AFMT_GENERIC1_2 0x7498
+#define AFMT_GENERIC1_3 0x749c
+#define AFMT_GENERIC1_4 0x74a0
+#define AFMT_GENERIC1_5 0x74a4
+#define AFMT_GENERIC1_6 0x74a8
+#define HDMI_ACR_32_0 0x74ac
+#       define HDMI_ACR_CTS_32(x) (((x) & 0xfffff) << 12)
+#define HDMI_ACR_32_1 0x74b0
+#       define HDMI_ACR_N_32(x) (((x) & 0xfffff) << 0)
+#define HDMI_ACR_44_0 0x74b4
+#       define HDMI_ACR_CTS_44(x) (((x) & 0xfffff) << 12)
+#define HDMI_ACR_44_1 0x74b8
+#       define HDMI_ACR_N_44(x) (((x) & 0xfffff) << 0)
+#define HDMI_ACR_48_0 0x74bc
+#       define HDMI_ACR_CTS_48(x) (((x) & 0xfffff) << 12)
+#define HDMI_ACR_48_1 0x74c0
+#       define HDMI_ACR_N_48(x) (((x) & 0xfffff) << 0)
+#define HDMI_ACR_STATUS_0 0x74c4
+#define HDMI_ACR_STATUS_1 0x74c8
+#define AFMT_AUDIO_INFO0 0x74cc
+#       define AFMT_AUDIO_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
+#       define AFMT_AUDIO_INFO_CC(x) (((x) & 7) << 8)
+#       define AFMT_AUDIO_INFO_CHECKSUM_OFFSET(x) (((x) & 0xff) << 16)
+#define AFMT_AUDIO_INFO1 0x74d0
+#       define AFMT_AUDIO_INFO_CA(x) (((x) & 0xff) << 0)
+#       define AFMT_AUDIO_INFO_LSV(x) (((x) & 0xf) << 11)
+#       define AFMT_AUDIO_INFO_DM_INH(x) (((x) & 1) << 15)
+#       define AFMT_AUDIO_INFO_DM_INH_LSV(x) (((x) & 0xff) << 8)
+#define AFMT_60958_0 0x74d4
+#       define AFMT_60958_CS_A(x) (((x) & 1) << 0)
+#       define AFMT_60958_CS_B(x) (((x) & 1) << 1)
+#       define AFMT_60958_CS_C(x) (((x) & 1) << 2)
+#       define AFMT_60958_CS_D(x) (((x) & 3) << 3)
+#       define AFMT_60958_CS_MODE(x) (((x) & 3) << 6)
+#       define AFMT_60958_CS_CATEGORY_CODE(x) (((x) & 0xff) << 8)
+#       define AFMT_60958_CS_SOURCE_NUMBER(x) (((x) & 0xf) << 16)
+#       define AFMT_60958_CS_CHANNEL_NUMBER_L(x) (((x) & 0xf) << 20)
+#       define AFMT_60958_CS_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 24)
+#       define AFMT_60958_CS_CLOCK_ACCURACY(x) (((x) & 3) << 28)
+#define AFMT_60958_1 0x74d8
+#       define AFMT_60958_CS_WORD_LENGTH(x) (((x) & 0xf) << 0)
+#       define AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 4)
+#       define AFMT_60958_CS_VALID_L(x) (((x) & 1) << 16)
+#       define AFMT_60958_CS_VALID_R(x) (((x) & 1) << 18)
+#       define AFMT_60958_CS_CHANNEL_NUMBER_R(x) (((x) & 0xf) << 20)
+#define AFMT_AUDIO_CRC_CONTROL 0x74dc
+#       define AFMT_AUDIO_CRC_EN (1 << 0)
+#define AFMT_RAMP_CONTROL0 0x74e0
+#       define AFMT_RAMP_MAX_COUNT(x) (((x) & 0xffffff) << 0)
+#       define AFMT_RAMP_DATA_SIGN (1 << 31)
+#define AFMT_RAMP_CONTROL1 0x74e4
+#       define AFMT_RAMP_MIN_COUNT(x) (((x) & 0xffffff) << 0)
+#       define AFMT_AUDIO_TEST_CH_DISABLE(x) (((x) & 0xff) << 24)
+#define AFMT_RAMP_CONTROL2 0x74e8
+#       define AFMT_RAMP_INC_COUNT(x) (((x) & 0xffffff) << 0)
+#define AFMT_RAMP_CONTROL3 0x74ec
+#       define AFMT_RAMP_DEC_COUNT(x) (((x) & 0xffffff) << 0)
+#define AFMT_60958_2 0x74f0
+#       define AFMT_60958_CS_CHANNEL_NUMBER_2(x) (((x) & 0xf) << 0)
+#       define AFMT_60958_CS_CHANNEL_NUMBER_3(x) (((x) & 0xf) << 4)
+#       define AFMT_60958_CS_CHANNEL_NUMBER_4(x) (((x) & 0xf) << 8)
+#       define AFMT_60958_CS_CHANNEL_NUMBER_5(x) (((x) & 0xf) << 12)
+#       define AFMT_60958_CS_CHANNEL_NUMBER_6(x) (((x) & 0xf) << 16)
+#       define AFMT_60958_CS_CHANNEL_NUMBER_7(x) (((x) & 0xf) << 20)
+#define AFMT_STATUS 0x7600
+#       define AFMT_AUDIO_ENABLE (1 << 4)
+#       define AFMT_AZ_FORMAT_WTRIG (1 << 28)
+#       define AFMT_AZ_FORMAT_WTRIG_INT (1 << 29)
+#       define AFMT_AZ_AUDIO_ENABLE_CHG (1 << 30)
+#define AFMT_AUDIO_PACKET_CONTROL 0x7604
+#       define AFMT_AUDIO_SAMPLE_SEND (1 << 0)
+#       define AFMT_AUDIO_TEST_EN (1 << 12)
+#       define AFMT_AUDIO_CHANNEL_SWAP (1 << 24)
+#       define AFMT_60958_CS_UPDATE (1 << 26)
+#       define AFMT_AZ_AUDIO_ENABLE_CHG_MASK (1 << 27)
+#       define AFMT_AZ_FORMAT_WTRIG_MASK (1 << 28)
+#       define AFMT_AZ_FORMAT_WTRIG_ACK (1 << 29)
+#       define AFMT_AZ_AUDIO_ENABLE_CHG_ACK (1 << 30)
+#define AFMT_VBI_PACKET_CONTROL 0x7608
+#       define AFMT_GENERIC0_UPDATE (1 << 2)
+#define AFMT_INFOFRAME_CONTROL0 0x760c
+#       define AFMT_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hmdi regs */
+#       define AFMT_AUDIO_INFO_UPDATE (1 << 7)
+#       define AFMT_MPEG_INFO_UPDATE (1 << 10)
+#define AFMT_GENERIC0_7 0x7610
+/* second instance starts at 0x7800 */
+#define HDMI_OFFSET0 (0x7400 - 0x7400)
+#define HDMI_OFFSET1 (0x7800 - 0x7400)
+
 #define D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110
 #define D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6914
 #define D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6114
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 27bda986fc2b..549732e56ca9 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -2217,8 +2217,6 @@ bool si_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 	u32 srbm_status;
 	u32 grbm_status, grbm_status2;
 	u32 grbm_status_se0, grbm_status_se1;
-	struct r100_gpu_lockup *lockup = &rdev->config.si.lockup;
-	int r;
 
 	srbm_status = RREG32(SRBM_STATUS);
 	grbm_status = RREG32(GRBM_STATUS);
@@ -2226,20 +2224,12 @@ bool si_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 	grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
 	grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
 	if (!(grbm_status & GUI_ACTIVE)) {
-		r100_gpu_lockup_update(lockup, ring);
+		radeon_ring_lockup_update(ring);
 		return false;
 	}
 	/* force CP activities */
-	r = radeon_ring_lock(rdev, ring, 2);
-	if (!r) {
-		/* PACKET2 NOP */
-		radeon_ring_write(ring, 0x80000000);
-		radeon_ring_write(ring, 0x80000000);
-		radeon_ring_unlock_commit(rdev, ring);
-	}
-	/* XXX deal with CP0,1,2 */
-	ring->rptr = RREG32(ring->rptr_reg);
-	return r100_gpu_cp_is_lockup(rdev, lockup, ring);
+	radeon_ring_force_activity(rdev, ring);
+	return radeon_ring_test_lockup(rdev, ring);
 }
 
 static int si_gpu_soft_reset(struct radeon_device *rdev)
@@ -2275,6 +2265,7 @@ static int si_gpu_soft_reset(struct radeon_device *rdev)
 			 SOFT_RESET_GDS |
 			 SOFT_RESET_PA |
 			 SOFT_RESET_SC |
+			 SOFT_RESET_BCI |
 			 SOFT_RESET_SPI |
 			 SOFT_RESET_SX |
 			 SOFT_RESET_TC |
@@ -2985,7 +2976,8 @@ int si_rlc_init(struct radeon_device *rdev)
 	/* save restore block */
 	if (rdev->rlc.save_restore_obj == NULL) {
 		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
-				     RADEON_GEM_DOMAIN_VRAM, &rdev->rlc.save_restore_obj);
+				     RADEON_GEM_DOMAIN_VRAM, NULL,
+				     &rdev->rlc.save_restore_obj);
 		if (r) {
 			dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r);
 			return r;
@@ -3009,7 +3001,8 @@ int si_rlc_init(struct radeon_device *rdev)
 	/* clear state block */
 	if (rdev->rlc.clear_state_obj == NULL) {
 		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
-				     RADEON_GEM_DOMAIN_VRAM, &rdev->rlc.clear_state_obj);
+				     RADEON_GEM_DOMAIN_VRAM, NULL,
+				     &rdev->rlc.clear_state_obj);
 		if (r) {
 			dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r);
 			si_rlc_fini(rdev);
@@ -3216,6 +3209,8 @@ static int si_irq_init(struct radeon_device *rdev)
 	/* force the active interrupt state to all disabled */
 	si_disable_interrupt_state(rdev);
 
+	pci_set_master(rdev->pdev);
+
 	/* enable irqs */
 	si_enable_interrupts(rdev);
 
@@ -3994,10 +3989,6 @@ int si_init(struct radeon_device *rdev)
 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	int r;
 
-	/* This don't do much */
-	r = radeon_gem_init(rdev);
-	if (r)
-		return r;
 	/* Read BIOS */
 	if (!radeon_get_bios(rdev)) {
 		if (ASIC_IS_AVIVO(rdev))
@@ -4117,7 +4108,6 @@ void si_fini(struct radeon_device *rdev)
 	si_pcie_gart_fini(rdev);
 	r600_vram_scratch_fini(rdev);
 	radeon_gem_fini(rdev);
-	radeon_semaphore_driver_fini(rdev);
 	radeon_fence_driver_fini(rdev);
 	radeon_bo_fini(rdev);
 	radeon_atombios_fini(rdev);
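
The lockup rework visible in si_gpu_is_lockup() above replaces the per-ASIC r100_gpu_lockup bookkeeping with generic ring helpers. Their bodies are outside this section; the presumed contract, sketched for a hypothetical ASIC hook (gpu_is_idle() is an invented stand-in for the GRBM/SRBM status checks):

	/* radeon_ring_lockup_update(): ring made progress, record rptr and time.
	 * radeon_ring_force_activity(): emit a NOP so a stuck rptr becomes provable.
	 * radeon_ring_test_lockup(): true if rptr has not moved for too long. */
	static bool myasic_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
	{
		if (gpu_is_idle(rdev)) {
			radeon_ring_lockup_update(ring);
			return false;
		}
		radeon_ring_force_activity(rdev, ring);
		return radeon_ring_test_lockup(rdev, ring);
	}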
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
index cb1ee4e0050a..6eb507a5d130 100644
--- a/drivers/gpu/drm/savage/savage_bci.c
+++ b/drivers/gpu/drm/savage/savage_bci.c
@@ -735,7 +735,7 @@ static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init)
 		return -EINVAL;
 	}
 	drm_core_ioremap(dev->agp_buffer_map, dev);
-	if (!dev->agp_buffer_map) {
+	if (!dev->agp_buffer_map->handle) {
 		DRM_ERROR("failed to ioremap DMA buffer region!\n");
 		savage_do_cleanup_bci(dev);
 		return -ENOMEM;
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 1f5c67c579cf..36792bd4da77 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -343,6 +343,16 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
 		if (unlikely(bo->ttm == NULL))
 			ret = -ENOMEM;
 		break;
+	case ttm_bo_type_sg:
+		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
+						      page_flags | TTM_PAGE_FLAG_SG,
+						      glob->dummy_read_page);
+		if (unlikely(bo->ttm == NULL)) {
+			ret = -ENOMEM;
+			break;
+		}
+		bo->ttm->sg = bo->sg;
+		break;
 	default:
 		pr_err("Illegal buffer object type\n");
 		ret = -EINVAL;
@@ -1169,6 +1179,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 		bool interruptible,
 		struct file *persistent_swap_storage,
 		size_t acc_size,
+		struct sg_table *sg,
 		void (*destroy) (struct ttm_buffer_object *))
 {
 	int ret = 0;
@@ -1223,6 +1234,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 	bo->seq_valid = false;
 	bo->persistent_swap_storage = persistent_swap_storage;
 	bo->acc_size = acc_size;
+	bo->sg = sg;
 	atomic_inc(&bo->glob->bo_count);
 
 	ret = ttm_bo_check_placement(bo, placement);
@@ -1233,7 +1245,8 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 	 * For ttm_bo_type_device buffers, allocate
 	 * address space from the device.
 	 */
-	if (bo->type == ttm_bo_type_device) {
+	if (bo->type == ttm_bo_type_device ||
+	    bo->type == ttm_bo_type_sg) {
 		ret = ttm_bo_setup_vm(bo);
 		if (ret)
 			goto out_err;
@@ -1312,7 +1325,7 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
 
 	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
 			  buffer_start, interruptible,
-			  persistent_swap_storage, acc_size, NULL);
+			  persistent_swap_storage, acc_size, NULL, NULL);
 	if (likely(ret == 0))
 		*p_bo = bo;
 
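
ttm_bo_init() gains the same optional sg_table, inserted between acc_size and the destroy callback; ttm_bo_create() and vmwgfx (below) simply pass NULL. An importing driver would presumably call it along these lines (bo, placement and bo_destroy are hypothetical driver objects):

	ret = ttm_bo_init(bdev, &bo->tbo, size, ttm_bo_type_sg,
			  &placement, page_alignment, 0 /* buffer_start */,
			  interruptible, NULL /* persistent swap */,
			  acc_size, sg, bo_destroy);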
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 53673907a6a0..4d02c46a9420 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -38,7 +38,7 @@ static void udl_usb_disconnect(struct usb_interface *interface)
 	drm_unplug_dev(dev);
 }
 
-static struct vm_operations_struct udl_gem_vm_ops = {
+static const struct vm_operations_struct udl_gem_vm_ops = {
 	.fault = udl_gem_fault,
 	.open = drm_gem_vm_open,
 	.close = drm_gem_vm_close,
@@ -57,7 +57,7 @@ static const struct file_operations udl_driver_fops = {
 };
 
 static struct drm_driver driver = {
-	.driver_features = DRIVER_MODESET | DRIVER_GEM,
+	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
 	.load = udl_driver_load,
 	.unload = udl_driver_unload,
 
@@ -70,6 +70,10 @@ static struct drm_driver driver = {
 	.dumb_map_offset = udl_gem_mmap,
 	.dumb_destroy = udl_dumb_destroy,
 	.fops = &udl_driver_fops,
+
+	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+	.gem_prime_import = udl_gem_prime_import,
+
 	.name = DRIVER_NAME,
 	.desc = DRIVER_DESC,
 	.date = DRIVER_DATE,
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index 96820d03a303..fccd361f7b50 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -66,6 +66,7 @@ struct udl_gem_object {
 	struct drm_gem_object base;
 	struct page **pages;
 	void *vmapping;
+	struct sg_table *sg;
 };
 
 #define to_udl_bo(x) container_of(x, struct udl_gem_object, base)
@@ -118,6 +119,8 @@ int udl_gem_init_object(struct drm_gem_object *obj);
 void udl_gem_free_object(struct drm_gem_object *gem_obj);
 struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
 					    size_t size);
+struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
+					    struct dma_buf *dma_buf);
 
 int udl_gem_vmap(struct udl_gem_object *obj);
 void udl_gem_vunmap(struct udl_gem_object *obj);
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 4d9c3a5d8a45..a029ee39b0c5 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -593,11 +593,20 @@ udl_fb_user_fb_create(struct drm_device *dev,
 	struct drm_gem_object *obj;
 	struct udl_framebuffer *ufb;
 	int ret;
+	uint32_t size;
 
 	obj = drm_gem_object_lookup(dev, file, mode_cmd->handles[0]);
 	if (obj == NULL)
 		return ERR_PTR(-ENOENT);
 
+	size = mode_cmd->pitches[0] * mode_cmd->height;
+	size = ALIGN(size, PAGE_SIZE);
+
+	if (size > obj->size) {
+		DRM_ERROR("object size not sufficient for fb %d %zu %d %d\n", size, obj->size, mode_cmd->pitches[0], mode_cmd->height);
+		return ERR_PTR(-ENOMEM);
+	}
+
 	ufb = kzalloc(sizeof(*ufb), GFP_KERNEL);
 	if (ufb == NULL)
 		return ERR_PTR(-ENOMEM);
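
For scale, the new check above: a 1920x1080 XRGB8888 framebuffer has pitches[0] = 1920 * 4 = 7680 bytes, so size = 7680 * 1080 = 8294400 bytes; ALIGN() leaves that value unchanged (it is already a multiple of the 4096-byte page), and any GEM object smaller than that is now rejected instead of being scanned out past its end.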
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 92f19ef329b0..40efd32f7dce 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -9,6 +9,7 @@
9#include "drmP.h" 9#include "drmP.h"
10#include "udl_drv.h" 10#include "udl_drv.h"
11#include <linux/shmem_fs.h> 11#include <linux/shmem_fs.h>
12#include <linux/dma-buf.h>
12 13
13struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev, 14struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
14 size_t size) 15 size_t size)
@@ -161,6 +162,12 @@ static void udl_gem_put_pages(struct udl_gem_object *obj)
 	int page_count = obj->base.size / PAGE_SIZE;
 	int i;
 
+	if (obj->base.import_attach) {
+		drm_free_large(obj->pages);
+		obj->pages = NULL;
+		return;
+	}
+
 	for (i = 0; i < page_count; i++)
 		page_cache_release(obj->pages[i]);
 
@@ -195,6 +202,9 @@ void udl_gem_free_object(struct drm_gem_object *gem_obj)
 {
 	struct udl_gem_object *obj = to_udl_bo(gem_obj);
 
+	if (gem_obj->import_attach)
+		drm_prime_gem_destroy(gem_obj, obj->sg);
+
 	if (obj->vmapping)
 		udl_gem_vunmap(obj);
 
@@ -239,3 +249,68 @@ unlock:
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
 }
+
+static int udl_prime_create(struct drm_device *dev,
+			    size_t size,
+			    struct sg_table *sg,
+			    struct udl_gem_object **obj_p)
+{
+	struct udl_gem_object *obj;
+	int npages;
+	int i;
+	struct scatterlist *iter;
+
+	npages = size / PAGE_SIZE;
+
+	*obj_p = NULL;
+	obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE);
+	if (!obj)
+		return -ENOMEM;
+
+	obj->sg = sg;
+	obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
+	if (obj->pages == NULL) {
+		DRM_ERROR("obj pages is NULL %d\n", npages);
+		return -ENOMEM;
+	}
+
+	drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);
+
+	*obj_p = obj;
+	return 0;
+}
+
+struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
+				struct dma_buf *dma_buf)
+{
+	struct dma_buf_attachment *attach;
+	struct sg_table *sg;
+	struct udl_gem_object *uobj;
+	int ret;
+
+	/* need to attach */
+	attach = dma_buf_attach(dma_buf, dev->dev);
+	if (IS_ERR(attach))
+		return ERR_PTR(PTR_ERR(attach));
+
+	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+	if (IS_ERR(sg)) {
+		ret = PTR_ERR(sg);
+		goto fail_detach;
+	}
+
+	ret = udl_prime_create(dev, dma_buf->size, sg, &uobj);
+	if (ret) {
+		goto fail_unmap;
+	}
+
+	uobj->base.import_attach = attach;
+
+	return &uobj->base;
+
+fail_unmap:
+	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
+fail_detach:
+	dma_buf_detach(dma_buf, attach);
+	return ERR_PTR(ret);
+}
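
With DRIVER_PRIME, prime_fd_to_handle and the import path above in place, a buffer rendered on another GPU can be handed to udl by file descriptor. A userspace sketch using libdrm (the two fds are hypothetical caller state; error handling elided; the kernel side lands in drm_gem_prime_fd_to_handle() and then udl_gem_prime_import()):

	#include <stdint.h>
	#include <xf86drm.h>

	uint32_t handle;
	/* udl_fd: open card node of the udl device;
	 * prime_fd: dma-buf fd exported by the rendering GPU's driver */
	int r = drmPrimeFDToHandle(udl_fd, prime_fd, &handle);
	/* 'handle' is now a udl GEM handle usable for fb creation */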
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index b3ecb3d12a1d..0d7816789da1 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -395,7 +395,7 @@ int udl_modeset_init(struct drm_device *dev)
 	dev->mode_config.prefer_shadow = 0;
 	dev->mode_config.preferred_depth = 24;
 
-	dev->mode_config.funcs = (void *)&udl_mode_funcs;
+	dev->mode_config.funcs = &udl_mode_funcs;
 
 	drm_mode_create_dirty_info_property(dev);
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 2286d47e5022..6b0078ffa763 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1178,7 +1178,7 @@ err_out:
 	return &vfb->base;
 }
 
-static struct drm_mode_config_funcs vmw_kms_funcs = {
+static const struct drm_mode_config_funcs vmw_kms_funcs = {
 	.fb_create = vmw_kms_fb_create,
 };
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index a37abb581cbb..22bf9a21ec71 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1567,7 +1567,7 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
 	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
 			  ttm_bo_type_device, placement,
 			  0, 0, interruptible,
-			  NULL, acc_size, bo_free);
+			  NULL, acc_size, NULL, bo_free);
 	return ret;
 }
 
diff --git a/drivers/gpu/vga/Kconfig b/drivers/gpu/vga/Kconfig
index 96c83a9a76bb..f34838839b08 100644
--- a/drivers/gpu/vga/Kconfig
+++ b/drivers/gpu/vga/Kconfig
@@ -21,6 +21,7 @@ config VGA_SWITCHEROO
21 bool "Laptop Hybrid Graphics - GPU switching support" 21 bool "Laptop Hybrid Graphics - GPU switching support"
22 depends on X86 22 depends on X86
23 depends on ACPI 23 depends on ACPI
24 select VGA_ARB
24 help 25 help
25 Many laptops released in 2008/9/10 have two GPUs with a multiplexer 26 Many laptops released in 2008/9/10 have two GPUs with a multiplexer
26 to switch between them. This adds support for dynamic switching when 27 to switch between them. This adds support for dynamic switching when
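
The vga_switcheroo rework in the next file replaces the three registration callbacks with a single constant ops table and adds audio clients for HDMI codecs. A driver-side sketch of the new registration (the mydrv_* names are hypothetical stubs, not from this patch):

	static const struct vga_switcheroo_client_ops mydrv_switcheroo_ops = {
		.set_gpu_state = mydrv_set_gpu_state, /* power GPU up/down */
		.reprobe = mydrv_reprobe,             /* rescan outputs after a switch */
		.can_switch = mydrv_can_switch,       /* false while work is in flight */
	};

	/* was: vga_switcheroo_register_client(pdev, set_state, reprobe, can_switch) */
	err = vga_switcheroo_register_client(pdev, &mydrv_switcheroo_ops);

	/* the HDMI audio function on the same GPU registers alongside it */
	err = vga_switcheroo_register_audio_client(audio_pdev, &mydrv_audio_ops,
						   VGA_SWITCHEROO_DIS, false);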
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index 58434e804d91..38f9534ac513 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -28,15 +28,16 @@
 #include <linux/pci.h>
 #include <linux/vga_switcheroo.h>
 
+#include <linux/vgaarb.h>
+
 struct vga_switcheroo_client {
 	struct pci_dev *pdev;
 	struct fb_info *fb_info;
 	int pwr_state;
-	void (*set_gpu_state)(struct pci_dev *pdev, enum vga_switcheroo_state);
-	void (*reprobe)(struct pci_dev *pdev);
-	bool (*can_switch)(struct pci_dev *pdev);
+	const struct vga_switcheroo_client_ops *ops;
 	int id;
 	bool active;
+	struct list_head list;
 };
 
 static DEFINE_MUTEX(vgasr_mutex);
@@ -51,16 +52,23 @@ struct vgasr_priv {
 	struct dentry *switch_file;
 
 	int registered_clients;
-	struct vga_switcheroo_client clients[VGA_SWITCHEROO_MAX_CLIENTS];
+	struct list_head clients;
 
 	struct vga_switcheroo_handler *handler;
 };
 
+#define ID_BIT_AUDIO 0x100
+#define client_is_audio(c) ((c)->id & ID_BIT_AUDIO)
+#define client_is_vga(c) ((c)->id == -1 || !client_is_audio(c))
+#define client_id(c) ((c)->id & ~ID_BIT_AUDIO)
+
 static int vga_switcheroo_debugfs_init(struct vgasr_priv *priv);
 static void vga_switcheroo_debugfs_fini(struct vgasr_priv *priv);
 
 /* only one switcheroo per system */
-static struct vgasr_priv vgasr_priv;
+static struct vgasr_priv vgasr_priv = {
+	.clients = LIST_HEAD_INIT(vgasr_priv.clients),
+};
 
 int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler)
 {
@@ -86,72 +94,119 @@ EXPORT_SYMBOL(vga_switcheroo_unregister_handler);
 
 static void vga_switcheroo_enable(void)
 {
-	int i;
 	int ret;
+	struct vga_switcheroo_client *client;
+
 	/* call the handler to init */
 	vgasr_priv.handler->init();
 
-	for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
-		ret = vgasr_priv.handler->get_client_id(vgasr_priv.clients[i].pdev);
+	list_for_each_entry(client, &vgasr_priv.clients, list) {
+		if (client->id != -1)
+			continue;
+		ret = vgasr_priv.handler->get_client_id(client->pdev);
 		if (ret < 0)
 			return;
 
-		vgasr_priv.clients[i].id = ret;
+		client->id = ret;
 	}
 	vga_switcheroo_debugfs_init(&vgasr_priv);
 	vgasr_priv.active = true;
 }
 
-int vga_switcheroo_register_client(struct pci_dev *pdev,
-	void (*set_gpu_state)(struct pci_dev *pdev, enum vga_switcheroo_state),
-	void (*reprobe)(struct pci_dev *pdev),
-	bool (*can_switch)(struct pci_dev *pdev))
+static int register_client(struct pci_dev *pdev,
+			   const struct vga_switcheroo_client_ops *ops,
+			   int id, bool active)
 {
-	int index;
+	struct vga_switcheroo_client *client;
+
+	client = kzalloc(sizeof(*client), GFP_KERNEL);
+	if (!client)
+		return -ENOMEM;
+
+	client->pwr_state = VGA_SWITCHEROO_ON;
+	client->pdev = pdev;
+	client->ops = ops;
+	client->id = id;
+	client->active = active;
 
 	mutex_lock(&vgasr_mutex);
-	/* don't do IGD vs DIS here */
-	if (vgasr_priv.registered_clients & 1)
-		index = 1;
-	else
-		index = 0;
-
-	vgasr_priv.clients[index].pwr_state = VGA_SWITCHEROO_ON;
-	vgasr_priv.clients[index].pdev = pdev;
-	vgasr_priv.clients[index].set_gpu_state = set_gpu_state;
-	vgasr_priv.clients[index].reprobe = reprobe;
-	vgasr_priv.clients[index].can_switch = can_switch;
-	vgasr_priv.clients[index].id = -1;
-	if (pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW)
-		vgasr_priv.clients[index].active = true;
-
-	vgasr_priv.registered_clients |= (1 << index);
+	list_add_tail(&client->list, &vgasr_priv.clients);
+	if (client_is_vga(client))
+		vgasr_priv.registered_clients++;
 
 	/* if we get two clients + handler */
-	if (vgasr_priv.registered_clients == 0x3 && vgasr_priv.handler) {
+	if (!vgasr_priv.active &&
+	    vgasr_priv.registered_clients == 2 && vgasr_priv.handler) {
 		printk(KERN_INFO "vga_switcheroo: enabled\n");
 		vga_switcheroo_enable();
 	}
 	mutex_unlock(&vgasr_mutex);
 	return 0;
 }
+
+int vga_switcheroo_register_client(struct pci_dev *pdev,
+				   const struct vga_switcheroo_client_ops *ops)
+{
+	return register_client(pdev, ops, -1,
+			       pdev == vga_default_device());
+}
 EXPORT_SYMBOL(vga_switcheroo_register_client);
 
+int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
+					 const struct vga_switcheroo_client_ops *ops,
+					 int id, bool active)
+{
+	return register_client(pdev, ops, id | ID_BIT_AUDIO, active);
+}
+EXPORT_SYMBOL(vga_switcheroo_register_audio_client);
+
+static struct vga_switcheroo_client *
+find_client_from_pci(struct list_head *head, struct pci_dev *pdev)
+{
+	struct vga_switcheroo_client *client;
+	list_for_each_entry(client, head, list)
+		if (client->pdev == pdev)
+			return client;
+	return NULL;
+}
+
+static struct vga_switcheroo_client *
+find_client_from_id(struct list_head *head, int client_id)
+{
+	struct vga_switcheroo_client *client;
+	list_for_each_entry(client, head, list)
+		if (client->id == client_id)
+			return client;
+	return NULL;
+}
+
+static struct vga_switcheroo_client *
+find_active_client(struct list_head *head)
+{
+	struct vga_switcheroo_client *client;
+	list_for_each_entry(client, head, list)
+		if (client->active && client_is_vga(client))
+			return client;
+	return NULL;
+}
+
 void vga_switcheroo_unregister_client(struct pci_dev *pdev)
 {
-	int i;
+	struct vga_switcheroo_client *client;
 
 	mutex_lock(&vgasr_mutex);
-	for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
-		if (vgasr_priv.clients[i].pdev == pdev) {
-			vgasr_priv.registered_clients &= ~(1 << i);
-			break;
-		}
+	client = find_client_from_pci(&vgasr_priv.clients, pdev);
+	if (client) {
+		if (client_is_vga(client))
+			vgasr_priv.registered_clients--;
+		list_del(&client->list);
+		kfree(client);
+	}
+	if (vgasr_priv.active && vgasr_priv.registered_clients < 2) {
+		printk(KERN_INFO "vga_switcheroo: disabled\n");
+		vga_switcheroo_debugfs_fini(&vgasr_priv);
+		vgasr_priv.active = false;
 	}
-
-	printk(KERN_INFO "vga_switcheroo: disabled\n");
-	vga_switcheroo_debugfs_fini(&vgasr_priv);
-	vgasr_priv.active = false;
 	mutex_unlock(&vgasr_mutex);
 }
 EXPORT_SYMBOL(vga_switcheroo_unregister_client);
@@ -159,29 +214,29 @@ EXPORT_SYMBOL(vga_switcheroo_unregister_client);
 void vga_switcheroo_client_fb_set(struct pci_dev *pdev,
 				  struct fb_info *info)
 {
-	int i;
+	struct vga_switcheroo_client *client;
 
 	mutex_lock(&vgasr_mutex);
-	for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
-		if (vgasr_priv.clients[i].pdev == pdev) {
-			vgasr_priv.clients[i].fb_info = info;
-			break;
-		}
-	}
+	client = find_client_from_pci(&vgasr_priv.clients, pdev);
+	if (client)
+		client->fb_info = info;
 	mutex_unlock(&vgasr_mutex);
 }
 EXPORT_SYMBOL(vga_switcheroo_client_fb_set);
 
 static int vga_switcheroo_show(struct seq_file *m, void *v)
 {
-	int i;
+	struct vga_switcheroo_client *client;
+	int i = 0;
 	mutex_lock(&vgasr_mutex);
-	for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
-		seq_printf(m, "%d:%s:%c:%s:%s\n", i,
-			   vgasr_priv.clients[i].id == VGA_SWITCHEROO_DIS ? "DIS" : "IGD",
-			   vgasr_priv.clients[i].active ? '+' : ' ',
-			   vgasr_priv.clients[i].pwr_state ? "Pwr" : "Off",
-			   pci_name(vgasr_priv.clients[i].pdev));
+	list_for_each_entry(client, &vgasr_priv.clients, list) {
+		seq_printf(m, "%d:%s%s:%c:%s:%s\n", i,
+			   client_id(client) == VGA_SWITCHEROO_DIS ? "DIS" : "IGD",
+			   client_is_vga(client) ? "" : "-Audio",
+			   client->active ? '+' : ' ',
+			   client->pwr_state ? "Pwr" : "Off",
+			   pci_name(client->pdev));
+		i++;
 	}
 	mutex_unlock(&vgasr_mutex);
 	return 0;
@@ -197,7 +252,7 @@ static int vga_switchon(struct vga_switcheroo_client *client)
 	if (vgasr_priv.handler->power_state)
 		vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_ON);
 	/* call the driver callback to turn on device */
-	client->set_gpu_state(client->pdev, VGA_SWITCHEROO_ON);
+	client->ops->set_gpu_state(client->pdev, VGA_SWITCHEROO_ON);
 	client->pwr_state = VGA_SWITCHEROO_ON;
 	return 0;
 }
@@ -205,34 +260,39 @@ static int vga_switchon(struct vga_switcheroo_client *client)
 static int vga_switchoff(struct vga_switcheroo_client *client)
 {
 	/* call the driver callback to turn off device */
-	client->set_gpu_state(client->pdev, VGA_SWITCHEROO_OFF);
+	client->ops->set_gpu_state(client->pdev, VGA_SWITCHEROO_OFF);
 	if (vgasr_priv.handler->power_state)
 		vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_OFF);
 	client->pwr_state = VGA_SWITCHEROO_OFF;
 	return 0;
 }
 
+static void set_audio_state(int id, int state)
+{
+	struct vga_switcheroo_client *client;
+
+	client = find_client_from_id(&vgasr_priv.clients, id | ID_BIT_AUDIO);
+	if (client && client->pwr_state != state) {
+		client->ops->set_gpu_state(client->pdev, state);
+		client->pwr_state = state;
+	}
+}
+
 /* stage one happens before delay */
 static int vga_switchto_stage1(struct vga_switcheroo_client *new_client)
 {
-	int i;
-	struct vga_switcheroo_client *active = NULL;
+	struct vga_switcheroo_client *active;
 
-	for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
-		if (vgasr_priv.clients[i].active == true) {
-			active = &vgasr_priv.clients[i];
-			break;
-		}
-	}
+	active = find_active_client(&vgasr_priv.clients);
 	if (!active)
 		return 0;
 
 	if (new_client->pwr_state == VGA_SWITCHEROO_OFF)
 		vga_switchon(new_client);
 
-	/* swap shadow resource to denote boot VGA device has changed so X starts on new device */
-	active->pdev->resource[PCI_ROM_RESOURCE].flags &= ~IORESOURCE_ROM_SHADOW;
-	new_client->pdev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_SHADOW;
+	vga_set_default_device(new_client->pdev);
+	set_audio_state(new_client->id, VGA_SWITCHEROO_ON);
+
 	return 0;
 }
 
@@ -240,15 +300,9 @@ static int vga_switchto_stage1(struct vga_switcheroo_client *new_client)
240static int vga_switchto_stage2(struct vga_switcheroo_client *new_client) 300static int vga_switchto_stage2(struct vga_switcheroo_client *new_client)
241{ 301{
242 int ret; 302 int ret;
243 int i; 303 struct vga_switcheroo_client *active;
244 struct vga_switcheroo_client *active = NULL;
245 304
246 for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) { 305 active = find_active_client(&vgasr_priv.clients);
247 if (vgasr_priv.clients[i].active == true) {
248 active = &vgasr_priv.clients[i];
249 break;
250 }
251 }
252 if (!active) 306 if (!active)
253 return 0; 307 return 0;
254 308
@@ -264,8 +318,10 @@ static int vga_switchto_stage2(struct vga_switcheroo_client *new_client)
264 if (ret) 318 if (ret)
265 return ret; 319 return ret;
266 320
267 if (new_client->reprobe) 321 if (new_client->ops->reprobe)
268 new_client->reprobe(new_client->pdev); 322 new_client->ops->reprobe(new_client->pdev);
323
324 set_audio_state(active->id, VGA_SWITCHEROO_OFF);
269 325
270 if (active->pwr_state == VGA_SWITCHEROO_ON) 326 if (active->pwr_state == VGA_SWITCHEROO_ON)
271 vga_switchoff(active); 327 vga_switchoff(active);
@@ -274,13 +330,26 @@ static int vga_switchto_stage2(struct vga_switcheroo_client *new_client)
274 return 0; 330 return 0;
275} 331}
276 332
333static bool check_can_switch(void)
334{
335 struct vga_switcheroo_client *client;
336
337 list_for_each_entry(client, &vgasr_priv.clients, list) {
338 if (!client->ops->can_switch(client->pdev)) {
339 printk(KERN_ERR "vga_switcheroo: client %x refused switch\n", client->id);
340 return false;
341 }
342 }
343 return true;
344}
345
277static ssize_t 346static ssize_t
278vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf, 347vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
279 size_t cnt, loff_t *ppos) 348 size_t cnt, loff_t *ppos)
280{ 349{
281 char usercmd[64]; 350 char usercmd[64];
282 const char *pdev_name; 351 const char *pdev_name;
283 int i, ret; 352 int ret;
284 bool delay = false, can_switch; 353 bool delay = false, can_switch;
285 bool just_mux = false; 354 bool just_mux = false;
286 int client_id = -1; 355 int client_id = -1;
@@ -301,21 +370,21 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
301 370
302 /* pwr off the device not in use */ 371 /* pwr off the device not in use */
303 if (strncmp(usercmd, "OFF", 3) == 0) { 372 if (strncmp(usercmd, "OFF", 3) == 0) {
304 for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) { 373 list_for_each_entry(client, &vgasr_priv.clients, list) {
305 if (vgasr_priv.clients[i].active) 374 if (client->active)
306 continue; 375 continue;
307 if (vgasr_priv.clients[i].pwr_state == VGA_SWITCHEROO_ON) 376 if (client->pwr_state == VGA_SWITCHEROO_ON)
308 vga_switchoff(&vgasr_priv.clients[i]); 377 vga_switchoff(client);
309 } 378 }
310 goto out; 379 goto out;
311 } 380 }
312 /* pwr on the device not in use */ 381 /* pwr on the device not in use */
313 if (strncmp(usercmd, "ON", 2) == 0) { 382 if (strncmp(usercmd, "ON", 2) == 0) {
314 for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) { 383 list_for_each_entry(client, &vgasr_priv.clients, list) {
315 if (vgasr_priv.clients[i].active) 384 if (client->active)
316 continue; 385 continue;
317 if (vgasr_priv.clients[i].pwr_state == VGA_SWITCHEROO_OFF) 386 if (client->pwr_state == VGA_SWITCHEROO_OFF)
318 vga_switchon(&vgasr_priv.clients[i]); 387 vga_switchon(client);
319 } 388 }
320 goto out; 389 goto out;
321 } 390 }
@@ -348,13 +417,9 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
348 417
349 if (client_id == -1) 418 if (client_id == -1)
350 goto out; 419 goto out;
351 420 client = find_client_from_id(&vgasr_priv.clients, client_id);
352 for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) { 421 if (!client)
353 if (vgasr_priv.clients[i].id == client_id) { 422 goto out;
354 client = &vgasr_priv.clients[i];
355 break;
356 }
357 }
358 423
359 vgasr_priv.delayed_switch_active = false; 424 vgasr_priv.delayed_switch_active = false;
360 425
@@ -363,23 +428,16 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
363 goto out; 428 goto out;
364 } 429 }
365 430
366 if (client->active == true) 431 if (client->active)
367 goto out; 432 goto out;
368 433
369 /* okay we want a switch - test if devices are willing to switch */ 434 /* okay we want a switch - test if devices are willing to switch */
370 can_switch = true; 435 can_switch = check_can_switch();
371 for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
372 can_switch = vgasr_priv.clients[i].can_switch(vgasr_priv.clients[i].pdev);
373 if (can_switch == false) {
374 printk(KERN_ERR "vga_switcheroo: client %d refused switch\n", i);
375 break;
376 }
377 }
378 436
379 if (can_switch == false && delay == false) 437 if (can_switch == false && delay == false)
380 goto out; 438 goto out;
381 439
382 if (can_switch == true) { 440 if (can_switch) {
383 pdev_name = pci_name(client->pdev); 441 pdev_name = pci_name(client->pdev);
384 ret = vga_switchto_stage1(client); 442 ret = vga_switchto_stage1(client);
385 if (ret) 443 if (ret)
@@ -451,10 +509,8 @@ fail:
451 509
452int vga_switcheroo_process_delayed_switch(void) 510int vga_switcheroo_process_delayed_switch(void)
453{ 511{
454 struct vga_switcheroo_client *client = NULL; 512 struct vga_switcheroo_client *client;
455 const char *pdev_name; 513 const char *pdev_name;
456 bool can_switch = true;
457 int i;
458 int ret; 514 int ret;
459 int err = -EINVAL; 515 int err = -EINVAL;
460 516
@@ -464,17 +520,9 @@ int vga_switcheroo_process_delayed_switch(void)
464 520
465 printk(KERN_INFO "vga_switcheroo: processing delayed switch to %d\n", vgasr_priv.delayed_client_id); 521 printk(KERN_INFO "vga_switcheroo: processing delayed switch to %d\n", vgasr_priv.delayed_client_id);
466 522
467 for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) { 523 client = find_client_from_id(&vgasr_priv.clients,
468 if (vgasr_priv.clients[i].id == vgasr_priv.delayed_client_id) 524 vgasr_priv.delayed_client_id);
469 client = &vgasr_priv.clients[i]; 525 if (!client || !check_can_switch())
470 can_switch = vgasr_priv.clients[i].can_switch(vgasr_priv.clients[i].pdev);
471 if (can_switch == false) {
472 printk(KERN_ERR "vga_switcheroo: client %d refused switch\n", i);
473 break;
474 }
475 }
476
477 if (can_switch == false || client == NULL)
478 goto err; 526 goto err;
479 527
480 pdev_name = pci_name(client->pdev); 528 pdev_name = pci_name(client->pdev);
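The vga_switcheroo.c hunks above replace the fixed VGA_SWITCHEROO_MAX_CLIENTS array with a client list driven through an ops table (client->ops->set_gpu_state/reprobe/can_switch) and add paired HDMI-audio clients, looked up via find_client_from_id(..., id | ID_BIT_AUDIO). For orientation only, a minimal sketch of the driver-side half of this interface; the callback layout is inferred from the calls visible above, but the mydrv_* names are hypothetical and the ops-struct name and registration path are assumptions, not shown in this hunk:

/* Hypothetical client driver, sketching the ops consumed above. */
static void mydrv_set_gpu_state(struct pci_dev *pdev,
				enum vga_switcheroo_state state)
{
	/* power the GPU up (VGA_SWITCHEROO_ON) or down (VGA_SWITCHEROO_OFF) */
}

static bool mydrv_can_switch(struct pci_dev *pdev)
{
	return true;	/* return false here while the device is busy */
}

static const struct vga_switcheroo_client_ops mydrv_switcheroo_ops = {
	.set_gpu_state	= mydrv_set_gpu_state,
	.reprobe	= NULL,	/* optional: re-probe outputs after a switch */
	.can_switch	= mydrv_can_switch,
};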
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index 111d956d8e7d..3df8fc0ec01a 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -136,6 +136,13 @@ struct pci_dev *vga_default_device(void)
 {
 	return vga_default;
 }
+
+EXPORT_SYMBOL_GPL(vga_default_device);
+
+void vga_set_default_device(struct pci_dev *pdev)
+{
+	vga_default = pdev;
+}
 #endif
 
 static inline void vga_irq_set_state(struct vga_device *vgadev, bool state)
@@ -605,10 +612,12 @@ static bool vga_arbiter_del_pci_device(struct pci_dev *pdev)
 		goto bail;
 	}
 
+#ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE
 	if (vga_default == pdev) {
 		pci_dev_put(vga_default);
 		vga_default = NULL;
 	}
+#endif
 
 	if (vgadev->decodes & (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM))
 		vga_decode_count--;
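Together with the vga_switcheroo change above, vgaarb now holds a single source of truth for the boot VGA device: vga_default_device() reads it, and the new vga_set_default_device() lets switcheroo retarget it instead of juggling IORESOURCE_ROM_SHADOW flags. A minimal sketch of a consumer; is_boot_vga() is a hypothetical helper, not part of this series:

#include <linux/pci.h>
#include <linux/vgaarb.h>

/* Hypothetical helper mirroring the boot_vga_show() change below:
 * report whether pdev is the current default (boot) VGA device. */
static bool is_boot_vga(struct pci_dev *pdev)
{
	return vga_default_device() == pdev;
}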
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index a55e248618cd..86c63fe45d11 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -27,6 +27,7 @@
27#include <linux/security.h> 27#include <linux/security.h>
28#include <linux/pci-aspm.h> 28#include <linux/pci-aspm.h>
29#include <linux/slab.h> 29#include <linux/slab.h>
30#include <linux/vgaarb.h>
30#include "pci.h" 31#include "pci.h"
31 32
32static int sysfs_initialized; /* = 0 */ 33static int sysfs_initialized; /* = 0 */
@@ -417,6 +418,10 @@ static ssize_t
 boot_vga_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
+	struct pci_dev *vga_dev = vga_default_device();
+
+	if (vga_dev)
+		return sprintf(buf, "%u\n", (pdev == vga_dev));
 
 	return sprintf(buf, "%u\n",
 		!!(pdev->resource[PCI_ROM_RESOURCE].flags &
diff --git a/drivers/staging/omapdrm/omap_crtc.c b/drivers/staging/omapdrm/omap_crtc.c
index 490a7f15604b..8b864afb40b6 100644
--- a/drivers/staging/omapdrm/omap_crtc.c
+++ b/drivers/staging/omapdrm/omap_crtc.c
@@ -36,12 +36,6 @@ struct omap_crtc {
 	struct drm_framebuffer *old_fb;
 };
 
-static void omap_crtc_gamma_set(struct drm_crtc *crtc,
-		u16 *red, u16 *green, u16 *blue, uint32_t start, uint32_t size)
-{
-	/* not supported.. at least not yet */
-}
-
 static void omap_crtc_destroy(struct drm_crtc *crtc)
 {
 	struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
@@ -198,7 +192,6 @@ static int omap_crtc_page_flip_locked(struct drm_crtc *crtc,
 }
 
 static const struct drm_crtc_funcs omap_crtc_funcs = {
-	.gamma_set = omap_crtc_gamma_set,
 	.set_config = drm_crtc_helper_set_config,
 	.destroy = omap_crtc_destroy,
 	.page_flip = omap_crtc_page_flip_locked,
diff --git a/drivers/staging/omapdrm/omap_drv.c b/drivers/staging/omapdrm/omap_drv.c
index 0d2acca376ca..4beab9447ceb 100644
--- a/drivers/staging/omapdrm/omap_drv.c
+++ b/drivers/staging/omapdrm/omap_drv.c
@@ -58,7 +58,7 @@ static void omap_fb_output_poll_changed(struct drm_device *dev)
 	}
 }
 
-static struct drm_mode_config_funcs omap_mode_config_funcs = {
+static const struct drm_mode_config_funcs omap_mode_config_funcs = {
 	.fb_create = omap_framebuffer_create,
 	.output_poll_changed = omap_fb_output_poll_changed,
 };
@@ -726,7 +726,7 @@ static void dev_irq_uninstall(struct drm_device *dev)
 	DBG("irq_uninstall: dev=%p", dev);
 }
 
-static struct vm_operations_struct omap_gem_vm_ops = {
+static const struct vm_operations_struct omap_gem_vm_ops = {
 	.fault = omap_gem_fault,
 	.open = drm_gem_vm_open,
 	.close = drm_gem_vm_close,
diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c
index 784139aed079..b4a632ada401 100644
--- a/drivers/video/efifb.c
+++ b/drivers/video/efifb.c
@@ -18,6 +18,8 @@
 
 static bool request_mem_succeeded = false;
 
+static struct pci_dev *default_vga;
+
 static struct fb_var_screeninfo efifb_defined __devinitdata = {
 	.activate		= FB_ACTIVATE_NOW,
 	.height			= -1,
@@ -298,35 +300,72 @@ static struct fb_ops efifb_ops = {
 	.fb_imageblit	= cfb_imageblit,
 };
 
+struct pci_dev *vga_default_device(void)
+{
+	return default_vga;
+}
+
+EXPORT_SYMBOL_GPL(vga_default_device);
+
+void vga_set_default_device(struct pci_dev *pdev)
+{
+	default_vga = pdev;
+}
+
 static int __init efifb_setup(char *options)
 {
 	char *this_opt;
 	int i;
+	struct pci_dev *dev = NULL;
+
+	if (options && *options) {
+		while ((this_opt = strsep(&options, ",")) != NULL) {
+			if (!*this_opt) continue;
+
+			for (i = 0; i < M_UNKNOWN; i++) {
+				if (!strcmp(this_opt, dmi_list[i].optname) &&
+				    dmi_list[i].base != 0) {
+					screen_info.lfb_base = dmi_list[i].base;
+					screen_info.lfb_linelength = dmi_list[i].stride;
+					screen_info.lfb_width = dmi_list[i].width;
+					screen_info.lfb_height = dmi_list[i].height;
+				}
+			}
+			if (!strncmp(this_opt, "base:", 5))
+				screen_info.lfb_base = simple_strtoul(this_opt+5, NULL, 0);
+			else if (!strncmp(this_opt, "stride:", 7))
+				screen_info.lfb_linelength = simple_strtoul(this_opt+7, NULL, 0) * 4;
+			else if (!strncmp(this_opt, "height:", 7))
+				screen_info.lfb_height = simple_strtoul(this_opt+7, NULL, 0);
+			else if (!strncmp(this_opt, "width:", 6))
+				screen_info.lfb_width = simple_strtoul(this_opt+6, NULL, 0);
+		}
+	}
 
-	if (!options || !*options)
-		return 0;
-
-	while ((this_opt = strsep(&options, ",")) != NULL) {
-		if (!*this_opt) continue;
-
-		for (i = 0; i < M_UNKNOWN; i++) {
-			if (!strcmp(this_opt, dmi_list[i].optname) &&
-			    dmi_list[i].base != 0) {
-				screen_info.lfb_base = dmi_list[i].base;
-				screen_info.lfb_linelength = dmi_list[i].stride;
-				screen_info.lfb_width = dmi_list[i].width;
-				screen_info.lfb_height = dmi_list[i].height;
-			}
+	for_each_pci_dev(dev) {
+		int i;
+
+		if ((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
+			continue;
+
+		for (i=0; i < DEVICE_COUNT_RESOURCE; i++) {
+			resource_size_t start, end;
+
+			if (!(pci_resource_flags(dev, i) & IORESOURCE_MEM))
+				continue;
+
+			start = pci_resource_start(dev, i);
+			end = pci_resource_end(dev, i);
+
+			if (!start || !end)
+				continue;
+
+			if (screen_info.lfb_base >= start &&
+			    (screen_info.lfb_base + screen_info.lfb_size) < end)
+				default_vga = dev;
 		}
-		if (!strncmp(this_opt, "base:", 5))
-			screen_info.lfb_base = simple_strtoul(this_opt+5, NULL, 0);
-		else if (!strncmp(this_opt, "stride:", 7))
-			screen_info.lfb_linelength = simple_strtoul(this_opt+7, NULL, 0) * 4;
-		else if (!strncmp(this_opt, "height:", 7))
-			screen_info.lfb_height = simple_strtoul(this_opt+7, NULL, 0);
-		else if (!strncmp(this_opt, "width:", 6))
-			screen_info.lfb_width = simple_strtoul(this_opt+6, NULL, 0);
 	}
+
 	return 0;
 }
 
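On EFI systems the firmware framebuffer, rather than a shadowed VBIOS ROM, identifies the boot display, so efifb supplies its own vga_default_device() here: it scans every VGA-class PCI device for a memory BAR that contains the EFI framebuffer. A condensed restatement of that containment test, with bar_contains_lfb() as a hypothetical helper name:

#include <linux/pci.h>
#include <linux/screen_info.h>

/* Hypothetical helper: same test as the efifb_setup() loop body above --
 * true if BAR 'bar' of 'dev' is a memory resource containing the EFI
 * framebuffer described by the global screen_info. */
static bool bar_contains_lfb(struct pci_dev *dev, int bar)
{
	resource_size_t start = pci_resource_start(dev, bar);
	resource_size_t end = pci_resource_end(dev, bar);

	if (!(pci_resource_flags(dev, bar) & IORESOURCE_MEM))
		return false;
	if (!start || !end)
		return false;
	return screen_info.lfb_base >= start &&
	       screen_info.lfb_base + screen_info.lfb_size < end;
}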