-rw-r--r--  drivers/char/agp/generic.c | 8
-rw-r--r--  drivers/gpu/drm/Kconfig | 3
-rw-r--r--  drivers/gpu/drm/Makefile | 3
-rw-r--r--  drivers/gpu/drm/drm_context.c | 5
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 608
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 50
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 14
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 103
-rw-r--r--  drivers/gpu/drm/drm_edid_modes.h | 284
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 7
-rw-r--r--  drivers/gpu/drm/drm_fops.c | 2
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c | 15
-rw-r--r--  drivers/gpu/drm/drm_lock.c | 3
-rw-r--r--  drivers/gpu/drm/drm_sman.c | 351
-rw-r--r--  drivers/gpu/drm/exynos/Kconfig | 7
-rw-r--r--  drivers/gpu/drm/exynos/Makefile | 5
-rw-r--r--  drivers/gpu/drm/exynos/exynos_ddc.c | 58
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_buf.c | 5
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_buf.h | 3
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_crtc.c | 89
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 35
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.h | 29
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_encoder.c | 135
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_encoder.h | 5
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fb.c | 166
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fb.h | 24
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fbdev.c | 84
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c | 292
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.c | 227
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.h | 53
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_hdmi.c | 439
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_hdmi.h | 73
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_plane.c | 163
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_plane.h | 14
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c | 1176
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.h | 87
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmiphy.c | 58
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.c | 1070
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.h | 92
-rw-r--r--  drivers/gpu/drm/exynos/regs-hdmi.h | 147
-rw-r--r--  drivers/gpu/drm/exynos/regs-mixer.h | 141
-rw-r--r--  drivers/gpu/drm/exynos/regs-vp.h | 91
-rw-r--r--  drivers/gpu/drm/gma500/Kconfig | 27
-rw-r--r--  drivers/gpu/drm/gma500/Makefile | 40
-rw-r--r--  drivers/gpu/drm/gma500/accel_2d.c | 364
-rw-r--r--  drivers/gpu/drm/gma500/backlight.c | 49
-rw-r--r--  drivers/gpu/drm/gma500/cdv_device.c | 351
-rw-r--r--  drivers/gpu/drm/gma500/cdv_device.h | 36
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_crt.c | 333
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_display.c | 1508
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_hdmi.c | 394
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_lvds.c | 732
-rw-r--r--  drivers/gpu/drm/gma500/framebuffer.c | 831
-rw-r--r--  drivers/gpu/drm/gma500/framebuffer.h | 47
-rw-r--r--  drivers/gpu/drm/gma500/gem.c | 292
-rw-r--r--  drivers/gpu/drm/gma500/gem_glue.c | 89
-rw-r--r--  drivers/gpu/drm/gma500/gem_glue.h | 2
-rw-r--r--  drivers/gpu/drm/gma500/gtt.c | 553
-rw-r--r--  drivers/gpu/drm/gma500/gtt.h | 64
-rw-r--r--  drivers/gpu/drm/gma500/intel_bios.c | 303
-rw-r--r--  drivers/gpu/drm/gma500/intel_bios.h | 430
-rw-r--r--  drivers/gpu/drm/gma500/intel_gmbus.c | 493
-rw-r--r--  drivers/gpu/drm/gma500/intel_i2c.c | 169
-rw-r--r--  drivers/gpu/drm/gma500/intel_opregion.c | 81
-rw-r--r--  drivers/gpu/drm/gma500/mid_bios.c | 263
-rw-r--r--  drivers/gpu/drm/gma500/mid_bios.h | 21
-rw-r--r--  drivers/gpu/drm/gma500/mmu.c | 858
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail.h | 252
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_crtc.c | 604
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_device.c | 512
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_hdmi.c | 859
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c | 328
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_lvds.c | 449
-rw-r--r--  drivers/gpu/drm/gma500/power.c | 316
-rw-r--r--  drivers/gpu/drm/gma500/power.h | 67
-rw-r--r--  drivers/gpu/drm/gma500/psb_device.c | 328
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.c | 703
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.h | 956
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_display.c | 1446
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_display.h | 28
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_drv.h | 289
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_lvds.c | 868
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_modes.c | 75
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_reg.h | 1309
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_sdvo.c | 2617
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_sdvo_regs.h | 723
-rw-r--r--  drivers/gpu/drm/gma500/psb_irq.c | 564
-rw-r--r--  drivers/gpu/drm/gma500/psb_irq.h | 45
-rw-r--r--  drivers/gpu/drm/gma500/psb_lid.c | 88
-rw-r--r--  drivers/gpu/drm/gma500/psb_reg.h | 582
-rw-r--r--  drivers/gpu/drm/i810/i810_dma.c | 19
-rw-r--r--  drivers/gpu/drm/i810/i810_drv.c | 24
-rw-r--r--  drivers/gpu/drm/i810/i810_drv.h | 6
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 86
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 38
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 10
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 63
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 22
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 186
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 360
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 51
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c | 19
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 30
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 668
-rw-r--r--  drivers/gpu/drm/mga/mga_drv.c | 29
-rw-r--r--  drivers/gpu/drm/nouveau/Makefile | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c | 904
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.h | 69
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 119
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_channel.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 403
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.h | 36
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_crtc.h | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_debugfs.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 198
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dma.c | 14
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dp.c | 22
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.c | 80
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h | 133
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fb.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 22
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gpio.c | 400
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gpio.h | 71
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_hdmi.c | 258
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_hwsq.h | 115
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_i2c.c | 556
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_i2c.h | 21
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mxm.c | 677
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_notifier.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_object.c | 25
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_perf.c | 18
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_pm.c | 382
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_pm.h | 24
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sgdma.c | 179
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c | 197
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_temp.c | 29
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_vm.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_vm.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_volt.c | 15
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_crtc.c | 14
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_dac.c | 14
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_dfp.c | 16
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_display.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_pm.c | 109
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_timer.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_gpio.c | 117
-rw-r--r--  drivers/gpu/drm/nouveau/nv17_tv.c | 20
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_pm.c | 50
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_crtc.c | 347
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_dac.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 140
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_evo.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fifo.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_gpio.c | 272
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_graph.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_pm.c | 783
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_sor.c | 28
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_vm.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv84_bsp.c | 83
-rw-r--r--  drivers/gpu/drm/nouveau/nv84_vp.c | 83
-rw-r--r--  drivers/gpu/drm/nouveau/nv98_crypt.c | 78
-rw-r--r--  drivers/gpu/drm/nouveau/nv98_ppp.c | 78
-rw-r--r--  drivers/gpu/drm/nouveau/nva3_copy.fuc | 262
-rw-r--r--  drivers/gpu/drm/nouveau/nva3_copy.fuc.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nva3_pm.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_copy.fuc.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_graph.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_graph.fuc | 56
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_graph.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_grctx.c | 127
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_grgpc.fuc | 217
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h | 80
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_grhub.fuc | 311
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h | 96
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_pm.c | 237
-rw-r--r--  drivers/gpu/drm/nouveau/nvd0_display.c | 833
-rw-r--r--  drivers/gpu/drm/r128/r128_drv.c | 30
-rw-r--r--  drivers/gpu/drm/radeon/Makefile | 5
-rw-r--r--  drivers/gpu/drm/radeon/atom.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/atombios_encoders.c | 35
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 241
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_blit_kms.c | 242
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_cs.c | 246
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_reg.h | 13
-rw-r--r--  drivers/gpu/drm/radeon/evergreend.h | 65
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 395
-rw-r--r--  drivers/gpu/drm/radeon/nid.h | 35
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 232
-rw-r--r--  drivers/gpu/drm/radeon/r200.c | 21
-rw-r--r--  drivers/gpu/drm/radeon/r300.c | 160
-rw-r--r--  drivers/gpu/drm/radeon/r420.c | 49
-rw-r--r--  drivers/gpu/drm/radeon/r500_reg.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/r520.c | 25
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 273
-rw-r--r--  drivers/gpu/drm/radeon/r600_audio.c | 57
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit_kms.c | 235
-rw-r--r--  drivers/gpu/drm/radeon/r600_cp.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/r600_hdmi.c | 65
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 367
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 197
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 48
-rw-r--r--  drivers/gpu/drm/radeon/radeon_benchmark.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 302
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 76
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 69
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 24
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c | 307
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c | 425
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 147
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq_kms.c | 24
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 47
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 38
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.h | 32
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 34
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c | 465
-rw-r--r--  drivers/gpu/drm/radeon/radeon_sa.c | 189
-rw-r--r--  drivers/gpu/drm/radeon/radeon_semaphore.c | 178
-rw-r--r--  drivers/gpu/drm/radeon/radeon_test.c | 269
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 355
-rw-r--r--  drivers/gpu/drm/radeon/rs400.c | 27
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 38
-rw-r--r--  drivers/gpu/drm/radeon/rs690.c | 30
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c | 106
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 63
-rw-r--r--  drivers/gpu/drm/savage/savage_drv.c | 23
-rw-r--r--  drivers/gpu/drm/sis/sis_drv.c | 56
-rw-r--r--  drivers/gpu/drm/sis/sis_drv.h | 7
-rw-r--r--  drivers/gpu/drm/sis/sis_mm.c | 199
-rw-r--r--  drivers/gpu/drm/tdfx/tdfx_drv.c | 23
-rw-r--r--  drivers/gpu/drm/ttm/Makefile | 4
-rw-r--r--  drivers/gpu/drm/ttm/ttm_agp_backend.c | 105
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 90
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 32
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c | 9
-rw-r--r--  drivers/gpu/drm/ttm/ttm_memory.c | 2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c | 184
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc_dma.c | 1143
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c | 324
-rw-r--r--  drivers/gpu/drm/via/via_drv.c | 48
-rw-r--r--  drivers/gpu/drm/via/via_drv.h | 7
-rw-r--r--  drivers/gpu/drm/via/via_map.c | 10
-rw-r--r--  drivers/gpu/drm/via/via_mm.c | 135
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | 71
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 30
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 34
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | 1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 35
-rw-r--r--  drivers/staging/gma500/accel_2d.c | 2
-rw-r--r--  drivers/staging/gma500/cdv_intel_display.c | 4
-rw-r--r--  drivers/staging/gma500/framebuffer.c | 41
-rw-r--r--  drivers/staging/gma500/mdfld_intel_display.c | 4
-rw-r--r--  drivers/staging/gma500/mrst_crtc.c | 4
-rw-r--r--  drivers/staging/gma500/psb_drv.c | 23
-rw-r--r--  drivers/staging/gma500/psb_intel_display.c | 4
-rw-r--r--  drivers/xen/swiotlb-xen.c | 2
-rw-r--r--  include/drm/Kbuild | 1
-rw-r--r--  include/drm/drm.h | 4
-rw-r--r--  include/drm/drmP.h | 10
-rw-r--r--  include/drm/drm_crtc.h | 212
-rw-r--r--  include/drm/drm_crtc_helper.h | 5
-rw-r--r--  include/drm/drm_fourcc.h | 137
-rw-r--r--  include/drm/drm_mode.h | 74
-rw-r--r--  include/drm/drm_sman.h | 176
-rw-r--r--  include/drm/exynos_drm.h | 37
-rw-r--r--  include/drm/gma_drm.h | 91
-rw-r--r--  include/drm/i915_drm.h | 40
-rw-r--r--  include/drm/radeon_drm.h | 36
-rw-r--r--  include/drm/sis_drm.h | 4
-rw-r--r--  include/drm/ttm/ttm_bo_api.h | 24
-rw-r--r--  include/drm/ttm/ttm_bo_driver.h | 203
-rw-r--r--  include/drm/ttm/ttm_page_alloc.h | 77
-rw-r--r--  include/drm/via_drm.h | 4
-rw-r--r--  include/linux/swiotlb.h | 2
-rw-r--r--  lib/swiotlb.c | 5
288 files changed, 43103 insertions, 6739 deletions
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index b072648dc3f..17e05d1076b 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -514,12 +514,12 @@ static void agp_v2_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_
 	switch (*bridge_agpstat & 7) {
 	case 4:
 		*bridge_agpstat |= (AGPSTAT2_2X | AGPSTAT2_1X);
-		printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x4 rate"
+		printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x4 rate. "
 		       "Fixing up support for x2 & x1\n");
 		break;
 	case 2:
 		*bridge_agpstat |= AGPSTAT2_1X;
-		printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x2 rate"
+		printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x2 rate. "
 		       "Fixing up support for x1\n");
 		break;
 	default:
@@ -693,7 +693,7 @@ static void agp_v3_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_
 		*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
 		*vga_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
 	} else {
-		printk(KERN_INFO PFX "Fell back to AGPx4 mode because");
+		printk(KERN_INFO PFX "Fell back to AGPx4 mode because ");
 		if (!(*bridge_agpstat & AGPSTAT3_8X)) {
 			printk(KERN_INFO PFX "bridge couldn't do x8. bridge_agpstat:%x (orig=%x)\n",
 			       *bridge_agpstat, origbridge);
@@ -956,7 +956,7 @@ int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
 	bridge->driver->cache_flush();
 #ifdef CONFIG_X86
 	if (set_memory_uc((unsigned long)table, 1 << page_order))
-		printk(KERN_WARNING "Could not set GATT table memory to UC!");
+		printk(KERN_WARNING "Could not set GATT table memory to UC!\n");
 
 	bridge->gatt_table = (void *)table;
 #else
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 1368826ef28..2418429a983 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -162,3 +162,6 @@ config DRM_SAVAGE
 source "drivers/gpu/drm/exynos/Kconfig"
 
 source "drivers/gpu/drm/vmwgfx/Kconfig"
+
+source "drivers/gpu/drm/gma500/Kconfig"
+
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index c0496f66070..0cde1b80fdb 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -9,7 +9,7 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
 		drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
 		drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
 		drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
-		drm_platform.o drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \
+		drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
 		drm_crtc.o drm_modes.o drm_edid.o \
 		drm_info.o drm_debugfs.o drm_encoder_slave.o \
 		drm_trace_points.o drm_global.o drm_usb.o
@@ -36,4 +36,5 @@ obj-$(CONFIG_DRM_VMWGFX) += vmwgfx/
 obj-$(CONFIG_DRM_VIA)	+=via/
 obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/
 obj-$(CONFIG_DRM_EXYNOS) +=exynos/
+obj-$(CONFIG_DRM_GMA500) += gma500/
 obj-y			+= i2c/
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index 6d440fb894c..325365f6d35 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -154,8 +154,6 @@ int drm_getsareactx(struct drm_device *dev, void *data,
154 return -EINVAL; 154 return -EINVAL;
155 } 155 }
156 156
157 mutex_unlock(&dev->struct_mutex);
158
159 request->handle = NULL; 157 request->handle = NULL;
160 list_for_each_entry(_entry, &dev->maplist, head) { 158 list_for_each_entry(_entry, &dev->maplist, head) {
161 if (_entry->map == map) { 159 if (_entry->map == map) {
@@ -164,6 +162,9 @@ int drm_getsareactx(struct drm_device *dev, void *data,
164 break; 162 break;
165 } 163 }
166 } 164 }
165
166 mutex_unlock(&dev->struct_mutex);
167
167 if (request->handle == NULL) 168 if (request->handle == NULL)
168 return -EINVAL; 169 return -EINVAL;
169 170
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 8323fc38984..5e818a808ac 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -36,6 +36,7 @@
 #include "drmP.h"
 #include "drm_crtc.h"
 #include "drm_edid.h"
+#include "drm_fourcc.h"
 
 struct drm_prop_enum_list {
 	int type;
@@ -324,6 +325,7 @@ void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
 {
 	struct drm_device *dev = fb->dev;
 	struct drm_crtc *crtc;
+	struct drm_plane *plane;
 	struct drm_mode_set set;
 	int ret;
 
@@ -340,6 +342,18 @@ void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
 		}
 	}
 
+	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+		if (plane->fb == fb) {
+			/* should turn off the crtc */
+			ret = plane->funcs->disable_plane(plane);
+			if (ret)
+				DRM_ERROR("failed to disable plane with busy fb\n");
+			/* disconnect the plane from the fb and crtc: */
+			plane->fb = NULL;
+			plane->crtc = NULL;
+		}
+	}
+
 	drm_mode_object_put(dev, &fb->base);
 	list_del(&fb->head);
 	dev->mode_config.num_fb--;
@@ -540,6 +554,63 @@ void drm_encoder_cleanup(struct drm_encoder *encoder)
 }
 EXPORT_SYMBOL(drm_encoder_cleanup);
 
+int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
+		   unsigned long possible_crtcs,
+		   const struct drm_plane_funcs *funcs,
+		   const uint32_t *formats, uint32_t format_count,
+		   bool priv)
+{
+	mutex_lock(&dev->mode_config.mutex);
+
+	plane->dev = dev;
+	drm_mode_object_get(dev, &plane->base, DRM_MODE_OBJECT_PLANE);
+	plane->funcs = funcs;
+	plane->format_types = kmalloc(sizeof(uint32_t) * format_count,
+				      GFP_KERNEL);
+	if (!plane->format_types) {
+		DRM_DEBUG_KMS("out of memory when allocating plane\n");
+		drm_mode_object_put(dev, &plane->base);
+		mutex_unlock(&dev->mode_config.mutex);
+		return -ENOMEM;
+	}
+
+	memcpy(plane->format_types, formats, format_count * sizeof(uint32_t));
+	plane->format_count = format_count;
+	plane->possible_crtcs = possible_crtcs;
+
+	/* private planes are not exposed to userspace, but depending on
+	 * display hardware, might be convenient to allow sharing programming
+	 * for the scanout engine with the crtc implementation.
+	 */
+	if (!priv) {
+		list_add_tail(&plane->head, &dev->mode_config.plane_list);
+		dev->mode_config.num_plane++;
+	} else {
+		INIT_LIST_HEAD(&plane->head);
+	}
+
+	mutex_unlock(&dev->mode_config.mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_plane_init);
+
+void drm_plane_cleanup(struct drm_plane *plane)
+{
+	struct drm_device *dev = plane->dev;
+
+	mutex_lock(&dev->mode_config.mutex);
+	kfree(plane->format_types);
+	drm_mode_object_put(dev, &plane->base);
+	/* if not added to a list, it must be a private plane */
+	if (!list_empty(&plane->head)) {
+		list_del(&plane->head);
+		dev->mode_config.num_plane--;
+	}
+	mutex_unlock(&dev->mode_config.mutex);
+}
+EXPORT_SYMBOL(drm_plane_cleanup);
+
 /**
  * drm_mode_create - create a new display mode
  * @dev: DRM device
@@ -871,6 +942,7 @@ void drm_mode_config_init(struct drm_device *dev)
 	INIT_LIST_HEAD(&dev->mode_config.encoder_list);
 	INIT_LIST_HEAD(&dev->mode_config.property_list);
 	INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
+	INIT_LIST_HEAD(&dev->mode_config.plane_list);
 	idr_init(&dev->mode_config.crtc_idr);
 
 	mutex_lock(&dev->mode_config.mutex);
@@ -947,6 +1019,7 @@ void drm_mode_config_cleanup(struct drm_device *dev)
 	struct drm_encoder *encoder, *enct;
 	struct drm_framebuffer *fb, *fbt;
 	struct drm_property *property, *pt;
+	struct drm_plane *plane, *plt;
 
 	list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list,
 				 head) {
@@ -971,6 +1044,10 @@ void drm_mode_config_cleanup(struct drm_device *dev)
 		crtc->funcs->destroy(crtc);
 	}
 
+	list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
+				 head) {
+		plane->funcs->destroy(plane);
+	}
 }
 EXPORT_SYMBOL(drm_mode_config_cleanup);
 
@@ -1379,7 +1456,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
 	 */
 	if ((out_resp->count_modes >= mode_count) && mode_count) {
 		copied = 0;
-		mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
+		mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
 		list_for_each_entry(mode, &connector->modes, head) {
 			drm_crtc_convert_to_umode(&u_mode, mode);
 			if (copy_to_user(mode_ptr + copied,
@@ -1394,8 +1471,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
 
 	if ((out_resp->count_props >= props_count) && props_count) {
 		copied = 0;
-		prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
-		prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
+		prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
+		prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
 		for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
 			if (connector->property_ids[i] != 0) {
 				if (put_user(connector->property_ids[i],
@@ -1417,7 +1494,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
 
 	if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
 		copied = 0;
-		encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
+		encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
 		for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
 			if (connector->encoder_ids[i] != 0) {
 				if (put_user(connector->encoder_ids[i],
@@ -1471,6 +1548,245 @@ out:
 }
 
 /**
+ * drm_mode_getplane_res - get plane info
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * Return an plane count and set of IDs.
+ */
+int drm_mode_getplane_res(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv)
+{
+	struct drm_mode_get_plane_res *plane_resp = data;
+	struct drm_mode_config *config;
+	struct drm_plane *plane;
+	uint32_t __user *plane_ptr;
+	int copied = 0, ret = 0;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	mutex_lock(&dev->mode_config.mutex);
+	config = &dev->mode_config;
+
+	/*
+	 * This ioctl is called twice, once to determine how much space is
+	 * needed, and the 2nd time to fill it.
+	 */
+	if (config->num_plane &&
+	    (plane_resp->count_planes >= config->num_plane)) {
+		plane_ptr = (uint32_t __user *)(unsigned long)plane_resp->plane_id_ptr;
+
+		list_for_each_entry(plane, &config->plane_list, head) {
+			if (put_user(plane->base.id, plane_ptr + copied)) {
+				ret = -EFAULT;
+				goto out;
+			}
+			copied++;
+		}
+	}
+	plane_resp->count_planes = config->num_plane;
+
+out:
+	mutex_unlock(&dev->mode_config.mutex);
+	return ret;
+}
+
+/**
+ * drm_mode_getplane - get plane info
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * Return plane info, including formats supported, gamma size, any
+ * current fb, etc.
+ */
+int drm_mode_getplane(struct drm_device *dev, void *data,
+		      struct drm_file *file_priv)
+{
+	struct drm_mode_get_plane *plane_resp = data;
+	struct drm_mode_object *obj;
+	struct drm_plane *plane;
+	uint32_t __user *format_ptr;
+	int ret = 0;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	mutex_lock(&dev->mode_config.mutex);
+	obj = drm_mode_object_find(dev, plane_resp->plane_id,
+				   DRM_MODE_OBJECT_PLANE);
+	if (!obj) {
+		ret = -ENOENT;
+		goto out;
+	}
+	plane = obj_to_plane(obj);
+
+	if (plane->crtc)
+		plane_resp->crtc_id = plane->crtc->base.id;
+	else
+		plane_resp->crtc_id = 0;
+
+	if (plane->fb)
+		plane_resp->fb_id = plane->fb->base.id;
+	else
+		plane_resp->fb_id = 0;
+
+	plane_resp->plane_id = plane->base.id;
+	plane_resp->possible_crtcs = plane->possible_crtcs;
+	plane_resp->gamma_size = plane->gamma_size;
+
+	/*
+	 * This ioctl is called twice, once to determine how much space is
+	 * needed, and the 2nd time to fill it.
+	 */
+	if (plane->format_count &&
+	    (plane_resp->count_format_types >= plane->format_count)) {
+		format_ptr = (uint32_t __user *)(unsigned long)plane_resp->format_type_ptr;
+		if (copy_to_user(format_ptr,
+				 plane->format_types,
+				 sizeof(uint32_t) * plane->format_count)) {
+			ret = -EFAULT;
+			goto out;
+		}
+	}
+	plane_resp->count_format_types = plane->format_count;
+
+out:
+	mutex_unlock(&dev->mode_config.mutex);
+	return ret;
+}
+
+/**
+ * drm_mode_setplane - set up or tear down an plane
+ * @dev: DRM device
+ * @data: ioctl data*
+ * @file_prive: DRM file info
+ *
+ * Set plane info, including placement, fb, scaling, and other factors.
+ * Or pass a NULL fb to disable.
+ */
+int drm_mode_setplane(struct drm_device *dev, void *data,
+		      struct drm_file *file_priv)
+{
+	struct drm_mode_set_plane *plane_req = data;
+	struct drm_mode_object *obj;
+	struct drm_plane *plane;
+	struct drm_crtc *crtc;
+	struct drm_framebuffer *fb;
+	int ret = 0;
+	unsigned int fb_width, fb_height;
+	int i;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	mutex_lock(&dev->mode_config.mutex);
+
+	/*
+	 * First, find the plane, crtc, and fb objects.  If not available,
+	 * we don't bother to call the driver.
+	 */
+	obj = drm_mode_object_find(dev, plane_req->plane_id,
+				   DRM_MODE_OBJECT_PLANE);
+	if (!obj) {
+		DRM_DEBUG_KMS("Unknown plane ID %d\n",
+			      plane_req->plane_id);
+		ret = -ENOENT;
+		goto out;
+	}
+	plane = obj_to_plane(obj);
+
+	/* No fb means shut it down */
+	if (!plane_req->fb_id) {
+		plane->funcs->disable_plane(plane);
+		plane->crtc = NULL;
+		plane->fb = NULL;
+		goto out;
+	}
+
+	obj = drm_mode_object_find(dev, plane_req->crtc_id,
+				   DRM_MODE_OBJECT_CRTC);
+	if (!obj) {
+		DRM_DEBUG_KMS("Unknown crtc ID %d\n",
+			      plane_req->crtc_id);
+		ret = -ENOENT;
+		goto out;
+	}
+	crtc = obj_to_crtc(obj);
+
+	obj = drm_mode_object_find(dev, plane_req->fb_id,
+				   DRM_MODE_OBJECT_FB);
+	if (!obj) {
+		DRM_DEBUG_KMS("Unknown framebuffer ID %d\n",
+			      plane_req->fb_id);
+		ret = -ENOENT;
+		goto out;
+	}
+	fb = obj_to_fb(obj);
+
+	/* Check whether this plane supports the fb pixel format. */
+	for (i = 0; i < plane->format_count; i++)
+		if (fb->pixel_format == plane->format_types[i])
+			break;
+	if (i == plane->format_count) {
+		DRM_DEBUG_KMS("Invalid pixel format 0x%08x\n", fb->pixel_format);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	fb_width = fb->width << 16;
+	fb_height = fb->height << 16;
+
+	/* Make sure source coordinates are inside the fb. */
+	if (plane_req->src_w > fb_width ||
+	    plane_req->src_x > fb_width - plane_req->src_w ||
+	    plane_req->src_h > fb_height ||
+	    plane_req->src_y > fb_height - plane_req->src_h) {
+		DRM_DEBUG_KMS("Invalid source coordinates "
+			      "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
+			      plane_req->src_w >> 16,
+			      ((plane_req->src_w & 0xffff) * 15625) >> 10,
+			      plane_req->src_h >> 16,
+			      ((plane_req->src_h & 0xffff) * 15625) >> 10,
+			      plane_req->src_x >> 16,
+			      ((plane_req->src_x & 0xffff) * 15625) >> 10,
+			      plane_req->src_y >> 16,
+			      ((plane_req->src_y & 0xffff) * 15625) >> 10);
+		ret = -ENOSPC;
+		goto out;
+	}
+
+	/* Give drivers some help against integer overflows */
+	if (plane_req->crtc_w > INT_MAX ||
+	    plane_req->crtc_x > INT_MAX - (int32_t) plane_req->crtc_w ||
+	    plane_req->crtc_h > INT_MAX ||
+	    plane_req->crtc_y > INT_MAX - (int32_t) plane_req->crtc_h) {
+		DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
+			      plane_req->crtc_w, plane_req->crtc_h,
+			      plane_req->crtc_x, plane_req->crtc_y);
+		ret = -ERANGE;
+		goto out;
+	}
+
+	ret = plane->funcs->update_plane(plane, crtc, fb,
+					 plane_req->crtc_x, plane_req->crtc_y,
+					 plane_req->crtc_w, plane_req->crtc_h,
+					 plane_req->src_x, plane_req->src_y,
+					 plane_req->src_w, plane_req->src_h);
+	if (!ret) {
+		plane->crtc = crtc;
+		plane->fb = fb;
+	}
+
+out:
+	mutex_unlock(&dev->mode_config.mutex);
+
+	return ret;
+}
+
+/**
  * drm_mode_setcrtc - set CRTC configuration
 * @inode: inode from the ioctl
 * @filp: file * from the ioctl
@@ -1576,7 +1892,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
 	}
 
 	for (i = 0; i < crtc_req->count_connectors; i++) {
-		set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
+		set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
 		if (get_user(out_id, &set_connectors_ptr[i])) {
 			ret = -EFAULT;
 			goto out;
@@ -1625,10 +1941,8 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		return -EINVAL;
 
-	if (!req->flags) {
-		DRM_ERROR("no operation set\n");
+	if (!req->flags)
 		return -EINVAL;
-	}
 
 	mutex_lock(&dev->mode_config.mutex);
 	obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC);
@@ -1641,7 +1955,6 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
 
 	if (req->flags & DRM_MODE_CURSOR_BO) {
 		if (!crtc->funcs->cursor_set) {
-			DRM_ERROR("crtc does not support cursor\n");
 			ret = -ENXIO;
 			goto out;
 		}
@@ -1654,7 +1967,6 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
 		if (crtc->funcs->cursor_move) {
 			ret = crtc->funcs->cursor_move(crtc, req->x, req->y);
 		} else {
-			DRM_ERROR("crtc does not support cursor\n");
 			ret = -EFAULT;
 			goto out;
 		}
@@ -1664,6 +1976,42 @@ out:
 	return ret;
 }
 
+/* Original addfb only supported RGB formats, so figure out which one */
+uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth)
+{
+	uint32_t fmt;
+
+	switch (bpp) {
+	case 8:
+		fmt = DRM_FORMAT_RGB332;
+		break;
+	case 16:
+		if (depth == 15)
+			fmt = DRM_FORMAT_XRGB1555;
+		else
+			fmt = DRM_FORMAT_RGB565;
+		break;
+	case 24:
+		fmt = DRM_FORMAT_RGB888;
+		break;
+	case 32:
+		if (depth == 24)
+			fmt = DRM_FORMAT_XRGB8888;
+		else if (depth == 30)
+			fmt = DRM_FORMAT_XRGB2101010;
+		else
+			fmt = DRM_FORMAT_ARGB8888;
+		break;
+	default:
+		DRM_ERROR("bad bpp, assuming x8r8g8b8 pixel format\n");
+		fmt = DRM_FORMAT_XRGB8888;
+		break;
+	}
+
+	return fmt;
+}
+EXPORT_SYMBOL(drm_mode_legacy_fb_format);
+
 /**
  * drm_mode_addfb - add an FB to the graphics configuration
 * @inode: inode from the ioctl
@@ -1684,7 +2032,140 @@ out:
 int drm_mode_addfb(struct drm_device *dev,
 		   void *data, struct drm_file *file_priv)
 {
-	struct drm_mode_fb_cmd *r = data;
+	struct drm_mode_fb_cmd *or = data;
+	struct drm_mode_fb_cmd2 r = {};
+	struct drm_mode_config *config = &dev->mode_config;
+	struct drm_framebuffer *fb;
+	int ret = 0;
+
+	/* Use new struct with format internally */
+	r.fb_id = or->fb_id;
+	r.width = or->width;
+	r.height = or->height;
+	r.pitches[0] = or->pitch;
+	r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth);
+	r.handles[0] = or->handle;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	if ((config->min_width > r.width) || (r.width > config->max_width))
+		return -EINVAL;
+
+	if ((config->min_height > r.height) || (r.height > config->max_height))
+		return -EINVAL;
+
+	mutex_lock(&dev->mode_config.mutex);
+
+	/* TODO check buffer is sufficiently large */
+	/* TODO setup destructor callback */
+
+	fb = dev->mode_config.funcs->fb_create(dev, file_priv, &r);
+	if (IS_ERR(fb)) {
+		DRM_ERROR("could not create framebuffer\n");
+		ret = PTR_ERR(fb);
+		goto out;
+	}
+
+	or->fb_id = fb->base.id;
+	list_add(&fb->filp_head, &file_priv->fbs);
+	DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
+
+out:
+	mutex_unlock(&dev->mode_config.mutex);
+	return ret;
+}
+
+static int format_check(struct drm_mode_fb_cmd2 *r)
+{
+	uint32_t format = r->pixel_format & ~DRM_FORMAT_BIG_ENDIAN;
+
+	switch (format) {
+	case DRM_FORMAT_C8:
+	case DRM_FORMAT_RGB332:
+	case DRM_FORMAT_BGR233:
+	case DRM_FORMAT_XRGB4444:
+	case DRM_FORMAT_XBGR4444:
+	case DRM_FORMAT_RGBX4444:
+	case DRM_FORMAT_BGRX4444:
+	case DRM_FORMAT_ARGB4444:
+	case DRM_FORMAT_ABGR4444:
+	case DRM_FORMAT_RGBA4444:
+	case DRM_FORMAT_BGRA4444:
+	case DRM_FORMAT_XRGB1555:
+	case DRM_FORMAT_XBGR1555:
+	case DRM_FORMAT_RGBX5551:
+	case DRM_FORMAT_BGRX5551:
+	case DRM_FORMAT_ARGB1555:
+	case DRM_FORMAT_ABGR1555:
+	case DRM_FORMAT_RGBA5551:
+	case DRM_FORMAT_BGRA5551:
+	case DRM_FORMAT_RGB565:
+	case DRM_FORMAT_BGR565:
+	case DRM_FORMAT_RGB888:
+	case DRM_FORMAT_BGR888:
+	case DRM_FORMAT_XRGB8888:
+	case DRM_FORMAT_XBGR8888:
+	case DRM_FORMAT_RGBX8888:
+	case DRM_FORMAT_BGRX8888:
+	case DRM_FORMAT_ARGB8888:
+	case DRM_FORMAT_ABGR8888:
+	case DRM_FORMAT_RGBA8888:
+	case DRM_FORMAT_BGRA8888:
+	case DRM_FORMAT_XRGB2101010:
+	case DRM_FORMAT_XBGR2101010:
+	case DRM_FORMAT_RGBX1010102:
+	case DRM_FORMAT_BGRX1010102:
+	case DRM_FORMAT_ARGB2101010:
+	case DRM_FORMAT_ABGR2101010:
+	case DRM_FORMAT_RGBA1010102:
+	case DRM_FORMAT_BGRA1010102:
+	case DRM_FORMAT_YUYV:
+	case DRM_FORMAT_YVYU:
+	case DRM_FORMAT_UYVY:
+	case DRM_FORMAT_VYUY:
+	case DRM_FORMAT_AYUV:
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV21:
+	case DRM_FORMAT_NV16:
+	case DRM_FORMAT_NV61:
+	case DRM_FORMAT_YUV410:
+	case DRM_FORMAT_YVU410:
+	case DRM_FORMAT_YUV411:
+	case DRM_FORMAT_YVU411:
+	case DRM_FORMAT_YUV420:
+	case DRM_FORMAT_YVU420:
+	case DRM_FORMAT_YUV422:
+	case DRM_FORMAT_YVU422:
+	case DRM_FORMAT_YUV444:
+	case DRM_FORMAT_YVU444:
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+/**
+ * drm_mode_addfb2 - add an FB to the graphics configuration
+ * @inode: inode from the ioctl
+ * @filp: file * from the ioctl
+ * @cmd: cmd from ioctl
+ * @arg: arg from ioctl
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Add a new FB to the specified CRTC, given a user request with format.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_addfb2(struct drm_device *dev,
+		    void *data, struct drm_file *file_priv)
+{
+	struct drm_mode_fb_cmd2 *r = data;
 	struct drm_mode_config *config = &dev->mode_config;
 	struct drm_framebuffer *fb;
 	int ret = 0;
@@ -1693,18 +2174,23 @@ int drm_mode_addfb(struct drm_device *dev,
 		return -EINVAL;
 
 	if ((config->min_width > r->width) || (r->width > config->max_width)) {
-		DRM_ERROR("mode new framebuffer width not within limits\n");
+		DRM_ERROR("bad framebuffer width %d, should be >= %d && <= %d\n",
+			  r->width, config->min_width, config->max_width);
 		return -EINVAL;
 	}
 	if ((config->min_height > r->height) || (r->height > config->max_height)) {
-		DRM_ERROR("mode new framebuffer height not within limits\n");
+		DRM_ERROR("bad framebuffer height %d, should be >= %d && <= %d\n",
+			  r->height, config->min_height, config->max_height);
 		return -EINVAL;
 	}
 
-	mutex_lock(&dev->mode_config.mutex);
+	ret = format_check(r);
+	if (ret) {
+		DRM_ERROR("bad framebuffer format 0x%08x\n", r->pixel_format);
+		return ret;
+	}
 
-	/* TODO check buffer is sufficiently large */
-	/* TODO setup destructor callback */
+	mutex_lock(&dev->mode_config.mutex);
 
 	fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
 	if (IS_ERR(fb)) {
@@ -1756,7 +2242,6 @@ int drm_mode_rmfb(struct drm_device *dev,
 	obj = drm_mode_object_find(dev, *id, DRM_MODE_OBJECT_FB);
 	/* TODO check that we really get a framebuffer back. */
 	if (!obj) {
-		DRM_ERROR("mode invalid framebuffer id\n");
 		ret = -EINVAL;
 		goto out;
 	}
@@ -1767,7 +2252,6 @@ int drm_mode_rmfb(struct drm_device *dev,
 	found = 1;
 
 	if (!found) {
-		DRM_ERROR("tried to remove a fb that we didn't own\n");
 		ret = -EINVAL;
 		goto out;
 	}
@@ -1814,7 +2298,6 @@ int drm_mode_getfb(struct drm_device *dev,
 	mutex_lock(&dev->mode_config.mutex);
 	obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
 	if (!obj) {
-		DRM_ERROR("invalid framebuffer id\n");
 		ret = -EINVAL;
 		goto out;
 	}
@@ -1824,7 +2307,7 @@ int drm_mode_getfb(struct drm_device *dev,
 	r->width = fb->width;
 	r->depth = fb->depth;
 	r->bpp = fb->bits_per_pixel;
-	r->pitch = fb->pitch;
+	r->pitch = fb->pitches[0];
 	fb->funcs->create_handle(fb, file_priv, &r->handle);
 
 out:
@@ -1850,14 +2333,13 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
 	mutex_lock(&dev->mode_config.mutex);
 	obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
 	if (!obj) {
-		DRM_ERROR("invalid framebuffer id\n");
 		ret = -EINVAL;
 		goto out_err1;
 	}
 	fb = obj_to_fb(obj);
 
 	num_clips = r->num_clips;
-	clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
+	clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
 
 	if (!num_clips != !clips_ptr) {
 		ret = -EINVAL;
@@ -2253,7 +2735,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
 	struct drm_property_enum *prop_enum;
 	struct drm_mode_property_enum __user *enum_ptr;
 	struct drm_property_blob *prop_blob;
-	uint32_t *blob_id_ptr;
+	uint32_t __user *blob_id_ptr;
 	uint64_t __user *values_ptr;
 	uint32_t __user *blob_length_ptr;
 
@@ -2283,7 +2765,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
 	out_resp->flags = property->flags;
 
 	if ((out_resp->count_values >= value_count) && value_count) {
-		values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
+		values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
 		for (i = 0; i < value_count; i++) {
 			if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
 				ret = -EFAULT;
@@ -2296,7 +2778,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
 	if (property->flags & DRM_MODE_PROP_ENUM) {
 		if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
 			copied = 0;
-			enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
+			enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
 			list_for_each_entry(prop_enum, &property->enum_blob_list, head) {

 				if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
@@ -2318,8 +2800,8 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
 	if (property->flags & DRM_MODE_PROP_BLOB) {
 		if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
 			copied = 0;
-			blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
-			blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
+			blob_id_ptr = (uint32_t __user *)(unsigned long)out_resp->enum_blob_ptr;
+			blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;

 			list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
 				if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
@@ -2380,7 +2862,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
 	struct drm_mode_get_blob *out_resp = data;
 	struct drm_property_blob *blob;
 	int ret = 0;
-	void *blob_ptr;
+	void __user *blob_ptr;
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		return -EINVAL;
@@ -2394,7 +2876,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
 	blob = obj_to_blob(obj);
 
 	if (out_resp->length == blob->length) {
-		blob_ptr = (void *)(unsigned long)out_resp->data;
+		blob_ptr = (void __user *)(unsigned long)out_resp->data;
 		if (copy_to_user(blob_ptr, blob->data, blob->length)){
 			ret = -EFAULT;
 			goto done;
@@ -2788,3 +3270,71 @@ int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
 
 	return dev->driver->dumb_destroy(file_priv, dev, args->handle);
 }
+
+/*
+ * Just need to support RGB formats here for compat with code that doesn't
+ * use pixel formats directly yet.
+ */
+void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
+			  int *bpp)
+{
+	switch (format) {
+	case DRM_FORMAT_RGB332:
+	case DRM_FORMAT_BGR233:
+		*depth = 8;
+		*bpp = 8;
+		break;
+	case DRM_FORMAT_XRGB1555:
+	case DRM_FORMAT_XBGR1555:
+	case DRM_FORMAT_RGBX5551:
+	case DRM_FORMAT_BGRX5551:
+	case DRM_FORMAT_ARGB1555:
+	case DRM_FORMAT_ABGR1555:
+	case DRM_FORMAT_RGBA5551:
+	case DRM_FORMAT_BGRA5551:
+		*depth = 15;
+		*bpp = 16;
+		break;
+	case DRM_FORMAT_RGB565:
+	case DRM_FORMAT_BGR565:
+		*depth = 16;
+		*bpp = 16;
+		break;
+	case DRM_FORMAT_RGB888:
+	case DRM_FORMAT_BGR888:
+		*depth = 24;
+		*bpp = 24;
+		break;
+	case DRM_FORMAT_XRGB8888:
+	case DRM_FORMAT_XBGR8888:
+	case DRM_FORMAT_RGBX8888:
+	case DRM_FORMAT_BGRX8888:
+		*depth = 24;
+		*bpp = 32;
+		break;
+	case DRM_FORMAT_XRGB2101010:
+	case DRM_FORMAT_XBGR2101010:
+	case DRM_FORMAT_RGBX1010102:
+	case DRM_FORMAT_BGRX1010102:
+	case DRM_FORMAT_ARGB2101010:
+	case DRM_FORMAT_ABGR2101010:
+	case DRM_FORMAT_RGBA1010102:
+	case DRM_FORMAT_BGRA1010102:
+		*depth = 30;
+		*bpp = 32;
+		break;
+	case DRM_FORMAT_ARGB8888:
+	case DRM_FORMAT_ABGR8888:
+	case DRM_FORMAT_RGBA8888:
+	case DRM_FORMAT_BGRA8888:
+		*depth = 32;
+		*bpp = 32;
+		break;
+	default:
+		DRM_DEBUG_KMS("unsupported pixel format\n");
+		*depth = 0;
+		*bpp = 0;
+		break;
+	}
+}
+EXPORT_SYMBOL(drm_fb_get_bpp_depth);
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index d2619d72cec..84a4a809793 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -34,6 +34,7 @@
 
 #include "drmP.h"
 #include "drm_crtc.h"
+#include "drm_fourcc.h"
 #include "drm_crtc_helper.h"
 #include "drm_fb_helper.h"
 
@@ -710,7 +711,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 		for (i = 0; i < set->num_connectors; i++) {
 			DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
 				      drm_get_connector_name(set->connectors[i]));
-			set->connectors[i]->dpms = DRM_MODE_DPMS_ON;
+			set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON);
 		}
 	}
 	drm_helper_disable_unused_functions(dev);
@@ -847,13 +848,19 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
 EXPORT_SYMBOL(drm_helper_connector_dpms);
 
 int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
-				   struct drm_mode_fb_cmd *mode_cmd)
+				   struct drm_mode_fb_cmd2 *mode_cmd)
 {
+	int i;
+
 	fb->width = mode_cmd->width;
 	fb->height = mode_cmd->height;
-	fb->pitch = mode_cmd->pitch;
-	fb->bits_per_pixel = mode_cmd->bpp;
-	fb->depth = mode_cmd->depth;
+	for (i = 0; i < 4; i++) {
+		fb->pitches[i] = mode_cmd->pitches[i];
+		fb->offsets[i] = mode_cmd->offsets[i];
+	}
+	drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth,
+			     &fb->bits_per_pixel);
+	fb->pixel_format = mode_cmd->pixel_format;
 
 	return 0;
 }
@@ -1008,3 +1015,36 @@ void drm_helper_hpd_irq_event(struct drm_device *dev)
 	queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0);
 }
 EXPORT_SYMBOL(drm_helper_hpd_irq_event);
+
+
+/**
+ * drm_format_num_planes - get the number of planes for format
+ * @format: pixel format (DRM_FORMAT_*)
+ *
+ * RETURNS:
+ * The number of planes used by the specified pixel format.
+ */
+int drm_format_num_planes(uint32_t format)
+{
+	switch (format) {
+	case DRM_FORMAT_YUV410:
+	case DRM_FORMAT_YVU410:
+	case DRM_FORMAT_YUV411:
+	case DRM_FORMAT_YVU411:
+	case DRM_FORMAT_YUV420:
+	case DRM_FORMAT_YVU420:
+	case DRM_FORMAT_YUV422:
+	case DRM_FORMAT_YVU422:
+	case DRM_FORMAT_YUV444:
+	case DRM_FORMAT_YVU444:
+		return 3;
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV21:
+	case DRM_FORMAT_NV16:
+	case DRM_FORMAT_NV61:
+		return 2;
+	default:
+		return 1;
+	}
+}
+EXPORT_SYMBOL(drm_format_num_planes);
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 40c187c60f4..ebf7d3f68fc 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -61,14 +61,14 @@ static int drm_version(struct drm_device *dev, void *data,
 
 /** Ioctl table */
 static struct drm_ioctl_desc drm_ioctls[] = {
-	DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0),
+	DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
 	DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0),
-	DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
-	DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
-	DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, 0),
+	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
 
 	DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -136,8 +136,11 @@ static struct drm_ioctl_desc drm_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),
 
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
@@ -150,6 +153,7 @@ static struct drm_ioctl_desc drm_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
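[note: the three new entries route the plane ioctls added by this series; from userspace the flow is enumerate, query, attach. A hedged sketch of the enumeration step, assuming the usual two-pass DRM getresources convention and the drm_mode_get_plane_res layout introduced alongside these ioctls:

	struct drm_mode_get_plane_res res = { 0 };
	uint32_t ids[16];

	/* First pass: kernel only fills in count_planes. */
	if (ioctl(fd, DRM_IOCTL_MODE_GETPLANERESOURCES, &res) < 0)
		return;

	if (res.count_planes > 16)
		res.count_planes = 16;
	res.plane_id_ptr = (uint64_t)(uintptr_t)ids;
	/* Second pass: kernel copies that many plane IDs into ids[]. */
	ioctl(fd, DRM_IOCTL_MODE_GETPLANERESOURCES, &res);

Each ID can then be handed to DRM_IOCTL_MODE_GETPLANE for its format list and, together with a framebuffer, to DRM_IOCTL_MODE_SETPLANE.]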
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 3e927ce7557..ece03fc2d38 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -508,25 +508,10 @@ static void
 cea_for_each_detailed_block(u8 *ext, detailed_cb *cb, void *closure)
 {
 	int i, n = 0;
-	u8 rev = ext[0x01], d = ext[0x02];
+	u8 d = ext[0x02];
 	u8 *det_base = ext + d;
 
-	switch (rev) {
-	case 0:
-		/* can't happen */
-		return;
-	case 1:
-		/* have to infer how many blocks we have, check pixel clock */
-		for (i = 0; i < 6; i++)
-			if (det_base[18*i] || det_base[18*i+1])
-				n++;
-		break;
-	default:
-		/* explicit count */
-		n = min(ext[0x03] & 0x0f, 6);
-		break;
-	}
-
+	n = (127 - d) / 18;
 	for (i = 0; i < n; i++)
 		cb((struct detailed_timing *)(det_base + 18 * i), closure);
 }
@@ -1319,6 +1304,7 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
 
 #define HDMI_IDENTIFIER 0x000C03
 #define AUDIO_BLOCK	0x01
+#define VIDEO_BLOCK     0x02
 #define VENDOR_BLOCK    0x03
 #define SPEAKER_BLOCK	0x04
 #define EDID_BASIC_AUDIO	(1 << 6)
@@ -1349,6 +1335,47 @@ u8 *drm_find_cea_extension(struct edid *edid)
 }
 EXPORT_SYMBOL(drm_find_cea_extension);
 
+static int
+do_cea_modes (struct drm_connector *connector, u8 *db, u8 len)
+{
+	struct drm_device *dev = connector->dev;
+	u8 * mode, cea_mode;
+	int modes = 0;
+
+	for (mode = db; mode < db + len; mode++) {
+		cea_mode = (*mode & 127) - 1; /* CEA modes are numbered 1..127 */
+		if (cea_mode < drm_num_cea_modes) {
+			struct drm_display_mode *newmode;
+			newmode = drm_mode_duplicate(dev,
+						     &edid_cea_modes[cea_mode]);
+			if (newmode) {
+				drm_mode_probed_add(connector, newmode);
+				modes++;
+			}
+		}
+	}
+
+	return modes;
+}
+
+static int
+add_cea_modes(struct drm_connector *connector, struct edid *edid)
+{
+	u8 * cea = drm_find_cea_extension(edid);
+	u8 * db, dbl;
+	int modes = 0;
+
+	if (cea && cea[1] >= 3) {
+		for (db = cea + 4; db < cea + cea[2]; db += dbl + 1) {
+			dbl = db[0] & 0x1f;
+			if (((db[0] & 0xe0) >> 5) == VIDEO_BLOCK)
+				modes += do_cea_modes (connector, db+1, dbl);
+		}
+	}
+
+	return modes;
+}
+
 static void
 parse_hdmi_vsdb(struct drm_connector *connector, uint8_t *db)
 {
@@ -1432,26 +1459,29 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
 	eld[18] = edid->prod_code[0];
 	eld[19] = edid->prod_code[1];
 
-	for (db = cea + 4; db < cea + cea[2]; db += dbl + 1) {
-		dbl = db[0] & 0x1f;
-
-		switch ((db[0] & 0xe0) >> 5) {
-		case AUDIO_BLOCK:	/* Audio Data Block, contains SADs */
-			sad_count = dbl / 3;
-			memcpy(eld + 20 + mnl, &db[1], dbl);
-			break;
-		case SPEAKER_BLOCK:	/* Speaker Allocation Data Block */
-			eld[7] = db[1];
-			break;
-		case VENDOR_BLOCK:
-			/* HDMI Vendor-Specific Data Block */
-			if (db[1] == 0x03 && db[2] == 0x0c && db[3] == 0)
-				parse_hdmi_vsdb(connector, db);
-			break;
-		default:
-			break;
+	if (cea[1] >= 3)
+		for (db = cea + 4; db < cea + cea[2]; db += dbl + 1) {
+			dbl = db[0] & 0x1f;
+
+			switch ((db[0] & 0xe0) >> 5) {
+			case AUDIO_BLOCK:
+				/* Audio Data Block, contains SADs */
+				sad_count = dbl / 3;
+				memcpy(eld + 20 + mnl, &db[1], dbl);
+				break;
+			case SPEAKER_BLOCK:
+				/* Speaker Allocation Data Block */
+				eld[7] = db[1];
+				break;
+			case VENDOR_BLOCK:
+				/* HDMI Vendor-Specific Data Block */
+				if (db[1] == 0x03 && db[2] == 0x0c && db[3] == 0)
+					parse_hdmi_vsdb(connector, db);
+				break;
+			default:
+				break;
+			}
 		}
-	}
+
 	eld[5] |= sad_count << 4;
 	eld[2] = (20 + mnl + sad_count * 3 + 3) / 4;
@@ -1722,6 +1752,7 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
 	num_modes += add_standard_modes(connector, edid);
 	num_modes += add_established_modes(connector, edid);
 	num_modes += add_inferred_modes(connector, edid);
+	num_modes += add_cea_modes(connector, edid);
 
 	if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
 		edid_fixup_preferred(connector, quirks);
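[note: both add_cea_modes() and the reworked ELD walker parse CEA-861 data blocks with the same header convention: the top three bits of the first byte are the block tag, the low five bits are the payload length, which is why the loops advance with db += dbl + 1 (payload plus header byte). A worked example with an invented header byte:

	u8 hdr = 0x42;			/* hypothetical data block header */
	u8 tag = (hdr & 0xe0) >> 5;	/* 0x40 >> 5 == 2 == VIDEO_BLOCK  */
	u8 len = hdr & 0x1f;		/* 2 payload bytes follow         */

For a video block each payload byte is a short video descriptor whose low seven bits hold the 1-based CEA mode number, hence the (*mode & 127) - 1 index into edid_cea_modes[].]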
diff --git a/drivers/gpu/drm/drm_edid_modes.h b/drivers/gpu/drm/drm_edid_modes.h
index 5f2064489fd..a91ffb11722 100644
--- a/drivers/gpu/drm/drm_edid_modes.h
+++ b/drivers/gpu/drm/drm_edid_modes.h
@@ -378,3 +378,287 @@ static const struct {
 	{ 1920, 1440, 75, 0 },
 };
 static const int num_est3_modes = sizeof(est3_modes) / sizeof(est3_modes[0]);
+
+/*
+ * Probably taken from CEA-861 spec.
+ * This table is converted from xorg's hw/xfree86/modes/xf86EdidModes.c.
+ */
+static const struct drm_display_mode edid_cea_modes[] = {
+	/* 640x480@60Hz */
+	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
+		   752, 800, 0, 480, 490, 492, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 720x480@60Hz */
+	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
+		   798, 858, 0, 480, 489, 495, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 720x480@60Hz */
+	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
+		   798, 858, 0, 480, 489, 495, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 1280x720@60Hz */
+	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
+		   1430, 1650, 0, 720, 725, 730, 750, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1920x1080i@60Hz */
+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+		   2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+		   DRM_MODE_FLAG_INTERLACE) },
+	/* 1440x480i@60Hz */
+	{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+		   1602, 1716, 0, 480, 488, 494, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+		   DRM_MODE_FLAG_INTERLACE) },
+	/* 1440x480i@60Hz */
+	{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+		   1602, 1716, 0, 480, 488, 494, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+		   DRM_MODE_FLAG_INTERLACE) },
+	/* 1440x240@60Hz */
+	{ DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+		   1602, 1716, 0, 240, 244, 247, 262, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 1440x240@60Hz */
+	{ DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+		   1602, 1716, 0, 240, 244, 247, 262, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 2880x480i@60Hz */
+	{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+		   3204, 3432, 0, 480, 488, 494, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+		   DRM_MODE_FLAG_INTERLACE) },
+	/* 2880x480i@60Hz */
+	{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+		   3204, 3432, 0, 480, 488, 494, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+		   DRM_MODE_FLAG_INTERLACE) },
+	/* 2880x240@60Hz */
+	{ DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+		   3204, 3432, 0, 240, 244, 247, 262, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 2880x240@60Hz */
+	{ DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+		   3204, 3432, 0, 240, 244, 247, 262, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 1440x480@60Hz */
+	{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
+		   1596, 1716, 0, 480, 489, 495, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 1440x480@60Hz */
+	{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
+		   1596, 1716, 0, 480, 489, 495, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 1920x1080@60Hz */
+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
+		   2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 720x576@50Hz */
+	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
+		   796, 864, 0, 576, 581, 586, 625, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 720x576@50Hz */
+	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
+		   796, 864, 0, 576, 581, 586, 625, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 1280x720@50Hz */
+	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
+		   1760, 1980, 0, 720, 725, 730, 750, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1920x1080i@50Hz */
+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+		   2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+		   DRM_MODE_FLAG_INTERLACE) },
+	/* 1440x576i@50Hz */
+	{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+		   1590, 1728, 0, 576, 580, 586, 625, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+		   DRM_MODE_FLAG_INTERLACE) },
+	/* 1440x576i@50Hz */
+	{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+		   1590, 1728, 0, 576, 580, 586, 625, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+		   DRM_MODE_FLAG_INTERLACE) },
+	/* 1440x288@50Hz */
+	{ DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+		   1590, 1728, 0, 288, 290, 293, 312, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 1440x288@50Hz */
+	{ DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+		   1590, 1728, 0, 288, 290, 293, 312, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 2880x576i@50Hz */
+	{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+		   3180, 3456, 0, 576, 580, 586, 625, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+		   DRM_MODE_FLAG_INTERLACE) },
+	/* 2880x576i@50Hz */
+	{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+		   3180, 3456, 0, 576, 580, 586, 625, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+		   DRM_MODE_FLAG_INTERLACE) },
+	/* 2880x288@50Hz */
+	{ DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+		   3180, 3456, 0, 288, 290, 293, 312, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 2880x288@50Hz */
+	{ DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+		   3180, 3456, 0, 288, 290, 293, 312, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 1440x576@50Hz */
+	{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+		   1592, 1728, 0, 576, 581, 586, 625, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 1440x576@50Hz */
+	{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+		   1592, 1728, 0, 576, 581, 586, 625, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 1920x1080@50Hz */
+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
+		   2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1920x1080@24Hz */
+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
+		   2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1920x1080@25Hz */
+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+		   2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1920x1080@30Hz */
+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+		   2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 2880x480@60Hz */
+	{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
+		   3192, 3432, 0, 480, 489, 495, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 2880x480@60Hz */
+	{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
+		   3192, 3432, 0, 480, 489, 495, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 2880x576@50Hz */
+	{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
+		   3184, 3456, 0, 576, 581, 586, 625, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 2880x576@50Hz */
+	{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
+		   3184, 3456, 0, 576, 581, 586, 625, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 1920x1080i@50Hz */
+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
+		   2120, 2304, 0, 1080, 1126, 1136, 1250, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC |
+		   DRM_MODE_FLAG_INTERLACE) },
+	/* 1920x1080i@100Hz */
+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
+		   2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+		   DRM_MODE_FLAG_INTERLACE) },
+	/* 1280x720@100Hz */
+	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
+		   1760, 1980, 0, 720, 725, 730, 750, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 720x576@100Hz */
+	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
+		   796, 864, 0, 576, 581, 586, 625, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 720x576@100Hz */
+	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
+		   796, 864, 0, 576, 581, 586, 625, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 1440x576i@100Hz */
+	{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+		   1590, 1728, 0, 576, 580, 586, 625, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 1440x576i@100Hz */
+	{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+		   1590, 1728, 0, 576, 580, 586, 625, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 1920x1080i@120Hz */
+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
+		   2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+		   DRM_MODE_FLAG_INTERLACE) },
+	/* 1280x720@120Hz */
+	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
+		   1430, 1650, 0, 720, 725, 730, 750, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 720x480@120Hz */
+	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
+		   798, 858, 0, 480, 489, 495, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 720x480@120Hz */
+	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
+		   798, 858, 0, 480, 489, 495, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 1440x480i@120Hz */
+	{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
+		   1602, 1716, 0, 480, 488, 494, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+		   DRM_MODE_FLAG_INTERLACE) },
+	/* 1440x480i@120Hz */
+	{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
+		   1602, 1716, 0, 480, 488, 494, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+		   DRM_MODE_FLAG_INTERLACE) },
+	/* 720x576@200Hz */
+	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
+		   796, 864, 0, 576, 581, 586, 625, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 720x576@200Hz */
+	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
+		   796, 864, 0, 576, 581, 586, 625, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 1440x576i@200Hz */
+	{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
+		   1590, 1728, 0, 576, 580, 586, 625, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+		   DRM_MODE_FLAG_INTERLACE) },
+	/* 1440x576i@200Hz */
+	{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
+		   1590, 1728, 0, 576, 580, 586, 625, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+		   DRM_MODE_FLAG_INTERLACE) },
+	/* 720x480@240Hz */
+	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
+		   798, 858, 0, 480, 489, 495, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 720x480@240Hz */
+	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
+		   798, 858, 0, 480, 489, 495, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 1440x480i@240 */
+	{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
+		   1602, 1716, 0, 480, 488, 494, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+		   DRM_MODE_FLAG_INTERLACE) },
+	/* 1440x480i@240 */
+	{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
+		   1602, 1716, 0, 480, 488, 494, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+		   DRM_MODE_FLAG_INTERLACE) },
+	/* 1280x720@24Hz */
+	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
+		   3080, 3300, 0, 720, 725, 730, 750, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1280x720@25Hz */
+	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
+		   3740, 3960, 0, 720, 725, 730, 750, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1280x720@30Hz */
+	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
+		   3080, 3300, 0, 720, 725, 730, 750, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1920x1080@120Hz */
+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
+		   2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1920x1080@100Hz */
+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
+		   2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+};
+static const int drm_num_cea_modes =
+	sizeof (edid_cea_modes) / sizeof (edid_cea_modes[0]);
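[note: the vertical refresh implied by each entry is pixel clock over total pixels, which is how the @NNHz comments can be cross-checked. For the first entry:

	refresh = clock / (htotal * vtotal)
	        = 25175000 / (800 * 525)
	        ~ 59.94 Hz	/* the nominal 640x480@60Hz */
]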
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 80fe39d98b0..aada26f63de 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -255,6 +255,13 @@ bool drm_fb_helper_force_kernel_mode(void)
 int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed,
 			void *panic_str)
 {
+	/*
+	 * It's a waste of time and effort to switch back to text console
+	 * if the kernel should reboot before panic messages can be seen.
+	 */
+	if (panic_timeout < 0)
+		return 0;
+
 	printk(KERN_ERR "panic occurred, switching back to text console\n");
 	return drm_fb_helper_force_kernel_mode();
 }
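[note: panic_timeout is negative when the machine is configured to reboot immediately on panic (for example booting with panic=-1); in that case the restored text console would never be seen, so the new early return skips the pointless forced modeset.]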
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 4911e1d1dcf..c00cf154cc0 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -182,7 +182,7 @@ int drm_stub_open(struct inode *inode, struct file *filp)
 		goto out;
 
 	old_fops = filp->f_op;
-	filp->f_op = fops_get(&dev->driver->fops);
+	filp->f_op = fops_get(dev->driver->fops);
 	if (filp->f_op == NULL) {
 		filp->f_op = old_fops;
 		goto out;
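[note: the dropped address-of follows from struct drm_driver now carrying a pointer to a const file_operations table rather than embedding one; the exynos conversion further down is the driver-side half of the same change.]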
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 904d7e9c8e4..956fd38d7c9 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -158,14 +158,11 @@ int drm_getmap(struct drm_device *dev, void *data,
 	int i;
 
 	idx = map->offset;
-
-	mutex_lock(&dev->struct_mutex);
-	if (idx < 0) {
-		mutex_unlock(&dev->struct_mutex);
+	if (idx < 0)
 		return -EINVAL;
-	}
 
 	i = 0;
+	mutex_lock(&dev->struct_mutex);
 	list_for_each(list, &dev->maplist) {
 		if (i == idx) {
 			r_list = list_entry(list, struct drm_map_list, head);
@@ -211,9 +208,9 @@ int drm_getclient(struct drm_device *dev, void *data,
 	int i;
 
 	idx = client->idx;
-	mutex_lock(&dev->struct_mutex);
-
 	i = 0;
+
+	mutex_lock(&dev->struct_mutex);
 	list_for_each_entry(pt, &dev->filelist, lhead) {
 		if (i++ >= idx) {
 			client->auth = pt->authenticated;
@@ -249,8 +246,6 @@ int drm_getstats(struct drm_device *dev, void *data,
 
 	memset(stats, 0, sizeof(*stats));
 
-	mutex_lock(&dev->struct_mutex);
-
 	for (i = 0; i < dev->counters; i++) {
 		if (dev->types[i] == _DRM_STAT_LOCK)
 			stats->data[i].value =
@@ -262,8 +257,6 @@ int drm_getstats(struct drm_device *dev, void *data,
 
 	stats->count = dev->counters;
 
-	mutex_unlock(&dev->struct_mutex);
-
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index 632ae243ede..c79c713eeba 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -33,6 +33,7 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 
+#include <linux/export.h>
 #include "drmP.h"
 
 static int drm_notifier(void *priv);
@@ -345,6 +346,7 @@ void drm_idlelock_take(struct drm_lock_data *lock_data)
 	}
 	spin_unlock_bh(&lock_data->spinlock);
 }
+EXPORT_SYMBOL(drm_idlelock_take);
 
 void drm_idlelock_release(struct drm_lock_data *lock_data)
 {
@@ -364,6 +366,7 @@ void drm_idlelock_release(struct drm_lock_data *lock_data)
 	}
 	spin_unlock_bh(&lock_data->spinlock);
 }
+EXPORT_SYMBOL(drm_idlelock_release);
 
 int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
 {
diff --git a/drivers/gpu/drm/drm_sman.c b/drivers/gpu/drm/drm_sman.c
deleted file mode 100644
index cebce45f442..00000000000
--- a/drivers/gpu/drm/drm_sman.c
+++ /dev/null
@@ -1,351 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck., ND., USA.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- *
- **************************************************************************/
-/*
- * Simple memory manager interface that keeps track on allocate regions on a
- * per "owner" basis. All regions associated with an "owner" can be released
- * with a simple call. Typically if the "owner" exists. The owner is any
- * "unsigned long" identifier. Can typically be a pointer to a file private
- * struct or a context identifier.
- *
- * Authors:
- * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- */
-
-#include <linux/export.h>
-#include "drm_sman.h"
-
-struct drm_owner_item {
-	struct drm_hash_item owner_hash;
-	struct list_head sman_list;
-	struct list_head mem_blocks;
-};
-
-void drm_sman_takedown(struct drm_sman * sman)
-{
-	drm_ht_remove(&sman->user_hash_tab);
-	drm_ht_remove(&sman->owner_hash_tab);
-	kfree(sman->mm);
-}
-
-EXPORT_SYMBOL(drm_sman_takedown);
-
-int
-drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
-	      unsigned int user_order, unsigned int owner_order)
-{
-	int ret = 0;
-
-	sman->mm = kcalloc(num_managers, sizeof(*sman->mm), GFP_KERNEL);
-	if (!sman->mm) {
-		ret = -ENOMEM;
-		goto out;
-	}
-	sman->num_managers = num_managers;
-	INIT_LIST_HEAD(&sman->owner_items);
-	ret = drm_ht_create(&sman->owner_hash_tab, owner_order);
-	if (ret)
-		goto out1;
-	ret = drm_ht_create(&sman->user_hash_tab, user_order);
-	if (!ret)
-		goto out;
-
-	drm_ht_remove(&sman->owner_hash_tab);
-out1:
-	kfree(sman->mm);
-out:
-	return ret;
-}
-
-EXPORT_SYMBOL(drm_sman_init);
-
-static void *drm_sman_mm_allocate(void *private, unsigned long size,
-				  unsigned alignment)
-{
-	struct drm_mm *mm = (struct drm_mm *) private;
-	struct drm_mm_node *tmp;
-
-	tmp = drm_mm_search_free(mm, size, alignment, 1);
-	if (!tmp) {
-		return NULL;
-	}
-	tmp = drm_mm_get_block(tmp, size, alignment);
-	return tmp;
-}
-
-static void drm_sman_mm_free(void *private, void *ref)
-{
-	struct drm_mm_node *node = (struct drm_mm_node *) ref;
-
-	drm_mm_put_block(node);
-}
-
-static void drm_sman_mm_destroy(void *private)
-{
-	struct drm_mm *mm = (struct drm_mm *) private;
-	drm_mm_takedown(mm);
-	kfree(mm);
-}
-
-static unsigned long drm_sman_mm_offset(void *private, void *ref)
-{
-	struct drm_mm_node *node = (struct drm_mm_node *) ref;
-	return node->start;
-}
-
-int
-drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
-		   unsigned long start, unsigned long size)
-{
-	struct drm_sman_mm *sman_mm;
-	struct drm_mm *mm;
-	int ret;
-
-	BUG_ON(manager >= sman->num_managers);
-
-	sman_mm = &sman->mm[manager];
-	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
-	if (!mm) {
-		return -ENOMEM;
-	}
-	sman_mm->private = mm;
-	ret = drm_mm_init(mm, start, size);
-
-	if (ret) {
-		kfree(mm);
-		return ret;
-	}
-
-	sman_mm->allocate = drm_sman_mm_allocate;
-	sman_mm->free = drm_sman_mm_free;
-	sman_mm->destroy = drm_sman_mm_destroy;
-	sman_mm->offset = drm_sman_mm_offset;
-
-	return 0;
-}
-
-EXPORT_SYMBOL(drm_sman_set_range);
-
-int
-drm_sman_set_manager(struct drm_sman * sman, unsigned int manager,
-		     struct drm_sman_mm * allocator)
-{
-	BUG_ON(manager >= sman->num_managers);
-	sman->mm[manager] = *allocator;
-
-	return 0;
-}
-EXPORT_SYMBOL(drm_sman_set_manager);
-
-static struct drm_owner_item *drm_sman_get_owner_item(struct drm_sman * sman,
-						      unsigned long owner)
-{
-	int ret;
-	struct drm_hash_item *owner_hash_item;
-	struct drm_owner_item *owner_item;
-
-	ret = drm_ht_find_item(&sman->owner_hash_tab, owner, &owner_hash_item);
-	if (!ret) {
-		return drm_hash_entry(owner_hash_item, struct drm_owner_item,
-				      owner_hash);
-	}
-
-	owner_item = kzalloc(sizeof(*owner_item), GFP_KERNEL);
-	if (!owner_item)
-		goto out;
-
-	INIT_LIST_HEAD(&owner_item->mem_blocks);
-	owner_item->owner_hash.key = owner;
-	if (drm_ht_insert_item(&sman->owner_hash_tab, &owner_item->owner_hash))
-		goto out1;
-
-	list_add_tail(&owner_item->sman_list, &sman->owner_items);
-	return owner_item;
-
-out1:
-	kfree(owner_item);
-out:
-	return NULL;
-}
-
-struct drm_memblock_item *drm_sman_alloc(struct drm_sman *sman, unsigned int manager,
-					 unsigned long size, unsigned alignment,
-					 unsigned long owner)
-{
-	void *tmp;
-	struct drm_sman_mm *sman_mm;
-	struct drm_owner_item *owner_item;
-	struct drm_memblock_item *memblock;
-
-	BUG_ON(manager >= sman->num_managers);
-
-	sman_mm = &sman->mm[manager];
-	tmp = sman_mm->allocate(sman_mm->private, size, alignment);
-
-	if (!tmp) {
-		return NULL;
-	}
-
-	memblock = kzalloc(sizeof(*memblock), GFP_KERNEL);
-
-	if (!memblock)
-		goto out;
-
-	memblock->mm_info = tmp;
-	memblock->mm = sman_mm;
-	memblock->sman = sman;
-
-	if (drm_ht_just_insert_please
-	    (&sman->user_hash_tab, &memblock->user_hash,
-	     (unsigned long)memblock, 32, 0, 0))
-		goto out1;
-
-	owner_item = drm_sman_get_owner_item(sman, owner);
-	if (!owner_item)
-		goto out2;
-
-	list_add_tail(&memblock->owner_list, &owner_item->mem_blocks);
-
-	return memblock;
-
-out2:
-	drm_ht_remove_item(&sman->user_hash_tab, &memblock->user_hash);
-out1:
-	kfree(memblock);
-out:
-	sman_mm->free(sman_mm->private, tmp);
-
-	return NULL;
-}
-
-EXPORT_SYMBOL(drm_sman_alloc);
-
-static void drm_sman_free(struct drm_memblock_item *item)
-{
-	struct drm_sman *sman = item->sman;
-
-	list_del(&item->owner_list);
-	drm_ht_remove_item(&sman->user_hash_tab, &item->user_hash);
-	item->mm->free(item->mm->private, item->mm_info);
-	kfree(item);
-}
-
-int drm_sman_free_key(struct drm_sman *sman, unsigned int key)
-{
-	struct drm_hash_item *hash_item;
-	struct drm_memblock_item *memblock_item;
-
-	if (drm_ht_find_item(&sman->user_hash_tab, key, &hash_item))
-		return -EINVAL;
-
-	memblock_item = drm_hash_entry(hash_item, struct drm_memblock_item,
-				       user_hash);
-	drm_sman_free(memblock_item);
-	return 0;
-}
-
-EXPORT_SYMBOL(drm_sman_free_key);
-
-static void drm_sman_remove_owner(struct drm_sman *sman,
-				  struct drm_owner_item *owner_item)
-{
-	list_del(&owner_item->sman_list);
-	drm_ht_remove_item(&sman->owner_hash_tab, &owner_item->owner_hash);
-	kfree(owner_item);
-}
-
-int drm_sman_owner_clean(struct drm_sman *sman, unsigned long owner)
-{
-
-	struct drm_hash_item *hash_item;
-	struct drm_owner_item *owner_item;
-
-	if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) {
-		return -1;
-	}
-
-	owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash);
-	if (owner_item->mem_blocks.next == &owner_item->mem_blocks) {
-		drm_sman_remove_owner(sman, owner_item);
-		return -1;
-	}
-
-	return 0;
-}
-
-EXPORT_SYMBOL(drm_sman_owner_clean);
-
-static void drm_sman_do_owner_cleanup(struct drm_sman *sman,
-				      struct drm_owner_item *owner_item)
-{
-	struct drm_memblock_item *entry, *next;
-
-	list_for_each_entry_safe(entry, next, &owner_item->mem_blocks,
-				 owner_list) {
-		drm_sman_free(entry);
-	}
-	drm_sman_remove_owner(sman, owner_item);
-}
-
-void drm_sman_owner_cleanup(struct drm_sman *sman, unsigned long owner)
-{
-
-	struct drm_hash_item *hash_item;
-	struct drm_owner_item *owner_item;
-
-	if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) {
-
-		return;
-	}
-
-	owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash);
-	drm_sman_do_owner_cleanup(sman, owner_item);
-}
-
-EXPORT_SYMBOL(drm_sman_owner_cleanup);
-
-void drm_sman_cleanup(struct drm_sman *sman)
-{
-	struct drm_owner_item *entry, *next;
-	unsigned int i;
-	struct drm_sman_mm *sman_mm;
-
-	list_for_each_entry_safe(entry, next, &sman->owner_items, sman_list) {
-		drm_sman_do_owner_cleanup(sman, entry);
-	}
-	if (sman->mm) {
-		for (i = 0; i < sman->num_managers; ++i) {
-			sman_mm = &sman->mm[i];
-			if (sman_mm->private) {
-				sman_mm->destroy(sman_mm->private);
-				sman_mm->private = NULL;
-			}
-		}
-	}
-}
-
-EXPORT_SYMBOL(drm_sman_cleanup);
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 847466aab43..f9aaa56eae0 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -18,3 +18,10 @@ config DRM_EXYNOS_FIMD
 	help
 	  Choose this option if you want to use Exynos FIMD for DRM.
 	  If M is selected, the module will be called exynos_drm_fimd
+
+config DRM_EXYNOS_HDMI
+	tristate "Exynos DRM HDMI"
+	depends on DRM_EXYNOS
+	help
+	  Choose this option if you want to use Exynos HDMI for DRM.
+	  If M is selected, the module will be called exynos_drm_hdmi
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index 0496d3ff268..395e69c9a96 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -5,7 +5,10 @@
 ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/exynos
 exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o exynos_drm_connector.o \
 		exynos_drm_crtc.o exynos_drm_fbdev.o exynos_drm_fb.o \
-		exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o
+		exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \
+		exynos_drm_plane.o
 
 obj-$(CONFIG_DRM_EXYNOS) += exynosdrm.o
 obj-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o
+obj-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o exynos_mixer.o exynos_ddc.o \
+				 exynos_hdmiphy.o exynos_drm_hdmi.o
diff --git a/drivers/gpu/drm/exynos/exynos_ddc.c b/drivers/gpu/drm/exynos/exynos_ddc.c
new file mode 100644
index 00000000000..84b614fe26f
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_ddc.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Authors:
+ *	Seung-Woo Kim <sw0312.kim@samsung.com>
+ *	Inki Dae <inki.dae@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include "drmP.h"
+
+#include <linux/kernel.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+
+
+#include "exynos_drm_drv.h"
+#include "exynos_hdmi.h"
+
+static int s5p_ddc_probe(struct i2c_client *client,
+			const struct i2c_device_id *dev_id)
+{
+	hdmi_attach_ddc_client(client);
+
+	dev_info(&client->adapter->dev, "attached s5p_ddc "
+		"into i2c adapter successfully\n");
+
+	return 0;
+}
+
+static int s5p_ddc_remove(struct i2c_client *client)
+{
+	dev_info(&client->adapter->dev, "detached s5p_ddc "
+		"from i2c adapter successfully\n");
+
+	return 0;
+}
+
+static struct i2c_device_id ddc_idtable[] = {
+	{"s5p_ddc", 0},
+	{ },
+};
+
+struct i2c_driver ddc_driver = {
+	.driver = {
+		.name = "s5p_ddc",
+		.owner = THIS_MODULE,
+	},
+	.id_table	= ddc_idtable,
+	.probe		= s5p_ddc_probe,
+	.remove		= __devexit_p(s5p_ddc_remove),
+	.command	= NULL,
+};
+EXPORT_SYMBOL(ddc_driver);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
index 2bb07bca511..3cf785c5818 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -73,7 +73,7 @@ struct exynos_drm_gem_buf *exynos_drm_buf_create(struct drm_device *dev,
 	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
 	if (!buffer) {
 		DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
-		return ERR_PTR(-ENOMEM);
+		return NULL;
 	}
 
 	buffer->size = size;
@@ -84,8 +84,7 @@ struct exynos_drm_gem_buf *exynos_drm_buf_create(struct drm_device *dev,
 	 */
 	if (lowlevel_buffer_allocate(dev, buffer) < 0) {
 		kfree(buffer);
-		buffer = NULL;
-		return ERR_PTR(-ENOMEM);
+		return NULL;
 	}
 
 	return buffer;
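[note: with ERR_PTR gone, NULL is the only failure value, so callers must use a plain pointer test; an IS_ERR() check would now wave a failed allocation through. A sketch of the adjusted caller pattern (caller context invented for illustration):

	buffer = exynos_drm_buf_create(dev, size);
	if (!buffer)		/* was: if (IS_ERR(buffer)) */
		return -ENOMEM;
]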
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.h b/drivers/gpu/drm/exynos/exynos_drm_buf.h
index 6e91f9caa5d..c913f2bad76 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.h
@@ -30,9 +30,6 @@
 struct exynos_drm_gem_buf *exynos_drm_buf_create(struct drm_device *dev,
 		unsigned int size);
 
-/* get memory information of a drm framebuffer. */
-struct exynos_drm_gem_buf *exynos_drm_fb_get_buf(struct drm_framebuffer *fb);
-
 /* remove allocated physical memory. */
 void exynos_drm_buf_destroy(struct drm_device *dev,
 		struct exynos_drm_gem_buf *buffer);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index ee43cc22085..e3861ac4929 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -34,7 +34,6 @@
 #include "exynos_drm_fb.h"
 #include "exynos_drm_encoder.h"
 #include "exynos_drm_gem.h"
-#include "exynos_drm_buf.h"
 
 #define to_exynos_crtc(x)	container_of(x, struct exynos_drm_crtc,\
 				drm_crtc)
@@ -52,11 +51,13 @@
 * drm framework doesn't support multiple irq yet.
 * we can refer to the crtc to current hardware interrupt occured through
 * this pipe value.
+ * @dpms: store the crtc dpms value
 */
 struct exynos_drm_crtc {
 	struct drm_crtc			drm_crtc;
 	struct exynos_drm_overlay	overlay;
 	unsigned int			pipe;
+	unsigned int			dpms;
 };
 
 static void exynos_drm_crtc_apply(struct drm_crtc *crtc)
@@ -78,19 +79,23 @@ int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
 	struct exynos_drm_gem_buf *buffer;
 	unsigned int actual_w;
 	unsigned int actual_h;
+	int nr = exynos_drm_format_num_buffers(fb->pixel_format);
+	int i;
+
+	for (i = 0; i < nr; i++) {
+		buffer = exynos_drm_fb_buffer(fb, i);
+		if (!buffer) {
+			DRM_LOG_KMS("buffer is null\n");
+			return -EFAULT;
+		}
 
-	buffer = exynos_drm_fb_get_buf(fb);
-	if (!buffer) {
-		DRM_LOG_KMS("buffer is null.\n");
-		return -EFAULT;
-	}
-
-	overlay->dma_addr = buffer->dma_addr;
-	overlay->vaddr = buffer->kvaddr;
+		overlay->dma_addr[i] = buffer->dma_addr;
+		overlay->vaddr[i] = buffer->kvaddr;
 
-	DRM_DEBUG_KMS("vaddr = 0x%lx, dma_addr = 0x%lx\n",
-			(unsigned long)overlay->vaddr,
-			(unsigned long)overlay->dma_addr);
+		DRM_DEBUG_KMS("buffer: %d, vaddr = 0x%lx, dma_addr = 0x%lx\n",
+				i, (unsigned long)overlay->vaddr[i],
+				(unsigned long)overlay->dma_addr[i]);
+	}
 
 	actual_w = min((mode->hdisplay - pos->crtc_x), pos->crtc_w);
 	actual_h = min((mode->vdisplay - pos->crtc_y), pos->crtc_h);
@@ -101,7 +106,8 @@ int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
 	overlay->fb_width = fb->width;
 	overlay->fb_height = fb->height;
 	overlay->bpp = fb->bits_per_pixel;
-	overlay->pitch = fb->pitch;
+	overlay->pitch = fb->pitches[0];
+	overlay->pixel_format = fb->pixel_format;
 
 	/* set overlay range to be displayed. */
 	overlay->crtc_x = pos->crtc_x;
@@ -153,26 +159,37 @@ static int exynos_drm_crtc_update(struct drm_crtc *crtc)
 
 static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
 {
+	struct drm_device *dev = crtc->dev;
 	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
 
 	DRM_DEBUG_KMS("crtc[%d] mode[%d]\n", crtc->base.id, mode);
 
+	if (exynos_crtc->dpms == mode) {
+		DRM_DEBUG_KMS("desired dpms mode is same as previous one.\n");
+		return;
+	}
+
+	mutex_lock(&dev->struct_mutex);
+
 	switch (mode) {
 	case DRM_MODE_DPMS_ON:
-		exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe,
-				exynos_drm_encoder_crtc_commit);
+		exynos_drm_fn_encoder(crtc, &mode,
+				exynos_drm_encoder_crtc_dpms);
+		exynos_crtc->dpms = mode;
 		break;
 	case DRM_MODE_DPMS_STANDBY:
 	case DRM_MODE_DPMS_SUSPEND:
 	case DRM_MODE_DPMS_OFF:
-		/* TODO */
-		exynos_drm_fn_encoder(crtc, NULL,
-				exynos_drm_encoder_crtc_disable);
+		exynos_drm_fn_encoder(crtc, &mode,
+				exynos_drm_encoder_crtc_dpms);
+		exynos_crtc->dpms = mode;
 		break;
 	default:
-		DRM_DEBUG_KMS("unspecified mode %d\n", mode);
+		DRM_ERROR("unspecified mode %d\n", mode);
 		break;
 	}
+
+	mutex_unlock(&dev->struct_mutex);
 }
 
 static void exynos_drm_crtc_prepare(struct drm_crtc *crtc)
@@ -188,6 +205,28 @@ static void exynos_drm_crtc_commit(struct drm_crtc *crtc)
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
+	/*
+	 * when set_crtc is requested from user or at booting time,
+	 * crtc->commit would be called without dpms call so if dpms is
+	 * no power on then crtc->dpms should be called
+	 * with DRM_MODE_DPMS_ON for the hardware power to be on.
+	 */
+	if (exynos_crtc->dpms != DRM_MODE_DPMS_ON) {
+		int mode = DRM_MODE_DPMS_ON;
+
+		/*
+		 * enable hardware(power on) to all encoders hdmi connected
+		 * to current crtc.
+		 */
+		exynos_drm_crtc_dpms(crtc, mode);
+		/*
+		 * enable dma to all encoders connected to current crtc and
+		 * lcd panel.
+		 */
+		exynos_drm_fn_encoder(crtc, &mode,
+					exynos_drm_encoder_dpms_from_crtc);
+	}
+
 	exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe,
 			exynos_drm_encoder_crtc_commit);
 }
@@ -344,6 +383,8 @@ int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr)
 	}
 
 	exynos_crtc->pipe = nr;
+	exynos_crtc->dpms = DRM_MODE_DPMS_OFF;
+	exynos_crtc->overlay.zpos = DEFAULT_ZPOS;
 	crtc = &exynos_crtc->drm_crtc;
 
 	private->crtc[nr] = crtc;
@@ -357,9 +398,14 @@ int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr)
 int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int crtc)
 {
 	struct exynos_drm_private *private = dev->dev_private;
+	struct exynos_drm_crtc *exynos_crtc =
+		to_exynos_crtc(private->crtc[crtc]);
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
+	if (exynos_crtc->dpms != DRM_MODE_DPMS_ON)
+		return -EPERM;
+
 	exynos_drm_fn_encoder(private->crtc[crtc], &crtc,
 			exynos_drm_enable_vblank);
 
@@ -369,9 +415,14 @@ int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int crtc)
 void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc)
 {
 	struct exynos_drm_private *private = dev->dev_private;
+	struct exynos_drm_crtc *exynos_crtc =
+		to_exynos_crtc(private->crtc[crtc]);
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
+	if (exynos_crtc->dpms != DRM_MODE_DPMS_ON)
+		return;
+
 	exynos_drm_fn_encoder(private->crtc[crtc], &crtc,
 			exynos_drm_disable_vblank);
 }
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 53e2216de61..35889ca255e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -36,13 +36,16 @@
 #include "exynos_drm_fbdev.h"
 #include "exynos_drm_fb.h"
 #include "exynos_drm_gem.h"
+#include "exynos_drm_plane.h"
 
-#define DRIVER_NAME	"exynos-drm"
+#define DRIVER_NAME	"exynos"
 #define DRIVER_DESC	"Samsung SoC DRM"
 #define DRIVER_DATE	"20110530"
 #define DRIVER_MAJOR	1
 #define DRIVER_MINOR	0
 
+#define VBLANK_OFF_DELAY	50000
+
 static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
 {
 	struct exynos_drm_private *private;
@@ -77,6 +80,12 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
 		goto err_crtc;
 	}
 
+	for (nr = 0; nr < MAX_PLANE; nr++) {
+		ret = exynos_plane_init(dev, nr);
+		if (ret)
+			goto err_crtc;
+	}
+
 	ret = drm_vblank_init(dev, MAX_CRTC);
 	if (ret)
 		goto err_crtc;
@@ -100,6 +109,8 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
 		goto err_drm_device;
 	}
 
+	drm_vblank_offdelay = VBLANK_OFF_DELAY;
+
 	return 0;
 
 err_drm_device:
@@ -163,6 +174,18 @@ static struct drm_ioctl_desc exynos_ioctls[] = {
 			DRM_AUTH),
 	DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MMAP,
 			exynos_drm_gem_mmap_ioctl, DRM_UNLOCKED | DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(EXYNOS_PLANE_SET_ZPOS, exynos_plane_set_zpos_ioctl,
+			DRM_UNLOCKED | DRM_AUTH),
+};
+
+static const struct file_operations exynos_drm_driver_fops = {
+	.owner		= THIS_MODULE,
+	.open		= drm_open,
+	.mmap		= exynos_drm_gem_mmap,
+	.poll		= drm_poll,
+	.read		= drm_read,
+	.unlocked_ioctl	= drm_ioctl,
+	.release	= drm_release,
 };
 
 static struct drm_driver exynos_drm_driver = {
@@ -182,15 +205,7 @@ static struct drm_driver exynos_drm_driver = {
 	.dumb_map_offset	= exynos_drm_gem_dumb_map_offset,
 	.dumb_destroy		= exynos_drm_gem_dumb_destroy,
 	.ioctls			= exynos_ioctls,
-	.fops			= {
-		.owner		= THIS_MODULE,
-		.open		= drm_open,
-		.mmap		= exynos_drm_gem_mmap,
-		.poll		= drm_poll,
-		.read		= drm_read,
-		.unlocked_ioctl	= drm_ioctl,
-		.release	= drm_release,
-	},
+	.fops			= &exynos_drm_driver_fops,
 	.name	= DRIVER_NAME,
 	.desc	= DRIVER_DESC,
 	.date	= DRIVER_DATE,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 5e02e6ecc2e..e685e1e3305 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -33,11 +33,16 @@
 #include "drm.h"
 
 #define MAX_CRTC	2
+#define MAX_PLANE	5
+#define MAX_FB_BUFFER	3
+#define DEFAULT_ZPOS	-1
 
 struct drm_device;
 struct exynos_drm_overlay;
 struct drm_connector;
 
+extern unsigned int drm_vblank_offdelay;
+
 /* this enumerates display type. */
 enum exynos_drm_output_type {
 	EXYNOS_DISPLAY_TYPE_NONE,
@@ -57,8 +62,8 @@ enum exynos_drm_output_type {
 struct exynos_drm_overlay_ops {
 	void (*mode_set)(struct device *subdrv_dev,
 			 struct exynos_drm_overlay *overlay);
-	void (*commit)(struct device *subdrv_dev);
-	void (*disable)(struct device *subdrv_dev);
+	void (*commit)(struct device *subdrv_dev, int zpos);
+	void (*disable)(struct device *subdrv_dev, int zpos);
 };
 
 /*
@@ -80,9 +85,11 @@ struct exynos_drm_overlay_ops {
 * @scan_flag: interlace or progressive way.
 *	(it could be DRM_MODE_FLAG_*)
 * @bpp: pixel size.(in bit)
- * @dma_addr: bus(accessed by dma) address to the memory region allocated
- *	for a overlay.
- * @vaddr: virtual memory addresss to this overlay.
+ * @pixel_format: fourcc pixel format of this overlay
+ * @dma_addr: array of bus(accessed by dma) address to the memory region
+ *	allocated for a overlay.
+ * @vaddr: array of virtual memory addresss to this overlay.
+ * @zpos: order of overlay layer(z position).
 * @default_win: a window to be enabled.
 * @color_key: color key on or off.
 * @index_color: if using color key feature then this value would be used
@@ -109,8 +116,10 @@ struct exynos_drm_overlay {
 	unsigned int scan_flag;
 	unsigned int bpp;
 	unsigned int pitch;
-	dma_addr_t dma_addr;
-	void __iomem *vaddr;
+	uint32_t pixel_format;
+	dma_addr_t dma_addr[MAX_FB_BUFFER];
+	void __iomem *vaddr[MAX_FB_BUFFER];
+	int zpos;
 
 	bool default_win;
 	bool color_key;
@@ -144,17 +153,19 @@ struct exynos_drm_display_ops {
 /*
 * Exynos drm manager ops
 *
+ * @dpms: control device power.
+ * @apply: set timing, vblank and overlay data to registers.
 * @mode_set: convert drm_display_mode to hw specific display mode and
 *	would be called by encoder->mode_set().
 * @commit: set current hw specific display mode to hw.
- * @disable: disable hardware specific display mode.
 * @enable_vblank: specific driver callback for enabling vblank interrupt.
 * @disable_vblank: specific driver callback for disabling vblank interrupt.
 */
 struct exynos_drm_manager_ops {
+	void (*dpms)(struct device *subdrv_dev, int mode);
+	void (*apply)(struct device *subdrv_dev);
 	void (*mode_set)(struct device *subdrv_dev, void *mode);
 	void (*commit)(struct device *subdrv_dev);
-	void (*disable)(struct device *subdrv_dev);
 	int (*enable_vblank)(struct device *subdrv_dev);
 	void (*disable_vblank)(struct device *subdrv_dev);
 };
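[note: with MAX_FB_BUFFER sized at 3, the new dma_addr[]/vaddr[] arrays hold one slot per memory plane of the most demanding supported format. A sketch of how a fully planar YUV420 framebuffer would populate them (addresses invented for illustration):

	/* YUV420: three memory planes, three bus addresses */
	overlay->dma_addr[0] = y_addr;	/* luma        */
	overlay->dma_addr[1] = cb_addr;	/* chroma blue */
	overlay->dma_addr[2] = cr_addr;	/* chroma red  */
]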
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
index 153061415ba..86b93dde219 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
@@ -42,49 +42,68 @@
 * @drm_encoder: encoder object.
 * @manager: specific encoder has its own manager to control a hardware
 *	appropriately and we can access a hardware drawing on this manager.
+ * @dpms: store the encoder dpms value.
 */
 struct exynos_drm_encoder {
 	struct drm_encoder		drm_encoder;
 	struct exynos_drm_manager	*manager;
+	int dpms;
 };
 
-static void exynos_drm_encoder_dpms(struct drm_encoder *encoder, int mode)
+static void exynos_drm_display_power(struct drm_encoder *encoder, int mode)
 {
 	struct drm_device *dev = encoder->dev;
 	struct drm_connector *connector;
 	struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		if (connector->encoder == encoder) {
+			struct exynos_drm_display_ops *display_ops =
+							manager->display_ops;
+
+			DRM_DEBUG_KMS("connector[%d] dpms[%d]\n",
+					connector->base.id, mode);
+			if (display_ops && display_ops->power_on)
+				display_ops->power_on(manager->dev, mode);
+		}
+	}
+}
+
+static void exynos_drm_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
 	struct exynos_drm_manager_ops *manager_ops = manager->ops;
+	struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
 
 	DRM_DEBUG_KMS("%s, encoder dpms: %d\n", __FILE__, mode);
 
+	if (exynos_encoder->dpms == mode) {
+		DRM_DEBUG_KMS("desired dpms mode is same as previous one.\n");
+		return;
+	}
+
+	mutex_lock(&dev->struct_mutex);
+
 	switch (mode) {
 	case DRM_MODE_DPMS_ON:
-		if (manager_ops && manager_ops->commit)
-			manager_ops->commit(manager->dev);
+		if (manager_ops && manager_ops->apply)
+			manager_ops->apply(manager->dev);
+		exynos_drm_display_power(encoder, mode);
+		exynos_encoder->dpms = mode;
 		break;
 	case DRM_MODE_DPMS_STANDBY:
 	case DRM_MODE_DPMS_SUSPEND:
 	case DRM_MODE_DPMS_OFF:
-		/* TODO */
-		if (manager_ops && manager_ops->disable)
-			manager_ops->disable(manager->dev);
+		exynos_drm_display_power(encoder, mode);
+		exynos_encoder->dpms = mode;
 		break;
 	default:
 		DRM_ERROR("unspecified mode %d\n", mode);
 		break;
 	}
 
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		if (connector->encoder == encoder) {
-			struct exynos_drm_display_ops *display_ops =
-				manager->display_ops;
-
-			DRM_DEBUG_KMS("connector[%d] dpms[%d]\n",
-					connector->base.id, mode);
-			if (display_ops && display_ops->power_on)
-				display_ops->power_on(manager->dev, mode);
-		}
-	}
+	mutex_unlock(&dev->struct_mutex);
 }
 
 static bool
@@ -169,7 +188,6 @@ static void exynos_drm_encoder_destroy(struct drm_encoder *encoder)
 	exynos_encoder->manager->pipe = -1;
 
 	drm_encoder_cleanup(encoder);
-	encoder->dev->mode_config.num_encoder--;
 	kfree(exynos_encoder);
 }
 
@@ -199,6 +217,7 @@ exynos_drm_encoder_create(struct drm_device *dev,
 		return NULL;
 	}
 
+	exynos_encoder->dpms = DRM_MODE_DPMS_OFF;
 	exynos_encoder->manager = manager;
 	encoder = &exynos_encoder->drm_encoder;
 	encoder->possible_crtcs = possible_crtcs;
@@ -275,12 +294,27 @@ void exynos_drm_disable_vblank(struct drm_encoder *encoder, void *data)
 	manager_ops->disable_vblank(manager->dev);
 }
 
-void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data)
+void exynos_drm_encoder_crtc_plane_commit(struct drm_encoder *encoder,
+					  void *data)
 {
 	struct exynos_drm_manager *manager =
 		to_exynos_encoder(encoder)->manager;
 	struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
+	int zpos = DEFAULT_ZPOS;
+
+	if (data)
+		zpos = *(int *)data;
+
+	if (overlay_ops && overlay_ops->commit)
+		overlay_ops->commit(manager->dev, zpos);
+}
+
+void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data)
+{
+	struct exynos_drm_manager *manager =
+		to_exynos_encoder(encoder)->manager;
 	int crtc = *(int *)data;
+	int zpos = DEFAULT_ZPOS;
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
@@ -290,8 +324,53 @@ void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data)
 	 */
 	manager->pipe = crtc;
 
-	if (overlay_ops && overlay_ops->commit)
-		overlay_ops->commit(manager->dev);
+	exynos_drm_encoder_crtc_plane_commit(encoder, &zpos);
+}
+
+void exynos_drm_encoder_dpms_from_crtc(struct drm_encoder *encoder, void *data)
+{
+	struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
+	int mode = *(int *)data;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	exynos_drm_encoder_dpms(encoder, mode);
+
+	exynos_encoder->dpms = mode;
+}
+
+void exynos_drm_encoder_crtc_dpms(struct drm_encoder *encoder, void *data)
+{
+	struct drm_device *dev = encoder->dev;
+	struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
+	struct exynos_drm_manager *manager = exynos_encoder->manager;
+	struct exynos_drm_manager_ops *manager_ops = manager->ops;
+	struct drm_connector *connector;
+	int mode = *(int *)data;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (manager_ops && manager_ops->dpms)
+		manager_ops->dpms(manager->dev, mode);
+
+	/*
+	 * set current dpms mode to the connector connected to
+	 * current encoder. connector->dpms would be checked
+	 * at drm_helper_connector_dpms()
+	 */
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+		if (connector->encoder == encoder)
+			connector->dpms = mode;
+
+	/*
+	 * if this condition is ok then it means that the crtc is already
+	 * detached from encoder and last function for detaching is properly
+	 * done, so clear pipe from manager to prevent repeated call.
+	 */
+	if (mode > DRM_MODE_DPMS_ON) {
+		if (!encoder->crtc)
+			manager->pipe = -1;
+	}
 }
 
 void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data)
@@ -310,19 +389,15 @@ void exynos_drm_encoder_crtc_disable(struct drm_encoder *encoder, void *data)
 	struct exynos_drm_manager *manager =
 		to_exynos_encoder(encoder)->manager;
 	struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
+	int zpos = DEFAULT_ZPOS;
 
 	DRM_DEBUG_KMS("\n");
 
-	if (overlay_ops && overlay_ops->disable)
-		overlay_ops->disable(manager->dev);
+	if (data)
+		zpos = *(int *)data;
 
-	/*
-	 * crtc is already detached from encoder and last
-	 * function for detaching is properly done, so
-	 * clear pipe from manager to prevent repeated call
-	 */
-	if (!encoder->crtc)
-		manager->pipe = -1;
+	if (overlay_ops && overlay_ops->disable)
+		overlay_ops->disable(manager->dev, zpos);
 }
 
 MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
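
Note the calling convention: every exynos_drm_encoder_crtc_* helper receives its argument through the opaque void *data of exynos_drm_fn_encoder(). For the plane helpers that argument is an optional overlay z-position; passing DEFAULT_ZPOS selects the driver's default window. A minimal caller sketch, assuming a struct drm_crtc *crtc is in scope:

	int zpos = DEFAULT_ZPOS;

	/* commit the default overlay of every encoder attached to crtc */
	exynos_drm_fn_encoder(crtc, &zpos,
			exynos_drm_encoder_crtc_plane_commit);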
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.h b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
index a22acfbf0e4..97b087a51cb 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
@@ -39,7 +39,12 @@ void exynos_drm_fn_encoder(struct drm_crtc *crtc, void *data,
 			 void (*fn)(struct drm_encoder *, void *));
 void exynos_drm_enable_vblank(struct drm_encoder *encoder, void *data);
 void exynos_drm_disable_vblank(struct drm_encoder *encoder, void *data);
+void exynos_drm_encoder_crtc_plane_commit(struct drm_encoder *encoder,
+					  void *data);
 void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data);
+void exynos_drm_encoder_dpms_from_crtc(struct drm_encoder *encoder,
+				       void *data);
+void exynos_drm_encoder_crtc_dpms(struct drm_encoder *encoder, void *data);
 void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data);
 void exynos_drm_encoder_crtc_disable(struct drm_encoder *encoder, void *data);
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 5bf4a1ac7f8..3733fe6723d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -33,7 +33,6 @@
 
 #include "exynos_drm_drv.h"
 #include "exynos_drm_fb.h"
-#include "exynos_drm_buf.h"
 #include "exynos_drm_gem.h"
 
 #define to_exynos_fb(x)	container_of(x, struct exynos_drm_fb, fb)
@@ -42,15 +41,11 @@
  * exynos specific framebuffer structure.
  *
  * @fb: drm framebuffer obejct.
- * @exynos_gem_obj: exynos specific gem object containing a gem object.
- * @buffer: pointer to exynos_drm_gem_buffer object.
- *	- contain the memory information to memory region allocated
- *	  at default framebuffer creation.
+ * @exynos_gem_obj: array of exynos specific gem object containing a gem object.
  */
 struct exynos_drm_fb {
 	struct drm_framebuffer		fb;
-	struct exynos_drm_gem_obj	*exynos_gem_obj;
-	struct exynos_drm_gem_buf	*buffer;
+	struct exynos_drm_gem_obj	*exynos_gem_obj[MAX_FB_BUFFER];
 };
 
 static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
@@ -61,13 +56,6 @@ static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
 
 	drm_framebuffer_cleanup(fb);
 
-	/*
-	 * default framebuffer has no gem object so
-	 * a buffer of the default framebuffer should be released at here.
-	 */
-	if (!exynos_fb->exynos_gem_obj && exynos_fb->buffer)
-		exynos_drm_buf_destroy(fb->dev, exynos_fb->buffer);
-
 	kfree(exynos_fb);
 	exynos_fb = NULL;
 }
@@ -81,7 +69,7 @@ static int exynos_drm_fb_create_handle(struct drm_framebuffer *fb,
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
 	return drm_gem_handle_create(file_priv,
-			&exynos_fb->exynos_gem_obj->base, handle);
+			&exynos_fb->exynos_gem_obj[0]->base, handle);
 }
 
 static int exynos_drm_fb_dirty(struct drm_framebuffer *fb,
@@ -102,134 +90,88 @@ static struct drm_framebuffer_funcs exynos_drm_fb_funcs = {
 	.dirty = exynos_drm_fb_dirty,
 };
 
-static struct drm_framebuffer *
-exynos_drm_fb_init(struct drm_file *file_priv, struct drm_device *dev,
-		   struct drm_mode_fb_cmd *mode_cmd)
+struct drm_framebuffer *
+exynos_drm_framebuffer_init(struct drm_device *dev,
+			    struct drm_mode_fb_cmd2 *mode_cmd,
+			    struct drm_gem_object *obj)
 {
 	struct exynos_drm_fb *exynos_fb;
-	struct drm_framebuffer *fb;
-	struct exynos_drm_gem_obj *exynos_gem_obj = NULL;
-	struct drm_gem_object *obj;
-	unsigned int size;
 	int ret;
 
-	DRM_DEBUG_KMS("%s\n", __FILE__);
-
-	mode_cmd->pitch = max(mode_cmd->pitch,
-			mode_cmd->width * (mode_cmd->bpp >> 3));
-
-	DRM_LOG_KMS("drm fb create(%dx%d)\n",
-			mode_cmd->width, mode_cmd->height);
-
 	exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
 	if (!exynos_fb) {
-		DRM_ERROR("failed to allocate exynos drm framebuffer.\n");
+		DRM_ERROR("failed to allocate exynos drm framebuffer\n");
 		return ERR_PTR(-ENOMEM);
 	}
 
-	fb = &exynos_fb->fb;
-	ret = drm_framebuffer_init(dev, fb, &exynos_drm_fb_funcs);
+	ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
 	if (ret) {
-		DRM_ERROR("failed to initialize framebuffer.\n");
-		goto err_init;
+		DRM_ERROR("failed to initialize framebuffer\n");
+		return ERR_PTR(ret);
 	}
 
-	DRM_LOG_KMS("create: fb id: %d\n", fb->base.id);
+	drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
+	exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
 
-	size = mode_cmd->pitch * mode_cmd->height;
+	return &exynos_fb->fb;
+}
 
-	/*
-	 * mode_cmd->handle could be NULL at booting time or
-	 * with user request. if NULL, a new buffer or a gem object
-	 * would be allocated.
-	 */
-	if (!mode_cmd->handle) {
-		if (!file_priv) {
-			struct exynos_drm_gem_buf *buffer;
-
-			/*
-			 * in case that file_priv is NULL, it allocates
-			 * only buffer and this buffer would be used
-			 * for default framebuffer.
-			 */
-			buffer = exynos_drm_buf_create(dev, size);
-			if (IS_ERR(buffer)) {
-				ret = PTR_ERR(buffer);
-				goto err_buffer;
-			}
-
-			exynos_fb->buffer = buffer;
-
-			DRM_LOG_KMS("default: dma_addr = 0x%lx, size = 0x%x\n",
-					(unsigned long)buffer->dma_addr, size);
-
-			goto out;
-		} else {
-			exynos_gem_obj = exynos_drm_gem_create(dev, file_priv,
-					&mode_cmd->handle,
-					size);
-			if (IS_ERR(exynos_gem_obj)) {
-				ret = PTR_ERR(exynos_gem_obj);
-				goto err_buffer;
-			}
-		}
-	} else {
-		obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
-		if (!obj) {
-			DRM_ERROR("failed to lookup gem object.\n");
-			goto err_buffer;
-		}
+static struct drm_framebuffer *
+exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
+		      struct drm_mode_fb_cmd2 *mode_cmd)
+{
+	struct drm_gem_object *obj;
+	struct drm_framebuffer *fb;
+	struct exynos_drm_fb *exynos_fb;
+	int nr;
+	int i;
 
-		exynos_gem_obj = to_exynos_gem_obj(obj);
+	DRM_DEBUG_KMS("%s\n", __FILE__);
 
-		drm_gem_object_unreference_unlocked(obj);
+	obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
+	if (!obj) {
+		DRM_ERROR("failed to lookup gem object\n");
+		return ERR_PTR(-ENOENT);
 	}
 
-	/*
-	 * if got a exynos_gem_obj from either a handle or
-	 * a new creation then exynos_fb->exynos_gem_obj is NULL
-	 * so that default framebuffer has no its own gem object,
-	 * only its own buffer object.
-	 */
-	exynos_fb->buffer = exynos_gem_obj->buffer;
-
-	DRM_LOG_KMS("dma_addr = 0x%lx, size = 0x%x, gem object = 0x%x\n",
-			(unsigned long)exynos_fb->buffer->dma_addr, size,
-			(unsigned int)&exynos_gem_obj->base);
+	drm_gem_object_unreference_unlocked(obj);
 
-out:
-	exynos_fb->exynos_gem_obj = exynos_gem_obj;
+	fb = exynos_drm_framebuffer_init(dev, mode_cmd, obj);
+	if (IS_ERR(fb))
+		return fb;
 
-	drm_helper_mode_fill_fb_struct(fb, mode_cmd);
+	exynos_fb = to_exynos_fb(fb);
+	nr = exynos_drm_format_num_buffers(fb->pixel_format);
 
-	return fb;
-
-err_buffer:
-	drm_framebuffer_cleanup(fb);
-
-err_init:
-	kfree(exynos_fb);
+	for (i = 1; i < nr; i++) {
+		obj = drm_gem_object_lookup(dev, file_priv,
+				mode_cmd->handles[i]);
+		if (!obj) {
+			DRM_ERROR("failed to lookup gem object\n");
+			exynos_drm_fb_destroy(fb);
+			return ERR_PTR(-ENOENT);
+		}
 
-	return ERR_PTR(ret);
-}
+		drm_gem_object_unreference_unlocked(obj);
 
-struct drm_framebuffer *exynos_drm_fb_create(struct drm_device *dev,
-				struct drm_file *file_priv,
-				struct drm_mode_fb_cmd *mode_cmd)
-{
-	DRM_DEBUG_KMS("%s\n", __FILE__);
+		exynos_fb->exynos_gem_obj[i] = to_exynos_gem_obj(obj);
+	}
 
-	return exynos_drm_fb_init(file_priv, dev, mode_cmd);
+	return fb;
 }
 
-struct exynos_drm_gem_buf *exynos_drm_fb_get_buf(struct drm_framebuffer *fb)
+struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
+						int index)
 {
 	struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
 	struct exynos_drm_gem_buf *buffer;
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
-	buffer = exynos_fb->buffer;
+	if (index >= MAX_FB_BUFFER)
+		return NULL;
+
+	buffer = exynos_fb->exynos_gem_obj[index]->buffer;
 	if (!buffer)
 		return NULL;
 
@@ -250,7 +192,7 @@ static void exynos_drm_output_poll_changed(struct drm_device *dev)
 }
 
 static struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
-	.fb_create = exynos_drm_fb_create,
+	.fb_create = exynos_user_fb_create,
 	.output_poll_changed = exynos_drm_output_poll_changed,
 };
 
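
exynos_user_fb_create() now expects one GEM handle per plane of the requested format. From user space that corresponds to the ADDFB2 ioctl; a hedged sketch for a two-plane NV12M framebuffer, where fd, width, height, y_handle and cbcr_handle are assumed to exist (libdrm's xf86drm.h and drm_fourcc.h provide the declarations):

	struct drm_mode_fb_cmd2 cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.width = width;
	cmd.height = height;
	cmd.pixel_format = DRM_FORMAT_NV12M;	/* two buffers, see fb.h below */
	cmd.handles[0] = y_handle;		/* luma plane */
	cmd.pitches[0] = width;
	cmd.handles[1] = cbcr_handle;		/* chroma plane */
	cmd.pitches[1] = width;

	if (drmIoctl(fd, DRM_IOCTL_MODE_ADDFB2, &cmd) == 0)
		/* cmd.fb_id now names the framebuffer */;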
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.h b/drivers/gpu/drm/exynos/exynos_drm_fb.h
index eb35931d302..3ecb30d9355 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.h
@@ -28,9 +28,27 @@
 #ifndef _EXYNOS_DRM_FB_H_
 #define _EXYNOS_DRM_FB_H
 
-struct drm_framebuffer *exynos_drm_fb_create(struct drm_device *dev,
-				struct drm_file *filp,
-				struct drm_mode_fb_cmd *mode_cmd);
+static inline int exynos_drm_format_num_buffers(uint32_t format)
+{
+	switch (format) {
+	case DRM_FORMAT_NV12M:
+	case DRM_FORMAT_NV12MT:
+		return 2;
+	case DRM_FORMAT_YUV420M:
+		return 3;
+	default:
+		return 1;
+	}
+}
+
+struct drm_framebuffer *
+exynos_drm_framebuffer_init(struct drm_device *dev,
+			    struct drm_mode_fb_cmd2 *mode_cmd,
+			    struct drm_gem_object *obj);
+
+/* get memory information of a drm framebuffer */
+struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
+						int index);
 
 void exynos_drm_mode_config_init(struct drm_device *dev);
 
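
With these two helpers a kernel-side consumer can walk every plane of a framebuffer; sketched below, error handling elided:

	int i, nr = exynos_drm_format_num_buffers(fb->pixel_format);

	for (i = 0; i < nr; i++) {
		struct exynos_drm_gem_buf *buf = exynos_drm_fb_buffer(fb, i);

		if (!buf)
			break;	/* plane not populated */
		/* buf->dma_addr and buf->kvaddr describe plane i */
	}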
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 836f4100818..d7ae29d2f3d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -34,7 +34,6 @@
 #include "exynos_drm_drv.h"
 #include "exynos_drm_fb.h"
 #include "exynos_drm_gem.h"
-#include "exynos_drm_buf.h"
 
 #define MAX_CONNECTOR		4
 #define PREFERRED_BPP		32
@@ -43,8 +42,8 @@
 				drm_fb_helper)
 
 struct exynos_drm_fbdev {
 	struct drm_fb_helper		drm_fb_helper;
-	struct drm_framebuffer		*fb;
+	struct exynos_drm_gem_obj	*exynos_gem_obj;
 };
 
 static int exynos_drm_fbdev_set_par(struct fb_info *info)
@@ -90,26 +89,24 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
 {
 	struct fb_info *fbi = helper->fbdev;
 	struct drm_device *dev = helper->dev;
-	struct exynos_drm_fbdev *exynos_fb = to_exynos_fbdev(helper);
 	struct exynos_drm_gem_buf *buffer;
 	unsigned int size = fb->width * fb->height * (fb->bits_per_pixel >> 3);
 	unsigned long offset;
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
-	exynos_fb->fb = fb;
-
-	drm_fb_helper_fill_fix(fbi, fb->pitch, fb->depth);
+	drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
 	drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height);
 
-	buffer = exynos_drm_fb_get_buf(fb);
+	/* RGB formats use only one buffer */
+	buffer = exynos_drm_fb_buffer(fb, 0);
 	if (!buffer) {
 		DRM_LOG_KMS("buffer is null.\n");
 		return -EFAULT;
 	}
 
 	offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3);
-	offset += fbi->var.yoffset * fb->pitch;
+	offset += fbi->var.yoffset * fb->pitches[0];
 
 	dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr;
 	fbi->screen_base = buffer->kvaddr + offset;
@@ -124,10 +121,12 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
 				    struct drm_fb_helper_surface_size *sizes)
 {
 	struct exynos_drm_fbdev *exynos_fbdev = to_exynos_fbdev(helper);
+	struct exynos_drm_gem_obj *exynos_gem_obj;
 	struct drm_device *dev = helper->dev;
 	struct fb_info *fbi;
-	struct drm_mode_fb_cmd mode_cmd = { 0 };
+	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
 	struct platform_device *pdev = dev->platformdev;
+	unsigned long size;
 	int ret;
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -138,8 +137,9 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
 
 	mode_cmd.width = sizes->surface_width;
 	mode_cmd.height = sizes->surface_height;
-	mode_cmd.bpp = sizes->surface_bpp;
-	mode_cmd.depth = sizes->surface_depth;
+	mode_cmd.pitches[0] = sizes->surface_width * (sizes->surface_bpp >> 3);
+	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+			sizes->surface_depth);
 
 	mutex_lock(&dev->struct_mutex);
 
@@ -150,14 +150,23 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
 		goto out;
 	}
 
-	exynos_fbdev->fb = exynos_drm_fb_create(dev, NULL, &mode_cmd);
-	if (IS_ERR_OR_NULL(exynos_fbdev->fb)) {
+	size = mode_cmd.pitches[0] * mode_cmd.height;
+	exynos_gem_obj = exynos_drm_gem_create(dev, size);
+	if (IS_ERR(exynos_gem_obj)) {
+		ret = PTR_ERR(exynos_gem_obj);
+		goto out;
+	}
+
+	exynos_fbdev->exynos_gem_obj = exynos_gem_obj;
+
+	helper->fb = exynos_drm_framebuffer_init(dev, &mode_cmd,
+			&exynos_gem_obj->base);
+	if (IS_ERR_OR_NULL(helper->fb)) {
 		DRM_ERROR("failed to create drm framebuffer.\n");
-		ret = PTR_ERR(exynos_fbdev->fb);
+		ret = PTR_ERR(helper->fb);
 		goto out;
 	}
 
-	helper->fb = exynos_fbdev->fb;
 	helper->fbdev = fbi;
 
 	fbi->par = helper;
@@ -171,8 +180,10 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
 	}
 
 	ret = exynos_drm_fbdev_update(helper, helper->fb);
-	if (ret < 0)
+	if (ret < 0) {
 		fb_dealloc_cmap(&fbi->cmap);
+		goto out;
+	}
 
 /*
  * if failed, all resources allocated above would be released by
@@ -205,34 +216,42 @@ static int exynos_drm_fbdev_recreate(struct drm_fb_helper *helper,
 {
 	struct drm_device *dev = helper->dev;
 	struct exynos_drm_fbdev *exynos_fbdev = to_exynos_fbdev(helper);
-	struct drm_framebuffer *fb = exynos_fbdev->fb;
-	struct drm_mode_fb_cmd mode_cmd = { 0 };
+	struct exynos_drm_gem_obj *exynos_gem_obj;
+	struct drm_framebuffer *fb = helper->fb;
+	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
+	unsigned long size;
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
-	if (helper->fb != fb) {
-		DRM_ERROR("drm framebuffer is different\n");
-		return -EINVAL;
-	}
-
 	if (exynos_drm_fbdev_is_samefb(fb, sizes))
 		return 0;
 
 	mode_cmd.width = sizes->surface_width;
 	mode_cmd.height = sizes->surface_height;
-	mode_cmd.bpp = sizes->surface_bpp;
-	mode_cmd.depth = sizes->surface_depth;
+	mode_cmd.pitches[0] = sizes->surface_width * (sizes->surface_bpp >> 3);
+	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+			sizes->surface_depth);
+
+	if (exynos_fbdev->exynos_gem_obj)
+		exynos_drm_gem_destroy(exynos_fbdev->exynos_gem_obj);
 
 	if (fb->funcs->destroy)
 		fb->funcs->destroy(fb);
 
-	exynos_fbdev->fb = exynos_drm_fb_create(dev, NULL, &mode_cmd);
-	if (IS_ERR(exynos_fbdev->fb)) {
-		DRM_ERROR("failed to allocate fb.\n");
-		return PTR_ERR(exynos_fbdev->fb);
+	size = mode_cmd.pitches[0] * mode_cmd.height;
+	exynos_gem_obj = exynos_drm_gem_create(dev, size);
+	if (IS_ERR(exynos_gem_obj))
+		return PTR_ERR(exynos_gem_obj);
+
+	exynos_fbdev->exynos_gem_obj = exynos_gem_obj;
+
+	helper->fb = exynos_drm_framebuffer_init(dev, &mode_cmd,
+			&exynos_gem_obj->base);
+	if (IS_ERR_OR_NULL(helper->fb)) {
+		DRM_ERROR("failed to create drm framebuffer.\n");
+		return PTR_ERR(helper->fb);
 	}
 
-	helper->fb = exynos_fbdev->fb;
 	return exynos_drm_fbdev_update(helper, helper->fb);
 }
 
@@ -366,6 +385,9 @@ void exynos_drm_fbdev_fini(struct drm_device *dev)
 
 	fbdev = to_exynos_fbdev(private->fb_helper);
 
+	if (fbdev->exynos_gem_obj)
+		exynos_drm_gem_destroy(fbdev->exynos_gem_obj);
+
 	exynos_drm_fbdev_destroy(dev, private->fb_helper);
 	kfree(fbdev);
 	private->fb_helper = NULL;
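
drm_mode_legacy_fb_format() converts the old bpp/depth pair into a FOURCC pixel format; with PREFERRED_BPP 32 the fbdev paths above end up with DRM_FORMAT_XRGB8888. The mapping for the common cases is roughly as follows — a simplification of the kernel helper, shown for illustration only:

	static uint32_t legacy_fb_format(uint32_t bpp, uint32_t depth)
	{
		switch (bpp) {
		case 16:
			return depth == 15 ? DRM_FORMAT_XRGB1555 :
					     DRM_FORMAT_RGB565;
		case 24:
			return DRM_FORMAT_RGB888;
		case 32:
			return depth == 24 ? DRM_FORMAT_XRGB8888 :
					     DRM_FORMAT_ARGB8888;
		default:
			return 0;	/* other depths omitted here */
		}
	}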
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index db3b3d9e731..ca83139cd30 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -17,6 +17,7 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
+#include <linux/pm_runtime.h>
 
 #include <drm/exynos_drm.h>
 #include <plat/regs-fb-v4.h>
@@ -68,6 +69,7 @@ struct fimd_win_data {
 	void __iomem		*vaddr;
 	unsigned int		buf_offsize;
 	unsigned int		line_size;	/* bytes */
+	bool			enabled;
 };
 
 struct fimd_context {
@@ -84,6 +86,8 @@ struct fimd_context {
 	unsigned long		irq_flags;
 	u32			vidcon0;
 	u32			vidcon1;
+	bool			suspended;
+	struct mutex		lock;
 
 	struct fb_videomode	*timing;
 };
@@ -119,7 +123,7 @@ static int fimd_display_power_on(struct device *dev, int mode)
 {
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
-	/* TODO. */
+	/* TODO */
 
 	return 0;
 }
@@ -132,12 +136,68 @@ static struct exynos_drm_display_ops fimd_display_ops = {
 	.power_on = fimd_display_power_on,
 };
 
+static void fimd_dpms(struct device *subdrv_dev, int mode)
+{
+	struct fimd_context *ctx = get_fimd_context(subdrv_dev);
+
+	DRM_DEBUG_KMS("%s, %d\n", __FILE__, mode);
+
+	mutex_lock(&ctx->lock);
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		/*
+		 * enable fimd hardware only if suspended status.
+		 *
+		 * P.S. fimd_dpms function would be called at booting time so
+		 * clk_enable could be called double time.
+		 */
+		if (ctx->suspended)
+			pm_runtime_get_sync(subdrv_dev);
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+	case DRM_MODE_DPMS_OFF:
+		pm_runtime_put_sync(subdrv_dev);
+		break;
+	default:
+		DRM_DEBUG_KMS("unspecified mode %d\n", mode);
+		break;
+	}
+
+	mutex_unlock(&ctx->lock);
+}
+
+static void fimd_apply(struct device *subdrv_dev)
+{
+	struct fimd_context *ctx = get_fimd_context(subdrv_dev);
+	struct exynos_drm_manager *mgr = &ctx->subdrv.manager;
+	struct exynos_drm_manager_ops *mgr_ops = mgr->ops;
+	struct exynos_drm_overlay_ops *ovl_ops = mgr->overlay_ops;
+	struct fimd_win_data *win_data;
+	int i;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	for (i = 0; i < WINDOWS_NR; i++) {
+		win_data = &ctx->win_data[i];
+		if (win_data->enabled && (ovl_ops && ovl_ops->commit))
+			ovl_ops->commit(subdrv_dev, i);
+	}
+
+	if (mgr_ops && mgr_ops->commit)
+		mgr_ops->commit(subdrv_dev);
+}
+
 static void fimd_commit(struct device *dev)
 {
 	struct fimd_context *ctx = get_fimd_context(dev);
 	struct fb_videomode *timing = ctx->timing;
 	u32 val;
 
+	if (ctx->suspended)
+		return;
+
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
 	/* setup polarity values from machine code. */
@@ -177,40 +237,6 @@ static void fimd_commit(struct device *dev)
 	writel(val, ctx->regs + VIDCON0);
 }
 
-static void fimd_disable(struct device *dev)
-{
-	struct fimd_context *ctx = get_fimd_context(dev);
-	struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
-	struct drm_device *drm_dev = subdrv->drm_dev;
-	struct exynos_drm_manager *manager = &subdrv->manager;
-	u32 val;
-
-	DRM_DEBUG_KMS("%s\n", __FILE__);
-
-	/* fimd dma off */
-	val = readl(ctx->regs + VIDCON0);
-	val &= ~(VIDCON0_ENVID | VIDCON0_ENVID_F);
-	writel(val, ctx->regs + VIDCON0);
-
-	/*
-	 * if vblank is enabled status with dma off then
-	 * it disables vsync interrupt.
-	 */
-	if (drm_dev->vblank_enabled[manager->pipe] &&
-		atomic_read(&drm_dev->vblank_refcount[manager->pipe])) {
-		drm_vblank_put(drm_dev, manager->pipe);
-
-		/*
-		 * if vblank_disable_allowed is 0 then disable
-		 * vsync interrupt right now else the vsync interrupt
-		 * would be disabled by drm timer once a current process
-		 * gives up ownershop of vblank event.
-		 */
-		if (!drm_dev->vblank_disable_allowed)
-			drm_vblank_off(drm_dev, manager->pipe);
-	}
-}
-
 static int fimd_enable_vblank(struct device *dev)
 {
 	struct fimd_context *ctx = get_fimd_context(dev);
@@ -218,6 +244,9 @@ static int fimd_enable_vblank(struct device *dev)
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
+	if (ctx->suspended)
+		return -EPERM;
+
 	if (!test_and_set_bit(0, &ctx->irq_flags)) {
 		val = readl(ctx->regs + VIDINTCON0);
 
@@ -242,6 +271,9 @@ static void fimd_disable_vblank(struct device *dev)
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
+	if (ctx->suspended)
+		return;
+
 	if (test_and_clear_bit(0, &ctx->irq_flags)) {
 		val = readl(ctx->regs + VIDINTCON0);
 
@@ -253,8 +285,9 @@ static void fimd_disable_vblank(struct device *dev)
 }
 
 static struct exynos_drm_manager_ops fimd_manager_ops = {
+	.dpms = fimd_dpms,
+	.apply = fimd_apply,
 	.commit = fimd_commit,
-	.disable = fimd_disable,
 	.enable_vblank = fimd_enable_vblank,
 	.disable_vblank = fimd_disable_vblank,
 };
@@ -264,6 +297,7 @@ static void fimd_win_mode_set(struct device *dev,
 {
 	struct fimd_context *ctx = get_fimd_context(dev);
 	struct fimd_win_data *win_data;
+	int win;
 	unsigned long offset;
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -273,12 +307,19 @@ static void fimd_win_mode_set(struct device *dev,
 		return;
 	}
 
+	win = overlay->zpos;
+	if (win == DEFAULT_ZPOS)
+		win = ctx->default_win;
+
+	if (win < 0 || win > WINDOWS_NR)
+		return;
+
 	offset = overlay->fb_x * (overlay->bpp >> 3);
 	offset += overlay->fb_y * overlay->pitch;
 
 	DRM_DEBUG_KMS("offset = 0x%lx, pitch = %x\n", offset, overlay->pitch);
 
-	win_data = &ctx->win_data[ctx->default_win];
+	win_data = &ctx->win_data[win];
 
 	win_data->offset_x = overlay->crtc_x;
 	win_data->offset_y = overlay->crtc_y;
@@ -286,8 +327,8 @@ static void fimd_win_mode_set(struct device *dev,
 	win_data->ovl_height = overlay->crtc_height;
 	win_data->fb_width = overlay->fb_width;
 	win_data->fb_height = overlay->fb_height;
-	win_data->dma_addr = overlay->dma_addr + offset;
-	win_data->vaddr = overlay->vaddr + offset;
+	win_data->dma_addr = overlay->dma_addr[0] + offset;
+	win_data->vaddr = overlay->vaddr[0] + offset;
 	win_data->bpp = overlay->bpp;
 	win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
 				(overlay->bpp >> 3);
@@ -381,15 +422,21 @@ static void fimd_win_set_colkey(struct device *dev, unsigned int win)
 	writel(keycon1, ctx->regs + WKEYCON1_BASE(win));
 }
 
-static void fimd_win_commit(struct device *dev)
+static void fimd_win_commit(struct device *dev, int zpos)
 {
 	struct fimd_context *ctx = get_fimd_context(dev);
 	struct fimd_win_data *win_data;
-	int win = ctx->default_win;
+	int win = zpos;
 	unsigned long val, alpha, size;
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
+	if (ctx->suspended)
+		return;
+
+	if (win == DEFAULT_ZPOS)
+		win = ctx->default_win;
+
 	if (win < 0 || win > WINDOWS_NR)
 		return;
 
@@ -472,24 +519,37 @@ static void fimd_win_commit(struct device *dev)
 	if (win != 0)
 		fimd_win_set_colkey(dev, win);
 
+	/* wincon */
+	val = readl(ctx->regs + WINCON(win));
+	val |= WINCONx_ENWIN;
+	writel(val, ctx->regs + WINCON(win));
+
 	/* Enable DMA channel and unprotect windows */
 	val = readl(ctx->regs + SHADOWCON);
 	val |= SHADOWCON_CHx_ENABLE(win);
 	val &= ~SHADOWCON_WINx_PROTECT(win);
 	writel(val, ctx->regs + SHADOWCON);
+
+	win_data->enabled = true;
 }
 
-static void fimd_win_disable(struct device *dev)
+static void fimd_win_disable(struct device *dev, int zpos)
 {
 	struct fimd_context *ctx = get_fimd_context(dev);
-	int win = ctx->default_win;
+	struct fimd_win_data *win_data;
+	int win = zpos;
 	u32 val;
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
+	if (win == DEFAULT_ZPOS)
+		win = ctx->default_win;
+
 	if (win < 0 || win > WINDOWS_NR)
 		return;
 
+	win_data = &ctx->win_data[win];
+
 	/* protect windows */
 	val = readl(ctx->regs + SHADOWCON);
 	val |= SHADOWCON_WINx_PROTECT(win);
@@ -505,6 +565,8 @@ static void fimd_win_disable(struct device *dev)
 	val &= ~SHADOWCON_CHx_ENABLE(win);
 	val &= ~SHADOWCON_WINx_PROTECT(win);
 	writel(val, ctx->regs + SHADOWCON);
+
+	win_data->enabled = false;
 }
 
 static struct exynos_drm_overlay_ops fimd_overlay_ops = {
@@ -540,9 +602,17 @@ static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)
 		wake_up_interruptible(&e->base.file_priv->event_wait);
 	}
 
-	if (is_checked)
+	if (is_checked) {
 		drm_vblank_put(drm_dev, crtc);
 
+		/*
+		 * don't off vblank if vblank_disable_allowed is 1,
+		 * because vblank would be off by timer handler.
+		 */
+		if (!drm_dev->vblank_disable_allowed)
+			drm_vblank_off(drm_dev, crtc);
+	}
+
 	spin_unlock_irqrestore(&drm_dev->event_lock, flags);
 }
 
@@ -560,19 +630,14 @@ static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
 	/* VSYNC interrupt */
 	writel(VIDINTCON1_INT_FRAME, ctx->regs + VIDINTCON1);
 
-	/*
-	 * in case that vblank_disable_allowed is 1, it could induce
-	 * the problem that manager->pipe could be -1 because with
-	 * disable callback, vsync interrupt isn't disabled and at this moment,
-	 * vsync interrupt could occur. the vsync interrupt would be disabled
-	 * by timer handler later.
-	 */
-	if (manager->pipe == -1)
-		return IRQ_HANDLED;
+	/* check the crtc is detached already from encoder */
+	if (manager->pipe < 0)
+		goto out;
 
 	drm_handle_vblank(drm_dev, manager->pipe);
 	fimd_finish_pageflip(drm_dev, manager->pipe);
 
+out:
 	return IRQ_HANDLED;
 }
 
@@ -590,6 +655,13 @@ static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
 	 */
 	drm_dev->irq_enabled = 1;
 
+	/*
+	 * with vblank_disable_allowed = 1, vblank interrupt will be disabled
+	 * by drm timer once a current process gives up ownership of
+	 * vblank event. (after drm_vblank_put function is called)
+	 */
+	drm_dev->vblank_disable_allowed = 1;
+
 	return 0;
 }
 
@@ -739,9 +811,6 @@ static int __devinit fimd_probe(struct platform_device *pdev)
 
 	ctx->irq = res->start;
 
-	for (win = 0; win < WINDOWS_NR; win++)
-		fimd_clear_win(ctx, win);
-
 	ret = request_irq(ctx->irq, fimd_irq_handler, 0, "drm_fimd", ctx);
 	if (ret < 0) {
 		dev_err(dev, "irq request failed.\n");
@@ -769,7 +838,17 @@ static int __devinit fimd_probe(struct platform_device *pdev)
 	subdrv->manager.display_ops = &fimd_display_ops;
 	subdrv->manager.dev = dev;
 
+	mutex_init(&ctx->lock);
+
 	platform_set_drvdata(pdev, ctx);
+
+	pm_runtime_set_active(dev);
+	pm_runtime_enable(dev);
+	pm_runtime_get_sync(dev);
+
+	for (win = 0; win < WINDOWS_NR; win++)
+		fimd_clear_win(ctx, win);
+
 	exynos_drm_subdrv_register(subdrv);
 
 	return 0;
@@ -797,14 +876,25 @@ err_clk_get:
 
 static int __devexit fimd_remove(struct platform_device *pdev)
 {
+	struct device *dev = &pdev->dev;
 	struct fimd_context *ctx = platform_get_drvdata(pdev);
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
 	exynos_drm_subdrv_unregister(&ctx->subdrv);
 
+	if (ctx->suspended)
+		goto out;
+
 	clk_disable(ctx->lcd_clk);
 	clk_disable(ctx->bus_clk);
+
+	pm_runtime_set_suspended(dev);
+	pm_runtime_put_sync(dev);
+
+out:
+	pm_runtime_disable(dev);
+
 	clk_put(ctx->lcd_clk);
 	clk_put(ctx->bus_clk);
 
@@ -818,12 +908,102 @@ static int __devexit fimd_remove(struct platform_device *pdev)
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int fimd_suspend(struct device *dev)
+{
+	int ret;
+
+	if (pm_runtime_suspended(dev))
+		return 0;
+
+	ret = pm_runtime_suspend(dev);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static int fimd_resume(struct device *dev)
+{
+	int ret;
+
+	ret = pm_runtime_resume(dev);
+	if (ret < 0) {
+		DRM_ERROR("failed to resume runtime pm.\n");
+		return ret;
+	}
+
+	pm_runtime_disable(dev);
+
+	ret = pm_runtime_set_active(dev);
+	if (ret < 0) {
+		DRM_ERROR("failed to active runtime pm.\n");
+		pm_runtime_enable(dev);
+		pm_runtime_suspend(dev);
+		return ret;
+	}
+
+	pm_runtime_enable(dev);
+
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int fimd_runtime_suspend(struct device *dev)
+{
+	struct fimd_context *ctx = get_fimd_context(dev);
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	clk_disable(ctx->lcd_clk);
+	clk_disable(ctx->bus_clk);
+
+	ctx->suspended = true;
+	return 0;
+}
+
+static int fimd_runtime_resume(struct device *dev)
+{
+	struct fimd_context *ctx = get_fimd_context(dev);
+	int ret;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	ret = clk_enable(ctx->bus_clk);
+	if (ret < 0)
+		return ret;
+
+	ret = clk_enable(ctx->lcd_clk);
+	if (ret < 0) {
+		clk_disable(ctx->bus_clk);
+		return ret;
+	}
+
+	ctx->suspended = false;
+
+	/* if vblank was enabled status, enable it again. */
+	if (test_and_clear_bit(0, &ctx->irq_flags))
+		fimd_enable_vblank(dev);
+
+	fimd_apply(dev);
+
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops fimd_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(fimd_suspend, fimd_resume)
+	SET_RUNTIME_PM_OPS(fimd_runtime_suspend, fimd_runtime_resume, NULL)
+};
+
 static struct platform_driver fimd_driver = {
 	.probe		= fimd_probe,
 	.remove		= __devexit_p(fimd_remove),
 	.driver		= {
 		.name	= "exynos4-fb",
 		.owner	= THIS_MODULE,
+		.pm	= &fimd_pm_ops,
 	},
 };
 
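
The net effect is that FIMD's power sequencing funnels through runtime PM: fimd_dpms(DRM_MODE_DPMS_ON) takes a reference with pm_runtime_get_sync(), which lands in fimd_runtime_resume() to re-enable clocks, re-arm vblank and replay window state via fimd_apply(); the OFF path drops the reference and fimd_runtime_suspend() gates the clocks. The generic skeleton of that pattern, with my_* as placeholder names rather than anything from this patch, is:

	#include <linux/pm_runtime.h>

	static int my_runtime_suspend(struct device *dev)
	{
		/* gate clocks, mark driver state suspended */
		return 0;
	}

	static int my_runtime_resume(struct device *dev)
	{
		/* ungate clocks, restore hardware state */
		return 0;
	}

	static const struct dev_pm_ops my_pm_ops = {
		SET_RUNTIME_PM_OPS(my_runtime_suspend, my_runtime_resume, NULL)
	};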
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index aba0fe47f7e..025abb3e3b6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -55,17 +55,54 @@ static unsigned int convert_to_vm_err_msg(int msg)
 	return out_msg;
 }
 
-static unsigned int get_gem_mmap_offset(struct drm_gem_object *obj)
+static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
+					struct drm_file *file_priv,
+					unsigned int *handle)
 {
+	int ret;
+
+	/*
+	 * allocate a id of idr table where the obj is registered
+	 * and handle has the id what user can see.
+	 */
+	ret = drm_gem_handle_create(file_priv, obj, handle);
+	if (ret)
+		return ret;
+
+	DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);
+
+	/* drop reference from allocate - handle holds it now. */
+	drm_gem_object_unreference_unlocked(obj);
+
+	return 0;
+}
+
+void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
+{
+	struct drm_gem_object *obj;
+
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
-	return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT;
+	if (!exynos_gem_obj)
+		return;
+
+	obj = &exynos_gem_obj->base;
+
+	DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));
+
+	exynos_drm_buf_destroy(obj->dev, exynos_gem_obj->buffer);
+
+	if (obj->map_list.map)
+		drm_gem_free_mmap_offset(obj);
+
+	/* release file pointer to gem object. */
+	drm_gem_object_release(obj);
+
+	kfree(exynos_gem_obj);
 }
 
-static struct exynos_drm_gem_obj
-		*exynos_drm_gem_init(struct drm_device *drm_dev,
-		struct drm_file *file_priv, unsigned int *handle,
-		unsigned int size)
+static struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
+						      unsigned long size)
 {
 	struct exynos_drm_gem_obj *exynos_gem_obj;
 	struct drm_gem_object *obj;
@@ -73,75 +110,41 @@ static struct exynos_drm_gem_obj
 
 	exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
 	if (!exynos_gem_obj) {
-		DRM_ERROR("failed to allocate exynos gem object.\n");
-		return ERR_PTR(-ENOMEM);
+		DRM_ERROR("failed to allocate exynos gem object\n");
+		return NULL;
 	}
 
 	obj = &exynos_gem_obj->base;
 
-	ret = drm_gem_object_init(drm_dev, obj, size);
+	ret = drm_gem_object_init(dev, obj, size);
 	if (ret < 0) {
-		DRM_ERROR("failed to initialize gem object.\n");
-		ret = -EINVAL;
-		goto err_object_init;
+		DRM_ERROR("failed to initialize gem object\n");
+		kfree(exynos_gem_obj);
+		return NULL;
 	}
 
 	DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);
 
-	ret = drm_gem_create_mmap_offset(obj);
-	if (ret < 0) {
-		DRM_ERROR("failed to allocate mmap offset.\n");
-		goto err_create_mmap_offset;
-	}
-
-	/*
-	 * allocate a id of idr table where the obj is registered
-	 * and handle has the id what user can see.
-	 */
-	ret = drm_gem_handle_create(file_priv, obj, handle);
-	if (ret)
-		goto err_handle_create;
-
-	DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);
-
-	/* drop reference from allocate - handle holds it now. */
-	drm_gem_object_unreference_unlocked(obj);
-
 	return exynos_gem_obj;
-
-err_handle_create:
-	drm_gem_free_mmap_offset(obj);
-
-err_create_mmap_offset:
-	drm_gem_object_release(obj);
-
-err_object_init:
-	kfree(exynos_gem_obj);
-
-	return ERR_PTR(ret);
 }
 
 struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
-						struct drm_file *file_priv,
-						unsigned int *handle, unsigned long size)
+						unsigned long size)
 {
-
-	struct exynos_drm_gem_obj *exynos_gem_obj = NULL;
 	struct exynos_drm_gem_buf *buffer;
+	struct exynos_drm_gem_obj *exynos_gem_obj;
 
 	size = roundup(size, PAGE_SIZE);
-
 	DRM_DEBUG_KMS("%s: size = 0x%lx\n", __FILE__, size);
 
 	buffer = exynos_drm_buf_create(dev, size);
-	if (IS_ERR(buffer)) {
-		return ERR_CAST(buffer);
-	}
+	if (!buffer)
+		return ERR_PTR(-ENOMEM);
 
-	exynos_gem_obj = exynos_drm_gem_init(dev, file_priv, handle, size);
-	if (IS_ERR(exynos_gem_obj)) {
+	exynos_gem_obj = exynos_drm_gem_init(dev, size);
+	if (!exynos_gem_obj) {
 		exynos_drm_buf_destroy(dev, buffer);
-		return exynos_gem_obj;
+		return ERR_PTR(-ENOMEM);
 	}
 
 	exynos_gem_obj->buffer = buffer;
@@ -150,23 +153,30 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
 }
 
 int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *file_priv)
 {
 	struct drm_exynos_gem_create *args = data;
-	struct exynos_drm_gem_obj *exynos_gem_obj = NULL;
+	struct exynos_drm_gem_obj *exynos_gem_obj;
+	int ret;
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
-	exynos_gem_obj = exynos_drm_gem_create(dev, file_priv,
-						&args->handle, args->size);
+	exynos_gem_obj = exynos_drm_gem_create(dev, args->size);
 	if (IS_ERR(exynos_gem_obj))
 		return PTR_ERR(exynos_gem_obj);
 
+	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
+			&args->handle);
+	if (ret) {
+		exynos_drm_gem_destroy(exynos_gem_obj);
+		return ret;
+	}
+
 	return 0;
 }
 
 int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
 				    struct drm_file *file_priv)
 {
 	struct drm_exynos_gem_map_off *args = data;
 
@@ -185,7 +195,7 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
 }
 
 static int exynos_drm_gem_mmap_buffer(struct file *filp,
-					struct vm_area_struct *vma)
+				      struct vm_area_struct *vma)
 {
 	struct drm_gem_object *obj = filp->private_data;
 	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
@@ -196,6 +206,7 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
 
 	vma->vm_flags |= (VM_IO | VM_RESERVED);
 
+	/* in case of direct mapping, always having non-cachable attribute */
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 	vma->vm_file = filp;
 
@@ -232,7 +243,7 @@ static const struct file_operations exynos_drm_gem_fops = {
 };
 
 int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
-				struct drm_file *file_priv)
+			      struct drm_file *file_priv)
 {
 	struct drm_exynos_gem_mmap *args = data;
 	struct drm_gem_object *obj;
@@ -278,32 +289,19 @@ int exynos_drm_gem_init_object(struct drm_gem_object *obj)
 	return 0;
 }
 
-void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj)
+void exynos_drm_gem_free_object(struct drm_gem_object *obj)
 {
-	struct exynos_drm_gem_obj *exynos_gem_obj;
-
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
-	DRM_DEBUG_KMS("handle count = %d\n",
-			atomic_read(&gem_obj->handle_count));
-
-	if (gem_obj->map_list.map)
-		drm_gem_free_mmap_offset(gem_obj);
-
-	/* release file pointer to gem object. */
-	drm_gem_object_release(gem_obj);
-
-	exynos_gem_obj = to_exynos_gem_obj(gem_obj);
-
-	exynos_drm_buf_destroy(gem_obj->dev, exynos_gem_obj->buffer);
-
-	kfree(exynos_gem_obj);
+	exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
 }
 
 int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
-		struct drm_device *dev, struct drm_mode_create_dumb *args)
+			       struct drm_device *dev,
+			       struct drm_mode_create_dumb *args)
 {
 	struct exynos_drm_gem_obj *exynos_gem_obj;
+	int ret;
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
@@ -316,19 +314,27 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
 	args->pitch = args->width * args->bpp >> 3;
 	args->size = args->pitch * args->height;
 
-	exynos_gem_obj = exynos_drm_gem_create(dev, file_priv, &args->handle,
-						args->size);
+	exynos_gem_obj = exynos_drm_gem_create(dev, args->size);
 	if (IS_ERR(exynos_gem_obj))
 		return PTR_ERR(exynos_gem_obj);
 
+	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
+			&args->handle);
+	if (ret) {
+		exynos_drm_gem_destroy(exynos_gem_obj);
+		return ret;
+	}
+
 	return 0;
 }
 
 int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
-		struct drm_device *dev, uint32_t handle, uint64_t *offset)
+				   struct drm_device *dev, uint32_t handle,
+				   uint64_t *offset)
 {
 	struct exynos_drm_gem_obj *exynos_gem_obj;
 	struct drm_gem_object *obj;
+	int ret = 0;
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
@@ -343,19 +349,46 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
343 obj = drm_gem_object_lookup(dev, file_priv, handle); 349 obj = drm_gem_object_lookup(dev, file_priv, handle);
344 if (!obj) { 350 if (!obj) {
345 DRM_ERROR("failed to lookup gem object.\n"); 351 DRM_ERROR("failed to lookup gem object.\n");
346 mutex_unlock(&dev->struct_mutex); 352 ret = -EINVAL;
347 return -EINVAL; 353 goto unlock;
348 } 354 }
349 355
350 exynos_gem_obj = to_exynos_gem_obj(obj); 356 exynos_gem_obj = to_exynos_gem_obj(obj);
351 357
352 *offset = get_gem_mmap_offset(&exynos_gem_obj->base); 358 if (!exynos_gem_obj->base.map_list.map) {
353 359 ret = drm_gem_create_mmap_offset(&exynos_gem_obj->base);
354 drm_gem_object_unreference(obj); 360 if (ret)
361 goto out;
362 }
355 363
364 *offset = (u64)exynos_gem_obj->base.map_list.hash.key << PAGE_SHIFT;
356 DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset); 365 DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
357 366
367out:
368 drm_gem_object_unreference(obj);
369unlock:
358 mutex_unlock(&dev->struct_mutex); 370 mutex_unlock(&dev->struct_mutex);
371 return ret;
372}
373
374int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
375 struct drm_device *dev,
376 unsigned int handle)
377{
378 int ret;
379
380 DRM_DEBUG_KMS("%s\n", __FILE__);
381
382 /*
383	 * obj->refcount and obj->handle_count are decreased and,
384	 * if both of them reach 0, exynos_drm_gem_free_object()
385	 * is called back to release the resources.
386 */
387 ret = drm_gem_handle_delete(file_priv, handle);
388 if (ret < 0) {
389 DRM_ERROR("failed to delete drm_gem_handle.\n");
390 return ret;
391 }
359 392
360 return 0; 393 return 0;
361} 394}
@@ -403,28 +436,6 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
403 return ret; 436 return ret;
404} 437}
405 438
406
407int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
408 struct drm_device *dev, unsigned int handle)
409{
410 int ret;
411
412 DRM_DEBUG_KMS("%s\n", __FILE__);
413
414 /*
415	 * obj->refcount and obj->handle_count are decreased and,
416	 * if both of them reach 0, exynos_drm_gem_free_object()
417	 * is called back to release the resources.
418 */
419 ret = drm_gem_handle_delete(file_priv, handle);
420 if (ret < 0) {
421 DRM_ERROR("failed to delete drm_gem_handle.\n");
422 return ret;
423 }
424
425 return 0;
426}
427
428MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>"); 439MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
429MODULE_DESCRIPTION("Samsung SoC DRM GEM Module"); 440MODULE_DESCRIPTION("Samsung SoC DRM GEM Module");
430MODULE_LICENSE("GPL"); 441MODULE_LICENSE("GPL");
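The two dumb-buffer entry points above are reached through the generic KMS ioctls. A minimal userspace sketch of the round trip (editorial, not part of the patch), assuming /dev/dri/card0 is bound to this driver and with error handling omitted:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

int main(void)
{
	struct drm_mode_create_dumb create = {
		.width = 1280, .height = 720, .bpp = 32,
	};
	struct drm_mode_map_dumb map = { 0 };
	struct drm_mode_destroy_dumb destroy = { 0 };
	int fd = open("/dev/dri/card0", O_RDWR);
	void *fb;

	/* allocation lands in exynos_drm_gem_dumb_create() */
	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);

	/* fake mmap offset comes from exynos_drm_gem_dumb_map_offset() */
	map.handle = create.handle;
	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);

	fb = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
			fd, map.offset);
	memset(fb, 0, create.size);	/* scribble on the buffer */

	/* handle deletion ends up in exynos_drm_gem_dumb_destroy() */
	munmap(fb, create.size);
	destroy.handle = create.handle;
	ioctl(fd, DRM_IOCTL_MODE_DESTROY_DUMB, &destroy);
	close(fd);
	return 0;
}

Note that the offset handed back by DRM_IOCTL_MODE_MAP_DUMB is the map_list hash key shifted by PAGE_SHIFT computed above, not a physical address.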
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index ef8797334e6..67cdc916870 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -60,14 +60,16 @@ struct exynos_drm_gem_buf {
60 * user can access the buffer through kms_bo.handle. 60 * user can access the buffer through kms_bo.handle.
61 */ 61 */
62struct exynos_drm_gem_obj { 62struct exynos_drm_gem_obj {
63 struct drm_gem_object base; 63 struct drm_gem_object base;
64 struct exynos_drm_gem_buf *buffer; 64 struct exynos_drm_gem_buf *buffer;
65}; 65};
66 66
67/* create a new buffer and get a new gem handle. */ 67/* destroy a buffer together with its gem object. */
68void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj);
69
70/* create a new buffer together with a gem object */
68struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev, 71struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
69 struct drm_file *file_priv, 72 unsigned long size);
70 unsigned int *handle, unsigned long size);
71 73
72/* 74/*
73 * request gem object creation and buffer allocation as the size 75 * request gem object creation and buffer allocation as the size
@@ -75,15 +77,18 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
75 * height and bpp. 77 * height and bpp.
76 */ 78 */
77int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data, 79int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
78 struct drm_file *file_priv); 80 struct drm_file *file_priv);
79 81
80/* get buffer offset to map to user space. */ 82/* get buffer offset to map to user space. */
81int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data, 83int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
82 struct drm_file *file_priv); 84 struct drm_file *file_priv);
83 85
84/* unmap a buffer from user space. */ 86/*
85int exynos_drm_gem_munmap_ioctl(struct drm_device *dev, void *data, 87 * mmap the physically contiguous memory that a gem object contains
86 struct drm_file *file_priv); 88 * to user space.
89 */
90int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
91 struct drm_file *file_priv);
87 92
88/* initialize gem object. */ 93/* initialize gem object. */
89int exynos_drm_gem_init_object(struct drm_gem_object *obj); 94int exynos_drm_gem_init_object(struct drm_gem_object *obj);
@@ -93,24 +98,13 @@ void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj);
93 98
94/* create memory region for drm framebuffer. */ 99/* create memory region for drm framebuffer. */
95int exynos_drm_gem_dumb_create(struct drm_file *file_priv, 100int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
96 struct drm_device *dev, struct drm_mode_create_dumb *args); 101 struct drm_device *dev,
102 struct drm_mode_create_dumb *args);
97 103
98/* map memory region for drm framebuffer to user space. */ 104/* map memory region for drm framebuffer to user space. */
99int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv, 105int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
100 struct drm_device *dev, uint32_t handle, uint64_t *offset); 106 struct drm_device *dev, uint32_t handle,
101 107 uint64_t *offset);
102/* page fault handler: maps the faulting (virtual) address to physical memory. */
103int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
104
105/*
106 * mmap the physically contiguous memory that a gem object contains
107 * to user space.
108 */
109int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
110 struct drm_file *file_priv);
111
112/* set vm_flags; the vm attributes can be changed here. */
113int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
114 108
115/* 109/*
116 * destroy memory region allocated. 110 * destroy memory region allocated.
@@ -118,6 +112,13 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
118 * would be released by drm_gem_handle_delete(). 112 * would be released by drm_gem_handle_delete().
119 */ 113 */
120int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv, 114int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
121 struct drm_device *dev, unsigned int handle); 115 struct drm_device *dev,
116 unsigned int handle);
117
118/* page fault handler: maps the faulting (virtual) address to physical memory. */
119int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
120
121/* set vm_flags; the vm attributes can be changed here. */
122int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
122 123
123#endif 124#endif
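For reference, the to_exynos_gem_obj() conversion used throughout exynos_drm_gem.c is a container_of() downcast over the embedded base member declared above. A sketch of the pattern (the macro is presumably defined in this header to this effect; the inline helper is hypothetical):

#include <linux/kernel.h>	/* container_of() */

#define to_exynos_gem_obj(x)	\
	container_of(x, struct exynos_drm_gem_obj, base)

/* hypothetical helper: recover the driver's buffer from a core object */
static inline struct exynos_drm_gem_buf *
exynos_gem_buf_of(struct drm_gem_object *obj)
{
	return to_exynos_gem_obj(obj)->buffer;
}

Embedding the base object this way lets the DRM core hand back pointers the driver can downcast for free, which is why exynos_drm_gem_free_object() above now reduces to a single exynos_drm_gem_destroy() call.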
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
new file mode 100644
index 00000000000..ed8a319ed84
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
@@ -0,0 +1,439 @@
1/*
2 * Copyright (C) 2011 Samsung Electronics Co.Ltd
3 * Authors:
4 * Inki Dae <inki.dae@samsung.com>
5 * Seung-Woo Kim <sw0312.kim@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 *
12 */
13
14#include "drmP.h"
15
16#include <linux/kernel.h>
17#include <linux/wait.h>
18#include <linux/module.h>
19#include <linux/platform_device.h>
20#include <linux/pm_runtime.h>
21
22#include <drm/exynos_drm.h>
23
24#include "exynos_drm_drv.h"
25#include "exynos_drm_hdmi.h"
26
27#define to_context(dev) platform_get_drvdata(to_platform_device(dev))
28#define to_subdrv(dev) to_context(dev)
29#define get_ctx_from_subdrv(subdrv) container_of(subdrv,\
30 struct drm_hdmi_context, subdrv);
31
32/* these callback pointers should be set by the specific drivers. */
33static struct exynos_hdmi_display_ops *hdmi_display_ops;
34static struct exynos_hdmi_manager_ops *hdmi_manager_ops;
35static struct exynos_hdmi_overlay_ops *hdmi_overlay_ops;
36
37struct drm_hdmi_context {
38 struct exynos_drm_subdrv subdrv;
39 struct exynos_drm_hdmi_context *hdmi_ctx;
40 struct exynos_drm_hdmi_context *mixer_ctx;
41 struct work_struct work;
42};
43
44void exynos_drm_display_ops_register(struct exynos_hdmi_display_ops
45 *display_ops)
46{
47 DRM_DEBUG_KMS("%s\n", __FILE__);
48
49 if (display_ops)
50 hdmi_display_ops = display_ops;
51}
52EXPORT_SYMBOL(exynos_drm_display_ops_register);
53
54void exynos_drm_manager_ops_register(struct exynos_hdmi_manager_ops
55 *manager_ops)
56{
57 DRM_DEBUG_KMS("%s\n", __FILE__);
58
59 if (manager_ops)
60 hdmi_manager_ops = manager_ops;
61}
62EXPORT_SYMBOL(exynos_drm_manager_ops_register);
63
64void exynos_drm_overlay_ops_register(struct exynos_hdmi_overlay_ops
65 *overlay_ops)
66{
67 DRM_DEBUG_KMS("%s\n", __FILE__);
68
69 if (overlay_ops)
70 hdmi_overlay_ops = overlay_ops;
71}
72EXPORT_SYMBOL(exynos_drm_overlay_ops_register);
73
74static bool drm_hdmi_is_connected(struct device *dev)
75{
76 struct drm_hdmi_context *ctx = to_context(dev);
77
78 DRM_DEBUG_KMS("%s\n", __FILE__);
79
80 if (hdmi_display_ops && hdmi_display_ops->is_connected)
81 return hdmi_display_ops->is_connected(ctx->hdmi_ctx->ctx);
82
83 return false;
84}
85
86static int drm_hdmi_get_edid(struct device *dev,
87 struct drm_connector *connector, u8 *edid, int len)
88{
89 struct drm_hdmi_context *ctx = to_context(dev);
90
91 DRM_DEBUG_KMS("%s\n", __FILE__);
92
93 if (hdmi_display_ops && hdmi_display_ops->get_edid)
94 return hdmi_display_ops->get_edid(ctx->hdmi_ctx->ctx,
95 connector, edid, len);
96
97 return 0;
98}
99
100static int drm_hdmi_check_timing(struct device *dev, void *timing)
101{
102 struct drm_hdmi_context *ctx = to_context(dev);
103
104 DRM_DEBUG_KMS("%s\n", __FILE__);
105
106 if (hdmi_display_ops && hdmi_display_ops->check_timing)
107 return hdmi_display_ops->check_timing(ctx->hdmi_ctx->ctx,
108 timing);
109
110 return 0;
111}
112
113static int drm_hdmi_power_on(struct device *dev, int mode)
114{
115 struct drm_hdmi_context *ctx = to_context(dev);
116
117 DRM_DEBUG_KMS("%s\n", __FILE__);
118
119 if (hdmi_display_ops && hdmi_display_ops->power_on)
120 return hdmi_display_ops->power_on(ctx->hdmi_ctx->ctx, mode);
121
122 return 0;
123}
124
125static struct exynos_drm_display_ops drm_hdmi_display_ops = {
126 .type = EXYNOS_DISPLAY_TYPE_HDMI,
127 .is_connected = drm_hdmi_is_connected,
128 .get_edid = drm_hdmi_get_edid,
129 .check_timing = drm_hdmi_check_timing,
130 .power_on = drm_hdmi_power_on,
131};
132
133static int drm_hdmi_enable_vblank(struct device *subdrv_dev)
134{
135 struct drm_hdmi_context *ctx = to_context(subdrv_dev);
136 struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
137 struct exynos_drm_manager *manager = &subdrv->manager;
138
139 DRM_DEBUG_KMS("%s\n", __FILE__);
140
141 if (hdmi_overlay_ops && hdmi_overlay_ops->enable_vblank)
142 return hdmi_overlay_ops->enable_vblank(ctx->mixer_ctx->ctx,
143 manager->pipe);
144
145 return 0;
146}
147
148static void drm_hdmi_disable_vblank(struct device *subdrv_dev)
149{
150 struct drm_hdmi_context *ctx = to_context(subdrv_dev);
151
152 DRM_DEBUG_KMS("%s\n", __FILE__);
153
154 if (hdmi_overlay_ops && hdmi_overlay_ops->disable_vblank)
155 return hdmi_overlay_ops->disable_vblank(ctx->mixer_ctx->ctx);
156}
157
158static void drm_hdmi_mode_set(struct device *subdrv_dev, void *mode)
159{
160 struct drm_hdmi_context *ctx = to_context(subdrv_dev);
161
162 DRM_DEBUG_KMS("%s\n", __FILE__);
163
164 if (hdmi_manager_ops && hdmi_manager_ops->mode_set)
165 hdmi_manager_ops->mode_set(ctx->hdmi_ctx->ctx, mode);
166}
167
168static void drm_hdmi_commit(struct device *subdrv_dev)
169{
170 struct drm_hdmi_context *ctx = to_context(subdrv_dev);
171
172 DRM_DEBUG_KMS("%s\n", __FILE__);
173
174 if (hdmi_manager_ops && hdmi_manager_ops->commit)
175 hdmi_manager_ops->commit(ctx->hdmi_ctx->ctx);
176}
177
178static void drm_hdmi_dpms(struct device *subdrv_dev, int mode)
179{
180 struct drm_hdmi_context *ctx = to_context(subdrv_dev);
181
182 DRM_DEBUG_KMS("%s\n", __FILE__);
183
184 switch (mode) {
185 case DRM_MODE_DPMS_ON:
186 break;
187 case DRM_MODE_DPMS_STANDBY:
188 case DRM_MODE_DPMS_SUSPEND:
189 case DRM_MODE_DPMS_OFF:
190 if (hdmi_manager_ops && hdmi_manager_ops->disable)
191 hdmi_manager_ops->disable(ctx->hdmi_ctx->ctx);
192 break;
193 default:
194 DRM_DEBUG_KMS("unkown dps mode: %d\n", mode);
195 break;
196 }
197}
198
199static struct exynos_drm_manager_ops drm_hdmi_manager_ops = {
200 .dpms = drm_hdmi_dpms,
201 .enable_vblank = drm_hdmi_enable_vblank,
202 .disable_vblank = drm_hdmi_disable_vblank,
203 .mode_set = drm_hdmi_mode_set,
204 .commit = drm_hdmi_commit,
205};
206
207static void drm_mixer_mode_set(struct device *subdrv_dev,
208 struct exynos_drm_overlay *overlay)
209{
210 struct drm_hdmi_context *ctx = to_context(subdrv_dev);
211
212 DRM_DEBUG_KMS("%s\n", __FILE__);
213
214 if (hdmi_overlay_ops && hdmi_overlay_ops->win_mode_set)
215 hdmi_overlay_ops->win_mode_set(ctx->mixer_ctx->ctx, overlay);
216}
217
218static void drm_mixer_commit(struct device *subdrv_dev, int zpos)
219{
220 struct drm_hdmi_context *ctx = to_context(subdrv_dev);
221
222 DRM_DEBUG_KMS("%s\n", __FILE__);
223
224 if (hdmi_overlay_ops && hdmi_overlay_ops->win_commit)
225 hdmi_overlay_ops->win_commit(ctx->mixer_ctx->ctx, zpos);
226}
227
228static void drm_mixer_disable(struct device *subdrv_dev, int zpos)
229{
230 struct drm_hdmi_context *ctx = to_context(subdrv_dev);
231
232 DRM_DEBUG_KMS("%s\n", __FILE__);
233
234 if (hdmi_overlay_ops && hdmi_overlay_ops->win_disable)
235 hdmi_overlay_ops->win_disable(ctx->mixer_ctx->ctx, zpos);
236}
237
238static struct exynos_drm_overlay_ops drm_hdmi_overlay_ops = {
239 .mode_set = drm_mixer_mode_set,
240 .commit = drm_mixer_commit,
241 .disable = drm_mixer_disable,
242};
243
244
245static int hdmi_subdrv_probe(struct drm_device *drm_dev,
246 struct device *dev)
247{
248 struct exynos_drm_subdrv *subdrv = to_subdrv(dev);
249 struct drm_hdmi_context *ctx;
250 struct platform_device *pdev = to_platform_device(dev);
251 struct exynos_drm_common_hdmi_pd *pd;
252 int ret;
253
254 DRM_DEBUG_KMS("%s\n", __FILE__);
255
256 pd = pdev->dev.platform_data;
257
258 if (!pd) {
259 DRM_DEBUG_KMS("platform data is null.\n");
260 return -EFAULT;
261 }
262
263 if (!pd->hdmi_dev) {
264 DRM_DEBUG_KMS("hdmi device is null.\n");
265 return -EFAULT;
266 }
267
268 if (!pd->mixer_dev) {
269 DRM_DEBUG_KMS("mixer device is null.\n");
270 return -EFAULT;
271 }
272
273 ret = platform_driver_register(&hdmi_driver);
274 if (ret) {
275 DRM_DEBUG_KMS("failed to register hdmi driver.\n");
276 return ret;
277 }
278
279 ret = platform_driver_register(&mixer_driver);
280 if (ret) {
281 DRM_DEBUG_KMS("failed to register mixer driver.\n");
282 goto err_hdmidrv;
283 }
284
285 ctx = get_ctx_from_subdrv(subdrv);
286
287 ctx->hdmi_ctx = (struct exynos_drm_hdmi_context *)
288 to_context(pd->hdmi_dev);
289 if (!ctx->hdmi_ctx) {
290 DRM_DEBUG_KMS("hdmi context is null.\n");
291 ret = -EFAULT;
292 goto err_mixerdrv;
293 }
294
295 ctx->hdmi_ctx->drm_dev = drm_dev;
296
297 ctx->mixer_ctx = (struct exynos_drm_hdmi_context *)
298 to_context(pd->mixer_dev);
299 if (!ctx->mixer_ctx) {
300 DRM_DEBUG_KMS("mixer context is null.\n");
301 ret = -EFAULT;
302 goto err_mixerdrv;
303 }
304
305 ctx->mixer_ctx->drm_dev = drm_dev;
306
307 return 0;
308
309err_mixerdrv:
310 platform_driver_unregister(&mixer_driver);
311err_hdmidrv:
312 platform_driver_unregister(&hdmi_driver);
313 return ret;
314}
315
316static void hdmi_subdrv_remove(struct drm_device *drm_dev)
317{
318 DRM_DEBUG_KMS("%s\n", __FILE__);
319
320 platform_driver_unregister(&hdmi_driver);
321 platform_driver_unregister(&mixer_driver);
322}
323
324static void exynos_drm_hdmi_late_probe(struct work_struct *work)
325{
326 struct drm_hdmi_context *ctx = container_of(work,
327 struct drm_hdmi_context, work);
328
329 /*
330	 * this function calls subdrv->probe(), so it must run outside of
331	 * the probe context (hence the deferred work).
332	 *
333	 * PS. subdrv->probe() will call platform_driver_register() to probe
334	 * the hdmi and mixer drivers.
335 */
336 exynos_drm_subdrv_register(&ctx->subdrv);
337}
338
339static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
340{
341 struct device *dev = &pdev->dev;
342 struct exynos_drm_subdrv *subdrv;
343 struct drm_hdmi_context *ctx;
344
345 DRM_DEBUG_KMS("%s\n", __FILE__);
346
347 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
348 if (!ctx) {
349 DRM_LOG_KMS("failed to alloc common hdmi context.\n");
350 return -ENOMEM;
351 }
352
353 subdrv = &ctx->subdrv;
354
355 subdrv->probe = hdmi_subdrv_probe;
356 subdrv->remove = hdmi_subdrv_remove;
357 subdrv->manager.pipe = -1;
358 subdrv->manager.ops = &drm_hdmi_manager_ops;
359 subdrv->manager.overlay_ops = &drm_hdmi_overlay_ops;
360 subdrv->manager.display_ops = &drm_hdmi_display_ops;
361 subdrv->manager.dev = dev;
362
363 platform_set_drvdata(pdev, subdrv);
364
365 INIT_WORK(&ctx->work, exynos_drm_hdmi_late_probe);
366
367 schedule_work(&ctx->work);
368
369 return 0;
370}
371
372static int hdmi_runtime_suspend(struct device *dev)
373{
374 DRM_DEBUG_KMS("%s\n", __FILE__);
375
376 return 0;
377}
378
379static int hdmi_runtime_resume(struct device *dev)
380{
381 DRM_DEBUG_KMS("%s\n", __FILE__);
382
383 return 0;
384}
385
386static const struct dev_pm_ops hdmi_pm_ops = {
387 .runtime_suspend = hdmi_runtime_suspend,
388 .runtime_resume = hdmi_runtime_resume,
389};
390
391static int __devexit exynos_drm_hdmi_remove(struct platform_device *pdev)
392{
393 struct drm_hdmi_context *ctx = platform_get_drvdata(pdev);
394
395 DRM_DEBUG_KMS("%s\n", __FILE__);
396
397 exynos_drm_subdrv_unregister(&ctx->subdrv);
398 kfree(ctx);
399
400 return 0;
401}
402
403static struct platform_driver exynos_drm_common_hdmi_driver = {
404 .probe = exynos_drm_hdmi_probe,
405 .remove = __devexit_p(exynos_drm_hdmi_remove),
406 .driver = {
407 .name = "exynos-drm-hdmi",
408 .owner = THIS_MODULE,
409 .pm = &hdmi_pm_ops,
410 },
411};
412
413static int __init exynos_drm_hdmi_init(void)
414{
415 int ret;
416
417 DRM_DEBUG_KMS("%s\n", __FILE__);
418
419 ret = platform_driver_register(&exynos_drm_common_hdmi_driver);
420 if (ret) {
421 DRM_DEBUG_KMS("failed to register hdmi common driver.\n");
422 return ret;
423 }
424
425 return ret;
426}
427
428static void __exit exynos_drm_hdmi_exit(void)
429{
430 platform_driver_unregister(&exynos_drm_common_hdmi_driver);
431}
432
433module_init(exynos_drm_hdmi_init);
434module_exit(exynos_drm_hdmi_exit);
435
436MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
437MODULE_AUTHOR("Seung-Woo Kim, <sw0312.kim@samsung.com>");
438MODULE_DESCRIPTION("Samsung SoC DRM HDMI Driver");
439MODULE_LICENSE("GPL");
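The wrapper only dereferences ops that a backing driver has registered, so the expected wiring is for the hdmi and mixer drivers to call the *_ops_register() helpers from their own probe paths. A hedged sketch (the my_* names and stub bodies are hypothetical; the real callback tables appear in exynos_hdmi.c below):

/* hypothetical stubs standing in for real hardware callbacks */
static bool my_is_connected(void *ctx) { return true; }
static int my_get_edid(void *ctx, struct drm_connector *c, u8 *e, int l)
{ return -ENODEV; }
static int my_check_timing(void *ctx, void *timing) { return 0; }
static int my_power_on(void *ctx, int mode) { return 0; }

static struct exynos_hdmi_display_ops my_display_ops = {
	.is_connected	= my_is_connected,
	.get_edid	= my_get_edid,
	.check_timing	= my_check_timing,
	.power_on	= my_power_on,
};

static int __devinit my_hdmi_probe(struct platform_device *pdev)
{
	/* publish hardware callbacks before the subdrv probe runs */
	exynos_drm_display_ops_register(&my_display_ops);
	return 0;
}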
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
new file mode 100644
index 00000000000..3c29f790ee4
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
@@ -0,0 +1,73 @@
1/* exynos_drm_hdmi.h
2 *
3 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
4 * Author: Inki Dae <inki.dae@samsung.com>
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23 * OTHER DEALINGS IN THE SOFTWARE.
24 */
25
26#ifndef _EXYNOS_DRM_HDMI_H_
27#define _EXYNOS_DRM_HDMI_H_
28
29/*
30 * exynos hdmi common context structure.
31 *
32 * @drm_dev: pointer to drm_device.
33 * @ctx: pointer to the context of specific device driver.
34 * this context should be hdmi_context or mixer_context.
35 */
36struct exynos_drm_hdmi_context {
37 struct drm_device *drm_dev;
38 void *ctx;
39};
40
41struct exynos_hdmi_display_ops {
42 bool (*is_connected)(void *ctx);
43 int (*get_edid)(void *ctx, struct drm_connector *connector,
44 u8 *edid, int len);
45 int (*check_timing)(void *ctx, void *timing);
46 int (*power_on)(void *ctx, int mode);
47};
48
49struct exynos_hdmi_manager_ops {
50 void (*mode_set)(void *ctx, void *mode);
51 void (*commit)(void *ctx);
52 void (*disable)(void *ctx);
53};
54
55struct exynos_hdmi_overlay_ops {
56 int (*enable_vblank)(void *ctx, int pipe);
57 void (*disable_vblank)(void *ctx);
58 void (*win_mode_set)(void *ctx, struct exynos_drm_overlay *overlay);
59 void (*win_commit)(void *ctx, int zpos);
60 void (*win_disable)(void *ctx, int zpos);
61};
62
63extern struct platform_driver hdmi_driver;
64extern struct platform_driver mixer_driver;
65
66void exynos_drm_display_ops_register(struct exynos_hdmi_display_ops
67 *display_ops);
68void exynos_drm_manager_ops_register(struct exynos_hdmi_manager_ops
69 *manager_ops);
70void exynos_drm_overlay_ops_register(struct exynos_hdmi_overlay_ops
71 *overlay_ops);
72
73#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
new file mode 100644
index 00000000000..bdcf770aa22
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -0,0 +1,163 @@
1/*
2 * Copyright (C) 2011 Samsung Electronics Co.Ltd
3 * Authors: Joonyoung Shim <jy0922.shim@samsung.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or (at your
8 * option) any later version.
9 *
10 */
11
12#include "drmP.h"
13
14#include "exynos_drm.h"
15#include "exynos_drm_crtc.h"
16#include "exynos_drm_drv.h"
17#include "exynos_drm_encoder.h"
18
19struct exynos_plane {
20 struct drm_plane base;
21 struct exynos_drm_overlay overlay;
22 bool enabled;
23};
24
25static int
26exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
27 struct drm_framebuffer *fb, int crtc_x, int crtc_y,
28 unsigned int crtc_w, unsigned int crtc_h,
29 uint32_t src_x, uint32_t src_y,
30 uint32_t src_w, uint32_t src_h)
31{
32 struct exynos_plane *exynos_plane =
33 container_of(plane, struct exynos_plane, base);
34 struct exynos_drm_overlay *overlay = &exynos_plane->overlay;
35 struct exynos_drm_crtc_pos pos;
36 unsigned int x = src_x >> 16;
37 unsigned int y = src_y >> 16;
38 int ret;
39
40 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
41
42 memset(&pos, 0, sizeof(struct exynos_drm_crtc_pos));
43 pos.crtc_x = crtc_x;
44 pos.crtc_y = crtc_y;
45 pos.crtc_w = crtc_w;
46 pos.crtc_h = crtc_h;
47
48 pos.fb_x = x;
49 pos.fb_y = y;
50
51 /* TODO: scale feature */
52 ret = exynos_drm_overlay_update(overlay, fb, &crtc->mode, &pos);
53 if (ret < 0)
54 return ret;
55
56 exynos_drm_fn_encoder(crtc, overlay,
57 exynos_drm_encoder_crtc_mode_set);
58 exynos_drm_fn_encoder(crtc, &overlay->zpos,
59 exynos_drm_encoder_crtc_plane_commit);
60
61 exynos_plane->enabled = true;
62
63 return 0;
64}
65
66static int exynos_disable_plane(struct drm_plane *plane)
67{
68 struct exynos_plane *exynos_plane =
69 container_of(plane, struct exynos_plane, base);
70 struct exynos_drm_overlay *overlay = &exynos_plane->overlay;
71
72 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
73
74 if (!exynos_plane->enabled)
75 return 0;
76
77 exynos_drm_fn_encoder(plane->crtc, &overlay->zpos,
78 exynos_drm_encoder_crtc_disable);
79
80 exynos_plane->enabled = false;
81 exynos_plane->overlay.zpos = DEFAULT_ZPOS;
82
83 return 0;
84}
85
86static void exynos_plane_destroy(struct drm_plane *plane)
87{
88 struct exynos_plane *exynos_plane =
89 container_of(plane, struct exynos_plane, base);
90
91 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
92
93 exynos_disable_plane(plane);
94 drm_plane_cleanup(plane);
95 kfree(exynos_plane);
96}
97
98static struct drm_plane_funcs exynos_plane_funcs = {
99 .update_plane = exynos_update_plane,
100 .disable_plane = exynos_disable_plane,
101 .destroy = exynos_plane_destroy,
102};
103
104int exynos_plane_init(struct drm_device *dev, unsigned int nr)
105{
106 struct exynos_plane *exynos_plane;
107 uint32_t possible_crtcs;
108
109 exynos_plane = kzalloc(sizeof(struct exynos_plane), GFP_KERNEL);
110 if (!exynos_plane)
111 return -ENOMEM;
112
113 /* all CRTCs are available */
114 possible_crtcs = (1 << MAX_CRTC) - 1;
115
116 exynos_plane->overlay.zpos = DEFAULT_ZPOS;
117
118 /* TODO: format */
119 return drm_plane_init(dev, &exynos_plane->base, possible_crtcs,
120 &exynos_plane_funcs, NULL, 0, false);
121}
122
123int exynos_plane_set_zpos_ioctl(struct drm_device *dev, void *data,
124 struct drm_file *file_priv)
125{
126 struct drm_exynos_plane_set_zpos *zpos_req = data;
127 struct drm_mode_object *obj;
128 struct drm_plane *plane;
129 struct exynos_plane *exynos_plane;
130 int ret = 0;
131
132 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
133
134 if (!drm_core_check_feature(dev, DRIVER_MODESET))
135 return -EINVAL;
136
137 if (zpos_req->zpos < 0 || zpos_req->zpos >= MAX_PLANE) {
138 if (zpos_req->zpos != DEFAULT_ZPOS) {
139 DRM_ERROR("zpos not within limits\n");
140 return -EINVAL;
141 }
142 }
143
144 mutex_lock(&dev->mode_config.mutex);
145
146 obj = drm_mode_object_find(dev, zpos_req->plane_id,
147 DRM_MODE_OBJECT_PLANE);
148 if (!obj) {
149 DRM_DEBUG_KMS("Unknown plane ID %d\n",
150 zpos_req->plane_id);
151 ret = -EINVAL;
152 goto out;
153 }
154
155 plane = obj_to_plane(obj);
156 exynos_plane = container_of(plane, struct exynos_plane, base);
157
158 exynos_plane->overlay.zpos = zpos_req->zpos;
159
160out:
161 mutex_unlock(&dev->mode_config.mutex);
162 return ret;
163}
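The new ioctl is driven from userspace with the request layout the handler above expects. A sketch follows (editorial: the DRM_IOCTL_EXYNOS_PLANE_SET_ZPOS macro name is assumed from the matching exynos_drm.h UAPI change, which this excerpt does not show):

#include <sys/ioctl.h>
#include <drm/exynos_drm.h>

static int set_plane_zpos(int fd, unsigned int plane_id, int zpos)
{
	struct drm_exynos_plane_set_zpos req = {
		.plane_id = plane_id,
		.zpos = zpos,	/* 0 .. MAX_PLANE-1, or DEFAULT_ZPOS */
	};

	/* dispatched to exynos_plane_set_zpos_ioctl() above */
	return ioctl(fd, DRM_IOCTL_EXYNOS_PLANE_SET_ZPOS, &req);
}

The handler only records the zpos in the overlay; the value takes effect on the next plane update/commit.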
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.h b/drivers/gpu/drm/exynos/exynos_drm_plane.h
new file mode 100644
index 00000000000..16b71f8217e
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.h
@@ -0,0 +1,14 @@
1/*
2 * Copyright (C) 2011 Samsung Electronics Co.Ltd
3 * Authors: Joonyoung Shim <jy0922.shim@samsung.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or (at your
8 * option) any later version.
9 *
10 */
11
12int exynos_plane_init(struct drm_device *dev, unsigned int nr);
13int exynos_plane_set_zpos_ioctl(struct drm_device *dev, void *data,
14 struct drm_file *file_priv);
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
new file mode 100644
index 00000000000..f48f7ce92f5
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -0,0 +1,1176 @@
1/*
2 * Copyright (C) 2011 Samsung Electronics Co.Ltd
3 * Authors:
4 * Seung-Woo Kim <sw0312.kim@samsung.com>
5 * Inki Dae <inki.dae@samsung.com>
6 * Joonyoung Shim <jy0922.shim@samsung.com>
7 *
8 * Based on drivers/media/video/s5p-tv/hdmi_drv.c
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 *
15 */
16
17#include "drmP.h"
18#include "drm_edid.h"
19#include "drm_crtc_helper.h"
20
21#include "regs-hdmi.h"
22
23#include <linux/kernel.h>
24#include <linux/spinlock.h>
25#include <linux/wait.h>
26#include <linux/i2c.h>
27#include <linux/module.h>
28#include <linux/platform_device.h>
29#include <linux/interrupt.h>
30#include <linux/irq.h>
31#include <linux/delay.h>
32#include <linux/pm_runtime.h>
33#include <linux/clk.h>
34#include <linux/regulator/consumer.h>
35
36#include <drm/exynos_drm.h>
37
38#include "exynos_drm_drv.h"
39#include "exynos_drm_hdmi.h"
40
41#include "exynos_hdmi.h"
42
43#define HDMI_OVERLAY_NUMBER 3
44#define get_hdmi_context(dev) platform_get_drvdata(to_platform_device(dev))
45
46static const u8 hdmiphy_conf27[32] = {
47 0x01, 0x05, 0x00, 0xD8, 0x10, 0x1C, 0x30, 0x40,
48 0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87,
49 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
50 0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00,
51};
52
53static const u8 hdmiphy_conf27_027[32] = {
54 0x01, 0x05, 0x00, 0xD4, 0x10, 0x9C, 0x09, 0x64,
55 0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87,
56 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
57 0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00,
58};
59
60static const u8 hdmiphy_conf74_175[32] = {
61 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xef, 0x5B,
62 0x6D, 0x10, 0x01, 0x51, 0xef, 0xF3, 0x54, 0xb9,
63 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
64 0x22, 0x40, 0xa5, 0x26, 0x01, 0x00, 0x00, 0x00,
65};
66
67static const u8 hdmiphy_conf74_25[32] = {
68 0x01, 0x05, 0x00, 0xd8, 0x10, 0x9c, 0xf8, 0x40,
69 0x6a, 0x10, 0x01, 0x51, 0xff, 0xf1, 0x54, 0xba,
70 0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xe0,
71 0x22, 0x40, 0xa4, 0x26, 0x01, 0x00, 0x00, 0x00,
72};
73
74static const u8 hdmiphy_conf148_5[32] = {
75 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xf8, 0x40,
76 0x6A, 0x18, 0x00, 0x51, 0xff, 0xF1, 0x54, 0xba,
77 0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xE0,
78 0x22, 0x40, 0xa4, 0x26, 0x02, 0x00, 0x00, 0x00,
79};
80
81struct hdmi_tg_regs {
82 u8 cmd;
83 u8 h_fsz_l;
84 u8 h_fsz_h;
85 u8 hact_st_l;
86 u8 hact_st_h;
87 u8 hact_sz_l;
88 u8 hact_sz_h;
89 u8 v_fsz_l;
90 u8 v_fsz_h;
91 u8 vsync_l;
92 u8 vsync_h;
93 u8 vsync2_l;
94 u8 vsync2_h;
95 u8 vact_st_l;
96 u8 vact_st_h;
97 u8 vact_sz_l;
98 u8 vact_sz_h;
99 u8 field_chg_l;
100 u8 field_chg_h;
101 u8 vact_st2_l;
102 u8 vact_st2_h;
103 u8 vsync_top_hdmi_l;
104 u8 vsync_top_hdmi_h;
105 u8 vsync_bot_hdmi_l;
106 u8 vsync_bot_hdmi_h;
107 u8 field_top_hdmi_l;
108 u8 field_top_hdmi_h;
109 u8 field_bot_hdmi_l;
110 u8 field_bot_hdmi_h;
111};
112
113struct hdmi_core_regs {
114 u8 h_blank[2];
115 u8 v_blank[3];
116 u8 h_v_line[3];
117 u8 vsync_pol[1];
118 u8 int_pro_mode[1];
119 u8 v_blank_f[3];
120 u8 h_sync_gen[3];
121 u8 v_sync_gen1[3];
122 u8 v_sync_gen2[3];
123 u8 v_sync_gen3[3];
124};
125
126struct hdmi_preset_conf {
127 struct hdmi_core_regs core;
128 struct hdmi_tg_regs tg;
129};
130
131static const struct hdmi_preset_conf hdmi_conf_480p = {
132 .core = {
133 .h_blank = {0x8a, 0x00},
134 .v_blank = {0x0d, 0x6a, 0x01},
135 .h_v_line = {0x0d, 0xa2, 0x35},
136 .vsync_pol = {0x01},
137 .int_pro_mode = {0x00},
138 .v_blank_f = {0x00, 0x00, 0x00},
139 .h_sync_gen = {0x0e, 0x30, 0x11},
140 .v_sync_gen1 = {0x0f, 0x90, 0x00},
141 /* other fields: don't care */
142 },
143 .tg = {
144 0x00, /* cmd */
145 0x5a, 0x03, /* h_fsz */
146 0x8a, 0x00, 0xd0, 0x02, /* hact */
147 0x0d, 0x02, /* v_fsz */
148 0x01, 0x00, 0x33, 0x02, /* vsync */
149 0x2d, 0x00, 0xe0, 0x01, /* vact */
150 0x33, 0x02, /* field_chg */
151 0x49, 0x02, /* vact_st2 */
152 0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
153 0x01, 0x00, 0x33, 0x02, /* field top/bot */
154 },
155};
156
157static const struct hdmi_preset_conf hdmi_conf_720p60 = {
158 .core = {
159 .h_blank = {0x72, 0x01},
160 .v_blank = {0xee, 0xf2, 0x00},
161 .h_v_line = {0xee, 0x22, 0x67},
162 .vsync_pol = {0x00},
163 .int_pro_mode = {0x00},
164 .v_blank_f = {0x00, 0x00, 0x00}, /* don't care */
165 .h_sync_gen = {0x6c, 0x50, 0x02},
166 .v_sync_gen1 = {0x0a, 0x50, 0x00},
167 .v_sync_gen2 = {0x01, 0x10, 0x00},
168 .v_sync_gen3 = {0x01, 0x10, 0x00},
169 /* other fields: don't care */
170 },
171 .tg = {
172 0x00, /* cmd */
173 0x72, 0x06, /* h_fsz */
174 0x71, 0x01, 0x01, 0x05, /* hact */
175 0xee, 0x02, /* v_fsz */
176 0x01, 0x00, 0x33, 0x02, /* vsync */
177 0x1e, 0x00, 0xd0, 0x02, /* vact */
178 0x33, 0x02, /* field_chg */
179 0x49, 0x02, /* vact_st2 */
180 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
181 0x01, 0x00, 0x33, 0x02, /* field top/bot */
182 },
183};
184
185static const struct hdmi_preset_conf hdmi_conf_1080i50 = {
186 .core = {
187 .h_blank = {0xd0, 0x02},
188 .v_blank = {0x32, 0xB2, 0x00},
189 .h_v_line = {0x65, 0x04, 0xa5},
190 .vsync_pol = {0x00},
191 .int_pro_mode = {0x01},
192 .v_blank_f = {0x49, 0x2A, 0x23},
193 .h_sync_gen = {0x0E, 0xEA, 0x08},
194 .v_sync_gen1 = {0x07, 0x20, 0x00},
195 .v_sync_gen2 = {0x39, 0x42, 0x23},
196 .v_sync_gen3 = {0x38, 0x87, 0x73},
197 /* other fields: don't care */
198 },
199 .tg = {
200 0x00, /* cmd */
201 0x50, 0x0A, /* h_fsz */
202 0xCF, 0x02, 0x81, 0x07, /* hact */
203 0x65, 0x04, /* v_fsz */
204 0x01, 0x00, 0x33, 0x02, /* vsync */
205 0x16, 0x00, 0x1c, 0x02, /* vact */
206 0x33, 0x02, /* field_chg */
207 0x49, 0x02, /* vact_st2 */
208 0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
209 0x01, 0x00, 0x33, 0x02, /* field top/bot */
210 },
211};
212
213static const struct hdmi_preset_conf hdmi_conf_1080p50 = {
214 .core = {
215 .h_blank = {0xd0, 0x02},
216 .v_blank = {0x65, 0x6c, 0x01},
217 .h_v_line = {0x65, 0x04, 0xa5},
218 .vsync_pol = {0x00},
219 .int_pro_mode = {0x00},
220 .v_blank_f = {0x00, 0x00, 0x00}, /* don't care */
221 .h_sync_gen = {0x0e, 0xea, 0x08},
222 .v_sync_gen1 = {0x09, 0x40, 0x00},
223 .v_sync_gen2 = {0x01, 0x10, 0x00},
224 .v_sync_gen3 = {0x01, 0x10, 0x00},
225 /* other fields: don't care */
226 },
227 .tg = {
228 0x00, /* cmd */
229 0x50, 0x0A, /* h_fsz */
230 0xCF, 0x02, 0x81, 0x07, /* hact */
231 0x65, 0x04, /* v_fsz */
232 0x01, 0x00, 0x33, 0x02, /* vsync */
233 0x2d, 0x00, 0x38, 0x04, /* vact */
234 0x33, 0x02, /* field_chg */
235 0x48, 0x02, /* vact_st2 */
236 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
237 0x01, 0x00, 0x33, 0x02, /* field top/bot */
238 },
239};
240
241static const struct hdmi_preset_conf hdmi_conf_1080i60 = {
242 .core = {
243 .h_blank = {0x18, 0x01},
244 .v_blank = {0x32, 0xB2, 0x00},
245 .h_v_line = {0x65, 0x84, 0x89},
246 .vsync_pol = {0x00},
247 .int_pro_mode = {0x01},
248 .v_blank_f = {0x49, 0x2A, 0x23},
249 .h_sync_gen = {0x56, 0x08, 0x02},
250 .v_sync_gen1 = {0x07, 0x20, 0x00},
251 .v_sync_gen2 = {0x39, 0x42, 0x23},
252 .v_sync_gen3 = {0xa4, 0x44, 0x4a},
253 /* other fields: don't care */
254 },
255 .tg = {
256 0x00, /* cmd */
257 0x98, 0x08, /* h_fsz */
258 0x17, 0x01, 0x81, 0x07, /* hact */
259 0x65, 0x04, /* v_fsz */
260 0x01, 0x00, 0x33, 0x02, /* vsync */
261 0x16, 0x00, 0x1c, 0x02, /* vact */
262 0x33, 0x02, /* field_chg */
263 0x49, 0x02, /* vact_st2 */
264 0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
265 0x01, 0x00, 0x33, 0x02, /* field top/bot */
266 },
267};
268
269static const struct hdmi_preset_conf hdmi_conf_1080p60 = {
270 .core = {
271 .h_blank = {0x18, 0x01},
272 .v_blank = {0x65, 0x6c, 0x01},
273 .h_v_line = {0x65, 0x84, 0x89},
274 .vsync_pol = {0x00},
275 .int_pro_mode = {0x00},
276 .v_blank_f = {0x00, 0x00, 0x00}, /* don't care */
277 .h_sync_gen = {0x56, 0x08, 0x02},
278 .v_sync_gen1 = {0x09, 0x40, 0x00},
279 .v_sync_gen2 = {0x01, 0x10, 0x00},
280 .v_sync_gen3 = {0x01, 0x10, 0x00},
281 /* other fields: don't care */
282 },
283 .tg = {
284 0x00, /* cmd */
285 0x98, 0x08, /* h_fsz */
286 0x17, 0x01, 0x81, 0x07, /* hact */
287 0x65, 0x04, /* v_fsz */
288 0x01, 0x00, 0x33, 0x02, /* vsync */
289 0x2d, 0x00, 0x38, 0x04, /* vact */
290 0x33, 0x02, /* field_chg */
291 0x48, 0x02, /* vact_st2 */
292 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
293 0x01, 0x00, 0x33, 0x02, /* field top/bot */
294 },
295};
296
297static const struct hdmi_conf hdmi_confs[] = {
298 { 1280, 720, 60, false, hdmiphy_conf74_25, &hdmi_conf_720p60 },
299 { 1280, 720, 50, false, hdmiphy_conf74_25, &hdmi_conf_720p60 },
300 { 720, 480, 60, false, hdmiphy_conf27_027, &hdmi_conf_480p },
301 { 1920, 1080, 50, true, hdmiphy_conf74_25, &hdmi_conf_1080i50 },
302 { 1920, 1080, 50, false, hdmiphy_conf148_5, &hdmi_conf_1080p50 },
303 { 1920, 1080, 60, true, hdmiphy_conf74_25, &hdmi_conf_1080i60 },
304 { 1920, 1080, 60, false, hdmiphy_conf148_5, &hdmi_conf_1080p60 },
305};
306
307
308static inline u32 hdmi_reg_read(struct hdmi_context *hdata, u32 reg_id)
309{
310 return readl(hdata->regs + reg_id);
311}
312
313static inline void hdmi_reg_writeb(struct hdmi_context *hdata,
314 u32 reg_id, u8 value)
315{
316 writeb(value, hdata->regs + reg_id);
317}
318
319static inline void hdmi_reg_writemask(struct hdmi_context *hdata,
320 u32 reg_id, u32 value, u32 mask)
321{
322 u32 old = readl(hdata->regs + reg_id);
323 value = (value & mask) | (old & ~mask);
324 writel(value, hdata->regs + reg_id);
325}
326
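/*
 * [editorial note, not part of the patch] hdmi_reg_writemask() above is
 * a read-modify-write: bits inside 'mask' are taken from 'value', all
 * other bits keep their old contents. For example, with old = 0xf0,
 * value = ~0 and mask = 0x01:
 *
 *	new = (~0 & 0x01) | (0xf0 & ~0x01) = 0xf1
 *
 * which is why calls such as hdmi_reg_writemask(hdata, HDMI_CON_0, ~0,
 * HDMI_EN) later in this file flip only the enable bit and leave the
 * rest of HDMI_CON_0 untouched.
 */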
327static void hdmi_regs_dump(struct hdmi_context *hdata, char *prefix)
328{
329#define DUMPREG(reg_id) \
330 DRM_DEBUG_KMS("%s:" #reg_id " = %08x\n", prefix, \
331 readl(hdata->regs + reg_id))
332 DRM_DEBUG_KMS("%s: ---- CONTROL REGISTERS ----\n", prefix);
333 DUMPREG(HDMI_INTC_FLAG);
334 DUMPREG(HDMI_INTC_CON);
335 DUMPREG(HDMI_HPD_STATUS);
336 DUMPREG(HDMI_PHY_RSTOUT);
337 DUMPREG(HDMI_PHY_VPLL);
338 DUMPREG(HDMI_PHY_CMU);
339 DUMPREG(HDMI_CORE_RSTOUT);
340
341 DRM_DEBUG_KMS("%s: ---- CORE REGISTERS ----\n", prefix);
342 DUMPREG(HDMI_CON_0);
343 DUMPREG(HDMI_CON_1);
344 DUMPREG(HDMI_CON_2);
345 DUMPREG(HDMI_SYS_STATUS);
346 DUMPREG(HDMI_PHY_STATUS);
347 DUMPREG(HDMI_STATUS_EN);
348 DUMPREG(HDMI_HPD);
349 DUMPREG(HDMI_MODE_SEL);
350 DUMPREG(HDMI_HPD_GEN);
351 DUMPREG(HDMI_DC_CONTROL);
352 DUMPREG(HDMI_VIDEO_PATTERN_GEN);
353
354 DRM_DEBUG_KMS("%s: ---- CORE SYNC REGISTERS ----\n", prefix);
355 DUMPREG(HDMI_H_BLANK_0);
356 DUMPREG(HDMI_H_BLANK_1);
357 DUMPREG(HDMI_V_BLANK_0);
358 DUMPREG(HDMI_V_BLANK_1);
359 DUMPREG(HDMI_V_BLANK_2);
360 DUMPREG(HDMI_H_V_LINE_0);
361 DUMPREG(HDMI_H_V_LINE_1);
362 DUMPREG(HDMI_H_V_LINE_2);
363 DUMPREG(HDMI_VSYNC_POL);
364 DUMPREG(HDMI_INT_PRO_MODE);
365 DUMPREG(HDMI_V_BLANK_F_0);
366 DUMPREG(HDMI_V_BLANK_F_1);
367 DUMPREG(HDMI_V_BLANK_F_2);
368 DUMPREG(HDMI_H_SYNC_GEN_0);
369 DUMPREG(HDMI_H_SYNC_GEN_1);
370 DUMPREG(HDMI_H_SYNC_GEN_2);
371 DUMPREG(HDMI_V_SYNC_GEN_1_0);
372 DUMPREG(HDMI_V_SYNC_GEN_1_1);
373 DUMPREG(HDMI_V_SYNC_GEN_1_2);
374 DUMPREG(HDMI_V_SYNC_GEN_2_0);
375 DUMPREG(HDMI_V_SYNC_GEN_2_1);
376 DUMPREG(HDMI_V_SYNC_GEN_2_2);
377 DUMPREG(HDMI_V_SYNC_GEN_3_0);
378 DUMPREG(HDMI_V_SYNC_GEN_3_1);
379 DUMPREG(HDMI_V_SYNC_GEN_3_2);
380
381 DRM_DEBUG_KMS("%s: ---- TG REGISTERS ----\n", prefix);
382 DUMPREG(HDMI_TG_CMD);
383 DUMPREG(HDMI_TG_H_FSZ_L);
384 DUMPREG(HDMI_TG_H_FSZ_H);
385 DUMPREG(HDMI_TG_HACT_ST_L);
386 DUMPREG(HDMI_TG_HACT_ST_H);
387 DUMPREG(HDMI_TG_HACT_SZ_L);
388 DUMPREG(HDMI_TG_HACT_SZ_H);
389 DUMPREG(HDMI_TG_V_FSZ_L);
390 DUMPREG(HDMI_TG_V_FSZ_H);
391 DUMPREG(HDMI_TG_VSYNC_L);
392 DUMPREG(HDMI_TG_VSYNC_H);
393 DUMPREG(HDMI_TG_VSYNC2_L);
394 DUMPREG(HDMI_TG_VSYNC2_H);
395 DUMPREG(HDMI_TG_VACT_ST_L);
396 DUMPREG(HDMI_TG_VACT_ST_H);
397 DUMPREG(HDMI_TG_VACT_SZ_L);
398 DUMPREG(HDMI_TG_VACT_SZ_H);
399 DUMPREG(HDMI_TG_FIELD_CHG_L);
400 DUMPREG(HDMI_TG_FIELD_CHG_H);
401 DUMPREG(HDMI_TG_VACT_ST2_L);
402 DUMPREG(HDMI_TG_VACT_ST2_H);
403 DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_L);
404 DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_H);
405 DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_L);
406 DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_H);
407 DUMPREG(HDMI_TG_FIELD_TOP_HDMI_L);
408 DUMPREG(HDMI_TG_FIELD_TOP_HDMI_H);
409 DUMPREG(HDMI_TG_FIELD_BOT_HDMI_L);
410 DUMPREG(HDMI_TG_FIELD_BOT_HDMI_H);
411#undef DUMPREG
412}
413
414static int hdmi_conf_index(struct drm_display_mode *mode)
415{
416 int i;
417
418 for (i = 0; i < ARRAY_SIZE(hdmi_confs); ++i)
419 if (hdmi_confs[i].width == mode->hdisplay &&
420 hdmi_confs[i].height == mode->vdisplay &&
421 hdmi_confs[i].vrefresh == mode->vrefresh &&
422 hdmi_confs[i].interlace ==
423 ((mode->flags & DRM_MODE_FLAG_INTERLACE) ?
424 true : false))
425 return i;
426
427 return -1;
428}
429
430static bool hdmi_is_connected(void *ctx)
431{
432 struct hdmi_context *hdata = (struct hdmi_context *)ctx;
433 u32 val = hdmi_reg_read(hdata, HDMI_HPD_STATUS);
434
435 if (val)
436 return true;
437
438 return false;
439}
440
441static int hdmi_get_edid(void *ctx, struct drm_connector *connector,
442 u8 *edid, int len)
443{
444 struct edid *raw_edid;
445 struct hdmi_context *hdata = (struct hdmi_context *)ctx;
446
447 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
448
449 if (!hdata->ddc_port)
450 return -ENODEV;
451
452 raw_edid = drm_get_edid(connector, hdata->ddc_port->adapter);
453 if (raw_edid) {
454 memcpy(edid, raw_edid, min((1 + raw_edid->extensions)
455 * EDID_LENGTH, len));
456 DRM_DEBUG_KMS("width[%d] x height[%d]\n",
457 raw_edid->width_cm, raw_edid->height_cm);
458 } else {
459 return -ENODEV;
460 }
461
462 return 0;
463}
464
465static int hdmi_check_timing(void *ctx, void *timing)
466{
467 struct fb_videomode *check_timing = timing;
468 int i;
469
470 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
471
472 DRM_DEBUG_KMS("[%d]x[%d] [%d]Hz [%x]\n", check_timing->xres,
473 check_timing->yres, check_timing->refresh,
474 check_timing->vmode);
475
476 for (i = 0; i < ARRAY_SIZE(hdmi_confs); ++i)
477 if (hdmi_confs[i].width == check_timing->xres &&
478 hdmi_confs[i].height == check_timing->yres &&
479 hdmi_confs[i].vrefresh == check_timing->refresh &&
480 hdmi_confs[i].interlace ==
481 ((check_timing->vmode & FB_VMODE_INTERLACED) ?
482 true : false))
483 return 0;
484
485 return -EINVAL;
486}
487
488static int hdmi_display_power_on(void *ctx, int mode)
489{
490 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
491
492 switch (mode) {
493 case DRM_MODE_DPMS_ON:
494 DRM_DEBUG_KMS("hdmi [on]\n");
495 break;
496 case DRM_MODE_DPMS_STANDBY:
497 break;
498 case DRM_MODE_DPMS_SUSPEND:
499 break;
500 case DRM_MODE_DPMS_OFF:
501 DRM_DEBUG_KMS("hdmi [off]\n");
502 break;
503 default:
504 break;
505 }
506
507 return 0;
508}
509
510static struct exynos_hdmi_display_ops display_ops = {
511 .is_connected = hdmi_is_connected,
512 .get_edid = hdmi_get_edid,
513 .check_timing = hdmi_check_timing,
514 .power_on = hdmi_display_power_on,
515};
516
517static void hdmi_conf_reset(struct hdmi_context *hdata)
518{
519 /* disable hpd handle for drm */
520 hdata->hpd_handle = false;
521
522 /* resetting HDMI core */
523 hdmi_reg_writemask(hdata, HDMI_CORE_RSTOUT, 0, HDMI_CORE_SW_RSTOUT);
524 mdelay(10);
525 hdmi_reg_writemask(hdata, HDMI_CORE_RSTOUT, ~0, HDMI_CORE_SW_RSTOUT);
526 mdelay(10);
527
528 /* enable hpd handle for drm */
529 hdata->hpd_handle = true;
530}
531
532static void hdmi_conf_init(struct hdmi_context *hdata)
533{
534 /* disable hpd handle for drm */
535 hdata->hpd_handle = false;
536
537 /* enable HPD interrupts */
538 hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL |
539 HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG);
540 mdelay(10);
541 hdmi_reg_writemask(hdata, HDMI_INTC_CON, ~0, HDMI_INTC_EN_GLOBAL |
542 HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG);
543
544 /* choose HDMI mode */
545 hdmi_reg_writemask(hdata, HDMI_MODE_SEL,
546 HDMI_MODE_HDMI_EN, HDMI_MODE_MASK);
547 /* disable bluescreen */
548 hdmi_reg_writemask(hdata, HDMI_CON_0, 0, HDMI_BLUE_SCR_EN);
549 /* choose bluescreen (fecal) color */
550 hdmi_reg_writeb(hdata, HDMI_BLUE_SCREEN_0, 0x12);
551 hdmi_reg_writeb(hdata, HDMI_BLUE_SCREEN_1, 0x34);
552 hdmi_reg_writeb(hdata, HDMI_BLUE_SCREEN_2, 0x56);
553 /* enable AVI packet every vsync, fixes purple line problem */
554 hdmi_reg_writeb(hdata, HDMI_AVI_CON, 0x02);
555 /* force RGB; see CEA-861-D, table 7 for more detail */
556 hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(0), 0 << 5);
557 hdmi_reg_writemask(hdata, HDMI_CON_1, 0x10 << 5, 0x11 << 5);
558
559 hdmi_reg_writeb(hdata, HDMI_SPD_CON, 0x02);
560 hdmi_reg_writeb(hdata, HDMI_AUI_CON, 0x02);
561 hdmi_reg_writeb(hdata, HDMI_ACR_CON, 0x04);
562
563 /* enable hpd handle for drm */
564 hdata->hpd_handle = true;
565}
566
567static void hdmi_timing_apply(struct hdmi_context *hdata,
568 const struct hdmi_preset_conf *conf)
569{
570 const struct hdmi_core_regs *core = &conf->core;
571 const struct hdmi_tg_regs *tg = &conf->tg;
572 int tries;
573
574 /* setting core registers */
575 hdmi_reg_writeb(hdata, HDMI_H_BLANK_0, core->h_blank[0]);
576 hdmi_reg_writeb(hdata, HDMI_H_BLANK_1, core->h_blank[1]);
577 hdmi_reg_writeb(hdata, HDMI_V_BLANK_0, core->v_blank[0]);
578 hdmi_reg_writeb(hdata, HDMI_V_BLANK_1, core->v_blank[1]);
579 hdmi_reg_writeb(hdata, HDMI_V_BLANK_2, core->v_blank[2]);
580 hdmi_reg_writeb(hdata, HDMI_H_V_LINE_0, core->h_v_line[0]);
581 hdmi_reg_writeb(hdata, HDMI_H_V_LINE_1, core->h_v_line[1]);
582 hdmi_reg_writeb(hdata, HDMI_H_V_LINE_2, core->h_v_line[2]);
583 hdmi_reg_writeb(hdata, HDMI_VSYNC_POL, core->vsync_pol[0]);
584 hdmi_reg_writeb(hdata, HDMI_INT_PRO_MODE, core->int_pro_mode[0]);
585 hdmi_reg_writeb(hdata, HDMI_V_BLANK_F_0, core->v_blank_f[0]);
586 hdmi_reg_writeb(hdata, HDMI_V_BLANK_F_1, core->v_blank_f[1]);
587 hdmi_reg_writeb(hdata, HDMI_V_BLANK_F_2, core->v_blank_f[2]);
588 hdmi_reg_writeb(hdata, HDMI_H_SYNC_GEN_0, core->h_sync_gen[0]);
589 hdmi_reg_writeb(hdata, HDMI_H_SYNC_GEN_1, core->h_sync_gen[1]);
590 hdmi_reg_writeb(hdata, HDMI_H_SYNC_GEN_2, core->h_sync_gen[2]);
591 hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_1_0, core->v_sync_gen1[0]);
592 hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_1_1, core->v_sync_gen1[1]);
593 hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_1_2, core->v_sync_gen1[2]);
594 hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_2_0, core->v_sync_gen2[0]);
595 hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_2_1, core->v_sync_gen2[1]);
596 hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_2_2, core->v_sync_gen2[2]);
597 hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_3_0, core->v_sync_gen3[0]);
598 hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_3_1, core->v_sync_gen3[1]);
599 hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_3_2, core->v_sync_gen3[2]);
600 /* Timing generator registers */
601 hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_L, tg->h_fsz_l);
602 hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_H, tg->h_fsz_h);
603 hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_L, tg->hact_st_l);
604 hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_H, tg->hact_st_h);
605 hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_L, tg->hact_sz_l);
606 hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_H, tg->hact_sz_h);
607 hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_L, tg->v_fsz_l);
608 hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_H, tg->v_fsz_h);
609 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_L, tg->vsync_l);
610 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_H, tg->vsync_h);
611 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_L, tg->vsync2_l);
612 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_H, tg->vsync2_h);
613 hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_L, tg->vact_st_l);
614 hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_H, tg->vact_st_h);
615 hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_L, tg->vact_sz_l);
616 hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_H, tg->vact_sz_h);
617 hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_L, tg->field_chg_l);
618 hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_H, tg->field_chg_h);
619 hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_L, tg->vact_st2_l);
620 hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_H, tg->vact_st2_h);
621 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi_l);
622 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi_h);
623 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi_l);
624 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_H, tg->vsync_bot_hdmi_h);
625 hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_L, tg->field_top_hdmi_l);
626 hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi_h);
627 hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi_l);
628 hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi_h);
629
630 /* waiting for HDMIPHY's PLL to get to steady state */
631 for (tries = 100; tries; --tries) {
632 u32 val = hdmi_reg_read(hdata, HDMI_PHY_STATUS);
633 if (val & HDMI_PHY_STATUS_READY)
634 break;
635 mdelay(1);
636 }
637 /* steady state not achieved */
638 if (tries == 0) {
639 DRM_ERROR("hdmiphy's pll could not reach steady state.\n");
640 hdmi_regs_dump(hdata, "timing apply");
641 }
642
643 clk_disable(hdata->res.sclk_hdmi);
644 clk_set_parent(hdata->res.sclk_hdmi, hdata->res.sclk_hdmiphy);
645 clk_enable(hdata->res.sclk_hdmi);
646
647 /* enable HDMI and timing generator */
648 hdmi_reg_writemask(hdata, HDMI_CON_0, ~0, HDMI_EN);
649 if (core->int_pro_mode[0])
650 hdmi_reg_writemask(hdata, HDMI_TG_CMD, ~0, HDMI_TG_EN |
651 HDMI_FIELD_EN);
652 else
653 hdmi_reg_writemask(hdata, HDMI_TG_CMD, ~0, HDMI_TG_EN);
654}
655
656static void hdmiphy_conf_reset(struct hdmi_context *hdata)
657{
658 u8 buffer[2];
659
660 clk_disable(hdata->res.sclk_hdmi);
661 clk_set_parent(hdata->res.sclk_hdmi, hdata->res.sclk_pixel);
662 clk_enable(hdata->res.sclk_hdmi);
663
664 /* operation mode */
665 buffer[0] = 0x1f;
666 buffer[1] = 0x00;
667
668 if (hdata->hdmiphy_port)
669 i2c_master_send(hdata->hdmiphy_port, buffer, 2);
670
671 /* reset hdmiphy */
672 hdmi_reg_writemask(hdata, HDMI_PHY_RSTOUT, ~0, HDMI_PHY_SW_RSTOUT);
673 mdelay(10);
674 hdmi_reg_writemask(hdata, HDMI_PHY_RSTOUT, 0, HDMI_PHY_SW_RSTOUT);
675 mdelay(10);
676}
677
678static void hdmiphy_conf_apply(struct hdmi_context *hdata)
679{
680 u8 buffer[32];
681 u8 operation[2];
682 u8 read_buffer[32] = {0, };
683 int ret;
684 int i;
685
686 if (!hdata->hdmiphy_port) {
687 DRM_ERROR("hdmiphy is not attached\n");
688 return;
689 }
690
691 /* pixel clock */
692 memcpy(buffer, hdmi_confs[hdata->cur_conf].hdmiphy_data, 32);
693 ret = i2c_master_send(hdata->hdmiphy_port, buffer, 32);
694 if (ret != 32) {
695 DRM_ERROR("failed to configure HDMIPHY via I2C\n");
696 return;
697 }
698
699 mdelay(10);
700
701 /* operation mode */
702 operation[0] = 0x1f;
703 operation[1] = 0x80;
704
705 ret = i2c_master_send(hdata->hdmiphy_port, operation, 2);
706 if (ret != 2) {
707 DRM_ERROR("failed to enable hdmiphy\n");
708 return;
709 }
710
711 ret = i2c_master_recv(hdata->hdmiphy_port, read_buffer, 32);
712 if (ret < 0) {
713 DRM_ERROR("failed to read hdmiphy config\n");
714 return;
715 }
716
717 for (i = 0; i < ret; i++)
718 DRM_DEBUG_KMS("hdmiphy[0x%02x] write[0x%02x] - "
719 "recv [0x%02x]\n", i, buffer[i], read_buffer[i]);
720}
721
722static void hdmi_conf_apply(struct hdmi_context *hdata)
723{
724 const struct hdmi_preset_conf *conf =
725 hdmi_confs[hdata->cur_conf].conf;
726
727 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
728
729 hdmiphy_conf_reset(hdata);
730 hdmiphy_conf_apply(hdata);
731
732 hdmi_conf_reset(hdata);
733 hdmi_conf_init(hdata);
734
735 /* setting core registers */
736 hdmi_timing_apply(hdata, conf);
737
738 hdmi_regs_dump(hdata, "start");
739}
740
741static void hdmi_mode_set(void *ctx, void *mode)
742{
743 struct hdmi_context *hdata = (struct hdmi_context *)ctx;
744 int conf_idx;
745
746 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
747
748 conf_idx = hdmi_conf_index(mode);
749 if (conf_idx >= 0 && conf_idx < ARRAY_SIZE(hdmi_confs))
750 hdata->cur_conf = conf_idx;
751 else
752 DRM_DEBUG_KMS("not supported mode\n");
753}
754
755static void hdmi_commit(void *ctx)
756{
757 struct hdmi_context *hdata = (struct hdmi_context *)ctx;
758
759 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
760
761 hdmi_conf_apply(hdata);
762
763 hdata->enabled = true;
764}
765
766static void hdmi_disable(void *ctx)
767{
768 struct hdmi_context *hdata = (struct hdmi_context *)ctx;
769
770 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
771
772 if (hdata->enabled) {
773 hdmiphy_conf_reset(hdata);
774 hdmi_conf_reset(hdata);
775 }
776}
777
778static struct exynos_hdmi_manager_ops manager_ops = {
779 .mode_set = hdmi_mode_set,
780 .commit = hdmi_commit,
781 .disable = hdmi_disable,
782};
783
784/*
785 * Handle hotplug events outside the interrupt handler proper.
786 */
787static void hdmi_hotplug_func(struct work_struct *work)
788{
789 struct hdmi_context *hdata =
790 container_of(work, struct hdmi_context, hotplug_work);
791 struct exynos_drm_hdmi_context *ctx =
792 (struct exynos_drm_hdmi_context *)hdata->parent_ctx;
793
794 drm_helper_hpd_irq_event(ctx->drm_dev);
795}
796
797static irqreturn_t hdmi_irq_handler(int irq, void *arg)
798{
799 struct exynos_drm_hdmi_context *ctx = arg;
800 struct hdmi_context *hdata = (struct hdmi_context *)ctx->ctx;
801 u32 intc_flag;
802
803 intc_flag = hdmi_reg_read(hdata, HDMI_INTC_FLAG);
804 /* clearing flags for HPD plug/unplug */
805 if (intc_flag & HDMI_INTC_FLAG_HPD_UNPLUG) {
806 DRM_DEBUG_KMS("unplugged, handling:%d\n", hdata->hpd_handle);
807 hdmi_reg_writemask(hdata, HDMI_INTC_FLAG, ~0,
808 HDMI_INTC_FLAG_HPD_UNPLUG);
809 }
810 if (intc_flag & HDMI_INTC_FLAG_HPD_PLUG) {
811 DRM_DEBUG_KMS("plugged, handling:%d\n", hdata->hpd_handle);
812 hdmi_reg_writemask(hdata, HDMI_INTC_FLAG, ~0,
813 HDMI_INTC_FLAG_HPD_PLUG);
814 }
815
816 if (ctx->drm_dev && hdata->hpd_handle)
817 queue_work(hdata->wq, &hdata->hotplug_work);
818
819 return IRQ_HANDLED;
820}
821
822static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
823{
824 struct device *dev = hdata->dev;
825 struct hdmi_resources *res = &hdata->res;
826 static char *supply[] = {
827 "hdmi-en",
828 "vdd",
829 "vdd_osc",
830 "vdd_pll",
831 };
832 int i, ret;
833
834 DRM_DEBUG_KMS("HDMI resource init\n");
835
836 memset(res, 0, sizeof *res);
837
838 /* get clocks, power */
839 res->hdmi = clk_get(dev, "hdmi");
840 if (IS_ERR_OR_NULL(res->hdmi)) {
841 DRM_ERROR("failed to get clock 'hdmi'\n");
842 goto fail;
843 }
844 res->sclk_hdmi = clk_get(dev, "sclk_hdmi");
845 if (IS_ERR_OR_NULL(res->sclk_hdmi)) {
846 DRM_ERROR("failed to get clock 'sclk_hdmi'\n");
847 goto fail;
848 }
849 res->sclk_pixel = clk_get(dev, "sclk_pixel");
850 if (IS_ERR_OR_NULL(res->sclk_pixel)) {
851 DRM_ERROR("failed to get clock 'sclk_pixel'\n");
852 goto fail;
853 }
854 res->sclk_hdmiphy = clk_get(dev, "sclk_hdmiphy");
855 if (IS_ERR_OR_NULL(res->sclk_hdmiphy)) {
856 DRM_ERROR("failed to get clock 'sclk_hdmiphy'\n");
857 goto fail;
858 }
859 res->hdmiphy = clk_get(dev, "hdmiphy");
860 if (IS_ERR_OR_NULL(res->hdmiphy)) {
861 DRM_ERROR("failed to get clock 'hdmiphy'\n");
862 goto fail;
863 }
864
865 clk_set_parent(res->sclk_hdmi, res->sclk_pixel);
866
867 res->regul_bulk = kzalloc(ARRAY_SIZE(supply) *
868 sizeof res->regul_bulk[0], GFP_KERNEL);
869 if (!res->regul_bulk) {
870 DRM_ERROR("failed to get memory for regulators\n");
871 goto fail;
872 }
873 for (i = 0; i < ARRAY_SIZE(supply); ++i) {
874 res->regul_bulk[i].supply = supply[i];
875 res->regul_bulk[i].consumer = NULL;
876 }
877 ret = regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk);
878 if (ret) {
879 DRM_ERROR("failed to get regulators\n");
880 goto fail;
881 }
882 res->regul_count = ARRAY_SIZE(supply);
883
884 return 0;
885fail:
886 DRM_ERROR("HDMI resource init - failed\n");
887 return -ENODEV;
888}
889
890static int hdmi_resources_cleanup(struct hdmi_context *hdata)
891{
892 struct hdmi_resources *res = &hdata->res;
893
894 regulator_bulk_free(res->regul_count, res->regul_bulk);
895 /* kfree is NULL-safe */
896 kfree(res->regul_bulk);
897 if (!IS_ERR_OR_NULL(res->hdmiphy))
898 clk_put(res->hdmiphy);
899 if (!IS_ERR_OR_NULL(res->sclk_hdmiphy))
900 clk_put(res->sclk_hdmiphy);
901 if (!IS_ERR_OR_NULL(res->sclk_pixel))
902 clk_put(res->sclk_pixel);
903 if (!IS_ERR_OR_NULL(res->sclk_hdmi))
904 clk_put(res->sclk_hdmi);
905 if (!IS_ERR_OR_NULL(res->hdmi))
906 clk_put(res->hdmi);
907 memset(res, 0, sizeof *res);
908
909 return 0;
910}
911
912static void hdmi_resource_poweron(struct hdmi_context *hdata)
913{
914 struct hdmi_resources *res = &hdata->res;
915
916 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
917
918 /* turn HDMI power on */
919 regulator_bulk_enable(res->regul_count, res->regul_bulk);
920 /* power-on hdmi physical interface */
921 clk_enable(res->hdmiphy);
922 /* turn clocks on */
923 clk_enable(res->hdmi);
924 clk_enable(res->sclk_hdmi);
925
926 hdmiphy_conf_reset(hdata);
927 hdmi_conf_reset(hdata);
928 hdmi_conf_init(hdata);
929
930}
931
932static void hdmi_resource_poweroff(struct hdmi_context *hdata)
933{
934 struct hdmi_resources *res = &hdata->res;
935
936 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
937
938 /* turn clocks off */
939 clk_disable(res->sclk_hdmi);
940 clk_disable(res->hdmi);
941 /* power-off hdmiphy */
942 clk_disable(res->hdmiphy);
943 /* turn HDMI power off */
944 regulator_bulk_disable(res->regul_count, res->regul_bulk);
945}
946
947static int hdmi_runtime_suspend(struct device *dev)
948{
949 struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
950
951 DRM_DEBUG_KMS("%s\n", __func__);
952
953 hdmi_resource_poweroff((struct hdmi_context *)ctx->ctx);
954
955 return 0;
956}
957
958static int hdmi_runtime_resume(struct device *dev)
959{
960 struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
961
962 DRM_DEBUG_KMS("%s\n", __func__);
963
964 hdmi_resource_poweron((struct hdmi_context *)ctx->ctx);
965
966 return 0;
967}
968
969static const struct dev_pm_ops hdmi_pm_ops = {
970 .runtime_suspend = hdmi_runtime_suspend,
971 .runtime_resume = hdmi_runtime_resume,
972};
973
974static struct i2c_client *hdmi_ddc, *hdmi_hdmiphy;
975
976void hdmi_attach_ddc_client(struct i2c_client *ddc)
977{
978 if (ddc)
979 hdmi_ddc = ddc;
980}
981EXPORT_SYMBOL(hdmi_attach_ddc_client);
982
983void hdmi_attach_hdmiphy_client(struct i2c_client *hdmiphy)
984{
985 if (hdmiphy)
986 hdmi_hdmiphy = hdmiphy;
987}
988EXPORT_SYMBOL(hdmi_attach_hdmiphy_client);
989
990static int __devinit hdmi_probe(struct platform_device *pdev)
991{
992 struct device *dev = &pdev->dev;
993 struct exynos_drm_hdmi_context *drm_hdmi_ctx;
994 struct hdmi_context *hdata;
995 struct exynos_drm_hdmi_pdata *pdata;
996 struct resource *res;
997 int ret;
998
999 	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
1000
1001 pdata = pdev->dev.platform_data;
1002 if (!pdata) {
1003 DRM_ERROR("no platform data specified\n");
1004 return -EINVAL;
1005 }
1006
1007 drm_hdmi_ctx = kzalloc(sizeof(*drm_hdmi_ctx), GFP_KERNEL);
1008 if (!drm_hdmi_ctx) {
1009 DRM_ERROR("failed to allocate common hdmi context.\n");
1010 return -ENOMEM;
1011 }
1012
1013 hdata = kzalloc(sizeof(struct hdmi_context), GFP_KERNEL);
1014 if (!hdata) {
1015 DRM_ERROR("out of memory\n");
1016 kfree(drm_hdmi_ctx);
1017 return -ENOMEM;
1018 }
1019
1020 drm_hdmi_ctx->ctx = (void *)hdata;
1021 hdata->parent_ctx = (void *)drm_hdmi_ctx;
1022
1023 platform_set_drvdata(pdev, drm_hdmi_ctx);
1024
1025 hdata->default_win = pdata->default_win;
1026 hdata->default_timing = &pdata->timing;
1027 hdata->default_bpp = pdata->bpp;
1028 hdata->dev = dev;
1029
1030 ret = hdmi_resources_init(hdata);
1031 if (ret) {
1032 ret = -EINVAL;
1033 		goto err_resource;	/* release partially-acquired clocks */
1034 }
1035
1036 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1037 if (!res) {
1038 DRM_ERROR("failed to find registers\n");
1039 ret = -ENOENT;
1040 goto err_resource;
1041 }
1042
1043 hdata->regs_res = request_mem_region(res->start, resource_size(res),
1044 dev_name(dev));
1045 if (!hdata->regs_res) {
1046 DRM_ERROR("failed to claim register region\n");
1047 ret = -ENOENT;
1048 goto err_resource;
1049 }
1050
1051 hdata->regs = ioremap(res->start, resource_size(res));
1052 if (!hdata->regs) {
1053 DRM_ERROR("failed to map registers\n");
1054 ret = -ENXIO;
1055 goto err_req_region;
1056 }
1057
1058 /* DDC i2c driver */
1059 if (i2c_add_driver(&ddc_driver)) {
1060 DRM_ERROR("failed to register ddc i2c driver\n");
1061 ret = -ENOENT;
1062 goto err_iomap;
1063 }
1064
1065 hdata->ddc_port = hdmi_ddc;
1066
1067 /* hdmiphy i2c driver */
1068 if (i2c_add_driver(&hdmiphy_driver)) {
1069 DRM_ERROR("failed to register hdmiphy i2c driver\n");
1070 ret = -ENOENT;
1071 goto err_ddc;
1072 }
1073
1074 hdata->hdmiphy_port = hdmi_hdmiphy;
1075
1076 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1077 if (res == NULL) {
1078 DRM_ERROR("get interrupt resource failed.\n");
1079 ret = -ENXIO;
1080 goto err_hdmiphy;
1081 }
1082
1083 /* create workqueue and hotplug work */
1084 hdata->wq = alloc_workqueue("exynos-drm-hdmi",
1085 WQ_UNBOUND | WQ_NON_REENTRANT, 1);
1086 if (hdata->wq == NULL) {
1087 DRM_ERROR("Failed to create workqueue.\n");
1088 ret = -ENOMEM;
1089 goto err_hdmiphy;
1090 }
1091 INIT_WORK(&hdata->hotplug_work, hdmi_hotplug_func);
1092
1093 /* register hpd interrupt */
1094 ret = request_irq(res->start, hdmi_irq_handler, 0, "drm_hdmi",
1095 drm_hdmi_ctx);
1096 if (ret) {
1097 DRM_ERROR("request interrupt failed.\n");
1098 goto err_workqueue;
1099 }
1100 hdata->irq = res->start;
1101
1102 /* register specific callbacks to common hdmi. */
1103 exynos_drm_display_ops_register(&display_ops);
1104 exynos_drm_manager_ops_register(&manager_ops);
1105
1106 hdmi_resource_poweron(hdata);
1107
1108 return 0;
1109
1110err_workqueue:
1111 destroy_workqueue(hdata->wq);
1112err_hdmiphy:
1113 i2c_del_driver(&hdmiphy_driver);
1114err_ddc:
1115 i2c_del_driver(&ddc_driver);
1116err_iomap:
1117 iounmap(hdata->regs);
1118err_req_region:
1119 release_resource(hdata->regs_res);
1120 kfree(hdata->regs_res);
1121err_resource:
1122 hdmi_resources_cleanup(hdata);
1123err_data:
1124 kfree(hdata);
1125 kfree(drm_hdmi_ctx);
1126 return ret;
1127}
1128
1129static int __devexit hdmi_remove(struct platform_device *pdev)
1130{
1131 struct exynos_drm_hdmi_context *ctx = platform_get_drvdata(pdev);
1132 struct hdmi_context *hdata = (struct hdmi_context *)ctx->ctx;
1133
1134 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
1135
1136 hdmi_resource_poweroff(hdata);
1137
1138 disable_irq(hdata->irq);
1139 	free_irq(hdata->irq, ctx);	/* dev_id must match request_irq() */
1140
1141 cancel_work_sync(&hdata->hotplug_work);
1142 destroy_workqueue(hdata->wq);
1143
1144 hdmi_resources_cleanup(hdata);
1145
1146 iounmap(hdata->regs);
1147
1148 release_resource(hdata->regs_res);
1149 kfree(hdata->regs_res);
1150
1151 /* hdmiphy i2c driver */
1152 i2c_del_driver(&hdmiphy_driver);
1153 /* DDC i2c driver */
1154 i2c_del_driver(&ddc_driver);
1155
1156 	kfree(hdata);
1157 	kfree(ctx);
1158 return 0;
1159}
1160
1161struct platform_driver hdmi_driver = {
1162 .probe = hdmi_probe,
1163 .remove = __devexit_p(hdmi_remove),
1164 .driver = {
1165 .name = "exynos4-hdmi",
1166 .owner = THIS_MODULE,
1167 .pm = &hdmi_pm_ops,
1168 },
1169};
1170EXPORT_SYMBOL(hdmi_driver);
1171
1172MODULE_AUTHOR("Seung-Woo Kim, <sw0312.kim@samsung.com>");
1173MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
1174MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
1175MODULE_DESCRIPTION("Samsung DRM HDMI core Driver");
1176MODULE_LICENSE("GPL");
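hdmi_resources_init() above fills a regulator_bulk_data array by hand before calling regulator_bulk_get(). A minimal sketch of the same get/enable/free pairing follows; the supply names and the example_power_on() helper are illustrative assumptions, not part of this patch:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/regulator/consumer.h>

/* Sketch of the regulator_bulk_* consumer pattern; names are placeholders. */
static int example_power_on(struct device *dev)
{
	static const char * const names[] = { "vdd", "vdd_pll" };
	struct regulator_bulk_data bulk[ARRAY_SIZE(names)] = { };
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(names); ++i)
		bulk[i].supply = names[i];	/* .consumer is filled in by _get() */

	ret = regulator_bulk_get(dev, ARRAY_SIZE(bulk), bulk);
	if (ret)
		return ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(bulk), bulk);
	if (ret)
		regulator_bulk_free(ARRAY_SIZE(bulk), bulk);
	return ret;
}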
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.h b/drivers/gpu/drm/exynos/exynos_hdmi.h
new file mode 100644
index 00000000000..31d6cf84c1a
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.h
@@ -0,0 +1,87 @@
1/*
2 *
3 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
4 * Authors:
5 * Inki Dae <inki.dae@samsung.com>
6 * Seung-Woo Kim <sw0312.kim@samsung.com>
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 * OTHER DEALINGS IN THE SOFTWARE.
26 */
27
28#ifndef _EXYNOS_HDMI_H_
29#define _EXYNOS_HDMI_H_
30
31struct hdmi_conf {
32 int width;
33 int height;
34 int vrefresh;
35 bool interlace;
36 const u8 *hdmiphy_data;
37 const struct hdmi_preset_conf *conf;
38};
39
40struct hdmi_resources {
41 struct clk *hdmi;
42 struct clk *sclk_hdmi;
43 struct clk *sclk_pixel;
44 struct clk *sclk_hdmiphy;
45 struct clk *hdmiphy;
46 struct regulator_bulk_data *regul_bulk;
47 int regul_count;
48};
49
50struct hdmi_context {
51 struct device *dev;
52 struct drm_device *drm_dev;
53 struct fb_videomode *default_timing;
54 unsigned int default_win;
55 unsigned int default_bpp;
56 bool hpd_handle;
57 bool enabled;
58
59 struct resource *regs_res;
60 /** base address of HDMI registers */
61 void __iomem *regs;
62 /** HDMI hotplug interrupt */
63 unsigned int irq;
64 /** workqueue for delayed work */
65 struct workqueue_struct *wq;
66 /** hotplug handling work */
67 struct work_struct hotplug_work;
68
69 struct i2c_client *ddc_port;
70 struct i2c_client *hdmiphy_port;
71
72 /** current hdmiphy conf index */
73 int cur_conf;
74 /** other resources */
75 struct hdmi_resources res;
76
77 void *parent_ctx;
78};
79
80
81void hdmi_attach_ddc_client(struct i2c_client *ddc);
82void hdmi_attach_hdmiphy_client(struct i2c_client *hdmiphy);
83
84extern struct i2c_driver hdmiphy_driver;
85extern struct i2c_driver ddc_driver;
86
87#endif
diff --git a/drivers/gpu/drm/exynos/exynos_hdmiphy.c b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
new file mode 100644
index 00000000000..9fe2995ab9f
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
@@ -0,0 +1,58 @@
1/*
2 * Copyright (C) 2011 Samsung Electronics Co.Ltd
3 * Authors:
4 * Seung-Woo Kim <sw0312.kim@samsung.com>
5 * Inki Dae <inki.dae@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 *
12 */
13
14#include "drmP.h"
15
16#include <linux/kernel.h>
17#include <linux/i2c.h>
18#include <linux/module.h>
19
20#include "exynos_drm_drv.h"
21#include "exynos_hdmi.h"
22
23
24static int hdmiphy_probe(struct i2c_client *client,
25 const struct i2c_device_id *id)
26{
27 hdmi_attach_hdmiphy_client(client);
28
29 dev_info(&client->adapter->dev, "attached s5p_hdmiphy "
30 		"to i2c adapter successfully\n");
31
32 return 0;
33}
34
35static int hdmiphy_remove(struct i2c_client *client)
36{
37 dev_info(&client->adapter->dev, "detached s5p_hdmiphy "
38 "from i2c adapter successfully\n");
39
40 return 0;
41}
42
43static const struct i2c_device_id hdmiphy_id[] = {
44 { "s5p_hdmiphy", 0 },
45 { },
46};
47
48struct i2c_driver hdmiphy_driver = {
49 .driver = {
50 .name = "s5p-hdmiphy",
51 .owner = THIS_MODULE,
52 },
53 .id_table = hdmiphy_id,
54 .probe = hdmiphy_probe,
55 .remove = __devexit_p(hdmiphy_remove),
56 .command = NULL,
57};
58EXPORT_SYMBOL(hdmiphy_driver);
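hdmiphy_probe() above only stashes the i2c client for the hdmi driver via hdmi_attach_hdmiphy_client(); the device itself has to be declared by board code for the probe to run. A hypothetical board-file sketch is shown below; the bus number 8 and slave address 0x38 are assumptions for illustration, not values taken from this patch:

#include <linux/i2c.h>
#include <linux/init.h>

static struct i2c_board_info hdmiphy_info __initdata = {
	I2C_BOARD_INFO("s5p_hdmiphy", 0x38),	/* assumed slave address */
};

static void __init example_board_init(void)
{
	i2c_register_board_info(8, &hdmiphy_info, 1);	/* assumed bus number */
}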
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
new file mode 100644
index 00000000000..ac24cff3977
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -0,0 +1,1070 @@
1/*
2 * Copyright (C) 2011 Samsung Electronics Co.Ltd
3 * Authors:
4 * Seung-Woo Kim <sw0312.kim@samsung.com>
5 * Inki Dae <inki.dae@samsung.com>
6 * Joonyoung Shim <jy0922.shim@samsung.com>
7 *
8 * Based on drivers/media/video/s5p-tv/mixer_reg.c
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 *
15 */
16
17#include "drmP.h"
18
19#include "regs-mixer.h"
20#include "regs-vp.h"
21
22#include <linux/kernel.h>
23#include <linux/spinlock.h>
24#include <linux/wait.h>
25#include <linux/i2c.h>
26#include <linux/module.h>
27#include <linux/platform_device.h>
28#include <linux/interrupt.h>
29#include <linux/irq.h>
30#include <linux/delay.h>
31#include <linux/pm_runtime.h>
32#include <linux/clk.h>
33#include <linux/regulator/consumer.h>
34
35#include <drm/exynos_drm.h>
36
37#include "exynos_drm_drv.h"
38#include "exynos_drm_hdmi.h"
39#include "exynos_hdmi.h"
40#include "exynos_mixer.h"
41
42#define get_mixer_context(dev) platform_get_drvdata(to_platform_device(dev))
43
44static const u8 filter_y_horiz_tap8[] = {
45 0, -1, -1, -1, -1, -1, -1, -1,
46 -1, -1, -1, -1, -1, 0, 0, 0,
47 0, 2, 4, 5, 6, 6, 6, 6,
48 6, 5, 5, 4, 3, 2, 1, 1,
49 0, -6, -12, -16, -18, -20, -21, -20,
50 -20, -18, -16, -13, -10, -8, -5, -2,
51 127, 126, 125, 121, 114, 107, 99, 89,
52 79, 68, 57, 46, 35, 25, 16, 8,
53};
54
55static const u8 filter_y_vert_tap4[] = {
56 0, -3, -6, -8, -8, -8, -8, -7,
57 -6, -5, -4, -3, -2, -1, -1, 0,
58 127, 126, 124, 118, 111, 102, 92, 81,
59 70, 59, 48, 37, 27, 19, 11, 5,
60 0, 5, 11, 19, 27, 37, 48, 59,
61 70, 81, 92, 102, 111, 118, 124, 126,
62 0, 0, -1, -1, -2, -3, -4, -5,
63 -6, -7, -8, -8, -8, -8, -6, -3,
64};
65
66static const u8 filter_cr_horiz_tap4[] = {
67 0, -3, -6, -8, -8, -8, -8, -7,
68 -6, -5, -4, -3, -2, -1, -1, 0,
69 127, 126, 124, 118, 111, 102, 92, 81,
70 70, 59, 48, 37, 27, 19, 11, 5,
71};
72
73static inline u32 vp_reg_read(struct mixer_resources *res, u32 reg_id)
74{
75 return readl(res->vp_regs + reg_id);
76}
77
78static inline void vp_reg_write(struct mixer_resources *res, u32 reg_id,
79 u32 val)
80{
81 writel(val, res->vp_regs + reg_id);
82}
83
84static inline void vp_reg_writemask(struct mixer_resources *res, u32 reg_id,
85 u32 val, u32 mask)
86{
87 u32 old = vp_reg_read(res, reg_id);
88
89 val = (val & mask) | (old & ~mask);
90 writel(val, res->vp_regs + reg_id);
91}
92
93static inline u32 mixer_reg_read(struct mixer_resources *res, u32 reg_id)
94{
95 return readl(res->mixer_regs + reg_id);
96}
97
98static inline void mixer_reg_write(struct mixer_resources *res, u32 reg_id,
99 u32 val)
100{
101 writel(val, res->mixer_regs + reg_id);
102}
103
104static inline void mixer_reg_writemask(struct mixer_resources *res,
105 u32 reg_id, u32 val, u32 mask)
106{
107 u32 old = mixer_reg_read(res, reg_id);
108
109 val = (val & mask) | (old & ~mask);
110 writel(val, res->mixer_regs + reg_id);
111}
112
113static void mixer_regs_dump(struct mixer_context *ctx)
114{
115#define DUMPREG(reg_id) \
116do { \
117 DRM_DEBUG_KMS(#reg_id " = %08x\n", \
118 (u32)readl(ctx->mixer_res.mixer_regs + reg_id)); \
119} while (0)
120
121 DUMPREG(MXR_STATUS);
122 DUMPREG(MXR_CFG);
123 DUMPREG(MXR_INT_EN);
124 DUMPREG(MXR_INT_STATUS);
125
126 DUMPREG(MXR_LAYER_CFG);
127 DUMPREG(MXR_VIDEO_CFG);
128
129 DUMPREG(MXR_GRAPHIC0_CFG);
130 DUMPREG(MXR_GRAPHIC0_BASE);
131 DUMPREG(MXR_GRAPHIC0_SPAN);
132 DUMPREG(MXR_GRAPHIC0_WH);
133 DUMPREG(MXR_GRAPHIC0_SXY);
134 DUMPREG(MXR_GRAPHIC0_DXY);
135
136 DUMPREG(MXR_GRAPHIC1_CFG);
137 DUMPREG(MXR_GRAPHIC1_BASE);
138 DUMPREG(MXR_GRAPHIC1_SPAN);
139 DUMPREG(MXR_GRAPHIC1_WH);
140 DUMPREG(MXR_GRAPHIC1_SXY);
141 DUMPREG(MXR_GRAPHIC1_DXY);
142#undef DUMPREG
143}
144
145static void vp_regs_dump(struct mixer_context *ctx)
146{
147#define DUMPREG(reg_id) \
148do { \
149 DRM_DEBUG_KMS(#reg_id " = %08x\n", \
150 (u32) readl(ctx->mixer_res.vp_regs + reg_id)); \
151} while (0)
152
153 DUMPREG(VP_ENABLE);
154 DUMPREG(VP_SRESET);
155 DUMPREG(VP_SHADOW_UPDATE);
156 DUMPREG(VP_FIELD_ID);
157 DUMPREG(VP_MODE);
158 DUMPREG(VP_IMG_SIZE_Y);
159 DUMPREG(VP_IMG_SIZE_C);
160 DUMPREG(VP_PER_RATE_CTRL);
161 DUMPREG(VP_TOP_Y_PTR);
162 DUMPREG(VP_BOT_Y_PTR);
163 DUMPREG(VP_TOP_C_PTR);
164 DUMPREG(VP_BOT_C_PTR);
165 DUMPREG(VP_ENDIAN_MODE);
166 DUMPREG(VP_SRC_H_POSITION);
167 DUMPREG(VP_SRC_V_POSITION);
168 DUMPREG(VP_SRC_WIDTH);
169 DUMPREG(VP_SRC_HEIGHT);
170 DUMPREG(VP_DST_H_POSITION);
171 DUMPREG(VP_DST_V_POSITION);
172 DUMPREG(VP_DST_WIDTH);
173 DUMPREG(VP_DST_HEIGHT);
174 DUMPREG(VP_H_RATIO);
175 DUMPREG(VP_V_RATIO);
176
177#undef DUMPREG
178}
179
180static inline void vp_filter_set(struct mixer_resources *res,
181 int reg_id, const u8 *data, unsigned int size)
182{
183 	/* ensure 4-byte alignment */
184 BUG_ON(size & 3);
185 for (; size; size -= 4, reg_id += 4, data += 4) {
186 u32 val = (data[0] << 24) | (data[1] << 16) |
187 (data[2] << 8) | data[3];
188 vp_reg_write(res, reg_id, val);
189 }
190}
191
192static void vp_default_filter(struct mixer_resources *res)
193{
194 vp_filter_set(res, VP_POLY8_Y0_LL,
195 filter_y_horiz_tap8, sizeof filter_y_horiz_tap8);
196 vp_filter_set(res, VP_POLY4_Y0_LL,
197 filter_y_vert_tap4, sizeof filter_y_vert_tap4);
198 vp_filter_set(res, VP_POLY4_C0_LL,
199 filter_cr_horiz_tap4, sizeof filter_cr_horiz_tap4);
200}
201
202static void mixer_vsync_set_update(struct mixer_context *ctx, bool enable)
203{
204 struct mixer_resources *res = &ctx->mixer_res;
205
206 /* block update on vsync */
207 mixer_reg_writemask(res, MXR_STATUS, enable ?
208 MXR_STATUS_SYNC_ENABLE : 0, MXR_STATUS_SYNC_ENABLE);
209
210 vp_reg_write(res, VP_SHADOW_UPDATE, enable ?
211 VP_SHADOW_UPDATE_ENABLE : 0);
212}
213
214static void mixer_cfg_scan(struct mixer_context *ctx, unsigned int height)
215{
216 struct mixer_resources *res = &ctx->mixer_res;
217 u32 val;
218
219 	/* choosing between interlaced and progressive mode */
220 	val = (ctx->interlace ? MXR_CFG_SCAN_INTERLACE :
221 				MXR_CFG_SCAN_PROGRESSIVE);
222
223 	/* choosing between proper HD and SD mode */
224 if (height == 480)
225 val |= MXR_CFG_SCAN_NTSC | MXR_CFG_SCAN_SD;
226 else if (height == 576)
227 val |= MXR_CFG_SCAN_PAL | MXR_CFG_SCAN_SD;
228 else if (height == 720)
229 val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD;
230 else if (height == 1080)
231 val |= MXR_CFG_SCAN_HD_1080 | MXR_CFG_SCAN_HD;
232 else
233 val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD;
234
235 mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_SCAN_MASK);
236}
237
238static void mixer_cfg_rgb_fmt(struct mixer_context *ctx, unsigned int height)
239{
240 struct mixer_resources *res = &ctx->mixer_res;
241 u32 val;
242
243 if (height == 480) {
244 val = MXR_CFG_RGB601_0_255;
245 } else if (height == 576) {
246 val = MXR_CFG_RGB601_0_255;
247 } else if (height == 720) {
248 val = MXR_CFG_RGB709_16_235;
249 mixer_reg_write(res, MXR_CM_COEFF_Y,
250 (1 << 30) | (94 << 20) | (314 << 10) |
251 (32 << 0));
252 mixer_reg_write(res, MXR_CM_COEFF_CB,
253 (972 << 20) | (851 << 10) | (225 << 0));
254 mixer_reg_write(res, MXR_CM_COEFF_CR,
255 (225 << 20) | (820 << 10) | (1004 << 0));
256 } else if (height == 1080) {
257 val = MXR_CFG_RGB709_16_235;
258 mixer_reg_write(res, MXR_CM_COEFF_Y,
259 (1 << 30) | (94 << 20) | (314 << 10) |
260 (32 << 0));
261 mixer_reg_write(res, MXR_CM_COEFF_CB,
262 (972 << 20) | (851 << 10) | (225 << 0));
263 mixer_reg_write(res, MXR_CM_COEFF_CR,
264 (225 << 20) | (820 << 10) | (1004 << 0));
265 } else {
266 val = MXR_CFG_RGB709_16_235;
267 mixer_reg_write(res, MXR_CM_COEFF_Y,
268 (1 << 30) | (94 << 20) | (314 << 10) |
269 (32 << 0));
270 mixer_reg_write(res, MXR_CM_COEFF_CB,
271 (972 << 20) | (851 << 10) | (225 << 0));
272 mixer_reg_write(res, MXR_CM_COEFF_CR,
273 (225 << 20) | (820 << 10) | (1004 << 0));
274 }
275
276 mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_RGB_FMT_MASK);
277}
278
279static void mixer_cfg_layer(struct mixer_context *ctx, int win, bool enable)
280{
281 struct mixer_resources *res = &ctx->mixer_res;
282 u32 val = enable ? ~0 : 0;
283
284 switch (win) {
285 case 0:
286 mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_GRP0_ENABLE);
287 break;
288 case 1:
289 mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_GRP1_ENABLE);
290 break;
291 case 2:
292 vp_reg_writemask(res, VP_ENABLE, val, VP_ENABLE_ON);
293 mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_VP_ENABLE);
294 break;
295 }
296}
297
298static void mixer_run(struct mixer_context *ctx)
299{
300 struct mixer_resources *res = &ctx->mixer_res;
301
302 mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_REG_RUN);
303
304 mixer_regs_dump(ctx);
305}
306
307static void vp_video_buffer(struct mixer_context *ctx, int win)
308{
309 struct mixer_resources *res = &ctx->mixer_res;
310 unsigned long flags;
311 struct hdmi_win_data *win_data;
312 unsigned int full_width, full_height, width, height;
313 unsigned int x_ratio, y_ratio;
314 unsigned int src_x_offset, src_y_offset, dst_x_offset, dst_y_offset;
315 unsigned int mode_width, mode_height;
316 	unsigned int buf_num = 1;	/* assume single-buffer layout */
317 dma_addr_t luma_addr[2], chroma_addr[2];
318 bool tiled_mode = false;
319 bool crcb_mode = false;
320 u32 val;
321
322 win_data = &ctx->win_data[win];
323
324 switch (win_data->pixel_format) {
325 case DRM_FORMAT_NV12MT:
326 		tiled_mode = true;	/* fall through */
327 case DRM_FORMAT_NV12M:
328 crcb_mode = false;
329 buf_num = 2;
330 break;
331 /* TODO: single buffer format NV12, NV21 */
332 default:
333 /* ignore pixel format at disable time */
334 if (!win_data->dma_addr)
335 break;
336
337 DRM_ERROR("pixel format for vp is wrong [%d].\n",
338 win_data->pixel_format);
339 return;
340 }
341
342 full_width = win_data->fb_width;
343 full_height = win_data->fb_height;
344 width = win_data->crtc_width;
345 height = win_data->crtc_height;
346 mode_width = win_data->mode_width;
347 mode_height = win_data->mode_height;
348
349 	/* scaling ratio: (src << 16) / dst; src == dst here, so fixed 1:1 */
350 x_ratio = (width << 16) / width;
351 y_ratio = (height << 16) / height;
352
353 src_x_offset = win_data->fb_x;
354 src_y_offset = win_data->fb_y;
355 dst_x_offset = win_data->crtc_x;
356 dst_y_offset = win_data->crtc_y;
357
358 if (buf_num == 2) {
359 luma_addr[0] = win_data->dma_addr;
360 chroma_addr[0] = win_data->chroma_dma_addr;
361 } else {
362 luma_addr[0] = win_data->dma_addr;
363 chroma_addr[0] = win_data->dma_addr
364 + (full_width * full_height);
365 }
366
367 if (win_data->scan_flags & DRM_MODE_FLAG_INTERLACE) {
368 ctx->interlace = true;
369 if (tiled_mode) {
370 luma_addr[1] = luma_addr[0] + 0x40;
371 chroma_addr[1] = chroma_addr[0] + 0x40;
372 } else {
373 luma_addr[1] = luma_addr[0] + full_width;
374 chroma_addr[1] = chroma_addr[0] + full_width;
375 }
376 } else {
377 ctx->interlace = false;
378 luma_addr[1] = 0;
379 chroma_addr[1] = 0;
380 }
381
382 spin_lock_irqsave(&res->reg_slock, flags);
383 mixer_vsync_set_update(ctx, false);
384
385 /* interlace or progressive scan mode */
386 val = (ctx->interlace ? ~0 : 0);
387 vp_reg_writemask(res, VP_MODE, val, VP_MODE_LINE_SKIP);
388
389 /* setup format */
390 val = (crcb_mode ? VP_MODE_NV21 : VP_MODE_NV12);
391 val |= (tiled_mode ? VP_MODE_MEM_TILED : VP_MODE_MEM_LINEAR);
392 vp_reg_writemask(res, VP_MODE, val, VP_MODE_FMT_MASK);
393
394 /* setting size of input image */
395 vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(full_width) |
396 VP_IMG_VSIZE(full_height));
397 	/* chroma height has to be halved to avoid chroma distortions */
398 vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(full_width) |
399 VP_IMG_VSIZE(full_height / 2));
400
401 vp_reg_write(res, VP_SRC_WIDTH, width);
402 vp_reg_write(res, VP_SRC_HEIGHT, height);
403 vp_reg_write(res, VP_SRC_H_POSITION,
404 VP_SRC_H_POSITION_VAL(src_x_offset));
405 vp_reg_write(res, VP_SRC_V_POSITION, src_y_offset);
406
407 vp_reg_write(res, VP_DST_WIDTH, width);
408 vp_reg_write(res, VP_DST_H_POSITION, dst_x_offset);
409 if (ctx->interlace) {
410 vp_reg_write(res, VP_DST_HEIGHT, height / 2);
411 vp_reg_write(res, VP_DST_V_POSITION, dst_y_offset / 2);
412 } else {
413 vp_reg_write(res, VP_DST_HEIGHT, height);
414 vp_reg_write(res, VP_DST_V_POSITION, dst_y_offset);
415 }
416
417 vp_reg_write(res, VP_H_RATIO, x_ratio);
418 vp_reg_write(res, VP_V_RATIO, y_ratio);
419
420 vp_reg_write(res, VP_ENDIAN_MODE, VP_ENDIAN_MODE_LITTLE);
421
422 /* set buffer address to vp */
423 vp_reg_write(res, VP_TOP_Y_PTR, luma_addr[0]);
424 vp_reg_write(res, VP_BOT_Y_PTR, luma_addr[1]);
425 vp_reg_write(res, VP_TOP_C_PTR, chroma_addr[0]);
426 vp_reg_write(res, VP_BOT_C_PTR, chroma_addr[1]);
427
428 mixer_cfg_scan(ctx, mode_height);
429 mixer_cfg_rgb_fmt(ctx, mode_height);
430 mixer_cfg_layer(ctx, win, true);
431 mixer_run(ctx);
432
433 mixer_vsync_set_update(ctx, true);
434 spin_unlock_irqrestore(&res->reg_slock, flags);
435
436 vp_regs_dump(ctx);
437}
438
439static void mixer_graph_buffer(struct mixer_context *ctx, int win)
440{
441 struct mixer_resources *res = &ctx->mixer_res;
442 unsigned long flags;
443 struct hdmi_win_data *win_data;
444 unsigned int full_width, width, height;
445 unsigned int x_ratio, y_ratio;
446 unsigned int src_x_offset, src_y_offset, dst_x_offset, dst_y_offset;
447 unsigned int mode_width, mode_height;
448 dma_addr_t dma_addr;
449 unsigned int fmt;
450 u32 val;
451
452 win_data = &ctx->win_data[win];
453
454 #define RGB565 4
455 #define ARGB1555 5
456 #define ARGB4444 6
457 #define ARGB8888 7
458
459 switch (win_data->bpp) {
460 case 16:
461 fmt = ARGB4444;
462 break;
463 case 32:
464 fmt = ARGB8888;
465 break;
466 default:
467 fmt = ARGB8888;
468 }
469
470 dma_addr = win_data->dma_addr;
471 full_width = win_data->fb_width;
472 width = win_data->crtc_width;
473 height = win_data->crtc_height;
474 mode_width = win_data->mode_width;
475 mode_height = win_data->mode_height;
476
477 	/* hw supports 2x scaling; not used here (0 means 1:1) */
478 x_ratio = 0;
479 y_ratio = 0;
480
481 src_x_offset = win_data->fb_x;
482 src_y_offset = win_data->fb_y;
483 dst_x_offset = win_data->crtc_x;
484 dst_y_offset = win_data->crtc_y;
485
486 /* converting dma address base and source offset */
487 dma_addr = dma_addr
488 + (src_x_offset * win_data->bpp >> 3)
489 + (src_y_offset * full_width * win_data->bpp >> 3);
490 src_x_offset = 0;
491 src_y_offset = 0;
492
493 if (win_data->scan_flags & DRM_MODE_FLAG_INTERLACE)
494 ctx->interlace = true;
495 else
496 ctx->interlace = false;
497
498 spin_lock_irqsave(&res->reg_slock, flags);
499 mixer_vsync_set_update(ctx, false);
500
501 /* setup format */
502 mixer_reg_writemask(res, MXR_GRAPHIC_CFG(win),
503 MXR_GRP_CFG_FORMAT_VAL(fmt), MXR_GRP_CFG_FORMAT_MASK);
504
505 /* setup geometry */
506 mixer_reg_write(res, MXR_GRAPHIC_SPAN(win), full_width);
507
508 val = MXR_GRP_WH_WIDTH(width);
509 val |= MXR_GRP_WH_HEIGHT(height);
510 val |= MXR_GRP_WH_H_SCALE(x_ratio);
511 val |= MXR_GRP_WH_V_SCALE(y_ratio);
512 mixer_reg_write(res, MXR_GRAPHIC_WH(win), val);
513
514 /* setup offsets in source image */
515 val = MXR_GRP_SXY_SX(src_x_offset);
516 val |= MXR_GRP_SXY_SY(src_y_offset);
517 mixer_reg_write(res, MXR_GRAPHIC_SXY(win), val);
518
519 /* setup offsets in display image */
520 val = MXR_GRP_DXY_DX(dst_x_offset);
521 val |= MXR_GRP_DXY_DY(dst_y_offset);
522 mixer_reg_write(res, MXR_GRAPHIC_DXY(win), val);
523
524 /* set buffer address to mixer */
525 mixer_reg_write(res, MXR_GRAPHIC_BASE(win), dma_addr);
526
527 mixer_cfg_scan(ctx, mode_height);
528 mixer_cfg_rgb_fmt(ctx, mode_height);
529 mixer_cfg_layer(ctx, win, true);
530 mixer_run(ctx);
531
532 mixer_vsync_set_update(ctx, true);
533 spin_unlock_irqrestore(&res->reg_slock, flags);
534}
535
536static void vp_win_reset(struct mixer_context *ctx)
537{
538 struct mixer_resources *res = &ctx->mixer_res;
539 	int tries;
540
541 vp_reg_write(res, VP_SRESET, VP_SRESET_PROCESSING);
542 for (tries = 100; tries; --tries) {
543 /* waiting until VP_SRESET_PROCESSING is 0 */
544 if (~vp_reg_read(res, VP_SRESET) & VP_SRESET_PROCESSING)
545 break;
546 mdelay(10);
547 }
548 WARN(tries == 0, "failed to reset Video Processor\n");
549}
550
551static int mixer_enable_vblank(void *ctx, int pipe)
552{
553 struct mixer_context *mixer_ctx = ctx;
554 struct mixer_resources *res = &mixer_ctx->mixer_res;
555
556 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
557
558 mixer_ctx->pipe = pipe;
559
560 /* enable vsync interrupt */
561 mixer_reg_writemask(res, MXR_INT_EN, MXR_INT_EN_VSYNC,
562 MXR_INT_EN_VSYNC);
563
564 return 0;
565}
566
567static void mixer_disable_vblank(void *ctx)
568{
569 struct mixer_context *mixer_ctx = ctx;
570 struct mixer_resources *res = &mixer_ctx->mixer_res;
571
572 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
573
574 /* disable vsync interrupt */
575 mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
576}
577
578static void mixer_win_mode_set(void *ctx,
579 struct exynos_drm_overlay *overlay)
580{
581 struct mixer_context *mixer_ctx = ctx;
582 struct hdmi_win_data *win_data;
583 int win;
584
585 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
586
587 if (!overlay) {
588 DRM_ERROR("overlay is NULL\n");
589 return;
590 }
591
592 DRM_DEBUG_KMS("set [%d]x[%d] at (%d,%d) to [%d]x[%d] at (%d,%d)\n",
593 overlay->fb_width, overlay->fb_height,
594 overlay->fb_x, overlay->fb_y,
595 overlay->crtc_width, overlay->crtc_height,
596 overlay->crtc_x, overlay->crtc_y);
597
598 win = overlay->zpos;
599 if (win == DEFAULT_ZPOS)
600 win = mixer_ctx->default_win;
601
602 	if (win < 0 || win >= HDMI_OVERLAY_NUMBER) {
603 DRM_ERROR("overlay plane[%d] is wrong\n", win);
604 return;
605 }
606
607 win_data = &mixer_ctx->win_data[win];
608
609 win_data->dma_addr = overlay->dma_addr[0];
610 win_data->vaddr = overlay->vaddr[0];
611 win_data->chroma_dma_addr = overlay->dma_addr[1];
612 win_data->chroma_vaddr = overlay->vaddr[1];
613 win_data->pixel_format = overlay->pixel_format;
614 win_data->bpp = overlay->bpp;
615
616 win_data->crtc_x = overlay->crtc_x;
617 win_data->crtc_y = overlay->crtc_y;
618 win_data->crtc_width = overlay->crtc_width;
619 win_data->crtc_height = overlay->crtc_height;
620
621 win_data->fb_x = overlay->fb_x;
622 win_data->fb_y = overlay->fb_y;
623 win_data->fb_width = overlay->fb_width;
624 win_data->fb_height = overlay->fb_height;
625
626 win_data->mode_width = overlay->mode_width;
627 win_data->mode_height = overlay->mode_height;
628
629 win_data->scan_flags = overlay->scan_flag;
630}
631
632static void mixer_win_commit(void *ctx, int zpos)
633{
634 struct mixer_context *mixer_ctx = ctx;
635 int win = zpos;
636
637 DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win);
638
639 if (win == DEFAULT_ZPOS)
640 win = mixer_ctx->default_win;
641
642 	if (win < 0 || win >= HDMI_OVERLAY_NUMBER) {
643 DRM_ERROR("overlay plane[%d] is wrong\n", win);
644 return;
645 }
646
647 if (win > 1)
648 vp_video_buffer(mixer_ctx, win);
649 else
650 mixer_graph_buffer(mixer_ctx, win);
651}
652
653static void mixer_win_disable(void *ctx, int zpos)
654{
655 struct mixer_context *mixer_ctx = ctx;
656 struct mixer_resources *res = &mixer_ctx->mixer_res;
657 unsigned long flags;
658 int win = zpos;
659
660 DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win);
661
662 if (win == DEFAULT_ZPOS)
663 win = mixer_ctx->default_win;
664
665 	if (win < 0 || win >= HDMI_OVERLAY_NUMBER) {
666 DRM_ERROR("overlay plane[%d] is wrong\n", win);
667 return;
668 }
669
670 spin_lock_irqsave(&res->reg_slock, flags);
671 mixer_vsync_set_update(mixer_ctx, false);
672
673 mixer_cfg_layer(mixer_ctx, win, false);
674
675 mixer_vsync_set_update(mixer_ctx, true);
676 spin_unlock_irqrestore(&res->reg_slock, flags);
677}
678
679static struct exynos_hdmi_overlay_ops overlay_ops = {
680 .enable_vblank = mixer_enable_vblank,
681 .disable_vblank = mixer_disable_vblank,
682 .win_mode_set = mixer_win_mode_set,
683 .win_commit = mixer_win_commit,
684 .win_disable = mixer_win_disable,
685};
686
687/* for pageflip event */
688static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc)
689{
690 struct exynos_drm_private *dev_priv = drm_dev->dev_private;
691 struct drm_pending_vblank_event *e, *t;
692 struct timeval now;
693 unsigned long flags;
694 bool is_checked = false;
695
696 spin_lock_irqsave(&drm_dev->event_lock, flags);
697
698 list_for_each_entry_safe(e, t, &dev_priv->pageflip_event_list,
699 base.link) {
700 /* if event's pipe isn't same as crtc then ignore it. */
701 if (crtc != e->pipe)
702 continue;
703
704 is_checked = true;
705 do_gettimeofday(&now);
706 e->event.sequence = 0;
707 e->event.tv_sec = now.tv_sec;
708 e->event.tv_usec = now.tv_usec;
709
710 list_move_tail(&e->base.link, &e->base.file_priv->event_list);
711 wake_up_interruptible(&e->base.file_priv->event_wait);
712 }
713
714 if (is_checked)
715 drm_vblank_put(drm_dev, crtc);
716
717 spin_unlock_irqrestore(&drm_dev->event_lock, flags);
718}
719
720static irqreturn_t mixer_irq_handler(int irq, void *arg)
721{
722 struct exynos_drm_hdmi_context *drm_hdmi_ctx = arg;
723 struct mixer_context *ctx =
724 (struct mixer_context *)drm_hdmi_ctx->ctx;
725 struct mixer_resources *res = &ctx->mixer_res;
726 u32 val, val_base;
727
728 spin_lock(&res->reg_slock);
729
730 /* read interrupt status for handling and clearing flags for VSYNC */
731 val = mixer_reg_read(res, MXR_INT_STATUS);
732
733 /* handling VSYNC */
734 if (val & MXR_INT_STATUS_VSYNC) {
735 		/* interlaced scan needs to check the shadow register */
736 if (ctx->interlace) {
737 val_base = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(0));
738 if (ctx->win_data[0].dma_addr != val_base)
739 goto out;
740
741 val_base = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(1));
742 if (ctx->win_data[1].dma_addr != val_base)
743 goto out;
744 }
745
746 drm_handle_vblank(drm_hdmi_ctx->drm_dev, ctx->pipe);
747 mixer_finish_pageflip(drm_hdmi_ctx->drm_dev, ctx->pipe);
748 }
749
750out:
751 /* clear interrupts */
752 if (~val & MXR_INT_EN_VSYNC) {
753 		/* the vsync interrupt uses different bits for read and clear */
754 val &= ~MXR_INT_EN_VSYNC;
755 val |= MXR_INT_CLEAR_VSYNC;
756 }
757 mixer_reg_write(res, MXR_INT_STATUS, val);
758
759 spin_unlock(&res->reg_slock);
760
761 return IRQ_HANDLED;
762}
763
764static void mixer_win_reset(struct mixer_context *ctx)
765{
766 struct mixer_resources *res = &ctx->mixer_res;
767 unsigned long flags;
768 u32 val; /* value stored to register */
769
770 spin_lock_irqsave(&res->reg_slock, flags);
771 mixer_vsync_set_update(ctx, false);
772
773 mixer_reg_writemask(res, MXR_CFG, MXR_CFG_DST_HDMI, MXR_CFG_DST_MASK);
774
775 /* set output in RGB888 mode */
776 mixer_reg_writemask(res, MXR_CFG, MXR_CFG_OUT_RGB888, MXR_CFG_OUT_MASK);
777
778 /* 16 beat burst in DMA */
779 mixer_reg_writemask(res, MXR_STATUS, MXR_STATUS_16_BURST,
780 MXR_STATUS_BURST_MASK);
781
782 /* setting default layer priority: layer1 > video > layer0
783 * because typical usage scenario would be
784 * layer0 - framebuffer
785 * video - video overlay
786 * layer1 - OSD
787 */
788 val = MXR_LAYER_CFG_GRP0_VAL(1);
789 val |= MXR_LAYER_CFG_VP_VAL(2);
790 val |= MXR_LAYER_CFG_GRP1_VAL(3);
791 mixer_reg_write(res, MXR_LAYER_CFG, val);
792
793 /* setting background color */
794 mixer_reg_write(res, MXR_BG_COLOR0, 0x008080);
795 mixer_reg_write(res, MXR_BG_COLOR1, 0x008080);
796 mixer_reg_write(res, MXR_BG_COLOR2, 0x008080);
797
798 /* setting graphical layers */
799
800 val = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */
801 val |= MXR_GRP_CFG_WIN_BLEND_EN;
802 val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */
803
804 /* the same configuration for both layers */
805 mixer_reg_write(res, MXR_GRAPHIC_CFG(0), val);
806
807 val |= MXR_GRP_CFG_BLEND_PRE_MUL;
808 val |= MXR_GRP_CFG_PIXEL_BLEND_EN;
809 mixer_reg_write(res, MXR_GRAPHIC_CFG(1), val);
810
811 /* configuration of Video Processor Registers */
812 vp_win_reset(ctx);
813 vp_default_filter(res);
814
815 /* disable all layers */
816 mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP0_ENABLE);
817 mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP1_ENABLE);
818 mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_VP_ENABLE);
819
820 mixer_vsync_set_update(ctx, true);
821 spin_unlock_irqrestore(&res->reg_slock, flags);
822}
823
824static void mixer_resource_poweron(struct mixer_context *ctx)
825{
826 struct mixer_resources *res = &ctx->mixer_res;
827
828 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
829
830 clk_enable(res->mixer);
831 clk_enable(res->vp);
832 clk_enable(res->sclk_mixer);
833
834 mixer_win_reset(ctx);
835}
836
837static void mixer_resource_poweroff(struct mixer_context *ctx)
838{
839 struct mixer_resources *res = &ctx->mixer_res;
840
841 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
842
843 clk_disable(res->mixer);
844 clk_disable(res->vp);
845 clk_disable(res->sclk_mixer);
846}
847
848static int mixer_runtime_resume(struct device *dev)
849{
850 struct exynos_drm_hdmi_context *ctx = get_mixer_context(dev);
851
852 DRM_DEBUG_KMS("resume - start\n");
853
854 mixer_resource_poweron((struct mixer_context *)ctx->ctx);
855
856 return 0;
857}
858
859static int mixer_runtime_suspend(struct device *dev)
860{
861 struct exynos_drm_hdmi_context *ctx = get_mixer_context(dev);
862
863 DRM_DEBUG_KMS("suspend - start\n");
864
865 mixer_resource_poweroff((struct mixer_context *)ctx->ctx);
866
867 return 0;
868}
869
870static const struct dev_pm_ops mixer_pm_ops = {
871 .runtime_suspend = mixer_runtime_suspend,
872 .runtime_resume = mixer_runtime_resume,
873};
874
875static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
876 struct platform_device *pdev)
877{
878 struct mixer_context *mixer_ctx =
879 (struct mixer_context *)ctx->ctx;
880 struct device *dev = &pdev->dev;
881 struct mixer_resources *mixer_res = &mixer_ctx->mixer_res;
882 struct resource *res;
883 int ret;
884
885 mixer_res->dev = dev;
886 spin_lock_init(&mixer_res->reg_slock);
887
888 mixer_res->mixer = clk_get(dev, "mixer");
889 if (IS_ERR_OR_NULL(mixer_res->mixer)) {
890 dev_err(dev, "failed to get clock 'mixer'\n");
891 ret = -ENODEV;
892 goto fail;
893 }
894 mixer_res->vp = clk_get(dev, "vp");
895 if (IS_ERR_OR_NULL(mixer_res->vp)) {
896 dev_err(dev, "failed to get clock 'vp'\n");
897 ret = -ENODEV;
898 goto fail;
899 }
900 mixer_res->sclk_mixer = clk_get(dev, "sclk_mixer");
901 if (IS_ERR_OR_NULL(mixer_res->sclk_mixer)) {
902 dev_err(dev, "failed to get clock 'sclk_mixer'\n");
903 ret = -ENODEV;
904 goto fail;
905 }
906 mixer_res->sclk_hdmi = clk_get(dev, "sclk_hdmi");
907 if (IS_ERR_OR_NULL(mixer_res->sclk_hdmi)) {
908 dev_err(dev, "failed to get clock 'sclk_hdmi'\n");
909 ret = -ENODEV;
910 goto fail;
911 }
912 mixer_res->sclk_dac = clk_get(dev, "sclk_dac");
913 if (IS_ERR_OR_NULL(mixer_res->sclk_dac)) {
914 dev_err(dev, "failed to get clock 'sclk_dac'\n");
915 ret = -ENODEV;
916 goto fail;
917 }
918 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mxr");
919 if (res == NULL) {
920 dev_err(dev, "get memory resource failed.\n");
921 ret = -ENXIO;
922 goto fail;
923 }
924
925 clk_set_parent(mixer_res->sclk_mixer, mixer_res->sclk_hdmi);
926
927 mixer_res->mixer_regs = ioremap(res->start, resource_size(res));
928 if (mixer_res->mixer_regs == NULL) {
929 dev_err(dev, "register mapping failed.\n");
930 ret = -ENXIO;
931 goto fail;
932 }
933
934 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vp");
935 if (res == NULL) {
936 dev_err(dev, "get memory resource failed.\n");
937 ret = -ENXIO;
938 goto fail_mixer_regs;
939 }
940
941 mixer_res->vp_regs = ioremap(res->start, resource_size(res));
942 if (mixer_res->vp_regs == NULL) {
943 dev_err(dev, "register mapping failed.\n");
944 ret = -ENXIO;
945 goto fail_mixer_regs;
946 }
947
948 res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq");
949 if (res == NULL) {
950 dev_err(dev, "get interrupt resource failed.\n");
951 ret = -ENXIO;
952 goto fail_vp_regs;
953 }
954
955 ret = request_irq(res->start, mixer_irq_handler, 0, "drm_mixer", ctx);
956 if (ret) {
957 dev_err(dev, "request interrupt failed.\n");
958 goto fail_vp_regs;
959 }
960 mixer_res->irq = res->start;
961
962 return 0;
963
964fail_vp_regs:
965 iounmap(mixer_res->vp_regs);
966
967fail_mixer_regs:
968 iounmap(mixer_res->mixer_regs);
969
970fail:
971 if (!IS_ERR_OR_NULL(mixer_res->sclk_dac))
972 clk_put(mixer_res->sclk_dac);
973 if (!IS_ERR_OR_NULL(mixer_res->sclk_hdmi))
974 clk_put(mixer_res->sclk_hdmi);
975 if (!IS_ERR_OR_NULL(mixer_res->sclk_mixer))
976 clk_put(mixer_res->sclk_mixer);
977 if (!IS_ERR_OR_NULL(mixer_res->vp))
978 clk_put(mixer_res->vp);
979 if (!IS_ERR_OR_NULL(mixer_res->mixer))
980 clk_put(mixer_res->mixer);
981 mixer_res->dev = NULL;
982 return ret;
983}
984
985static void mixer_resources_cleanup(struct mixer_context *ctx)
986{
987 struct mixer_resources *res = &ctx->mixer_res;
988
989 disable_irq(res->irq);
990 	free_irq(res->irq, get_mixer_context(res->dev));	/* match request_irq() dev_id */
991
992 iounmap(res->vp_regs);
993 iounmap(res->mixer_regs);
994}
995
996static int __devinit mixer_probe(struct platform_device *pdev)
997{
998 struct device *dev = &pdev->dev;
999 struct exynos_drm_hdmi_context *drm_hdmi_ctx;
1000 struct mixer_context *ctx;
1001 int ret;
1002
1003 dev_info(dev, "probe start\n");
1004
1005 drm_hdmi_ctx = kzalloc(sizeof(*drm_hdmi_ctx), GFP_KERNEL);
1006 if (!drm_hdmi_ctx) {
1007 DRM_ERROR("failed to allocate common hdmi context.\n");
1008 return -ENOMEM;
1009 }
1010
1011 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1012 if (!ctx) {
1013 DRM_ERROR("failed to alloc mixer context.\n");
1014 kfree(drm_hdmi_ctx);
1015 return -ENOMEM;
1016 }
1017
1018 drm_hdmi_ctx->ctx = (void *)ctx;
1019
1020 platform_set_drvdata(pdev, drm_hdmi_ctx);
1021
1022 /* acquire resources: regs, irqs, clocks */
1023 ret = mixer_resources_init(drm_hdmi_ctx, pdev);
1024 if (ret)
1025 goto fail;
1026
1027 	/* register mixer specific callbacks to common hdmi. */
1028 exynos_drm_overlay_ops_register(&overlay_ops);
1029
1030 mixer_resource_poweron(ctx);
1031
1032 return 0;
1033fail:
1034 	dev_err(dev, "probe failed\n");
1035 	kfree(ctx);
1036 	kfree(drm_hdmi_ctx);
1037 	return ret;
1038}
1039
1040static int __devexit mixer_remove(struct platform_device *pdev)
1041{
1042 struct device *dev = &pdev->dev;
1043 struct exynos_drm_hdmi_context *drm_hdmi_ctx =
1044 platform_get_drvdata(pdev);
1045 struct mixer_context *ctx = (struct mixer_context *)drm_hdmi_ctx->ctx;
1046
1047 	dev_info(dev, "remove successful\n");
1048 	mixer_resource_poweroff(ctx);
1049 	mixer_resources_cleanup(ctx);
1050 	kfree(ctx);
1051 	kfree(drm_hdmi_ctx);
1052 return 0;
1053}
1054
1055struct platform_driver mixer_driver = {
1056 .driver = {
1057 .name = "s5p-mixer",
1058 .owner = THIS_MODULE,
1059 .pm = &mixer_pm_ops,
1060 },
1061 .probe = mixer_probe,
1062 .remove = __devexit_p(mixer_remove),
1063};
1064EXPORT_SYMBOL(mixer_driver);
1065
1066MODULE_AUTHOR("Seung-Woo Kim, <sw0312.kim@samsung.com>");
1067MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
1068MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
1069MODULE_DESCRIPTION("Samsung DRM HDMI mixer Driver");
1070MODULE_LICENSE("GPL");
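mixer_reg_writemask() and vp_reg_writemask() above implement the standard read-modify-write idiom for MMIO bit fields. A stand-alone sketch of the same arithmetic, with a plain variable standing in for the memory-mapped register:

#include <stdint.h>
#include <stdio.h>

/* keep bits outside the mask, replace bits inside it */
static uint32_t writemask(uint32_t old, uint32_t val, uint32_t mask)
{
	return (val & mask) | (old & ~mask);
}

int main(void)
{
	uint32_t reg = 0xffff0000;

	/* set only bit 7 (cf. MXR_STATUS_16_BURST) and leave the rest alone */
	reg = writemask(reg, 1u << 7, 1u << 7);
	printf("%08x\n", reg);	/* prints ffff0080 */
	return 0;
}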
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.h b/drivers/gpu/drm/exynos/exynos_mixer.h
new file mode 100644
index 00000000000..cebacfefc07
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_mixer.h
@@ -0,0 +1,92 @@
1/*
2 *
3 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
4 * Authors:
5 * Seung-Woo Kim <sw0312.kim@samsung.com>
6 * Inki Dae <inki.dae@samsung.com>
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 * OTHER DEALINGS IN THE SOFTWARE.
26 */
27
28#ifndef _EXYNOS_MIXER_H_
29#define _EXYNOS_MIXER_H_
30
31#define HDMI_OVERLAY_NUMBER 3
32
33struct hdmi_win_data {
34 dma_addr_t dma_addr;
35 void __iomem *vaddr;
36 dma_addr_t chroma_dma_addr;
37 void __iomem *chroma_vaddr;
38 uint32_t pixel_format;
39 unsigned int bpp;
40 unsigned int crtc_x;
41 unsigned int crtc_y;
42 unsigned int crtc_width;
43 unsigned int crtc_height;
44 unsigned int fb_x;
45 unsigned int fb_y;
46 unsigned int fb_width;
47 unsigned int fb_height;
48 unsigned int mode_width;
49 unsigned int mode_height;
50 unsigned int scan_flags;
51};
52
53struct mixer_resources {
54 struct device *dev;
55 /** interrupt index */
56 int irq;
57 /** pointer to Mixer registers */
58 void __iomem *mixer_regs;
59 /** pointer to Video Processor registers */
60 void __iomem *vp_regs;
61 /** spinlock for protection of registers */
62 spinlock_t reg_slock;
63 /** other resources */
64 struct clk *mixer;
65 struct clk *vp;
66 struct clk *sclk_mixer;
67 struct clk *sclk_hdmi;
68 struct clk *sclk_dac;
69};
70
71struct mixer_context {
72 unsigned int default_win;
73 struct fb_videomode *default_timing;
74 unsigned int default_bpp;
75
76 /** mixer interrupt */
77 unsigned int irq;
78 /** current crtc pipe for vblank */
79 int pipe;
80 /** interlace scan mode */
81 bool interlace;
82 /** vp enabled status */
83 bool vp_enabled;
84
85 /** mixer and vp resources */
86 struct mixer_resources mixer_res;
87
88 /** overlay window data */
89 struct hdmi_win_data win_data[HDMI_OVERLAY_NUMBER];
90};
91
92#endif
diff --git a/drivers/gpu/drm/exynos/regs-hdmi.h b/drivers/gpu/drm/exynos/regs-hdmi.h
new file mode 100644
index 00000000000..72e6b52be74
--- /dev/null
+++ b/drivers/gpu/drm/exynos/regs-hdmi.h
@@ -0,0 +1,147 @@
1/*
2 *
3 * Cloned from drivers/media/video/s5p-tv/regs-hdmi.h
4 *
5 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
6 * http://www.samsung.com/
7 *
8 * HDMI register header file for Samsung TVOUT driver
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13*/
14
15#ifndef SAMSUNG_REGS_HDMI_H
16#define SAMSUNG_REGS_HDMI_H
17
18/*
19 * Register part
20*/
21
22#define HDMI_CTRL_BASE(x) ((x) + 0x00000000)
23#define HDMI_CORE_BASE(x) ((x) + 0x00010000)
24#define HDMI_TG_BASE(x) ((x) + 0x00050000)
25
26/* Control registers */
27#define HDMI_INTC_CON HDMI_CTRL_BASE(0x0000)
28#define HDMI_INTC_FLAG HDMI_CTRL_BASE(0x0004)
29#define HDMI_HPD_STATUS HDMI_CTRL_BASE(0x000C)
30#define HDMI_PHY_RSTOUT HDMI_CTRL_BASE(0x0014)
31#define HDMI_PHY_VPLL HDMI_CTRL_BASE(0x0018)
32#define HDMI_PHY_CMU HDMI_CTRL_BASE(0x001C)
33#define HDMI_CORE_RSTOUT HDMI_CTRL_BASE(0x0020)
34
35/* Core registers */
36#define HDMI_CON_0 HDMI_CORE_BASE(0x0000)
37#define HDMI_CON_1 HDMI_CORE_BASE(0x0004)
38#define HDMI_CON_2 HDMI_CORE_BASE(0x0008)
39#define HDMI_SYS_STATUS HDMI_CORE_BASE(0x0010)
40#define HDMI_PHY_STATUS HDMI_CORE_BASE(0x0014)
41#define HDMI_STATUS_EN HDMI_CORE_BASE(0x0020)
42#define HDMI_HPD HDMI_CORE_BASE(0x0030)
43#define HDMI_MODE_SEL HDMI_CORE_BASE(0x0040)
44#define HDMI_BLUE_SCREEN_0 HDMI_CORE_BASE(0x0050)
45#define HDMI_BLUE_SCREEN_1 HDMI_CORE_BASE(0x0054)
46#define HDMI_BLUE_SCREEN_2 HDMI_CORE_BASE(0x0058)
47#define HDMI_H_BLANK_0 HDMI_CORE_BASE(0x00A0)
48#define HDMI_H_BLANK_1 HDMI_CORE_BASE(0x00A4)
49#define HDMI_V_BLANK_0 HDMI_CORE_BASE(0x00B0)
50#define HDMI_V_BLANK_1 HDMI_CORE_BASE(0x00B4)
51#define HDMI_V_BLANK_2 HDMI_CORE_BASE(0x00B8)
52#define HDMI_H_V_LINE_0 HDMI_CORE_BASE(0x00C0)
53#define HDMI_H_V_LINE_1 HDMI_CORE_BASE(0x00C4)
54#define HDMI_H_V_LINE_2 HDMI_CORE_BASE(0x00C8)
55#define HDMI_VSYNC_POL HDMI_CORE_BASE(0x00E4)
56#define HDMI_INT_PRO_MODE HDMI_CORE_BASE(0x00E8)
57#define HDMI_V_BLANK_F_0 HDMI_CORE_BASE(0x0110)
58#define HDMI_V_BLANK_F_1 HDMI_CORE_BASE(0x0114)
59#define HDMI_V_BLANK_F_2 HDMI_CORE_BASE(0x0118)
60#define HDMI_H_SYNC_GEN_0 HDMI_CORE_BASE(0x0120)
61#define HDMI_H_SYNC_GEN_1 HDMI_CORE_BASE(0x0124)
62#define HDMI_H_SYNC_GEN_2 HDMI_CORE_BASE(0x0128)
63#define HDMI_V_SYNC_GEN_1_0 HDMI_CORE_BASE(0x0130)
64#define HDMI_V_SYNC_GEN_1_1 HDMI_CORE_BASE(0x0134)
65#define HDMI_V_SYNC_GEN_1_2 HDMI_CORE_BASE(0x0138)
66#define HDMI_V_SYNC_GEN_2_0 HDMI_CORE_BASE(0x0140)
67#define HDMI_V_SYNC_GEN_2_1 HDMI_CORE_BASE(0x0144)
68#define HDMI_V_SYNC_GEN_2_2 HDMI_CORE_BASE(0x0148)
69#define HDMI_V_SYNC_GEN_3_0 HDMI_CORE_BASE(0x0150)
70#define HDMI_V_SYNC_GEN_3_1 HDMI_CORE_BASE(0x0154)
71#define HDMI_V_SYNC_GEN_3_2 HDMI_CORE_BASE(0x0158)
72#define HDMI_ACR_CON HDMI_CORE_BASE(0x0180)
73#define HDMI_AVI_CON HDMI_CORE_BASE(0x0300)
74#define HDMI_AVI_BYTE(n) HDMI_CORE_BASE(0x0320 + 4 * (n))
75#define HDMI_DC_CONTROL HDMI_CORE_BASE(0x05C0)
76#define HDMI_VIDEO_PATTERN_GEN HDMI_CORE_BASE(0x05C4)
77#define HDMI_HPD_GEN HDMI_CORE_BASE(0x05C8)
78#define HDMI_AUI_CON HDMI_CORE_BASE(0x0360)
79#define HDMI_SPD_CON HDMI_CORE_BASE(0x0400)
80
81/* Timing generator registers */
82#define HDMI_TG_CMD HDMI_TG_BASE(0x0000)
83#define HDMI_TG_H_FSZ_L HDMI_TG_BASE(0x0018)
84#define HDMI_TG_H_FSZ_H HDMI_TG_BASE(0x001C)
85#define HDMI_TG_HACT_ST_L HDMI_TG_BASE(0x0020)
86#define HDMI_TG_HACT_ST_H HDMI_TG_BASE(0x0024)
87#define HDMI_TG_HACT_SZ_L HDMI_TG_BASE(0x0028)
88#define HDMI_TG_HACT_SZ_H HDMI_TG_BASE(0x002C)
89#define HDMI_TG_V_FSZ_L HDMI_TG_BASE(0x0030)
90#define HDMI_TG_V_FSZ_H HDMI_TG_BASE(0x0034)
91#define HDMI_TG_VSYNC_L HDMI_TG_BASE(0x0038)
92#define HDMI_TG_VSYNC_H HDMI_TG_BASE(0x003C)
93#define HDMI_TG_VSYNC2_L HDMI_TG_BASE(0x0040)
94#define HDMI_TG_VSYNC2_H HDMI_TG_BASE(0x0044)
95#define HDMI_TG_VACT_ST_L HDMI_TG_BASE(0x0048)
96#define HDMI_TG_VACT_ST_H HDMI_TG_BASE(0x004C)
97#define HDMI_TG_VACT_SZ_L HDMI_TG_BASE(0x0050)
98#define HDMI_TG_VACT_SZ_H HDMI_TG_BASE(0x0054)
99#define HDMI_TG_FIELD_CHG_L HDMI_TG_BASE(0x0058)
100#define HDMI_TG_FIELD_CHG_H HDMI_TG_BASE(0x005C)
101#define HDMI_TG_VACT_ST2_L HDMI_TG_BASE(0x0060)
102#define HDMI_TG_VACT_ST2_H HDMI_TG_BASE(0x0064)
103#define HDMI_TG_VSYNC_TOP_HDMI_L HDMI_TG_BASE(0x0078)
104#define HDMI_TG_VSYNC_TOP_HDMI_H HDMI_TG_BASE(0x007C)
105#define HDMI_TG_VSYNC_BOT_HDMI_L HDMI_TG_BASE(0x0080)
106#define HDMI_TG_VSYNC_BOT_HDMI_H HDMI_TG_BASE(0x0084)
107#define HDMI_TG_FIELD_TOP_HDMI_L HDMI_TG_BASE(0x0088)
108#define HDMI_TG_FIELD_TOP_HDMI_H HDMI_TG_BASE(0x008C)
109#define HDMI_TG_FIELD_BOT_HDMI_L HDMI_TG_BASE(0x0090)
110#define HDMI_TG_FIELD_BOT_HDMI_H HDMI_TG_BASE(0x0094)
111
112/*
113 * Bit definition part
114 */
115
116/* HDMI_INTC_CON */
117#define HDMI_INTC_EN_GLOBAL (1 << 6)
118#define HDMI_INTC_EN_HPD_PLUG (1 << 3)
119#define HDMI_INTC_EN_HPD_UNPLUG (1 << 2)
120
121/* HDMI_INTC_FLAG */
122#define HDMI_INTC_FLAG_HPD_PLUG (1 << 3)
123#define HDMI_INTC_FLAG_HPD_UNPLUG (1 << 2)
124
125/* HDMI_PHY_RSTOUT */
126#define HDMI_PHY_SW_RSTOUT (1 << 0)
127
128/* HDMI_CORE_RSTOUT */
129#define HDMI_CORE_SW_RSTOUT (1 << 0)
130
131/* HDMI_CON_0 */
132#define HDMI_BLUE_SCR_EN (1 << 5)
133#define HDMI_EN (1 << 0)
134
135/* HDMI_PHY_STATUS */
136#define HDMI_PHY_STATUS_READY (1 << 0)
137
138/* HDMI_MODE_SEL */
139#define HDMI_MODE_HDMI_EN (1 << 1)
140#define HDMI_MODE_DVI_EN (1 << 0)
141#define HDMI_MODE_MASK (3 << 0)
142
143/* HDMI_TG_CMD */
144#define HDMI_TG_EN (1 << 0)
145#define HDMI_FIELD_EN (1 << 1)
146
147#endif /* SAMSUNG_REGS_HDMI_H */
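The *_BASE() helpers above fold each bank's offset into the register constants, so one ioremapped region plus readl()/writel() covers the control, core and timing-generator banks. A minimal sketch, assuming hdmi_regs plays the role of hdata->regs from exynos_hdmi.c:

#include <linux/io.h>

static inline u32 example_hdmi_read(void __iomem *hdmi_regs, u32 reg_id)
{
	/* e.g. reg_id = HDMI_CON_0 expands to 0x10000 + 0x0000 */
	return readl(hdmi_regs + reg_id);
}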
diff --git a/drivers/gpu/drm/exynos/regs-mixer.h b/drivers/gpu/drm/exynos/regs-mixer.h
new file mode 100644
index 00000000000..fd2f4d14cf6
--- /dev/null
+++ b/drivers/gpu/drm/exynos/regs-mixer.h
@@ -0,0 +1,141 @@
1/*
2 *
3 * Cloned from drivers/media/video/s5p-tv/regs-mixer.h
4 *
5 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
6 * http://www.samsung.com/
7 *
8 * Mixer register header file for Samsung Mixer driver
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13*/
14#ifndef SAMSUNG_REGS_MIXER_H
15#define SAMSUNG_REGS_MIXER_H
16
17/*
18 * Register part
19 */
20#define MXR_STATUS 0x0000
21#define MXR_CFG 0x0004
22#define MXR_INT_EN 0x0008
23#define MXR_INT_STATUS 0x000C
24#define MXR_LAYER_CFG 0x0010
25#define MXR_VIDEO_CFG 0x0014
26#define MXR_GRAPHIC0_CFG 0x0020
27#define MXR_GRAPHIC0_BASE 0x0024
28#define MXR_GRAPHIC0_SPAN 0x0028
29#define MXR_GRAPHIC0_SXY 0x002C
30#define MXR_GRAPHIC0_WH 0x0030
31#define MXR_GRAPHIC0_DXY 0x0034
32#define MXR_GRAPHIC0_BLANK 0x0038
33#define MXR_GRAPHIC1_CFG 0x0040
34#define MXR_GRAPHIC1_BASE 0x0044
35#define MXR_GRAPHIC1_SPAN 0x0048
36#define MXR_GRAPHIC1_SXY 0x004C
37#define MXR_GRAPHIC1_WH 0x0050
38#define MXR_GRAPHIC1_DXY 0x0054
39#define MXR_GRAPHIC1_BLANK 0x0058
40#define MXR_BG_CFG 0x0060
41#define MXR_BG_COLOR0 0x0064
42#define MXR_BG_COLOR1 0x0068
43#define MXR_BG_COLOR2 0x006C
44#define MXR_CM_COEFF_Y 0x0080
45#define MXR_CM_COEFF_CB 0x0084
46#define MXR_CM_COEFF_CR 0x0088
47#define MXR_GRAPHIC0_BASE_S 0x2024
48#define MXR_GRAPHIC1_BASE_S 0x2044
49
50/* for parametrized access to layer registers */
51#define MXR_GRAPHIC_CFG(i) (0x0020 + (i) * 0x20)
52#define MXR_GRAPHIC_BASE(i) (0x0024 + (i) * 0x20)
53#define MXR_GRAPHIC_SPAN(i) (0x0028 + (i) * 0x20)
54#define MXR_GRAPHIC_SXY(i) (0x002C + (i) * 0x20)
55#define MXR_GRAPHIC_WH(i) (0x0030 + (i) * 0x20)
56#define MXR_GRAPHIC_DXY(i) (0x0034 + (i) * 0x20)
57#define MXR_GRAPHIC_BLANK(i) (0x0038 + (i) * 0x20)
58#define MXR_GRAPHIC_BASE_S(i) (0x2024 + (i) * 0x20)
59
60/*
61 * Bit definition part
62 */
63
64/* generates mask for range of bits */
65#define MXR_MASK(high_bit, low_bit) \
66 (((2 << ((high_bit) - (low_bit))) - 1) << (low_bit))
67
68#define MXR_MASK_VAL(val, high_bit, low_bit) \
69 (((val) << (low_bit)) & MXR_MASK(high_bit, low_bit))
70
71/* bits for MXR_STATUS */
72#define MXR_STATUS_16_BURST (1 << 7)
73#define MXR_STATUS_BURST_MASK (1 << 7)
74#define MXR_STATUS_BIG_ENDIAN (1 << 3)
75#define MXR_STATUS_ENDIAN_MASK (1 << 3)
76#define MXR_STATUS_SYNC_ENABLE (1 << 2)
77#define MXR_STATUS_REG_RUN (1 << 0)
78
79/* bits for MXR_CFG */
80#define MXR_CFG_RGB601_0_255 (0 << 9)
81#define MXR_CFG_RGB601_16_235 (1 << 9)
82#define MXR_CFG_RGB709_0_255 (2 << 9)
83#define MXR_CFG_RGB709_16_235 (3 << 9)
84#define MXR_CFG_RGB_FMT_MASK 0x600
85#define MXR_CFG_OUT_YUV444 (0 << 8)
86#define MXR_CFG_OUT_RGB888 (1 << 8)
87#define MXR_CFG_OUT_MASK (1 << 8)
88#define MXR_CFG_DST_SDO (0 << 7)
89#define MXR_CFG_DST_HDMI (1 << 7)
90#define MXR_CFG_DST_MASK (1 << 7)
91#define MXR_CFG_SCAN_HD_720 (0 << 6)
92#define MXR_CFG_SCAN_HD_1080 (1 << 6)
93#define MXR_CFG_GRP1_ENABLE (1 << 5)
94#define MXR_CFG_GRP0_ENABLE (1 << 4)
95#define MXR_CFG_VP_ENABLE (1 << 3)
96#define MXR_CFG_SCAN_INTERLACE (0 << 2)
97 #define MXR_CFG_SCAN_PROGRESSIVE	(1 << 2)
98#define MXR_CFG_SCAN_NTSC (0 << 1)
99#define MXR_CFG_SCAN_PAL (1 << 1)
100#define MXR_CFG_SCAN_SD (0 << 0)
101#define MXR_CFG_SCAN_HD (1 << 0)
102#define MXR_CFG_SCAN_MASK 0x47
103
104/* bits for MXR_GRAPHICn_CFG */
105#define MXR_GRP_CFG_COLOR_KEY_DISABLE (1 << 21)
106#define MXR_GRP_CFG_BLEND_PRE_MUL (1 << 20)
107#define MXR_GRP_CFG_WIN_BLEND_EN (1 << 17)
108#define MXR_GRP_CFG_PIXEL_BLEND_EN (1 << 16)
109#define MXR_GRP_CFG_FORMAT_VAL(x) MXR_MASK_VAL(x, 11, 8)
110#define MXR_GRP_CFG_FORMAT_MASK MXR_GRP_CFG_FORMAT_VAL(~0)
111#define MXR_GRP_CFG_ALPHA_VAL(x) MXR_MASK_VAL(x, 7, 0)
112
113/* bits for MXR_GRAPHICn_WH */
114#define MXR_GRP_WH_H_SCALE(x) MXR_MASK_VAL(x, 28, 28)
115#define MXR_GRP_WH_V_SCALE(x) MXR_MASK_VAL(x, 12, 12)
116#define MXR_GRP_WH_WIDTH(x) MXR_MASK_VAL(x, 26, 16)
117#define MXR_GRP_WH_HEIGHT(x) MXR_MASK_VAL(x, 10, 0)
118
119/* bits for MXR_GRAPHICn_SXY */
120#define MXR_GRP_SXY_SX(x) MXR_MASK_VAL(x, 26, 16)
121#define MXR_GRP_SXY_SY(x) MXR_MASK_VAL(x, 10, 0)
122
123/* bits for MXR_GRAPHICn_DXY */
124#define MXR_GRP_DXY_DX(x) MXR_MASK_VAL(x, 26, 16)
125#define MXR_GRP_DXY_DY(x) MXR_MASK_VAL(x, 10, 0)
126
127/* bits for MXR_INT_EN */
128#define MXR_INT_EN_VSYNC (1 << 11)
129#define MXR_INT_EN_ALL (0x0f << 8)
130
131/* bit for MXR_INT_STATUS */
132#define MXR_INT_CLEAR_VSYNC (1 << 11)
133#define MXR_INT_STATUS_VSYNC (1 << 0)
134
135/* bit for MXR_LAYER_CFG */
136#define MXR_LAYER_CFG_GRP1_VAL(x) MXR_MASK_VAL(x, 11, 8)
137#define MXR_LAYER_CFG_GRP0_VAL(x) MXR_MASK_VAL(x, 7, 4)
138#define MXR_LAYER_CFG_VP_VAL(x) MXR_MASK_VAL(x, 3, 0)
139
140#endif /* SAMSUNG_REGS_MIXER_H */
141
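MXR_MASK() above builds a contiguous mask from a high/low bit pair, and MXR_MASK_VAL() shifts a value into that field. A worked example for the graphic-window width field (bits 26..16) of MXR_GRAPHICn_WH:

/*
 * MXR_MASK(26, 16)           = ((2 << 10) - 1) << 16 = 0x07ff0000
 * MXR_MASK_VAL(1280, 26, 16) = (1280 << 16) & 0x07ff0000 = 0x05000000
 * so MXR_GRP_WH_WIDTH(1280) places a 1280-pixel width in bits 26..16.
 */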
diff --git a/drivers/gpu/drm/exynos/regs-vp.h b/drivers/gpu/drm/exynos/regs-vp.h
new file mode 100644
index 00000000000..10b737af0a7
--- /dev/null
+++ b/drivers/gpu/drm/exynos/regs-vp.h
@@ -0,0 +1,91 @@
1/*
2 *
3 * Cloned from drivers/media/video/s5p-tv/regs-vp.h
4 *
5 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
6 * http://www.samsung.com/
7 *
8 * Video processor register header file for Samsung Mixer driver
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */
14
15#ifndef SAMSUNG_REGS_VP_H
16#define SAMSUNG_REGS_VP_H
17
18/*
19 * Register part
20 */
21
22#define VP_ENABLE 0x0000
23#define VP_SRESET 0x0004
24#define VP_SHADOW_UPDATE 0x0008
25#define VP_FIELD_ID 0x000C
26#define VP_MODE 0x0010
27#define VP_IMG_SIZE_Y 0x0014
28#define VP_IMG_SIZE_C 0x0018
29#define VP_PER_RATE_CTRL 0x001C
30#define VP_TOP_Y_PTR 0x0028
31#define VP_BOT_Y_PTR 0x002C
32#define VP_TOP_C_PTR 0x0030
33#define VP_BOT_C_PTR 0x0034
34#define VP_ENDIAN_MODE 0x03CC
35#define VP_SRC_H_POSITION 0x0044
36#define VP_SRC_V_POSITION 0x0048
37#define VP_SRC_WIDTH 0x004C
38#define VP_SRC_HEIGHT 0x0050
39#define VP_DST_H_POSITION 0x0054
40#define VP_DST_V_POSITION 0x0058
41#define VP_DST_WIDTH 0x005C
42#define VP_DST_HEIGHT 0x0060
43#define VP_H_RATIO 0x0064
44#define VP_V_RATIO 0x0068
45#define VP_POLY8_Y0_LL 0x006C
46#define VP_POLY4_Y0_LL 0x00EC
47#define VP_POLY4_C0_LL 0x012C
48
49/*
50 * Bit definition part
51 */
52
53/* generates mask for range of bits */
54
55#define VP_MASK(high_bit, low_bit) \
56 (((2 << ((high_bit) - (low_bit))) - 1) << (low_bit))
57
58#define VP_MASK_VAL(val, high_bit, low_bit) \
59 (((val) << (low_bit)) & VP_MASK(high_bit, low_bit))
60
61/* VP_ENABLE */
62#define VP_ENABLE_ON (1 << 0)
63
64/* VP_SRESET */
65#define VP_SRESET_PROCESSING (1 << 0)
66
67/* VP_SHADOW_UPDATE */
68#define VP_SHADOW_UPDATE_ENABLE (1 << 0)
69
70/* VP_MODE */
71#define VP_MODE_NV12 (0 << 6)
72#define VP_MODE_NV21 (1 << 6)
73#define VP_MODE_LINE_SKIP (1 << 5)
74#define VP_MODE_MEM_LINEAR (0 << 4)
75#define VP_MODE_MEM_TILED (1 << 4)
76#define VP_MODE_FMT_MASK (5 << 4)
77#define VP_MODE_FIELD_ID_AUTO_TOGGLING (1 << 2)
78#define VP_MODE_2D_IPC (1 << 1)
79
80/* VP_IMG_SIZE_Y */
81/* VP_IMG_SIZE_C */
82#define VP_IMG_HSIZE(x) VP_MASK_VAL(x, 29, 16)
83#define VP_IMG_VSIZE(x) VP_MASK_VAL(x, 13, 0)
84
85/* VP_SRC_H_POSITION */
86#define VP_SRC_H_POSITION_VAL(x) VP_MASK_VAL(x, 14, 4)
87
88/* VP_ENDIAN_MODE */
89#define VP_ENDIAN_MODE_LITTLE (1 << 0)
90
91#endif /* SAMSUNG_REGS_VP_H */
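[Editorial note: a worked example of how these field macros expand, not part of the patch. VP_MASK(29, 16) evaluates to ((2 << 13) - 1) << 16 = 0x3FFF0000, so VP_IMG_HSIZE(x) places a 14-bit horizontal size at bits 29:16 and VP_IMG_VSIZE(x) a 14-bit vertical size at bits 13:0.]

	/* Worked example: pack a 1920x1080 luma image size */
	u32 size_y = VP_IMG_HSIZE(1920) | VP_IMG_VSIZE(1080);
	/* VP_IMG_HSIZE(1920) == 1920 << 16 == 0x07800000 (bits 29:16) */
	/* VP_IMG_VSIZE(1080) == 1080       == 0x00000438 (bits 13:0)  */
	/* size_y == 0x07800438, suitable for writing to VP_IMG_SIZE_Y */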
diff --git a/drivers/gpu/drm/gma500/Kconfig b/drivers/gpu/drm/gma500/Kconfig
new file mode 100644
index 00000000000..754e14bdc80
--- /dev/null
+++ b/drivers/gpu/drm/gma500/Kconfig
@@ -0,0 +1,27 @@
1config DRM_GMA500
2 tristate "Intel GMA5/600 KMS Framebuffer"
3 depends on DRM && PCI && X86 && EXPERIMENTAL
4 select FB_CFB_COPYAREA
5 select FB_CFB_FILLRECT
6 select FB_CFB_IMAGEBLIT
7 select DRM_KMS_HELPER
8 select DRM_TTM
9 help
10 Say yes for an experimental 2D KMS framebuffer driver for the
11 Intel GMA500 ('Poulsbo') and other Intel IMG based graphics
12 devices.
13
14config DRM_GMA600
15 bool "Intel GMA600 support (Experimental)"
16 depends on DRM_GMA500
17 help
18 Say yes to include support for GMA600 (Intel Moorestown/Oaktrail)
19 platforms with LVDS ports. HDMI and MIPI are not currently
20 supported.
21
22config DRM_GMA3600
23 bool "Intel GMA3600/3650 support (Experimental)"
24 depends on DRM_GMA500
25 help
26 Say yes to include basic support for Intel GMA3600/3650 (Intel
27 Cedar Trail) platforms.
diff --git a/drivers/gpu/drm/gma500/Makefile b/drivers/gpu/drm/gma500/Makefile
new file mode 100644
index 00000000000..81c103be5e2
--- /dev/null
+++ b/drivers/gpu/drm/gma500/Makefile
@@ -0,0 +1,40 @@
1#
2# KMS driver for the GMA500
3#
4ccflags-y += -Iinclude/drm
5
6gma500_gfx-y += gem_glue.o \
7 accel_2d.o \
8 backlight.o \
9 framebuffer.o \
10 gem.o \
11 gtt.o \
12 intel_bios.o \
13 intel_i2c.o \
14 intel_gmbus.o \
15 intel_opregion.o \
16 mmu.o \
17 power.o \
18 psb_drv.o \
19 psb_intel_display.o \
20 psb_intel_lvds.o \
21 psb_intel_modes.o \
22 psb_intel_sdvo.o \
23 psb_lid.o \
24 psb_irq.o \
25 psb_device.o \
26 mid_bios.o
27
28gma500_gfx-$(CONFIG_DRM_GMA3600) += cdv_device.o \
29 cdv_intel_crt.o \
30 cdv_intel_display.o \
31 cdv_intel_hdmi.o \
32 cdv_intel_lvds.o
33
34gma500_gfx-$(CONFIG_DRM_GMA600) += oaktrail_device.o \
35 oaktrail_crtc.o \
36 oaktrail_lvds.o \
37 oaktrail_hdmi.o \
38 oaktrail_hdmi_i2c.o
39
40obj-$(CONFIG_DRM_GMA500) += gma500_gfx.o
diff --git a/drivers/gpu/drm/gma500/accel_2d.c b/drivers/gpu/drm/gma500/accel_2d.c
new file mode 100644
index 00000000000..d5ef1a5793c
--- /dev/null
+++ b/drivers/gpu/drm/gma500/accel_2d.c
@@ -0,0 +1,364 @@
1/**************************************************************************
2 * Copyright (c) 2007-2011, Intel Corporation.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
19 * develop this driver.
20 *
21 **************************************************************************/
22
23#include <linux/module.h>
24#include <linux/kernel.h>
25#include <linux/errno.h>
26#include <linux/string.h>
27#include <linux/mm.h>
28#include <linux/tty.h>
29#include <linux/slab.h>
30#include <linux/delay.h>
31#include <linux/fb.h>
32#include <linux/init.h>
33#include <linux/console.h>
34
35#include <drm/drmP.h>
36#include <drm/drm.h>
37#include <drm/drm_crtc.h>
38
39#include "psb_drv.h"
40#include "psb_reg.h"
41#include "framebuffer.h"
42
43/**
44 * psb_spank - reset the 2D engine
45 * @dev_priv: our PSB DRM device
46 *
47 * Soft reset the graphics engine and then reload the necessary registers.
48 * We use this at initialisation time but it will become relevant for
49 * accelerated X later
50 */
51void psb_spank(struct drm_psb_private *dev_priv)
52{
53 PSB_WSGX32(_PSB_CS_RESET_BIF_RESET | _PSB_CS_RESET_DPM_RESET |
54 _PSB_CS_RESET_TA_RESET | _PSB_CS_RESET_USE_RESET |
55 _PSB_CS_RESET_ISP_RESET | _PSB_CS_RESET_TSP_RESET |
56 _PSB_CS_RESET_TWOD_RESET, PSB_CR_SOFT_RESET);
57 PSB_RSGX32(PSB_CR_SOFT_RESET);
58
59 msleep(1);
60
61 PSB_WSGX32(0, PSB_CR_SOFT_RESET);
62 wmb();
63 PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) | _PSB_CB_CTRL_CLEAR_FAULT,
64 PSB_CR_BIF_CTRL);
65 wmb();
66 (void) PSB_RSGX32(PSB_CR_BIF_CTRL);
67
68 msleep(1);
69 PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) & ~_PSB_CB_CTRL_CLEAR_FAULT,
70 PSB_CR_BIF_CTRL);
71 (void) PSB_RSGX32(PSB_CR_BIF_CTRL);
72 PSB_WSGX32(dev_priv->gtt.gatt_start, PSB_CR_BIF_TWOD_REQ_BASE);
73}
74
75/**
76 * psb_2d_wait_available - wait for FIFO room
77 * @dev_priv: our DRM device
78 * @size: size (in dwords) of the command we want to issue
79 *
80 * Wait until there is room to load the FIFO with our data. If the
81 * device is not responding then reset it
82 */
83static int psb_2d_wait_available(struct drm_psb_private *dev_priv,
84 unsigned size)
85{
86 uint32_t avail = PSB_RSGX32(PSB_CR_2D_SOCIF);
87 unsigned long t = jiffies + HZ;
88
89 while (avail < size) {
90 avail = PSB_RSGX32(PSB_CR_2D_SOCIF);
91 if (time_after(jiffies, t)) {
92 psb_spank(dev_priv);
93 return -EIO;
94 }
95 }
96 return 0;
97}
98
99/**
100 * psbfb_2d_submit - submit a 2D command
101 * @dev_priv: our DRM device
102 * @cmdbuf: command to issue
103 * @size: length (in dwords)
104 *
105 * Issue one or more 2D commands to the accelerator. This needs to be
106 * serialized later when we add the GEM interfaces for acceleration
107 */
108static int psbfb_2d_submit(struct drm_psb_private *dev_priv, uint32_t *cmdbuf,
109 unsigned size)
110{
111 int ret = 0;
112 int i;
113 unsigned submit_size;
114 unsigned long flags;
115
116 spin_lock_irqsave(&dev_priv->lock_2d, flags);
117 while (size > 0) {
118 submit_size = (size < 0x60) ? size : 0x60;
119 size -= submit_size;
120 ret = psb_2d_wait_available(dev_priv, submit_size);
121 if (ret)
122 break;
123
124 submit_size <<= 2;
125
126 for (i = 0; i < submit_size; i += 4)
127 PSB_WSGX32(*cmdbuf++, PSB_SGX_2D_SLAVE_PORT + i);
128
129 (void)PSB_RSGX32(PSB_SGX_2D_SLAVE_PORT + i - 4);
130 }
131 spin_unlock_irqrestore(&dev_priv->lock_2d, flags);
132 return ret;
133}
134
135
136/**
137 * psb_accel_2d_copy_direction - compute blit order
138 * @xdir: X direction of move
139 * @ydir: Y direction of move
140 *
141 * Compute the correct order settings to ensure that an overlapping blit
142 * correctly copies all the pixels.
143 */
144static u32 psb_accel_2d_copy_direction(int xdir, int ydir)
145{
146 if (xdir < 0)
147 return (ydir < 0) ? PSB_2D_COPYORDER_BR2TL :
148 PSB_2D_COPYORDER_TR2BL;
149 else
150 return (ydir < 0) ? PSB_2D_COPYORDER_BL2TR :
151 PSB_2D_COPYORDER_TL2BR;
152}
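[Editorial note: an illustrative call, not in the patch, to make the overlap rule concrete. When the destination lies to the right of and below the source, xdir = src_x - dst_x and ydir = src_y - dst_y are both negative, so the copy must start at the bottom-right corner to avoid reading pixels that were already overwritten.]

	/* Illustrative: dst is 8px right and 8px below src -> both dirs negative */
	u32 order = psb_accel_2d_copy_direction(-8, -8);
	/* order == PSB_2D_COPYORDER_BR2TL: walk bottom-right to top-left */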
153
154/**
155 * psb_accel_2d_copy - accelerated 2D copy
156 * @dev_priv: our DRM device
157 * @src_offset: source offset in bytes
158 * @src_stride: source stride in bytes
159 * @src_format: psb 2D format defines
160 * @dst_offset: destination offset in bytes
161 * @dst_stride: destination stride in bytes
162 * @dst_format: psb 2D format defines
163 * @src_x: source X offset in pixels
164 * @src_y: source Y offset in pixels
165 * @dst_x: destination X offset in pixels
166 * @dst_y: destination Y offset in pixels
167 * @size_x: width of the copied area in pixels
168 * @size_y: height of the copied area in pixels
169 *
170 * Format and issue a 2D accelerated copy command.
171 */
172static int psb_accel_2d_copy(struct drm_psb_private *dev_priv,
173 uint32_t src_offset, uint32_t src_stride,
174 uint32_t src_format, uint32_t dst_offset,
175 uint32_t dst_stride, uint32_t dst_format,
176 uint16_t src_x, uint16_t src_y,
177 uint16_t dst_x, uint16_t dst_y,
178 uint16_t size_x, uint16_t size_y)
179{
180 uint32_t blit_cmd;
181 uint32_t buffer[10];
182 uint32_t *buf;
183 uint32_t direction;
184
185 buf = buffer;
186
187 direction =
188 psb_accel_2d_copy_direction(src_x - dst_x, src_y - dst_y);
189
190 if (direction == PSB_2D_COPYORDER_BR2TL ||
191 direction == PSB_2D_COPYORDER_TR2BL) {
192 src_x += size_x - 1;
193 dst_x += size_x - 1;
194 }
195 if (direction == PSB_2D_COPYORDER_BR2TL ||
196 direction == PSB_2D_COPYORDER_BL2TR) {
197 src_y += size_y - 1;
198 dst_y += size_y - 1;
199 }
200
201 blit_cmd =
202 PSB_2D_BLIT_BH |
203 PSB_2D_ROT_NONE |
204 PSB_2D_DSTCK_DISABLE |
205 PSB_2D_SRCCK_DISABLE |
206 PSB_2D_USE_PAT | PSB_2D_ROP3_SRCCOPY | direction;
207
208 *buf++ = PSB_2D_FENCE_BH;
209 *buf++ =
210 PSB_2D_DST_SURF_BH | dst_format | (dst_stride <<
211 PSB_2D_DST_STRIDE_SHIFT);
212 *buf++ = dst_offset;
213 *buf++ =
214 PSB_2D_SRC_SURF_BH | src_format | (src_stride <<
215 PSB_2D_SRC_STRIDE_SHIFT);
216 *buf++ = src_offset;
217 *buf++ =
218 PSB_2D_SRC_OFF_BH | (src_x << PSB_2D_SRCOFF_XSTART_SHIFT) |
219 (src_y << PSB_2D_SRCOFF_YSTART_SHIFT);
220 *buf++ = blit_cmd;
221 *buf++ =
222 (dst_x << PSB_2D_DST_XSTART_SHIFT) | (dst_y <<
223 PSB_2D_DST_YSTART_SHIFT);
224 *buf++ =
225 (size_x << PSB_2D_DST_XSIZE_SHIFT) | (size_y <<
226 PSB_2D_DST_YSIZE_SHIFT);
227 *buf++ = PSB_2D_FLUSH_BH;
228
229 return psbfb_2d_submit(dev_priv, buffer, buf - buffer);
230}
231
232/**
233 * psbfb_copyarea_accel - copyarea acceleration for /dev/fb
234 * @info: our framebuffer
235 * @a: copyarea parameters from the framebuffer core
236 *
237 * Perform a 2D copy via the accelerator
238 */
239static void psbfb_copyarea_accel(struct fb_info *info,
240 const struct fb_copyarea *a)
241{
242 struct psb_fbdev *fbdev = info->par;
243 struct psb_framebuffer *psbfb = &fbdev->pfb;
244 struct drm_device *dev = psbfb->base.dev;
245 struct drm_framebuffer *fb = fbdev->psb_fb_helper.fb;
246 struct drm_psb_private *dev_priv = dev->dev_private;
247 uint32_t offset;
248 uint32_t stride;
249 uint32_t src_format;
250 uint32_t dst_format;
251
252 if (!fb)
253 return;
254
255 offset = psbfb->gtt->offset;
256 stride = fb->pitches[0];
257
258 switch (fb->depth) {
259 case 8:
260 src_format = PSB_2D_SRC_332RGB;
261 dst_format = PSB_2D_DST_332RGB;
262 break;
263 case 15:
264 src_format = PSB_2D_SRC_555RGB;
265 dst_format = PSB_2D_DST_555RGB;
266 break;
267 case 16:
268 src_format = PSB_2D_SRC_565RGB;
269 dst_format = PSB_2D_DST_565RGB;
270 break;
271 case 24:
272 case 32:
273 /* this is wrong but since we don't do blending it's okay */
274 src_format = PSB_2D_SRC_8888ARGB;
275 dst_format = PSB_2D_DST_8888ARGB;
276 break;
277 default:
278 /* software fallback */
279 cfb_copyarea(info, a);
280 return;
281 }
282
283 if (!gma_power_begin(dev, false)) {
284 cfb_copyarea(info, a);
285 return;
286 }
287 psb_accel_2d_copy(dev_priv,
288 offset, stride, src_format,
289 offset, stride, dst_format,
290 a->sx, a->sy, a->dx, a->dy, a->width, a->height);
291 gma_power_end(dev);
292}
293
294/**
295 * psbfb_copyarea - 2D copy interface
296 * @info: our framebuffer
297 * @region: region to copy
298 *
299 * Copy an area of the framebuffer console either by the accelerator
300 * or directly using the cfb helpers according to the request
301 */
302void psbfb_copyarea(struct fb_info *info,
303 const struct fb_copyarea *region)
304{
305 if (unlikely(info->state != FBINFO_STATE_RUNNING))
306 return;
307
308 /* Avoid the 8 pixel erratum */
309 if (region->width == 8 || region->height == 8 ||
310 (info->flags & FBINFO_HWACCEL_DISABLED))
311 return cfb_copyarea(info, region);
312
313 psbfb_copyarea_accel(info, region);
314}
315
316/**
317 * psbfb_sync - synchronize 2D
318 * @info: our framebuffer
319 *
320 * Wait for the 2D engine to quiesce so that we can do CPU
321 * access to the framebuffer again
322 */
323int psbfb_sync(struct fb_info *info)
324{
325 struct psb_fbdev *fbdev = info->par;
326 struct psb_framebuffer *psbfb = &fbdev->pfb;
327 struct drm_device *dev = psbfb->base.dev;
328 struct drm_psb_private *dev_priv = dev->dev_private;
329 unsigned long _end = jiffies + DRM_HZ;
330 int busy = 0;
331 unsigned long flags;
332
333 spin_lock_irqsave(&dev_priv->lock_2d, flags);
334 /*
335 * First idle the 2D engine.
336 */
337
338 if ((PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) &&
339 ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & _PSB_C2B_STATUS_BUSY) == 0))
340 goto out;
341
342 do {
343 busy = (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY);
344 cpu_relax();
345 } while (busy && !time_after_eq(jiffies, _end));
346
347 if (busy)
348 busy = (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY);
349 if (busy)
350 goto out;
351
352 do {
353 busy = ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
354 _PSB_C2B_STATUS_BUSY) != 0);
355 cpu_relax();
356 } while (busy && !time_after_eq(jiffies, _end));
357 if (busy)
358 busy = ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
359 _PSB_C2B_STATUS_BUSY) != 0);
360
361out:
362 spin_unlock_irqrestore(&dev_priv->lock_2d, flags);
363 return (busy) ? -EBUSY : 0;
364}
diff --git a/drivers/gpu/drm/gma500/backlight.c b/drivers/gpu/drm/gma500/backlight.c
new file mode 100644
index 00000000000..20793951fca
--- /dev/null
+++ b/drivers/gpu/drm/gma500/backlight.c
@@ -0,0 +1,49 @@
1/*
2 * GMA500 Backlight Interface
3 *
4 * Copyright (c) 2009-2011, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Authors: Eric Knopp
20 *
21 */
22
23#include "psb_drv.h"
24#include "psb_intel_reg.h"
25#include "psb_intel_drv.h"
26#include "intel_bios.h"
27#include "power.h"
28
29int gma_backlight_init(struct drm_device *dev)
30{
31#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
32 struct drm_psb_private *dev_priv = dev->dev_private;
33 return dev_priv->ops->backlight_init(dev);
34#else
35 return 0;
36#endif
37}
38
39void gma_backlight_exit(struct drm_device *dev)
40{
41#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
42 struct drm_psb_private *dev_priv = dev->dev_private;
43 if (dev_priv->backlight_device) {
44 dev_priv->backlight_device->props.brightness = 0;
45 backlight_update_status(dev_priv->backlight_device);
46 backlight_device_unregister(dev_priv->backlight_device);
47 }
48#endif
49}
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
new file mode 100644
index 00000000000..4a5b099c3bc
--- /dev/null
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -0,0 +1,351 @@
1/**************************************************************************
2 * Copyright (c) 2011, Intel Corporation.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 **************************************************************************/
19
20#include <linux/backlight.h>
21#include <drm/drmP.h>
22#include <drm/drm.h>
23#include "gma_drm.h"
24#include "psb_drv.h"
25#include "psb_reg.h"
26#include "psb_intel_reg.h"
27#include "intel_bios.h"
28#include "cdv_device.h"
29
30#define VGA_SR_INDEX 0x3c4
31#define VGA_SR_DATA 0x3c5
32
33static void cdv_disable_vga(struct drm_device *dev)
34{
35 u8 sr1;
36 u32 vga_reg;
37
38 vga_reg = VGACNTRL;
39
40 outb(1, VGA_SR_INDEX);
41 sr1 = inb(VGA_SR_DATA);
42 outb(sr1 | 1<<5, VGA_SR_DATA);
43 udelay(300);
44
45 REG_WRITE(vga_reg, VGA_DISP_DISABLE);
46 REG_READ(vga_reg);
47}
48
49static int cdv_output_init(struct drm_device *dev)
50{
51 struct drm_psb_private *dev_priv = dev->dev_private;
52 cdv_disable_vga(dev);
53
54 cdv_intel_crt_init(dev, &dev_priv->mode_dev);
55 cdv_intel_lvds_init(dev, &dev_priv->mode_dev);
56
57 /* These bits indicate HDMI not SDVO on CDV, but we don't yet support
58 the HDMI interface */
59 if (REG_READ(SDVOB) & SDVO_DETECTED)
60 cdv_hdmi_init(dev, &dev_priv->mode_dev, SDVOB);
61 if (REG_READ(SDVOC) & SDVO_DETECTED)
62 cdv_hdmi_init(dev, &dev_priv->mode_dev, SDVOC);
63 return 0;
64}
65
66#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
67
68/*
69 * Poulsbo Backlight Interfaces
70 */
71
72#define BLC_PWM_PRECISION_FACTOR 100 /* 10000000 */
73#define BLC_PWM_FREQ_CALC_CONSTANT 32
74#define MHz 1000000
75
76#define PSB_BLC_PWM_PRECISION_FACTOR 10
77#define PSB_BLC_MAX_PWM_REG_FREQ 0xFFFE
78#define PSB_BLC_MIN_PWM_REG_FREQ 0x2
79
80#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
81#define PSB_BACKLIGHT_PWM_CTL_SHIFT (16)
82
83static int cdv_brightness;
84static struct backlight_device *cdv_backlight_device;
85
86static int cdv_get_brightness(struct backlight_device *bd)
87{
88 /* return locally cached var instead of HW read (due to DPST etc.) */
89 /* FIXME: ideally return actual value in case firmware fiddled with
90 it */
91 return cdv_brightness;
92}
93
94
95static int cdv_backlight_setup(struct drm_device *dev)
96{
97 struct drm_psb_private *dev_priv = dev->dev_private;
98 unsigned long core_clock;
99 /* u32 bl_max_freq; */
100 /* unsigned long value; */
101 u16 bl_max_freq;
102 uint32_t value;
103 uint32_t blc_pwm_precision_factor;
104
105 /* get bl_max_freq and pol from dev_priv*/
106 if (!dev_priv->lvds_bl) {
107 dev_err(dev->dev, "No valid LVDS backlight info\n");
108 return -ENOENT;
109 }
110 bl_max_freq = dev_priv->lvds_bl->freq;
111 blc_pwm_precision_factor = PSB_BLC_PWM_PRECISION_FACTOR;
112
113 core_clock = dev_priv->core_freq;
114
115 value = (core_clock * MHz) / BLC_PWM_FREQ_CALC_CONSTANT;
116 value *= blc_pwm_precision_factor;
117 value /= bl_max_freq;
118 value /= blc_pwm_precision_factor;
119
120 if (value > PSB_BLC_MAX_PWM_REG_FREQ ||
121     value < PSB_BLC_MIN_PWM_REG_FREQ)
122 return -ERANGE;
123
124 /* FIXME: actually program the PWM register with the computed value */
125
126 return 0;
127}
128
129static int cdv_set_brightness(struct backlight_device *bd)
130{
131 int level = bd->props.brightness;
132
133 /* Percentage 1-100% being valid */
134 if (level < 1)
135 level = 1;
136
137 /*cdv_intel_lvds_set_brightness(dev, level); FIXME */
138 cdv_brightness = level;
139 return 0;
140}
141
142static const struct backlight_ops cdv_ops = {
143 .get_brightness = cdv_get_brightness,
144 .update_status = cdv_set_brightness,
145};
146
147static int cdv_backlight_init(struct drm_device *dev)
148{
149 struct drm_psb_private *dev_priv = dev->dev_private;
150 int ret;
151 struct backlight_properties props;
152
153 memset(&props, 0, sizeof(struct backlight_properties));
154 props.max_brightness = 100;
155 props.type = BACKLIGHT_PLATFORM;
156
157 cdv_backlight_device = backlight_device_register("psb-bl",
158 NULL, (void *)dev, &cdv_ops, &props);
159 if (IS_ERR(cdv_backlight_device))
160 return PTR_ERR(cdv_backlight_device);
161
162 ret = cdv_backlight_setup(dev);
163 if (ret < 0) {
164 backlight_device_unregister(cdv_backlight_device);
165 cdv_backlight_device = NULL;
166 return ret;
167 }
168 cdv_backlight_device->props.brightness = 100;
169 cdv_backlight_device->props.max_brightness = 100;
170 backlight_update_status(cdv_backlight_device);
171 dev_priv->backlight_device = cdv_backlight_device;
172 return 0;
173}
174
175#endif
176
177/*
178 * Provide the Cedarview specific chip logic and low level methods
179 * for power management
180 *
181 * FIXME: we need to implement the apm/ospm base management bits
182 * for this and the MID devices.
183 */
184
185static inline u32 CDV_MSG_READ32(uint port, uint offset)
186{
187 int mcr = (0x10<<24) | (port << 16) | (offset << 8);
188 uint32_t ret_val = 0;
189 struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
190 pci_write_config_dword(pci_root, 0xD0, mcr);
191 pci_read_config_dword(pci_root, 0xD4, &ret_val);
192 pci_dev_put(pci_root);
193 return ret_val;
194}
195
196static inline void CDV_MSG_WRITE32(uint port, uint offset, u32 value)
197{
198 int mcr = (0x11<<24) | (port << 16) | (offset << 8) | 0xF0;
199 struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
200 pci_write_config_dword(pci_root, 0xD4, value);
201 pci_write_config_dword(pci_root, 0xD0, mcr);
202 pci_dev_put(pci_root);
203}
204
205#define PSB_APM_CMD 0x0
206#define PSB_APM_STS 0x04
207#define PSB_PM_SSC 0x20
208#define PSB_PM_SSS 0x30
209#define PSB_PWRGT_GFX_MASK 0x3
210#define CDV_PWRGT_DISPLAY_CNTR 0x000fc00c
211#define CDV_PWRGT_DISPLAY_STS 0x000fc00c
212
213static void cdv_init_pm(struct drm_device *dev)
214{
215 struct drm_psb_private *dev_priv = dev->dev_private;
216 u32 pwr_cnt;
217 int i;
218
219 dev_priv->apm_base = CDV_MSG_READ32(PSB_PUNIT_PORT,
220 PSB_APMBA) & 0xFFFF;
221 dev_priv->ospm_base = CDV_MSG_READ32(PSB_PUNIT_PORT,
222 PSB_OSPMBA) & 0xFFFF;
223
224 /* Force power on for now */
225 pwr_cnt = inl(dev_priv->apm_base + PSB_APM_CMD);
226 pwr_cnt &= ~PSB_PWRGT_GFX_MASK;
227
228 outl(pwr_cnt, dev_priv->apm_base + PSB_APM_CMD);
229 for (i = 0; i < 5; i++) {
230 u32 pwr_sts = inl(dev_priv->apm_base + PSB_APM_STS);
231 if ((pwr_sts & PSB_PWRGT_GFX_MASK) == 0)
232 break;
233 udelay(10);
234 }
235 pwr_cnt = inl(dev_priv->ospm_base + PSB_PM_SSC);
236 pwr_cnt &= ~CDV_PWRGT_DISPLAY_CNTR;
237 outl(pwr_cnt, dev_priv->ospm_base + PSB_PM_SSC);
238 for (i = 0; i < 5; i++) {
239 u32 pwr_sts = inl(dev_priv->ospm_base + PSB_PM_SSS);
240 if ((pwr_sts & CDV_PWRGT_DISPLAY_STS) == 0)
241 break;
242 udelay(10);
243 }
244}
245
246/**
247 * cdv_save_display_registers - save registers lost on suspend
248 * @dev: our DRM device
249 *
250 * Save the state we need in order to be able to restore the interface
251 * upon resume from suspend
252 *
253 * FIXME: review
254 */
255static int cdv_save_display_registers(struct drm_device *dev)
256{
257 return 0;
258}
259
260/**
261 * cdv_restore_display_registers - restore lost register state
262 * @dev: our DRM device
263 *
264 * Restore register state that was lost during suspend and resume.
265 *
266 * FIXME: review
267 */
268static int cdv_restore_display_registers(struct drm_device *dev)
269{
270 return 0;
271}
272
273static int cdv_power_down(struct drm_device *dev)
274{
275 return 0;
276}
277
278static int cdv_power_up(struct drm_device *dev)
279{
280 return 0;
281}
282
283/* FIXME ? - shared with Poulsbo */
284static void cdv_get_core_freq(struct drm_device *dev)
285{
286 uint32_t clock;
287 struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
288 struct drm_psb_private *dev_priv = dev->dev_private;
289
290 pci_write_config_dword(pci_root, 0xD0, 0xD0050300);
291 pci_read_config_dword(pci_root, 0xD4, &clock);
292 pci_dev_put(pci_root);
293
294 switch (clock & 0x07) {
295 case 0:
296 dev_priv->core_freq = 100;
297 break;
298 case 1:
299 dev_priv->core_freq = 133;
300 break;
301 case 2:
302 dev_priv->core_freq = 150;
303 break;
304 case 3:
305 dev_priv->core_freq = 178;
306 break;
307 case 4:
308 dev_priv->core_freq = 200;
309 break;
310 case 5:
311 case 6:
312 case 7:
313 dev_priv->core_freq = 266;
314 break;
315 default:
316 dev_priv->core_freq = 0;
317 }
317}
318
319static int cdv_chip_setup(struct drm_device *dev)
320{
321 cdv_get_core_freq(dev);
322 gma_intel_opregion_init(dev);
323 psb_intel_init_bios(dev);
324 return 0;
325}
326
327/* CDV is much like Poulsbo but has MID like SGX offsets and PM */
328
329const struct psb_ops cdv_chip_ops = {
330 .name = "GMA3600/3650",
331 .accel_2d = 0,
332 .pipes = 2,
333 .crtcs = 2,
334 .sgx_offset = MRST_SGX_OFFSET,
335 .chip_setup = cdv_chip_setup,
336
337 .crtc_helper = &cdv_intel_helper_funcs,
338 .crtc_funcs = &cdv_intel_crtc_funcs,
339
340 .output_init = cdv_output_init,
341
342#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
343 .backlight_init = cdv_backlight_init,
344#endif
345
346 .init_pm = cdv_init_pm,
347 .save_regs = cdv_save_display_registers,
348 .restore_regs = cdv_restore_display_registers,
349 .power_down = cdv_power_down,
350 .power_up = cdv_power_up,
351};
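[Editorial note: the psb_ops table above is how the shared GMA500 core stays chip-agnostic — the core never calls Cedarview code directly, it dispatches through whichever ops table the PCI probe selected. A minimal sketch of that dispatch pattern follows; the helper name gma_setup_chip is hypothetical, but dev_priv->ops, ->init_pm and ->chip_setup are the fields the patch itself defines and uses.]

	/* Hypothetical core-side sketch: dispatch through the selected ops table */
	static int gma_setup_chip(struct drm_device *dev, const struct psb_ops *ops)
	{
		struct drm_psb_private *dev_priv = dev->dev_private;

		dev_priv->ops = ops;		/* e.g. &cdv_chip_ops on Cedarview */
		ops->init_pm(dev);		/* chip-specific power setup */
		return ops->chip_setup(dev);	/* clock/BIOS/opregion probing */
	}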
diff --git a/drivers/gpu/drm/gma500/cdv_device.h b/drivers/gpu/drm/gma500/cdv_device.h
new file mode 100644
index 00000000000..2a88b7beb55
--- /dev/null
+++ b/drivers/gpu/drm/gma500/cdv_device.h
@@ -0,0 +1,36 @@
1/*
2 * Copyright © 2011 Intel Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 */
17
18extern const struct drm_crtc_helper_funcs cdv_intel_helper_funcs;
19extern const struct drm_crtc_funcs cdv_intel_crtc_funcs;
20extern void cdv_intel_crt_init(struct drm_device *dev,
21 struct psb_intel_mode_device *mode_dev);
22extern void cdv_intel_lvds_init(struct drm_device *dev,
23 struct psb_intel_mode_device *mode_dev);
24extern void cdv_hdmi_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev,
25 int reg);
26extern struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev,
27 struct drm_crtc *crtc);
28
29static inline void cdv_intel_wait_for_vblank(struct drm_device *dev)
30{
31 /* Wait for 20ms, i.e. one cycle at 50Hz. */
32 /* FIXME: msleep ?? */
33 mdelay(20);
34}
35
36
diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c
new file mode 100644
index 00000000000..6d0f10b7569
--- /dev/null
+++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c
@@ -0,0 +1,333 @@
1/*
2 * Copyright © 2006-2007 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 */
26
27#include <linux/i2c.h>
28#include <drm/drmP.h>
29
30#include "intel_bios.h"
31#include "psb_drv.h"
32#include "psb_intel_drv.h"
33#include "psb_intel_reg.h"
34#include "power.h"
35#include <linux/pm_runtime.h>
36
37
38static void cdv_intel_crt_dpms(struct drm_encoder *encoder, int mode)
39{
40 struct drm_device *dev = encoder->dev;
41 u32 temp, reg;
42 reg = ADPA;
43
44 temp = REG_READ(reg);
45 temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
46 temp &= ~ADPA_DAC_ENABLE;
47
48 switch (mode) {
49 case DRM_MODE_DPMS_ON:
50 temp |= ADPA_DAC_ENABLE;
51 break;
52 case DRM_MODE_DPMS_STANDBY:
53 temp |= ADPA_DAC_ENABLE | ADPA_HSYNC_CNTL_DISABLE;
54 break;
55 case DRM_MODE_DPMS_SUSPEND:
56 temp |= ADPA_DAC_ENABLE | ADPA_VSYNC_CNTL_DISABLE;
57 break;
58 case DRM_MODE_DPMS_OFF:
59 temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE;
60 break;
61 }
62
63 REG_WRITE(reg, temp);
64}
65
66static int cdv_intel_crt_mode_valid(struct drm_connector *connector,
67 struct drm_display_mode *mode)
68{
69 int max_clock = 0;
70 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
71 return MODE_NO_DBLESCAN;
72
73 /* The lowest clock for CDV is 20000 kHz (20 MHz) */
74 if (mode->clock < 20000)
75 return MODE_CLOCK_LOW;
76
77 /* The max clock for CDV is 355 MHz instead of 400 MHz */
78 max_clock = 355000;
79 if (mode->clock > max_clock)
80 return MODE_CLOCK_HIGH;
81
82 if (mode->hdisplay > 1680 || mode->vdisplay > 1050)
83 return MODE_PANEL;
84
85 return MODE_OK;
86}
87
88static bool cdv_intel_crt_mode_fixup(struct drm_encoder *encoder,
89 struct drm_display_mode *mode,
90 struct drm_display_mode *adjusted_mode)
91{
92 return true;
93}
94
95static void cdv_intel_crt_mode_set(struct drm_encoder *encoder,
96 struct drm_display_mode *mode,
97 struct drm_display_mode *adjusted_mode)
98{
99
100 struct drm_device *dev = encoder->dev;
101 struct drm_crtc *crtc = encoder->crtc;
102 struct psb_intel_crtc *psb_intel_crtc =
103 to_psb_intel_crtc(crtc);
104 int dpll_md_reg;
105 u32 adpa, dpll_md;
106 u32 adpa_reg;
107
108 if (psb_intel_crtc->pipe == 0)
109 dpll_md_reg = DPLL_A_MD;
110 else
111 dpll_md_reg = DPLL_B_MD;
112
113 adpa_reg = ADPA;
114
115 /*
116 * Disable separate mode multiplier used when cloning SDVO to CRT
117 * XXX this needs to be adjusted when we really are cloning
118 */
119 {
120 dpll_md = REG_READ(dpll_md_reg);
121 REG_WRITE(dpll_md_reg,
122 dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
123 }
124
125 adpa = 0;
126 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
127 adpa |= ADPA_HSYNC_ACTIVE_HIGH;
128 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
129 adpa |= ADPA_VSYNC_ACTIVE_HIGH;
130
131 if (psb_intel_crtc->pipe == 0)
132 adpa |= ADPA_PIPE_A_SELECT;
133 else
134 adpa |= ADPA_PIPE_B_SELECT;
135
136 REG_WRITE(adpa_reg, adpa);
137}
138
139
140/**
141 * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect CRT presence.
142 *
143 * Returns true if a CRT is connected,
144 * false if it is disconnected.
145 */
146static bool cdv_intel_crt_detect_hotplug(struct drm_connector *connector,
147 bool force)
148{
149 struct drm_device *dev = connector->dev;
150 u32 hotplug_en;
151 int i, tries = 0, ret = false;
152 u32 adpa_orig;
153
154 /* disable the DAC when doing the hotplug detection */
155
156 adpa_orig = REG_READ(ADPA);
157
158 REG_WRITE(ADPA, adpa_orig & ~(ADPA_DAC_ENABLE));
159
160 /*
161 * On CDV, the CRT detect sequence needs to be done twice
162 * to get a reliable result.
163 */
164 tries = 2;
165
166 hotplug_en = REG_READ(PORT_HOTPLUG_EN);
167 hotplug_en &= ~(CRT_HOTPLUG_DETECT_MASK);
168 hotplug_en |= CRT_HOTPLUG_FORCE_DETECT;
169
170 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
171 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
172
173 for (i = 0; i < tries ; i++) {
174 unsigned long timeout;
175 /* turn on the FORCE_DETECT */
176 REG_WRITE(PORT_HOTPLUG_EN, hotplug_en);
177 timeout = jiffies + msecs_to_jiffies(1000);
178 /* wait for FORCE_DETECT to go off */
179 do {
180 if (!(REG_READ(PORT_HOTPLUG_EN) &
181 CRT_HOTPLUG_FORCE_DETECT))
182 break;
183 msleep(1);
184 } while (time_after(timeout, jiffies));
185 }
186
187 if ((REG_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) !=
188 CRT_HOTPLUG_MONITOR_NONE)
189 ret = true;
190
191 /* Restore the saved ADPA */
192 REG_WRITE(ADPA, adpa_orig);
193 return ret;
194}
195
196static enum drm_connector_status cdv_intel_crt_detect(
197 struct drm_connector *connector, bool force)
198{
199 if (cdv_intel_crt_detect_hotplug(connector, force))
200 return connector_status_connected;
201 else
202 return connector_status_disconnected;
203}
204
205static void cdv_intel_crt_destroy(struct drm_connector *connector)
206{
207 struct psb_intel_encoder *psb_intel_encoder =
208 psb_intel_attached_encoder(connector);
209
210 psb_intel_i2c_destroy(psb_intel_encoder->ddc_bus);
211 drm_sysfs_connector_remove(connector);
212 drm_connector_cleanup(connector);
213 kfree(connector);
214}
215
216static int cdv_intel_crt_get_modes(struct drm_connector *connector)
217{
218 struct psb_intel_encoder *psb_intel_encoder =
219 psb_intel_attached_encoder(connector);
220 return psb_intel_ddc_get_modes(connector, &psb_intel_encoder->ddc_bus->adapter);
221}
222
223static int cdv_intel_crt_set_property(struct drm_connector *connector,
224 struct drm_property *property,
225 uint64_t value)
226{
227 return 0;
228}
229
230/*
231 * Routines for controlling stuff on the analog port
232 */
233
234static const struct drm_encoder_helper_funcs cdv_intel_crt_helper_funcs = {
235 .dpms = cdv_intel_crt_dpms,
236 .mode_fixup = cdv_intel_crt_mode_fixup,
237 .prepare = psb_intel_encoder_prepare,
238 .commit = psb_intel_encoder_commit,
239 .mode_set = cdv_intel_crt_mode_set,
240};
241
242static const struct drm_connector_funcs cdv_intel_crt_connector_funcs = {
243 .dpms = drm_helper_connector_dpms,
244 .detect = cdv_intel_crt_detect,
245 .fill_modes = drm_helper_probe_single_connector_modes,
246 .destroy = cdv_intel_crt_destroy,
247 .set_property = cdv_intel_crt_set_property,
248};
249
250static const struct drm_connector_helper_funcs
251 cdv_intel_crt_connector_helper_funcs = {
252 .mode_valid = cdv_intel_crt_mode_valid,
253 .get_modes = cdv_intel_crt_get_modes,
254 .best_encoder = psb_intel_best_encoder,
255};
256
257static void cdv_intel_crt_enc_destroy(struct drm_encoder *encoder)
258{
259 drm_encoder_cleanup(encoder);
260}
261
262static const struct drm_encoder_funcs cdv_intel_crt_enc_funcs = {
263 .destroy = cdv_intel_crt_enc_destroy,
264};
265
266void cdv_intel_crt_init(struct drm_device *dev,
267 struct psb_intel_mode_device *mode_dev)
268{
269
270 struct psb_intel_connector *psb_intel_connector;
271 struct psb_intel_encoder *psb_intel_encoder;
272 struct drm_connector *connector;
273 struct drm_encoder *encoder;
274
275 u32 i2c_reg;
276
277 psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL);
278 if (!psb_intel_encoder)
279 return;
280
281 psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL);
282 if (!psb_intel_connector)
283 goto failed_connector;
284
285 connector = &psb_intel_connector->base;
286 drm_connector_init(dev, connector,
287 &cdv_intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
288
289 encoder = &psb_intel_encoder->base;
290 drm_encoder_init(dev, encoder,
291 &cdv_intel_crt_enc_funcs, DRM_MODE_ENCODER_DAC);
292
293 psb_intel_connector_attach_encoder(psb_intel_connector,
294 psb_intel_encoder);
295
296 /* Set up the DDC bus. */
297 i2c_reg = GPIOA;
298 /* Remove the following code for CDV */
299 /*
300 if (dev_priv->crt_ddc_bus != 0)
301 i2c_reg = dev_priv->crt_ddc_bus;
302 }*/
303 psb_intel_encoder->ddc_bus = psb_intel_i2c_create(dev,
304 i2c_reg, "CRTDDC_A");
305 if (!psb_intel_encoder->ddc_bus) {
306 dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
307 "failed.\n");
308 goto failed_ddc;
309 }
310
311 psb_intel_encoder->type = INTEL_OUTPUT_ANALOG;
312 /*
313 psb_intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT);
314 psb_intel_output->crtc_mask = (1 << 0) | (1 << 1);
315 */
316 connector->interlace_allowed = 0;
317 connector->doublescan_allowed = 0;
318
319 drm_encoder_helper_add(encoder, &cdv_intel_crt_helper_funcs);
320 drm_connector_helper_add(connector,
321 &cdv_intel_crt_connector_helper_funcs);
322
323 drm_sysfs_connector_add(connector);
324
325 return;
326failed_ddc:
327 drm_encoder_cleanup(&psb_intel_encoder->base);
328 drm_connector_cleanup(&psb_intel_connector->base);
329 kfree(psb_intel_connector);
330failed_connector:
331 kfree(psb_intel_encoder);
332 return;
333}
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
new file mode 100644
index 00000000000..18d11525095
--- /dev/null
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -0,0 +1,1508 @@
1/*
2 * Copyright © 2006-2011 Intel Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Authors:
18 * Eric Anholt <eric@anholt.net>
19 */
20
21#include <linux/i2c.h>
22#include <linux/pm_runtime.h>
23
24#include <drm/drmP.h>
25#include "framebuffer.h"
26#include "psb_drv.h"
27#include "psb_intel_drv.h"
28#include "psb_intel_reg.h"
29#include "psb_intel_display.h"
30#include "power.h"
31#include "cdv_device.h"
32
33
34struct cdv_intel_range_t {
35 int min, max;
36};
37
38struct cdv_intel_p2_t {
39 int dot_limit;
40 int p2_slow, p2_fast;
41};
42
43struct cdv_intel_clock_t {
44 /* given values */
45 int n;
46 int m1, m2;
47 int p1, p2;
48 /* derived values */
49 int dot;
50 int vco;
51 int m;
52 int p;
53};
54
55#define INTEL_P2_NUM 2
56
57struct cdv_intel_limit_t {
58 struct cdv_intel_range_t dot, vco, n, m, m1, m2, p, p1;
59 struct cdv_intel_p2_t p2;
60};
61
62#define CDV_LIMIT_SINGLE_LVDS_96 0
63#define CDV_LIMIT_SINGLE_LVDS_100 1
64#define CDV_LIMIT_DAC_HDMI_27 2
65#define CDV_LIMIT_DAC_HDMI_96 3
66
67static const struct cdv_intel_limit_t cdv_intel_limits[] = {
68 { /* CDV_SINGLE_LVDS_96MHz */
69 .dot = {.min = 20000, .max = 115500},
70 .vco = {.min = 1800000, .max = 3600000},
71 .n = {.min = 2, .max = 6},
72 .m = {.min = 60, .max = 160},
73 .m1 = {.min = 0, .max = 0},
74 .m2 = {.min = 58, .max = 158},
75 .p = {.min = 28, .max = 140},
76 .p1 = {.min = 2, .max = 10},
77 .p2 = {.dot_limit = 200000,
78 .p2_slow = 14, .p2_fast = 14},
79 },
80 { /* CDV_SINGLE_LVDS_100MHz */
81 .dot = {.min = 20000, .max = 115500},
82 .vco = {.min = 1800000, .max = 3600000},
83 .n = {.min = 2, .max = 6},
84 .m = {.min = 60, .max = 160},
85 .m1 = {.min = 0, .max = 0},
86 .m2 = {.min = 58, .max = 158},
87 .p = {.min = 28, .max = 140},
88 .p1 = {.min = 2, .max = 10},
89 /* The single-channel range is 25-112MHz, and dual-channel
90 * is 80-224MHz. Prefer single channel as much as possible.
91 */
92 .p2 = {.dot_limit = 200000, .p2_slow = 14, .p2_fast = 14},
93 },
94 { /* CDV_DAC_HDMI_27MHz */
95 .dot = {.min = 20000, .max = 400000},
96 .vco = {.min = 1809000, .max = 3564000},
97 .n = {.min = 1, .max = 1},
98 .m = {.min = 67, .max = 132},
99 .m1 = {.min = 0, .max = 0},
100 .m2 = {.min = 65, .max = 130},
101 .p = {.min = 5, .max = 90},
102 .p1 = {.min = 1, .max = 9},
103 .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5},
104 },
105 { /* CDV_DAC_HDMI_96MHz */
106 .dot = {.min = 20000, .max = 400000},
107 .vco = {.min = 1800000, .max = 3600000},
108 .n = {.min = 2, .max = 6},
109 .m = {.min = 60, .max = 160},
110 .m1 = {.min = 0, .max = 0},
111 .m2 = {.min = 58, .max = 158},
112 .p = {.min = 5, .max = 100},
113 .p1 = {.min = 1, .max = 10},
114 .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5},
115 },
116};
117
118#define _wait_for(COND, MS, W) ({ \
119 unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \
120 int ret__ = 0; \
121 while (!(COND)) { \
122 if (time_after(jiffies, timeout__)) { \
123 ret__ = -ETIMEDOUT; \
124 break; \
125 } \
126 if (W && !in_dbg_master()) \
127 msleep(W); \
128 } \
129 ret__; \
130})
131
132#define wait_for(COND, MS) _wait_for(COND, MS, 1)
133
134
135static int cdv_sb_read(struct drm_device *dev, u32 reg, u32 *val)
136{
137 int ret;
138
139 ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
140 if (ret) {
141 DRM_ERROR("timeout waiting for SB to idle before read\n");
142 return ret;
143 }
144
145 REG_WRITE(SB_ADDR, reg);
146 REG_WRITE(SB_PCKT,
147 SET_FIELD(SB_OPCODE_READ, SB_OPCODE) |
148 SET_FIELD(SB_DEST_DPLL, SB_DEST) |
149 SET_FIELD(0xf, SB_BYTE_ENABLE));
150
151 ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
152 if (ret) {
153 DRM_ERROR("timeout waiting for SB to idle after read\n");
154 return ret;
155 }
156
157 *val = REG_READ(SB_DATA);
158
159 return 0;
160}
161
162static int cdv_sb_write(struct drm_device *dev, u32 reg, u32 val)
163{
164 int ret;
165 static bool dpio_debug = true;
166 u32 temp;
167
168 if (dpio_debug) {
169 if (cdv_sb_read(dev, reg, &temp) == 0)
170 DRM_DEBUG_KMS("0x%08x: 0x%08x (before)\n", reg, temp);
171 DRM_DEBUG_KMS("0x%08x: 0x%08x\n", reg, val);
172 }
173
174 ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
175 if (ret) {
176 DRM_ERROR("timeout waiting for SB to idle before write\n");
177 return ret;
178 }
179
180 REG_WRITE(SB_ADDR, reg);
181 REG_WRITE(SB_DATA, val);
182 REG_WRITE(SB_PCKT,
183 SET_FIELD(SB_OPCODE_WRITE, SB_OPCODE) |
184 SET_FIELD(SB_DEST_DPLL, SB_DEST) |
185 SET_FIELD(0xf, SB_BYTE_ENABLE));
186
187 ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
188 if (ret) {
189 DRM_ERROR("timeout waiting for SB to idle after write\n");
190 return ret;
191 }
192
193 if (dpio_debug) {
194 if (cdv_sb_read(dev, reg, &temp) == 0)
195 DRM_DEBUG_KMS("0x%08x: 0x%08x (after)\n", reg, temp);
196 }
197
198 return 0;
199}
200
201/* Reset the DPIO configuration register. The BIOS does this at every
202 * mode set.
203 */
204static void cdv_sb_reset(struct drm_device *dev)
205{
206
207 REG_WRITE(DPIO_CFG, 0);
208 REG_READ(DPIO_CFG);
209 REG_WRITE(DPIO_CFG, DPIO_MODE_SELECT_0 | DPIO_CMN_RESET_N);
210}
211
212/* Unlike most Intel display engines, on Cedarview the DPLL registers
213 * are behind this sideband bus. They must be programmed while the
214 * DPLL reference clock is on in the DPLL control register, but before
215 * the DPLL is enabled in the DPLL control register.
216 */
217static int
218cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
219 struct cdv_intel_clock_t *clock)
220{
221 struct psb_intel_crtc *psb_crtc =
222 to_psb_intel_crtc(crtc);
223 int pipe = psb_crtc->pipe;
224 u32 m, n_vco, p;
225 int ret = 0;
226 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
227 u32 ref_value;
228
229 cdv_sb_reset(dev);
230
231 if ((REG_READ(dpll_reg) & DPLL_SYNCLOCK_ENABLE) == 0) {
232 DRM_ERROR("Attempting to set DPLL with refclk disabled\n");
233 return -EBUSY;
234 }
235
236 /* Follow the BIOS and write the REF/SFR Register. Hardcoded value */
237 ref_value = 0x68A701;
238
239 cdv_sb_write(dev, SB_REF_SFR(pipe), ref_value);
240
241 /* We don't know what the other fields of these regs are, so
242 * leave them in place.
243 */
244 ret = cdv_sb_read(dev, SB_M(pipe), &m);
245 if (ret)
246 return ret;
247 m &= ~SB_M_DIVIDER_MASK;
248 m |= ((clock->m2) << SB_M_DIVIDER_SHIFT);
249 ret = cdv_sb_write(dev, SB_M(pipe), m);
250 if (ret)
251 return ret;
252
253 ret = cdv_sb_read(dev, SB_N_VCO(pipe), &n_vco);
254 if (ret)
255 return ret;
256
257 /* Follow the BIOS to program the N_DIVIDER REG */
258 n_vco &= 0xFFFF;
259 n_vco |= 0x107;
260 n_vco &= ~(SB_N_VCO_SEL_MASK |
261 SB_N_DIVIDER_MASK |
262 SB_N_CB_TUNE_MASK);
263
264 n_vco |= ((clock->n) << SB_N_DIVIDER_SHIFT);
265
266 if (clock->vco < 2250000) {
267 n_vco |= (2 << SB_N_CB_TUNE_SHIFT);
268 n_vco |= (0 << SB_N_VCO_SEL_SHIFT);
269 } else if (clock->vco < 2750000) {
270 n_vco |= (1 << SB_N_CB_TUNE_SHIFT);
271 n_vco |= (1 << SB_N_VCO_SEL_SHIFT);
272 } else if (clock->vco < 3300000) {
273 n_vco |= (0 << SB_N_CB_TUNE_SHIFT);
274 n_vco |= (2 << SB_N_VCO_SEL_SHIFT);
275 } else {
276 n_vco |= (0 << SB_N_CB_TUNE_SHIFT);
277 n_vco |= (3 << SB_N_VCO_SEL_SHIFT);
278 }
279
280 ret = cdv_sb_write(dev, SB_N_VCO(pipe), n_vco);
281 if (ret)
282 return ret;
283
284 ret = cdv_sb_read(dev, SB_P(pipe), &p);
285 if (ret)
286 return ret;
287 p &= ~(SB_P2_DIVIDER_MASK | SB_P1_DIVIDER_MASK);
288 p |= SET_FIELD(clock->p1, SB_P1_DIVIDER);
289 switch (clock->p2) {
290 case 5:
291 p |= SET_FIELD(SB_P2_5, SB_P2_DIVIDER);
292 break;
293 case 10:
294 p |= SET_FIELD(SB_P2_10, SB_P2_DIVIDER);
295 break;
296 case 14:
297 p |= SET_FIELD(SB_P2_14, SB_P2_DIVIDER);
298 break;
299 case 7:
300 p |= SET_FIELD(SB_P2_7, SB_P2_DIVIDER);
301 break;
302 default:
303 DRM_ERROR("Bad P2 clock: %d\n", clock->p2);
304 return -EINVAL;
305 }
306 ret = cdv_sb_write(dev, SB_P(pipe), p);
307 if (ret)
308 return ret;
309
310 /* Always program the lane registers for pipe A */
311 if (pipe == 0) {
312 /* Program the Lane0/1 for HDMI B */
313 u32 lane_reg, lane_value;
314
315 lane_reg = PSB_LANE0;
316 cdv_sb_read(dev, lane_reg, &lane_value);
317 lane_value &= ~(LANE_PLL_MASK);
318 lane_value |= LANE_PLL_ENABLE;
319 cdv_sb_write(dev, lane_reg, lane_value);
320
321 lane_reg = PSB_LANE1;
322 cdv_sb_read(dev, lane_reg, &lane_value);
323 lane_value &= ~(LANE_PLL_MASK);
324 lane_value |= LANE_PLL_ENABLE;
325 cdv_sb_write(dev, lane_reg, lane_value);
326
327 /* Program the Lane2/3 for HDMI C */
328 lane_reg = PSB_LANE2;
329 cdv_sb_read(dev, lane_reg, &lane_value);
330 lane_value &= ~(LANE_PLL_MASK);
331 lane_value |= LANE_PLL_ENABLE;
332 cdv_sb_write(dev, lane_reg, lane_value);
333
334 lane_reg = PSB_LANE3;
335 cdv_sb_read(dev, lane_reg, &lane_value);
336 lane_value &= ~(LANE_PLL_MASK);
337 lane_value |= LANE_PLL_ENABLE;
338 cdv_sb_write(dev, lane_reg, lane_value);
339 }
340
341 return 0;
342}
343
344/*
345 * Returns whether any encoder on the specified pipe is of the specified type
346 */
347bool cdv_intel_pipe_has_type(struct drm_crtc *crtc, int type)
348{
349 struct drm_device *dev = crtc->dev;
350 struct drm_mode_config *mode_config = &dev->mode_config;
351 struct drm_connector *l_entry;
352
353 list_for_each_entry(l_entry, &mode_config->connector_list, head) {
354 if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
355 struct psb_intel_encoder *psb_intel_encoder =
356 psb_intel_attached_encoder(l_entry);
357 if (psb_intel_encoder->type == type)
358 return true;
359 }
360 }
361 return false;
362}
363
364static const struct cdv_intel_limit_t *cdv_intel_limit(struct drm_crtc *crtc,
365 int refclk)
366{
367 const struct cdv_intel_limit_t *limit;
368 if (cdv_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
369 /*
370 * Now only single-channel LVDS is supported on CDV. If it is
371 * incorrect, please add the dual-channel LVDS.
372 */
373 if (refclk == 96000)
374 limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_96];
375 else
376 limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_100];
377 } else {
378 if (refclk == 27000)
379 limit = &cdv_intel_limits[CDV_LIMIT_DAC_HDMI_27];
380 else
381 limit = &cdv_intel_limits[CDV_LIMIT_DAC_HDMI_96];
382 }
383 return limit;
384}
385
386/* m1 is reserved as 0 in CDV, n is a ring counter */
387static void cdv_intel_clock(struct drm_device *dev,
388 int refclk, struct cdv_intel_clock_t *clock)
389{
390 clock->m = clock->m2 + 2;
391 clock->p = clock->p1 * clock->p2;
392 clock->vco = (refclk * clock->m) / clock->n;
393 clock->dot = clock->vco / clock->p;
394}
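[Editorial note: a worked numeric example of the derived values, using illustrative parameters that sit inside the CDV_SINGLE_LVDS_96MHz limits table above; it is not taken from the patch.]

	/* Worked example against the 96MHz single-channel LVDS limits */
	struct cdv_intel_clock_t c = { .n = 2, .m2 = 58, .p1 = 2, .p2 = 14 };

	cdv_intel_clock(dev, 96000, &c);
	/* c.m   = 58 + 2         = 60                                   */
	/* c.p   = 2 * 14         = 28                                   */
	/* c.vco = 96000 * 60 / 2 = 2880000 kHz (within 1.8-3.6GHz vco)  */
	/* c.dot = 2880000 / 28   = 102857 kHz (~102.9MHz dot clock)     */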
395
396
397#define INTELPllInvalid(s) { /* ErrorF (s) */; return false; }
398static bool cdv_intel_PLL_is_valid(struct drm_crtc *crtc,
399 const struct cdv_intel_limit_t *limit,
400 struct cdv_intel_clock_t *clock)
401{
402 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
403 INTELPllInvalid("p1 out of range\n");
404 if (clock->p < limit->p.min || limit->p.max < clock->p)
405 INTELPllInvalid("p out of range\n");
406 /* unnecessary to check the range of m (m1/m2) and n again */
407 if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
408 INTELPllInvalid("vco out of range\n");
409 /* XXX: We may need to be checking "Dot clock"
410 * depending on the multiplier, connector, etc.,
411 * rather than just a single range.
412 */
413 if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
414 INTELPllInvalid("dot out of range\n");
415
416 return true;
417}
418
419static bool cdv_intel_find_best_PLL(struct drm_crtc *crtc, int target,
420 int refclk,
421 struct cdv_intel_clock_t *best_clock)
422{
423 struct drm_device *dev = crtc->dev;
424 struct cdv_intel_clock_t clock;
425 const struct cdv_intel_limit_t *limit = cdv_intel_limit(crtc, refclk);
426 int err = target;
427
428
429 if (cdv_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
430 (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
431 /*
432 * For LVDS, if the panel is on, just rely on its current
433 * settings for dual-channel. We haven't figured out how to
434 * reliably set up different single/dual channel state, if we
435 * even can.
436 */
437 if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
438 LVDS_CLKB_POWER_UP)
439 clock.p2 = limit->p2.p2_fast;
440 else
441 clock.p2 = limit->p2.p2_slow;
442 } else {
443 if (target < limit->p2.dot_limit)
444 clock.p2 = limit->p2.p2_slow;
445 else
446 clock.p2 = limit->p2.p2_fast;
447 }
448
449 memset(best_clock, 0, sizeof(*best_clock));
450 clock.m1 = 0;
451 /* m1 is reserved as 0 in CDV and n is a ring counter,
452 so skip the m1 loop */
453 for (clock.n = limit->n.min; clock.n <= limit->n.max; clock.n++) {
454 for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max;
455 clock.m2++) {
456 for (clock.p1 = limit->p1.min;
457 clock.p1 <= limit->p1.max;
458 clock.p1++) {
459 int this_err;
460
461 cdv_intel_clock(dev, refclk, &clock);
462
463 if (!cdv_intel_PLL_is_valid(crtc,
464 limit, &clock))
465 continue;
466
467 this_err = abs(clock.dot - target);
468 if (this_err < err) {
469 *best_clock = clock;
470 err = this_err;
471 }
472 }
473 }
474 }
475
476 return err != target;
477}
478
479int cdv_intel_pipe_set_base(struct drm_crtc *crtc,
480 int x, int y, struct drm_framebuffer *old_fb)
481{
482 struct drm_device *dev = crtc->dev;
483 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
484 struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
485 int pipe = psb_intel_crtc->pipe;
486 unsigned long start, offset;
487 int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE);
488 int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
489 int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
490 int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
491 u32 dspcntr;
492 int ret = 0;
493
494 if (!gma_power_begin(dev, true))
495 return 0;
496
497 /* no fb bound */
498 if (!crtc->fb) {
499 dev_err(dev->dev, "No FB bound\n");
500 goto psb_intel_pipe_cleaner;
501 }
502
503
504 /* We are displaying this buffer, make sure it is actually loaded
505 into the GTT */
506 ret = psb_gtt_pin(psbfb->gtt);
507 if (ret < 0)
508 goto psb_intel_pipe_set_base_exit;
509 start = psbfb->gtt->offset;
510 offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
511
512 REG_WRITE(dspstride, crtc->fb->pitches[0]);
513
514 dspcntr = REG_READ(dspcntr_reg);
515 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
516
517 switch (crtc->fb->bits_per_pixel) {
518 case 8:
519 dspcntr |= DISPPLANE_8BPP;
520 break;
521 case 16:
522 if (crtc->fb->depth == 15)
523 dspcntr |= DISPPLANE_15_16BPP;
524 else
525 dspcntr |= DISPPLANE_16BPP;
526 break;
527 case 24:
528 case 32:
529 dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
530 break;
531 default:
532 dev_err(dev->dev, "Unknown color depth\n");
533 ret = -EINVAL;
534 goto psb_intel_pipe_set_base_exit;
535 }
536 REG_WRITE(dspcntr_reg, dspcntr);
537
538 dev_dbg(dev->dev,
539 "Writing base %08lX %08lX %d %d\n", start, offset, x, y);
540
541 REG_WRITE(dspbase, offset);
542 REG_READ(dspbase);
543 REG_WRITE(dspsurf, start);
544 REG_READ(dspsurf);
545
546psb_intel_pipe_cleaner:
547 /* If there was a previous display we can now unpin it */
548 if (old_fb)
549 psb_gtt_unpin(to_psb_fb(old_fb)->gtt);
550
551psb_intel_pipe_set_base_exit:
552 gma_power_end(dev);
553 return ret;
554}
555
556/**
557 * Sets the power management mode of the pipe and plane.
558 *
559 * This code should probably grow support for turning the cursor off and back
560 * on appropriately at the same time as we're turning the pipe off/on.
561 */
562static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
563{
564 struct drm_device *dev = crtc->dev;
565 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
566 int pipe = psb_intel_crtc->pipe;
567 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
568 int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
569 int dspbase_reg = (pipe == 0) ? DSPABASE : DSPBBASE;
570 int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
571 u32 temp;
572 bool enabled;
573
574 /* XXX: When our outputs are all unaware of DPMS modes other than off
575 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
576 */
577 switch (mode) {
578 case DRM_MODE_DPMS_ON:
579 case DRM_MODE_DPMS_STANDBY:
580 case DRM_MODE_DPMS_SUSPEND:
581 /* Enable the DPLL */
582 temp = REG_READ(dpll_reg);
583 if ((temp & DPLL_VCO_ENABLE) == 0) {
584 REG_WRITE(dpll_reg, temp);
585 REG_READ(dpll_reg);
586 /* Wait for the clocks to stabilize. */
587 udelay(150);
588 REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
589 REG_READ(dpll_reg);
590 /* Wait for the clocks to stabilize. */
591 udelay(150);
592 REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
593 REG_READ(dpll_reg);
594 /* Wait for the clocks to stabilize. */
595 udelay(150);
596 }
597
598 /* Jim Bish - switch plane and pipe per Scott */
599 /* Enable the plane */
600 temp = REG_READ(dspcntr_reg);
601 if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
602 REG_WRITE(dspcntr_reg,
603 temp | DISPLAY_PLANE_ENABLE);
604 /* Flush the plane changes */
605 REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
606 }
607
608 udelay(150);
609
610 /* Enable the pipe */
611 temp = REG_READ(pipeconf_reg);
612 if ((temp & PIPEACONF_ENABLE) == 0)
613 REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
614
615 psb_intel_crtc_load_lut(crtc);
616
617 /* Give the overlay scaler a chance to enable
618 * if it's on this pipe */
619 /* psb_intel_crtc_dpms_video(crtc, true); TODO */
620 break;
621 case DRM_MODE_DPMS_OFF:
622 /* Give the overlay scaler a chance to disable
623 * if it's on this pipe */
624 /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
625
626 /* Disable the VGA plane that we never use */
627 REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
628
629 /* Jim Bish - changed pipe/plane here as well. */
630
631 /* Wait for vblank for the disable to take effect */
632 cdv_intel_wait_for_vblank(dev);
633
634 /* Next, disable display pipes */
635 temp = REG_READ(pipeconf_reg);
636 if ((temp & PIPEACONF_ENABLE) != 0) {
637 REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
638 REG_READ(pipeconf_reg);
639 }
640
641 /* Wait for vblank for the disable to take effect. */
642 cdv_intel_wait_for_vblank(dev);
643
644 udelay(150);
645
646 /* Disable display plane */
647 temp = REG_READ(dspcntr_reg);
648 if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
649 REG_WRITE(dspcntr_reg,
650 temp & ~DISPLAY_PLANE_ENABLE);
651 /* Flush the plane changes */
652 REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
653 REG_READ(dspbase_reg);
654 }
655
656 temp = REG_READ(dpll_reg);
657 if ((temp & DPLL_VCO_ENABLE) != 0) {
658 REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
659 REG_READ(dpll_reg);
660 }
661
662 /* Wait for the clocks to turn off. */
663 udelay(150);
664 break;
665 }
666 enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
667 /*Set FIFO Watermarks*/
668 REG_WRITE(DSPARB, 0x3F3E);
669}
670
671static void cdv_intel_crtc_prepare(struct drm_crtc *crtc)
672{
673 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
674 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
675}
676
677static void cdv_intel_crtc_commit(struct drm_crtc *crtc)
678{
679 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
680 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
681}
682
683void cdv_intel_encoder_prepare(struct drm_encoder *encoder)
684{
685 struct drm_encoder_helper_funcs *encoder_funcs =
686 encoder->helper_private;
687 /* lvds has its own version of prepare see cdv_intel_lvds_prepare */
688 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
689}
690
691void cdv_intel_encoder_commit(struct drm_encoder *encoder)
692{
693 struct drm_encoder_helper_funcs *encoder_funcs =
694 encoder->helper_private;
695 /* lvds has its own version of commit see cdv_intel_lvds_commit */
696 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
697}
698
699static bool cdv_intel_crtc_mode_fixup(struct drm_crtc *crtc,
700 struct drm_display_mode *mode,
701 struct drm_display_mode *adjusted_mode)
702{
703 return true;
704}
705
706
707/**
708 * Return the pipe currently connected to the panel fitter,
709 * or -1 if the panel fitter is not present or not in use
710 */
711static int cdv_intel_panel_fitter_pipe(struct drm_device *dev)
712{
713 u32 pfit_control;
714
715 pfit_control = REG_READ(PFIT_CONTROL);
716
717 /* See if the panel fitter is in use */
718 if ((pfit_control & PFIT_ENABLE) == 0)
719 return -1;
720 return (pfit_control >> 29) & 0x3;
721}
722
723static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
724 struct drm_display_mode *mode,
725 struct drm_display_mode *adjusted_mode,
726 int x, int y,
727 struct drm_framebuffer *old_fb)
728{
729 struct drm_device *dev = crtc->dev;
730 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
731 int pipe = psb_intel_crtc->pipe;
732 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
733 int dpll_md_reg = (psb_intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD;
734 int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
735 int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
736 int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
737 int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
738 int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
739 int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
740 int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
741 int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
742 int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
743 int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
744 int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
745 int refclk;
746 struct cdv_intel_clock_t clock;
747 u32 dpll = 0, dspcntr, pipeconf;
748 bool ok, is_sdvo = false, is_dvo = false;
749 bool is_crt = false, is_lvds = false, is_tv = false;
750 bool is_hdmi = false;
751 struct drm_mode_config *mode_config = &dev->mode_config;
752 struct drm_connector *connector;
753
754 list_for_each_entry(connector, &mode_config->connector_list, head) {
755 struct psb_intel_encoder *psb_intel_encoder =
756 psb_intel_attached_encoder(connector);
757
758 if (!connector->encoder
759 || connector->encoder->crtc != crtc)
760 continue;
761
762 switch (psb_intel_encoder->type) {
763 case INTEL_OUTPUT_LVDS:
764 is_lvds = true;
765 break;
766 case INTEL_OUTPUT_SDVO:
767 is_sdvo = true;
768 break;
769 case INTEL_OUTPUT_DVO:
770 is_dvo = true;
771 break;
772 case INTEL_OUTPUT_TVOUT:
773 is_tv = true;
774 break;
775 case INTEL_OUTPUT_ANALOG:
776 is_crt = true;
777 break;
778 case INTEL_OUTPUT_HDMI:
779 is_hdmi = true;
780 break;
781 }
782 }
783
784 refclk = 96000;
785
786	/* Hack: select 27MHz as the reference clock for CRT */
787	/* Select 27MHz as the reference clock for HDMI */
788 if (is_crt || is_hdmi)
789 refclk = 27000;
790
791 drm_mode_debug_printmodeline(adjusted_mode);
792
793 ok = cdv_intel_find_best_PLL(crtc, adjusted_mode->clock, refclk,
794 &clock);
795 if (!ok) {
796 dev_err(dev->dev, "Couldn't find PLL settings for mode!\n");
797 return 0;
798 }
799
800 dpll = DPLL_VGA_MODE_DIS;
801 if (is_tv) {
802 /* XXX: just matching BIOS for now */
803/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
804 dpll |= 3;
805 }
806 dpll |= PLL_REF_INPUT_DREFCLK;
807
808 dpll |= DPLL_SYNCLOCK_ENABLE;
809 dpll |= DPLL_VGA_MODE_DIS;
810 if (is_lvds)
811 dpll |= DPLLB_MODE_LVDS;
812 else
813 dpll |= DPLLB_MODE_DAC_SERIAL;
814 /* dpll |= (2 << 11); */
815
816 /* setup pipeconf */
817 pipeconf = REG_READ(pipeconf_reg);
818
819 /* Set up the display plane register */
820 dspcntr = DISPPLANE_GAMMA_ENABLE;
821
822 if (pipe == 0)
823 dspcntr |= DISPPLANE_SEL_PIPE_A;
824 else
825 dspcntr |= DISPPLANE_SEL_PIPE_B;
826
827 dspcntr |= DISPLAY_PLANE_ENABLE;
828 pipeconf |= PIPEACONF_ENABLE;
829
830 REG_WRITE(dpll_reg, dpll | DPLL_VGA_MODE_DIS | DPLL_SYNCLOCK_ENABLE);
831 REG_READ(dpll_reg);
832
833 cdv_dpll_set_clock_cdv(dev, crtc, &clock);
834
835 udelay(150);
836
837
838 /* The LVDS pin pair needs to be on before the DPLLs are enabled.
839 * This is an exception to the general rule that mode_set doesn't turn
840 * things on.
841 */
842 if (is_lvds) {
843 u32 lvds = REG_READ(LVDS);
844
845 lvds |=
846 LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP |
847 LVDS_PIPEB_SELECT;
848 /* Set the B0-B3 data pairs corresponding to
849 * whether we're going to
850 * set the DPLLs for dual-channel mode or not.
851 */
852 if (clock.p2 == 7)
853 lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
854 else
855 lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
856
857 /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
858 * appropriately here, but we need to look more
859 * thoroughly into how panels behave in the two modes.
860 */
861
862 REG_WRITE(LVDS, lvds);
863 REG_READ(LVDS);
864 }
865
866 dpll |= DPLL_VCO_ENABLE;
867
868 /* Disable the panel fitter if it was on our pipe */
869 if (cdv_intel_panel_fitter_pipe(dev) == pipe)
870 REG_WRITE(PFIT_CONTROL, 0);
871
872 DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
873 drm_mode_debug_printmodeline(mode);
874
875 REG_WRITE(dpll_reg,
876 (REG_READ(dpll_reg) & ~DPLL_LOCK) | DPLL_VCO_ENABLE);
877 REG_READ(dpll_reg);
878 /* Wait for the clocks to stabilize. */
879 udelay(150); /* 42 usec w/o calibration, 110 with. rounded up. */
880
881 if (!(REG_READ(dpll_reg) & DPLL_LOCK)) {
882 dev_err(dev->dev, "Failed to get DPLL lock\n");
883 return -EBUSY;
884 }
885
886 {
887 int sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
888 REG_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
889 }
890
891 REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
892 ((adjusted_mode->crtc_htotal - 1) << 16));
893 REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
894 ((adjusted_mode->crtc_hblank_end - 1) << 16));
895 REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
896 ((adjusted_mode->crtc_hsync_end - 1) << 16));
897 REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
898 ((adjusted_mode->crtc_vtotal - 1) << 16));
899 REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
900 ((adjusted_mode->crtc_vblank_end - 1) << 16));
901 REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
902 ((adjusted_mode->crtc_vsync_end - 1) << 16));
903 /* pipesrc and dspsize control the size that is scaled from,
904 * which should always be the user's requested size.
905 */
906 REG_WRITE(dspsize_reg,
907 ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
908 REG_WRITE(dsppos_reg, 0);
909 REG_WRITE(pipesrc_reg,
910 ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
911 REG_WRITE(pipeconf_reg, pipeconf);
912 REG_READ(pipeconf_reg);
913
914 cdv_intel_wait_for_vblank(dev);
915
916 REG_WRITE(dspcntr_reg, dspcntr);
917
918 /* Flush the plane changes */
919 {
920 struct drm_crtc_helper_funcs *crtc_funcs =
921 crtc->helper_private;
922 crtc_funcs->mode_set_base(crtc, x, y, old_fb);
923 }
924
925 cdv_intel_wait_for_vblank(dev);
926
927 return 0;
928}
929
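/*
 * Editorial sketch, not part of the driver: the timing registers written
 * above each pack two zero-based counts, the active/start value in bits
 * 15:0 and the total/end value in bits 31:16. For a hypothetical 1024x768
 * mode with crtc_htotal = 1344, the HTOTAL write would be:
 *
 *   (1024 - 1) | ((1344 - 1) << 16) == 0x03FF | (0x053F << 16)
 *                                   == 0x053F03FF
 */
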
930/** Loads the palette/gamma unit for the CRTC with the prepared values */
931void cdv_intel_crtc_load_lut(struct drm_crtc *crtc)
932{
933 struct drm_device *dev = crtc->dev;
934 struct drm_psb_private *dev_priv =
935 (struct drm_psb_private *)dev->dev_private;
936 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
937 int palreg = PALETTE_A;
938 int i;
939
940 /* The clocks have to be on to load the palette. */
941 if (!crtc->enabled)
942 return;
943
944 switch (psb_intel_crtc->pipe) {
945 case 0:
946 break;
947 case 1:
948 palreg = PALETTE_B;
949 break;
950 case 2:
951 palreg = PALETTE_C;
952 break;
953 default:
954 dev_err(dev->dev, "Illegal Pipe Number.\n");
955 return;
956 }
957
958 if (gma_power_begin(dev, false)) {
959 for (i = 0; i < 256; i++) {
960 REG_WRITE(palreg + 4 * i,
961 ((psb_intel_crtc->lut_r[i] +
962 psb_intel_crtc->lut_adj[i]) << 16) |
963 ((psb_intel_crtc->lut_g[i] +
964 psb_intel_crtc->lut_adj[i]) << 8) |
965 (psb_intel_crtc->lut_b[i] +
966 psb_intel_crtc->lut_adj[i]));
967 }
968 gma_power_end(dev);
969 } else {
970 for (i = 0; i < 256; i++) {
971 dev_priv->save_palette_a[i] =
972 ((psb_intel_crtc->lut_r[i] +
973 psb_intel_crtc->lut_adj[i]) << 16) |
974 ((psb_intel_crtc->lut_g[i] +
975 psb_intel_crtc->lut_adj[i]) << 8) |
976 (psb_intel_crtc->lut_b[i] +
977 psb_intel_crtc->lut_adj[i]);
978 }
979
980 }
981}
982
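/*
 * Editorial sketch, not part of the driver: the palette entries written
 * below are packed 8:8:8 words, red in bits 23:16, green in bits 15:8 and
 * blue in bits 7:0. lut_pack() is a hypothetical helper illustrating the
 * format; the driver open-codes it, adding the lut_adj offset per channel.
 */
static inline u32 lut_pack(u8 r, u8 g, u8 b)
{
	return ((u32)r << 16) | ((u32)g << 8) | b;
}
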
983/**
984 * Save HW states of giving crtc
985 */
986static void cdv_intel_crtc_save(struct drm_crtc *crtc)
987{
988 struct drm_device *dev = crtc->dev;
989 /* struct drm_psb_private *dev_priv =
990 (struct drm_psb_private *)dev->dev_private; */
991 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
992 struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
993 int pipeA = (psb_intel_crtc->pipe == 0);
994 uint32_t paletteReg;
995 int i;
996
997 if (!crtc_state) {
998 dev_dbg(dev->dev, "No CRTC state found\n");
999 return;
1000 }
1001
1002 crtc_state->saveDSPCNTR = REG_READ(pipeA ? DSPACNTR : DSPBCNTR);
1003 crtc_state->savePIPECONF = REG_READ(pipeA ? PIPEACONF : PIPEBCONF);
1004 crtc_state->savePIPESRC = REG_READ(pipeA ? PIPEASRC : PIPEBSRC);
1005 crtc_state->saveFP0 = REG_READ(pipeA ? FPA0 : FPB0);
1006 crtc_state->saveFP1 = REG_READ(pipeA ? FPA1 : FPB1);
1007 crtc_state->saveDPLL = REG_READ(pipeA ? DPLL_A : DPLL_B);
1008 crtc_state->saveHTOTAL = REG_READ(pipeA ? HTOTAL_A : HTOTAL_B);
1009 crtc_state->saveHBLANK = REG_READ(pipeA ? HBLANK_A : HBLANK_B);
1010 crtc_state->saveHSYNC = REG_READ(pipeA ? HSYNC_A : HSYNC_B);
1011 crtc_state->saveVTOTAL = REG_READ(pipeA ? VTOTAL_A : VTOTAL_B);
1012 crtc_state->saveVBLANK = REG_READ(pipeA ? VBLANK_A : VBLANK_B);
1013 crtc_state->saveVSYNC = REG_READ(pipeA ? VSYNC_A : VSYNC_B);
1014 crtc_state->saveDSPSTRIDE = REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE);
1015
1016 /*NOTE: DSPSIZE DSPPOS only for psb*/
1017 crtc_state->saveDSPSIZE = REG_READ(pipeA ? DSPASIZE : DSPBSIZE);
1018 crtc_state->saveDSPPOS = REG_READ(pipeA ? DSPAPOS : DSPBPOS);
1019
1020 crtc_state->saveDSPBASE = REG_READ(pipeA ? DSPABASE : DSPBBASE);
1021
1022 DRM_DEBUG("(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
1023 crtc_state->saveDSPCNTR,
1024 crtc_state->savePIPECONF,
1025 crtc_state->savePIPESRC,
1026 crtc_state->saveFP0,
1027 crtc_state->saveFP1,
1028 crtc_state->saveDPLL,
1029 crtc_state->saveHTOTAL,
1030 crtc_state->saveHBLANK,
1031 crtc_state->saveHSYNC,
1032 crtc_state->saveVTOTAL,
1033 crtc_state->saveVBLANK,
1034 crtc_state->saveVSYNC,
1035 crtc_state->saveDSPSTRIDE,
1036 crtc_state->saveDSPSIZE,
1037 crtc_state->saveDSPPOS,
1038 crtc_state->saveDSPBASE
1039 );
1040
1041 paletteReg = pipeA ? PALETTE_A : PALETTE_B;
1042 for (i = 0; i < 256; ++i)
1043 crtc_state->savePalette[i] = REG_READ(paletteReg + (i << 2));
1044}
1045
1046/**
1047 * Restore the HW state of the given CRTC
1048 */
1049static void cdv_intel_crtc_restore(struct drm_crtc *crtc)
1050{
1051 struct drm_device *dev = crtc->dev;
1052 /* struct drm_psb_private * dev_priv =
1053 (struct drm_psb_private *)dev->dev_private; */
1054 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
1055 struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
1056 /* struct drm_crtc_helper_funcs * crtc_funcs = crtc->helper_private; */
1057 int pipeA = (psb_intel_crtc->pipe == 0);
1058 uint32_t paletteReg;
1059 int i;
1060
1061 if (!crtc_state) {
1062 dev_dbg(dev->dev, "No crtc state\n");
1063 return;
1064 }
1065
1066 DRM_DEBUG(
1067 "current:(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
1068 REG_READ(pipeA ? DSPACNTR : DSPBCNTR),
1069 REG_READ(pipeA ? PIPEACONF : PIPEBCONF),
1070 REG_READ(pipeA ? PIPEASRC : PIPEBSRC),
1071 REG_READ(pipeA ? FPA0 : FPB0),
1072 REG_READ(pipeA ? FPA1 : FPB1),
1073 REG_READ(pipeA ? DPLL_A : DPLL_B),
1074 REG_READ(pipeA ? HTOTAL_A : HTOTAL_B),
1075 REG_READ(pipeA ? HBLANK_A : HBLANK_B),
1076 REG_READ(pipeA ? HSYNC_A : HSYNC_B),
1077 REG_READ(pipeA ? VTOTAL_A : VTOTAL_B),
1078 REG_READ(pipeA ? VBLANK_A : VBLANK_B),
1079 REG_READ(pipeA ? VSYNC_A : VSYNC_B),
1080 REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE),
1081 REG_READ(pipeA ? DSPASIZE : DSPBSIZE),
1082 REG_READ(pipeA ? DSPAPOS : DSPBPOS),
1083 REG_READ(pipeA ? DSPABASE : DSPBBASE)
1084 );
1085
1086 DRM_DEBUG(
1087 "saved: (%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
1088 crtc_state->saveDSPCNTR,
1089 crtc_state->savePIPECONF,
1090 crtc_state->savePIPESRC,
1091 crtc_state->saveFP0,
1092 crtc_state->saveFP1,
1093 crtc_state->saveDPLL,
1094 crtc_state->saveHTOTAL,
1095 crtc_state->saveHBLANK,
1096 crtc_state->saveHSYNC,
1097 crtc_state->saveVTOTAL,
1098 crtc_state->saveVBLANK,
1099 crtc_state->saveVSYNC,
1100 crtc_state->saveDSPSTRIDE,
1101 crtc_state->saveDSPSIZE,
1102 crtc_state->saveDSPPOS,
1103 crtc_state->saveDSPBASE
1104 );
1105
1106
1107 if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
1108 REG_WRITE(pipeA ? DPLL_A : DPLL_B,
1109 crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
1110 REG_READ(pipeA ? DPLL_A : DPLL_B);
1111 DRM_DEBUG("write dpll: %x\n",
1112 REG_READ(pipeA ? DPLL_A : DPLL_B));
1113 udelay(150);
1114 }
1115
1116 REG_WRITE(pipeA ? FPA0 : FPB0, crtc_state->saveFP0);
1117 REG_READ(pipeA ? FPA0 : FPB0);
1118
1119 REG_WRITE(pipeA ? FPA1 : FPB1, crtc_state->saveFP1);
1120 REG_READ(pipeA ? FPA1 : FPB1);
1121
1122 REG_WRITE(pipeA ? DPLL_A : DPLL_B, crtc_state->saveDPLL);
1123 REG_READ(pipeA ? DPLL_A : DPLL_B);
1124 udelay(150);
1125
1126 REG_WRITE(pipeA ? HTOTAL_A : HTOTAL_B, crtc_state->saveHTOTAL);
1127 REG_WRITE(pipeA ? HBLANK_A : HBLANK_B, crtc_state->saveHBLANK);
1128 REG_WRITE(pipeA ? HSYNC_A : HSYNC_B, crtc_state->saveHSYNC);
1129 REG_WRITE(pipeA ? VTOTAL_A : VTOTAL_B, crtc_state->saveVTOTAL);
1130 REG_WRITE(pipeA ? VBLANK_A : VBLANK_B, crtc_state->saveVBLANK);
1131 REG_WRITE(pipeA ? VSYNC_A : VSYNC_B, crtc_state->saveVSYNC);
1132 REG_WRITE(pipeA ? DSPASTRIDE : DSPBSTRIDE, crtc_state->saveDSPSTRIDE);
1133
1134 REG_WRITE(pipeA ? DSPASIZE : DSPBSIZE, crtc_state->saveDSPSIZE);
1135 REG_WRITE(pipeA ? DSPAPOS : DSPBPOS, crtc_state->saveDSPPOS);
1136
1137 REG_WRITE(pipeA ? PIPEASRC : PIPEBSRC, crtc_state->savePIPESRC);
1138 REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
1139 REG_WRITE(pipeA ? PIPEACONF : PIPEBCONF, crtc_state->savePIPECONF);
1140
1141 cdv_intel_wait_for_vblank(dev);
1142
1143 REG_WRITE(pipeA ? DSPACNTR : DSPBCNTR, crtc_state->saveDSPCNTR);
1144 REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
1145
1146 cdv_intel_wait_for_vblank(dev);
1147
1148 paletteReg = pipeA ? PALETTE_A : PALETTE_B;
1149 for (i = 0; i < 256; ++i)
1150 REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]);
1151}
1152
1153static int cdv_intel_crtc_cursor_set(struct drm_crtc *crtc,
1154 struct drm_file *file_priv,
1155 uint32_t handle,
1156 uint32_t width, uint32_t height)
1157{
1158 struct drm_device *dev = crtc->dev;
1159 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
1160 int pipe = psb_intel_crtc->pipe;
1161 uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
1162 uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
1163 uint32_t temp;
1164 size_t addr = 0;
1165 struct gtt_range *gt;
1166 struct drm_gem_object *obj;
1167 int ret;
1168
1169	/* If we want to turn off the cursor, ignore width and height */
1170 if (!handle) {
1171 /* turn off the cursor */
1172 temp = CURSOR_MODE_DISABLE;
1173
1174 if (gma_power_begin(dev, false)) {
1175 REG_WRITE(control, temp);
1176 REG_WRITE(base, 0);
1177 gma_power_end(dev);
1178 }
1179
1180 /* unpin the old GEM object */
1181 if (psb_intel_crtc->cursor_obj) {
1182 gt = container_of(psb_intel_crtc->cursor_obj,
1183 struct gtt_range, gem);
1184 psb_gtt_unpin(gt);
1185 drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
1186 psb_intel_crtc->cursor_obj = NULL;
1187 }
1188
1189 return 0;
1190 }
1191
1192 /* Currently we only support 64x64 cursors */
1193 if (width != 64 || height != 64) {
1194 dev_dbg(dev->dev, "we currently only support 64x64 cursors\n");
1195 return -EINVAL;
1196 }
1197
1198 obj = drm_gem_object_lookup(dev, file_priv, handle);
1199 if (!obj)
1200 return -ENOENT;
1201
1202 if (obj->size < width * height * 4) {
1203		dev_dbg(dev->dev, "buffer is too small\n");
1204 return -ENOMEM;
1205 }
1206
1207 gt = container_of(obj, struct gtt_range, gem);
1208
1209 /* Pin the memory into the GTT */
1210 ret = psb_gtt_pin(gt);
1211 if (ret) {
1212 dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
1213 return ret;
1214 }
1215
1216 addr = gt->offset; /* Or resource.start ??? */
1217
1218 psb_intel_crtc->cursor_addr = addr;
1219
1220 temp = 0;
1221 /* set the pipe for the cursor */
1222 temp |= (pipe << 28);
1223 temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
1224
1225 if (gma_power_begin(dev, false)) {
1226 REG_WRITE(control, temp);
1227 REG_WRITE(base, addr);
1228 gma_power_end(dev);
1229 }
1230
1231 /* unpin the old GEM object */
1232 if (psb_intel_crtc->cursor_obj) {
1233 gt = container_of(psb_intel_crtc->cursor_obj,
1234 struct gtt_range, gem);
1235 psb_gtt_unpin(gt);
1236 drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
1237 psb_intel_crtc->cursor_obj = obj;
1238 }
1239 return 0;
1240}
1241
1242static int cdv_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
1243{
1244 struct drm_device *dev = crtc->dev;
1245 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
1246 int pipe = psb_intel_crtc->pipe;
1247 uint32_t temp = 0;
1248	uint32_t addr;
1249
1250
1251 if (x < 0) {
1252 temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
1253 x = -x;
1254 }
1255 if (y < 0) {
1256 temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
1257 y = -y;
1258 }
1259
1260 temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
1261 temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
1262
1263	addr = psb_intel_crtc->cursor_addr;
1264
1265 if (gma_power_begin(dev, false)) {
1266 REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
1267		REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, addr);
1268 gma_power_end(dev);
1269 }
1270 return 0;
1271}
1272
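/*
 * Editorial note: CURxPOS holds sign-magnitude coordinates rather than
 * two's complement, which is why the function above folds negative x/y
 * into a magnitude plus a per-axis sign bit. With hypothetical values
 * x = -16, y = 8 the register word becomes:
 *
 *   (CURSOR_POS_SIGN << CURSOR_X_SHIFT) |
 *   ((16 & CURSOR_POS_MASK) << CURSOR_X_SHIFT) |
 *   ((8 & CURSOR_POS_MASK) << CURSOR_Y_SHIFT)
 */
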
1273static void cdv_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
1274 u16 *green, u16 *blue, uint32_t start, uint32_t size)
1275{
1276 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
1277 int i;
1278 int end = (start + size > 256) ? 256 : start + size;
1279
1280 for (i = start; i < end; i++) {
1281 psb_intel_crtc->lut_r[i] = red[i] >> 8;
1282 psb_intel_crtc->lut_g[i] = green[i] >> 8;
1283 psb_intel_crtc->lut_b[i] = blue[i] >> 8;
1284 }
1285
1286 cdv_intel_crtc_load_lut(crtc);
1287}
1288
1289static int cdv_crtc_set_config(struct drm_mode_set *set)
1290{
1291 int ret = 0;
1292 struct drm_device *dev = set->crtc->dev;
1293 struct drm_psb_private *dev_priv = dev->dev_private;
1294
1295 if (!dev_priv->rpm_enabled)
1296 return drm_crtc_helper_set_config(set);
1297
1298 pm_runtime_forbid(&dev->pdev->dev);
1299
1300 ret = drm_crtc_helper_set_config(set);
1301
1302 pm_runtime_allow(&dev->pdev->dev);
1303
1304 return ret;
1305}
1306
1307/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
1308
1309/* FIXME: why are we using this, should it be cdv_ in this tree ? */
1310
1311static void i8xx_clock(int refclk, struct cdv_intel_clock_t *clock)
1312{
1313 clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
1314 clock->p = clock->p1 * clock->p2;
1315 clock->vco = refclk * clock->m / (clock->n + 2);
1316 clock->dot = clock->vco / clock->p;
1317}
1318
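/*
 * Editorial worked example, using made-up divisors rather than values
 * from any real PLL table: with refclk = 48000 kHz and m1 = 9, m2 = 8,
 * n = 2, p1 = 6, p2 = 2, the formulas above give
 *
 *   m   = 5 * (9 + 2) + (8 + 2) = 65
 *   p   = 6 * 2                 = 12
 *   vco = 48000 * 65 / (2 + 2)  = 780000 kHz
 *   dot = 780000 / 12           = 65000 kHz
 *
 * i.e. the 65 MHz pixel clock of a 1024x768@60 mode.
 */
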
1319/* Returns the clock of the currently programmed mode of the given pipe. */
1320static int cdv_intel_crtc_clock_get(struct drm_device *dev,
1321 struct drm_crtc *crtc)
1322{
1323 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
1324 int pipe = psb_intel_crtc->pipe;
1325 u32 dpll;
1326 u32 fp;
1327 struct cdv_intel_clock_t clock;
1328 bool is_lvds;
1329 struct drm_psb_private *dev_priv = dev->dev_private;
1330
1331 if (gma_power_begin(dev, false)) {
1332 dpll = REG_READ((pipe == 0) ? DPLL_A : DPLL_B);
1333 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
1334 fp = REG_READ((pipe == 0) ? FPA0 : FPB0);
1335 else
1336 fp = REG_READ((pipe == 0) ? FPA1 : FPB1);
1337 is_lvds = (pipe == 1) && (REG_READ(LVDS) & LVDS_PORT_EN);
1338 gma_power_end(dev);
1339 } else {
1340 dpll = (pipe == 0) ?
1341 dev_priv->saveDPLL_A : dev_priv->saveDPLL_B;
1342
1343 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
1344 fp = (pipe == 0) ?
1345 dev_priv->saveFPA0 :
1346 dev_priv->saveFPB0;
1347 else
1348 fp = (pipe == 0) ?
1349 dev_priv->saveFPA1 :
1350 dev_priv->saveFPB1;
1351
1352 is_lvds = (pipe == 1) && (dev_priv->saveLVDS & LVDS_PORT_EN);
1353 }
1354
1355 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
1356 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
1357 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
1358
1359 if (is_lvds) {
1360 clock.p1 =
1361 ffs((dpll &
1362 DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
1363 DPLL_FPA01_P1_POST_DIV_SHIFT);
1364 if (clock.p1 == 0) {
1365 clock.p1 = 4;
1366			dev_err(dev->dev, "PLL 0x%08x\n", dpll);
1367 }
1368 clock.p2 = 14;
1369
1370 if ((dpll & PLL_REF_INPUT_MASK) ==
1371 PLLB_REF_INPUT_SPREADSPECTRUMIN) {
1372 /* XXX: might not be 66MHz */
1373 i8xx_clock(66000, &clock);
1374 } else
1375 i8xx_clock(48000, &clock);
1376 } else {
1377 if (dpll & PLL_P1_DIVIDE_BY_TWO)
1378 clock.p1 = 2;
1379 else {
1380 clock.p1 =
1381 ((dpll &
1382 DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
1383 DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
1384 }
1385 if (dpll & PLL_P2_DIVIDE_BY_4)
1386 clock.p2 = 4;
1387 else
1388 clock.p2 = 2;
1389
1390 i8xx_clock(48000, &clock);
1391 }
1392
1393 /* XXX: It would be nice to validate the clocks, but we can't reuse
1394 * i830PllIsValid() because it relies on the xf86_config connector
1395 * configuration being accurate, which it isn't necessarily.
1396 */
1397
1398 return clock.dot;
1399}
1400
1401/** Returns the currently programmed mode of the given pipe. */
1402struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev,
1403 struct drm_crtc *crtc)
1404{
1405 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
1406 int pipe = psb_intel_crtc->pipe;
1407 struct drm_display_mode *mode;
1408 int htot;
1409 int hsync;
1410 int vtot;
1411 int vsync;
1412 struct drm_psb_private *dev_priv = dev->dev_private;
1413
1414 if (gma_power_begin(dev, false)) {
1415 htot = REG_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B);
1416 hsync = REG_READ((pipe == 0) ? HSYNC_A : HSYNC_B);
1417 vtot = REG_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B);
1418 vsync = REG_READ((pipe == 0) ? VSYNC_A : VSYNC_B);
1419 gma_power_end(dev);
1420 } else {
1421 htot = (pipe == 0) ?
1422 dev_priv->saveHTOTAL_A : dev_priv->saveHTOTAL_B;
1423 hsync = (pipe == 0) ?
1424 dev_priv->saveHSYNC_A : dev_priv->saveHSYNC_B;
1425 vtot = (pipe == 0) ?
1426 dev_priv->saveVTOTAL_A : dev_priv->saveVTOTAL_B;
1427 vsync = (pipe == 0) ?
1428 dev_priv->saveVSYNC_A : dev_priv->saveVSYNC_B;
1429 }
1430
1431 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
1432 if (!mode)
1433 return NULL;
1434
1435 mode->clock = cdv_intel_crtc_clock_get(dev, crtc);
1436 mode->hdisplay = (htot & 0xffff) + 1;
1437 mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
1438 mode->hsync_start = (hsync & 0xffff) + 1;
1439 mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
1440 mode->vdisplay = (vtot & 0xffff) + 1;
1441 mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
1442 mode->vsync_start = (vsync & 0xffff) + 1;
1443 mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
1444
1445 drm_mode_set_name(mode);
1446 drm_mode_set_crtcinfo(mode, 0);
1447
1448 return mode;
1449}
1450
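/*
 * Editorial sketch, the inverse of the packing used in mode_set: each
 * timing register is split back into its two zero-based halves. With a
 * hypothetical HTOTAL value of 0x053F03FF the code below recovers
 *
 *   hdisplay = (0x053F03FF & 0xffff) + 1         = 1024
 *   htotal   = ((0x053F03FF >> 16) & 0xffff) + 1 = 1344
 */
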
1451static void cdv_intel_crtc_destroy(struct drm_crtc *crtc)
1452{
1453 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
1454
1455 kfree(psb_intel_crtc->crtc_state);
1456 drm_crtc_cleanup(crtc);
1457 kfree(psb_intel_crtc);
1458}
1459
1460const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = {
1461 .dpms = cdv_intel_crtc_dpms,
1462 .mode_fixup = cdv_intel_crtc_mode_fixup,
1463 .mode_set = cdv_intel_crtc_mode_set,
1464 .mode_set_base = cdv_intel_pipe_set_base,
1465 .prepare = cdv_intel_crtc_prepare,
1466 .commit = cdv_intel_crtc_commit,
1467};
1468
1469const struct drm_crtc_funcs cdv_intel_crtc_funcs = {
1470 .save = cdv_intel_crtc_save,
1471 .restore = cdv_intel_crtc_restore,
1472 .cursor_set = cdv_intel_crtc_cursor_set,
1473 .cursor_move = cdv_intel_crtc_cursor_move,
1474 .gamma_set = cdv_intel_crtc_gamma_set,
1475 .set_config = cdv_crtc_set_config,
1476 .destroy = cdv_intel_crtc_destroy,
1477};
1478
1479/*
1480 * Set the default value of the cursor control and base registers
1481 * to zero. This is a workaround for a h/w defect on Oaktrail.
1482 */
1483void cdv_intel_cursor_init(struct drm_device *dev, int pipe)
1484{
1485 uint32_t control;
1486 uint32_t base;
1487
1488 switch (pipe) {
1489 case 0:
1490 control = CURACNTR;
1491 base = CURABASE;
1492 break;
1493 case 1:
1494 control = CURBCNTR;
1495 base = CURBBASE;
1496 break;
1497 case 2:
1498 control = CURCCNTR;
1499 base = CURCBASE;
1500 break;
1501 default:
1502 return;
1503 }
1504
1505 REG_WRITE(control, 0);
1506 REG_WRITE(base, 0);
1507}
1508
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
new file mode 100644
index 00000000000..50d7cfb5166
--- /dev/null
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -0,0 +1,394 @@
1/*
2 * Copyright © 2006-2011 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * jim liu <jim.liu@intel.com>
25 *
26 * FIXME:
27 * We should probably make this generic and share it with Medfield
28 */
29
30#include <drm/drmP.h>
31#include <drm/drm.h>
32#include <drm/drm_crtc.h>
33#include <drm/drm_edid.h>
34#include "psb_intel_drv.h"
35#include "psb_drv.h"
36#include "psb_intel_reg.h"
37#include <linux/pm_runtime.h>
38
39/* hdmi control bits */
40#define HDMI_NULL_PACKETS_DURING_VSYNC (1 << 9)
41#define HDMI_BORDER_ENABLE (1 << 7)
42#define HDMI_AUDIO_ENABLE (1 << 6)
43#define HDMI_VSYNC_ACTIVE_HIGH (1 << 4)
44#define HDMI_HSYNC_ACTIVE_HIGH (1 << 3)
45/* hdmi-b control bits */
46#define HDMIB_PIPE_B_SELECT (1 << 30)
47
48
49struct mid_intel_hdmi_priv {
50 u32 hdmi_reg;
51 u32 save_HDMIB;
52 bool has_hdmi_sink;
53 bool has_hdmi_audio;
54	/* Should be set when hotplug is detected */
55 bool hdmi_device_connected;
56 struct mdfld_hdmi_i2c *i2c_bus;
57 struct i2c_adapter *hdmi_i2c_adapter; /* for control functions */
58 struct drm_device *dev;
59};
60
61static void cdv_hdmi_mode_set(struct drm_encoder *encoder,
62 struct drm_display_mode *mode,
63 struct drm_display_mode *adjusted_mode)
64{
65 struct drm_device *dev = encoder->dev;
66 struct psb_intel_encoder *psb_intel_encoder = to_psb_intel_encoder(encoder);
67 struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
68 u32 hdmib;
69 struct drm_crtc *crtc = encoder->crtc;
70 struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc);
71
72 hdmib = (2 << 10);
73
74 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
75 hdmib |= HDMI_VSYNC_ACTIVE_HIGH;
76 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
77 hdmib |= HDMI_HSYNC_ACTIVE_HIGH;
78
79 if (intel_crtc->pipe == 1)
80 hdmib |= HDMIB_PIPE_B_SELECT;
81
82 if (hdmi_priv->has_hdmi_audio) {
83 hdmib |= HDMI_AUDIO_ENABLE;
84 hdmib |= HDMI_NULL_PACKETS_DURING_VSYNC;
85 }
86
87 REG_WRITE(hdmi_priv->hdmi_reg, hdmib);
88 REG_READ(hdmi_priv->hdmi_reg);
89}
90
91static bool cdv_hdmi_mode_fixup(struct drm_encoder *encoder,
92 struct drm_display_mode *mode,
93 struct drm_display_mode *adjusted_mode)
94{
95 return true;
96}
97
98static void cdv_hdmi_dpms(struct drm_encoder *encoder, int mode)
99{
100 struct drm_device *dev = encoder->dev;
101 struct psb_intel_encoder *psb_intel_encoder =
102 to_psb_intel_encoder(encoder);
103 struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
104 u32 hdmib;
105
106 hdmib = REG_READ(hdmi_priv->hdmi_reg);
107
108 if (mode != DRM_MODE_DPMS_ON)
109 REG_WRITE(hdmi_priv->hdmi_reg, hdmib & ~HDMIB_PORT_EN);
110 else
111 REG_WRITE(hdmi_priv->hdmi_reg, hdmib | HDMIB_PORT_EN);
112 REG_READ(hdmi_priv->hdmi_reg);
113}
114
115static void cdv_hdmi_save(struct drm_connector *connector)
116{
117 struct drm_device *dev = connector->dev;
118 struct psb_intel_encoder *psb_intel_encoder =
119 psb_intel_attached_encoder(connector);
120 struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
121
122 hdmi_priv->save_HDMIB = REG_READ(hdmi_priv->hdmi_reg);
123}
124
125static void cdv_hdmi_restore(struct drm_connector *connector)
126{
127 struct drm_device *dev = connector->dev;
128 struct psb_intel_encoder *psb_intel_encoder =
129 psb_intel_attached_encoder(connector);
130 struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
131
132 REG_WRITE(hdmi_priv->hdmi_reg, hdmi_priv->save_HDMIB);
133 REG_READ(hdmi_priv->hdmi_reg);
134}
135
136static enum drm_connector_status cdv_hdmi_detect(
137 struct drm_connector *connector, bool force)
138{
139 struct psb_intel_encoder *psb_intel_encoder =
140 psb_intel_attached_encoder(connector);
141 struct psb_intel_connector *psb_intel_connector =
142 to_psb_intel_connector(connector);
143 struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
144 struct edid *edid = NULL;
145 enum drm_connector_status status = connector_status_disconnected;
146
147 edid = drm_get_edid(connector, &psb_intel_encoder->i2c_bus->adapter);
148
149 hdmi_priv->has_hdmi_sink = false;
150 hdmi_priv->has_hdmi_audio = false;
151 if (edid) {
152 if (edid->input & DRM_EDID_INPUT_DIGITAL) {
153 status = connector_status_connected;
154 hdmi_priv->has_hdmi_sink =
155 drm_detect_hdmi_monitor(edid);
156 hdmi_priv->has_hdmi_audio =
157 drm_detect_monitor_audio(edid);
158 }
159
160 psb_intel_connector->base.display_info.raw_edid = NULL;
161 kfree(edid);
162 }
163 return status;
164}
165
166static int cdv_hdmi_set_property(struct drm_connector *connector,
167 struct drm_property *property,
168 uint64_t value)
169{
170 struct drm_encoder *encoder = connector->encoder;
171
172 if (!strcmp(property->name, "scaling mode") && encoder) {
173 struct psb_intel_crtc *crtc = to_psb_intel_crtc(encoder->crtc);
174 bool centre;
175 uint64_t curValue;
176
177 if (!crtc)
178 return -1;
179
180 switch (value) {
181 case DRM_MODE_SCALE_FULLSCREEN:
182 break;
183 case DRM_MODE_SCALE_NO_SCALE:
184 break;
185 case DRM_MODE_SCALE_ASPECT:
186 break;
187 default:
188 return -1;
189 }
190
191 if (drm_connector_property_get_value(connector,
192 property, &curValue))
193 return -1;
194
195 if (curValue == value)
196 return 0;
197
198 if (drm_connector_property_set_value(connector,
199 property, value))
200 return -1;
201
202 centre = (curValue == DRM_MODE_SCALE_NO_SCALE) ||
203 (value == DRM_MODE_SCALE_NO_SCALE);
204
205 if (crtc->saved_mode.hdisplay != 0 &&
206 crtc->saved_mode.vdisplay != 0) {
207 if (centre) {
208 if (!drm_crtc_helper_set_mode(encoder->crtc, &crtc->saved_mode,
209 encoder->crtc->x, encoder->crtc->y, encoder->crtc->fb))
210 return -1;
211 } else {
212 struct drm_encoder_helper_funcs *helpers
213 = encoder->helper_private;
214 helpers->mode_set(encoder, &crtc->saved_mode,
215 &crtc->saved_adjusted_mode);
216 }
217 }
218 }
219 return 0;
220}
221
222/*
223 * Return the list of HDMI DDC modes if available.
224 */
225static int cdv_hdmi_get_modes(struct drm_connector *connector)
226{
227 struct psb_intel_encoder *psb_intel_encoder =
228 psb_intel_attached_encoder(connector);
229 struct edid *edid = NULL;
230 int ret = 0;
231
232 edid = drm_get_edid(connector, &psb_intel_encoder->i2c_bus->adapter);
233 if (edid) {
234 drm_mode_connector_update_edid_property(connector, edid);
235 ret = drm_add_edid_modes(connector, edid);
236 kfree(edid);
237 }
238 return ret;
239}
240
241static int cdv_hdmi_mode_valid(struct drm_connector *connector,
242 struct drm_display_mode *mode)
243{
244
245 if (mode->clock > 165000)
246 return MODE_CLOCK_HIGH;
247 if (mode->clock < 20000)
248		return MODE_CLOCK_LOW;
249
250 /* just in case */
251 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
252 return MODE_NO_DBLESCAN;
253
254 /* just in case */
255 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
256 return MODE_NO_INTERLACE;
257
258 /*
259 * FIXME: for now we limit the size to 1680x1050 on CDV, otherwise it
260 * will go beyond the stolen memory size allocated to the framebuffer
261 */
262 if (mode->hdisplay > 1680)
263 return MODE_PANEL;
264 if (mode->vdisplay > 1050)
265 return MODE_PANEL;
266 return MODE_OK;
267}
268
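/*
 * Editorial worked example of the checks above: 1920x1080@60 has a
 * 148.5 MHz pixel clock, inside the 20-165 MHz window, but its 1920
 * hdisplay exceeds the 1680x1050 cap, so it is rejected with MODE_PANEL.
 * A reduced-blanking 1680x1050@60 mode (about 119 MHz) passes every
 * check and returns MODE_OK.
 */
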
269static void cdv_hdmi_destroy(struct drm_connector *connector)
270{
271 struct psb_intel_encoder *psb_intel_encoder =
272 psb_intel_attached_encoder(connector);
273
274 if (psb_intel_encoder->i2c_bus)
275 psb_intel_i2c_destroy(psb_intel_encoder->i2c_bus);
276 drm_sysfs_connector_remove(connector);
277 drm_connector_cleanup(connector);
278 kfree(connector);
279}
280
281static const struct drm_encoder_helper_funcs cdv_hdmi_helper_funcs = {
282 .dpms = cdv_hdmi_dpms,
283 .mode_fixup = cdv_hdmi_mode_fixup,
284 .prepare = psb_intel_encoder_prepare,
285 .mode_set = cdv_hdmi_mode_set,
286 .commit = psb_intel_encoder_commit,
287};
288
289static const struct drm_connector_helper_funcs
290 cdv_hdmi_connector_helper_funcs = {
291 .get_modes = cdv_hdmi_get_modes,
292 .mode_valid = cdv_hdmi_mode_valid,
293 .best_encoder = psb_intel_best_encoder,
294};
295
296static const struct drm_connector_funcs cdv_hdmi_connector_funcs = {
297 .dpms = drm_helper_connector_dpms,
298 .save = cdv_hdmi_save,
299 .restore = cdv_hdmi_restore,
300 .detect = cdv_hdmi_detect,
301 .fill_modes = drm_helper_probe_single_connector_modes,
302 .set_property = cdv_hdmi_set_property,
303 .destroy = cdv_hdmi_destroy,
304};
305
306void cdv_hdmi_init(struct drm_device *dev,
307 struct psb_intel_mode_device *mode_dev, int reg)
308{
309 struct psb_intel_encoder *psb_intel_encoder;
310 struct psb_intel_connector *psb_intel_connector;
311 struct drm_connector *connector;
312 struct drm_encoder *encoder;
313 struct mid_intel_hdmi_priv *hdmi_priv;
314 int ddc_bus;
315
316 psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder),
317 GFP_KERNEL);
318
319 if (!psb_intel_encoder)
320 return;
321
322 psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector),
323 GFP_KERNEL);
324
325 if (!psb_intel_connector)
326 goto err_connector;
327
328 hdmi_priv = kzalloc(sizeof(struct mid_intel_hdmi_priv), GFP_KERNEL);
329
330 if (!hdmi_priv)
331 goto err_priv;
332
333 connector = &psb_intel_connector->base;
334 encoder = &psb_intel_encoder->base;
335 drm_connector_init(dev, connector,
336 &cdv_hdmi_connector_funcs,
337 DRM_MODE_CONNECTOR_DVID);
338
339 drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs,
340 DRM_MODE_ENCODER_TMDS);
341
342 psb_intel_connector_attach_encoder(psb_intel_connector,
343 psb_intel_encoder);
344 psb_intel_encoder->type = INTEL_OUTPUT_HDMI;
345 hdmi_priv->hdmi_reg = reg;
346 hdmi_priv->has_hdmi_sink = false;
347 psb_intel_encoder->dev_priv = hdmi_priv;
348
349 drm_encoder_helper_add(encoder, &cdv_hdmi_helper_funcs);
350 drm_connector_helper_add(connector,
351 &cdv_hdmi_connector_helper_funcs);
352 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
353 connector->interlace_allowed = false;
354 connector->doublescan_allowed = false;
355
356 drm_connector_attach_property(connector,
357 dev->mode_config.scaling_mode_property,
358 DRM_MODE_SCALE_FULLSCREEN);
359
360 switch (reg) {
361 case SDVOB:
362 ddc_bus = GPIOE;
363 break;
364 case SDVOC:
365 ddc_bus = GPIOD;
366 break;
367 default:
368 DRM_ERROR("unknown reg 0x%x for HDMI\n", reg);
369 goto failed_ddc;
370 break;
371 }
372
373 psb_intel_encoder->i2c_bus = psb_intel_i2c_create(dev,
374 ddc_bus, (reg == SDVOB) ? "HDMIB" : "HDMIC");
375
376 if (!psb_intel_encoder->i2c_bus) {
377 dev_err(dev->dev, "No ddc adapter available!\n");
378 goto failed_ddc;
379 }
380
381 hdmi_priv->hdmi_i2c_adapter =
382 &(psb_intel_encoder->i2c_bus->adapter);
383 hdmi_priv->dev = dev;
384 drm_sysfs_connector_add(connector);
385 return;
386
387failed_ddc:
388 drm_encoder_cleanup(encoder);
389 drm_connector_cleanup(connector);
390err_priv:
391 kfree(psb_intel_connector);
392err_connector:
393 kfree(psb_intel_encoder);
394}
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
new file mode 100644
index 00000000000..50e744be985
--- /dev/null
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -0,0 +1,732 @@
1/*
2 * Copyright © 2006-2011 Intel Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Authors:
18 * Eric Anholt <eric@anholt.net>
19 * Dave Airlie <airlied@linux.ie>
20 * Jesse Barnes <jesse.barnes@intel.com>
21 */
22
23#include <linux/i2c.h>
24#include <linux/dmi.h>
25#include <drm/drmP.h>
26
27#include "intel_bios.h"
28#include "psb_drv.h"
29#include "psb_intel_drv.h"
30#include "psb_intel_reg.h"
31#include "power.h"
32#include <linux/pm_runtime.h>
33#include "cdv_device.h"
34
35/**
36 * LVDS I2C backlight control macros
37 */
38#define BRIGHTNESS_MAX_LEVEL 100
39#define BRIGHTNESS_MASK 0xFF
40#define BLC_I2C_TYPE 0x01
41#define BLC_PWM_TYPE 0x02
42
43#define BLC_POLARITY_NORMAL 0
44#define BLC_POLARITY_INVERSE 1
45
46#define PSB_BLC_MAX_PWM_REG_FREQ (0xFFFE)
47#define PSB_BLC_MIN_PWM_REG_FREQ (0x2)
48#define PSB_BLC_PWM_PRECISION_FACTOR (10)
49#define PSB_BACKLIGHT_PWM_CTL_SHIFT (16)
50#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
51
52struct cdv_intel_lvds_priv {
53 /**
54	 * Saved LVDS output states
55 */
56 uint32_t savePP_ON;
57 uint32_t savePP_OFF;
58 uint32_t saveLVDS;
59 uint32_t savePP_CONTROL;
60 uint32_t savePP_CYCLE;
61 uint32_t savePFIT_CONTROL;
62 uint32_t savePFIT_PGM_RATIOS;
63 uint32_t saveBLC_PWM_CTL;
64};
65
66/*
67 * Returns the maximum level of the backlight duty cycle field.
68 */
69static u32 cdv_intel_lvds_get_max_backlight(struct drm_device *dev)
70{
71 struct drm_psb_private *dev_priv = dev->dev_private;
72 u32 retval;
73
74 if (gma_power_begin(dev, false)) {
75 retval = ((REG_READ(BLC_PWM_CTL) &
76 BACKLIGHT_MODULATION_FREQ_MASK) >>
77 BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
78
79 gma_power_end(dev);
80 } else
81 retval = ((dev_priv->saveBLC_PWM_CTL &
82 BACKLIGHT_MODULATION_FREQ_MASK) >>
83 BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
84
85 return retval;
86}
87
88/*
89 * Set LVDS backlight level by I2C command
90 */
91static int cdv_lvds_i2c_set_brightness(struct drm_device *dev,
92 unsigned int level)
93{
94 struct drm_psb_private *dev_priv = dev->dev_private;
95 struct psb_intel_i2c_chan *lvds_i2c_bus = dev_priv->lvds_i2c_bus;
96 u8 out_buf[2];
97 unsigned int blc_i2c_brightness;
98
99 struct i2c_msg msgs[] = {
100 {
101 .addr = lvds_i2c_bus->slave_addr,
102 .flags = 0,
103 .len = 2,
104 .buf = out_buf,
105 }
106 };
107
108 blc_i2c_brightness = BRIGHTNESS_MASK & ((unsigned int)level *
109 BRIGHTNESS_MASK /
110 BRIGHTNESS_MAX_LEVEL);
111
112 if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
113 blc_i2c_brightness = BRIGHTNESS_MASK - blc_i2c_brightness;
114
115 out_buf[0] = dev_priv->lvds_bl->brightnesscmd;
116 out_buf[1] = (u8)blc_i2c_brightness;
117
118 if (i2c_transfer(&lvds_i2c_bus->adapter, msgs, 1) == 1)
119 return 0;
120
121 DRM_ERROR("I2C transfer error\n");
122 return -1;
123}
124
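/*
 * Editorial worked example with hypothetical numbers: for level = 75 of
 * BRIGHTNESS_MAX_LEVEL = 100, the scaling below yields
 *
 *   blc_i2c_brightness = 0xFF & (75 * 0xFF / 100) = 191
 *
 * (or 255 - 191 = 64 with inverse polarity), and that byte is sent as the
 * second byte of the two-byte I2C write, after the panel's brightness
 * command byte.
 */
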
125
126static int cdv_lvds_pwm_set_brightness(struct drm_device *dev, int level)
127{
128 struct drm_psb_private *dev_priv = dev->dev_private;
129
130 u32 max_pwm_blc;
131 u32 blc_pwm_duty_cycle;
132
133 max_pwm_blc = cdv_intel_lvds_get_max_backlight(dev);
134
135	/* BLC_PWM_CTL should have been initialized during backlight device init */
136 BUG_ON((max_pwm_blc & PSB_BLC_MAX_PWM_REG_FREQ) == 0);
137
138 blc_pwm_duty_cycle = level * max_pwm_blc / BRIGHTNESS_MAX_LEVEL;
139
140 if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
141 blc_pwm_duty_cycle = max_pwm_blc - blc_pwm_duty_cycle;
142
143 blc_pwm_duty_cycle &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
144 REG_WRITE(BLC_PWM_CTL,
145 (max_pwm_blc << PSB_BACKLIGHT_PWM_CTL_SHIFT) |
146 (blc_pwm_duty_cycle));
147
148 return 0;
149}
150
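/*
 * Editorial worked example with hypothetical register contents: if
 * cdv_intel_lvds_get_max_backlight() reports max_pwm_blc = 200, then
 * level = 75 maps to
 *
 *   blc_pwm_duty_cycle = 75 * 200 / 100 = 150
 *
 * and BLC_PWM_CTL is written as (200 << 16) | 150: modulation frequency
 * in the high word, duty cycle in the low word, with bit 0 cleared by
 * PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR.
 */
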
151/*
152 * Set LVDS backlight level either by I2C or PWM
153 */
154void cdv_intel_lvds_set_brightness(struct drm_device *dev, int level)
155{
156 struct drm_psb_private *dev_priv = dev->dev_private;
157
158 if (!dev_priv->lvds_bl) {
159		DRM_ERROR("No LVDS backlight info\n");
160 return;
161 }
162
163 if (dev_priv->lvds_bl->type == BLC_I2C_TYPE)
164 cdv_lvds_i2c_set_brightness(dev, level);
165 else
166 cdv_lvds_pwm_set_brightness(dev, level);
167}
168
169/**
170 * Sets the backlight level.
171 *
172 * @level: backlight level, from 0 to cdv_intel_lvds_get_max_backlight().
173 */
174static void cdv_intel_lvds_set_backlight(struct drm_device *dev, int level)
175{
176 struct drm_psb_private *dev_priv = dev->dev_private;
177 u32 blc_pwm_ctl;
178
179 if (gma_power_begin(dev, false)) {
180 blc_pwm_ctl =
181 REG_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
182 REG_WRITE(BLC_PWM_CTL,
183 (blc_pwm_ctl |
184 (level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
185 gma_power_end(dev);
186 } else {
187 blc_pwm_ctl = dev_priv->saveBLC_PWM_CTL &
188 ~BACKLIGHT_DUTY_CYCLE_MASK;
189 dev_priv->saveBLC_PWM_CTL = (blc_pwm_ctl |
190 (level << BACKLIGHT_DUTY_CYCLE_SHIFT));
191 }
192}
193
194/**
195 * Sets the power state for the panel.
196 */
197static void cdv_intel_lvds_set_power(struct drm_device *dev,
198 struct drm_encoder *encoder, bool on)
199{
200 struct drm_psb_private *dev_priv = dev->dev_private;
201 u32 pp_status;
202
203 if (!gma_power_begin(dev, true))
204 return;
205
206 if (on) {
207 REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
208 POWER_TARGET_ON);
209 do {
210 pp_status = REG_READ(PP_STATUS);
211 } while ((pp_status & PP_ON) == 0);
212
213 cdv_intel_lvds_set_backlight(dev,
214 dev_priv->mode_dev.backlight_duty_cycle);
215 } else {
216 cdv_intel_lvds_set_backlight(dev, 0);
217
218 REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
219 ~POWER_TARGET_ON);
220 do {
221 pp_status = REG_READ(PP_STATUS);
222 } while (pp_status & PP_ON);
223 }
224 gma_power_end(dev);
225}
226
227static void cdv_intel_lvds_encoder_dpms(struct drm_encoder *encoder, int mode)
228{
229 struct drm_device *dev = encoder->dev;
230 if (mode == DRM_MODE_DPMS_ON)
231 cdv_intel_lvds_set_power(dev, encoder, true);
232 else
233 cdv_intel_lvds_set_power(dev, encoder, false);
234 /* XXX: We never power down the LVDS pairs. */
235}
236
237static void cdv_intel_lvds_save(struct drm_connector *connector)
238{
239}
240
241static void cdv_intel_lvds_restore(struct drm_connector *connector)
242{
243}
244
245int cdv_intel_lvds_mode_valid(struct drm_connector *connector,
246 struct drm_display_mode *mode)
247{
248 struct drm_device *dev = connector->dev;
249 struct drm_psb_private *dev_priv = dev->dev_private;
250 struct drm_display_mode *fixed_mode =
251 dev_priv->mode_dev.panel_fixed_mode;
252
253 /* just in case */
254 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
255 return MODE_NO_DBLESCAN;
256
257 /* just in case */
258 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
259 return MODE_NO_INTERLACE;
260
261 if (fixed_mode) {
262 if (mode->hdisplay > fixed_mode->hdisplay)
263 return MODE_PANEL;
264 if (mode->vdisplay > fixed_mode->vdisplay)
265 return MODE_PANEL;
266 }
267 return MODE_OK;
268}
269
270bool cdv_intel_lvds_mode_fixup(struct drm_encoder *encoder,
271 struct drm_display_mode *mode,
272 struct drm_display_mode *adjusted_mode)
273{
274 struct drm_device *dev = encoder->dev;
275 struct drm_psb_private *dev_priv = dev->dev_private;
276 struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
277 struct drm_encoder *tmp_encoder;
278 struct drm_display_mode *panel_fixed_mode = mode_dev->panel_fixed_mode;
279
280 /* Should never happen!! */
281 list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list,
282 head) {
283 if (tmp_encoder != encoder
284 && tmp_encoder->crtc == encoder->crtc) {
285 printk(KERN_ERR "Can't enable LVDS and another "
286 "encoder on the same pipe\n");
287 return false;
288 }
289 }
290
291 /*
292 * If we have timings from the BIOS for the panel, put them in
293 * to the adjusted mode. The CRTC will be set up for this mode,
294 * with the panel scaling set up to source from the H/VDisplay
295 * of the original mode.
296 */
297 if (panel_fixed_mode != NULL) {
298 adjusted_mode->hdisplay = panel_fixed_mode->hdisplay;
299 adjusted_mode->hsync_start = panel_fixed_mode->hsync_start;
300 adjusted_mode->hsync_end = panel_fixed_mode->hsync_end;
301 adjusted_mode->htotal = panel_fixed_mode->htotal;
302 adjusted_mode->vdisplay = panel_fixed_mode->vdisplay;
303 adjusted_mode->vsync_start = panel_fixed_mode->vsync_start;
304 adjusted_mode->vsync_end = panel_fixed_mode->vsync_end;
305 adjusted_mode->vtotal = panel_fixed_mode->vtotal;
306 adjusted_mode->clock = panel_fixed_mode->clock;
307 drm_mode_set_crtcinfo(adjusted_mode,
308 CRTC_INTERLACE_HALVE_V);
309 }
310
311 /*
312 * XXX: It would be nice to support lower refresh rates on the
313 * panels to reduce power consumption, and perhaps match the
314 * user's requested refresh rate.
315 */
316
317 return true;
318}
319
320static void cdv_intel_lvds_prepare(struct drm_encoder *encoder)
321{
322 struct drm_device *dev = encoder->dev;
323 struct drm_psb_private *dev_priv = dev->dev_private;
324 struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
325
326 if (!gma_power_begin(dev, true))
327 return;
328
329 mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
330 mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
331 BACKLIGHT_DUTY_CYCLE_MASK);
332
333 cdv_intel_lvds_set_power(dev, encoder, false);
334
335 gma_power_end(dev);
336}
337
338static void cdv_intel_lvds_commit(struct drm_encoder *encoder)
339{
340 struct drm_device *dev = encoder->dev;
341 struct drm_psb_private *dev_priv = dev->dev_private;
342 struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
343
344 if (mode_dev->backlight_duty_cycle == 0)
345 mode_dev->backlight_duty_cycle =
346 cdv_intel_lvds_get_max_backlight(dev);
347
348 cdv_intel_lvds_set_power(dev, encoder, true);
349}
350
351static void cdv_intel_lvds_mode_set(struct drm_encoder *encoder,
352 struct drm_display_mode *mode,
353 struct drm_display_mode *adjusted_mode)
354{
355 struct drm_device *dev = encoder->dev;
356 struct drm_psb_private *dev_priv = dev->dev_private;
357 u32 pfit_control;
358
359 /*
360 * The LVDS pin pair will already have been turned on in the
361 * cdv_intel_crtc_mode_set since it has a large impact on the DPLL
362 * settings.
363 */
364
365 /*
366 * Enable automatic panel scaling so that non-native modes fill the
367 * screen. Should be enabled before the pipe is enabled, according to
368 * register description and PRM.
369 */
370 if (mode->hdisplay != adjusted_mode->hdisplay ||
371 mode->vdisplay != adjusted_mode->vdisplay)
372 pfit_control = (PFIT_ENABLE | VERT_AUTO_SCALE |
373 HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR |
374 HORIZ_INTERP_BILINEAR);
375 else
376 pfit_control = 0;
377
378 if (dev_priv->lvds_dither)
379 pfit_control |= PANEL_8TO6_DITHER_ENABLE;
380
381 REG_WRITE(PFIT_CONTROL, pfit_control);
382}
383
384/**
385 * Detect the LVDS connection.
386 *
387 * This always returns connector_status_connected.
388 * The connector should only have been set up
389 * if an LVDS panel was actually connected.
390 */
391static enum drm_connector_status cdv_intel_lvds_detect(
392 struct drm_connector *connector, bool force)
393{
394 return connector_status_connected;
395}
396
397/**
398 * Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
399 */
400static int cdv_intel_lvds_get_modes(struct drm_connector *connector)
401{
402 struct drm_device *dev = connector->dev;
403 struct drm_psb_private *dev_priv = dev->dev_private;
404 struct psb_intel_encoder *psb_intel_encoder =
405 psb_intel_attached_encoder(connector);
406 struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
407 int ret;
408
409 ret = psb_intel_ddc_get_modes(connector, &psb_intel_encoder->i2c_bus->adapter);
410
411 if (ret)
412 return ret;
413
414	/* Didn't get an EDID, so
415	 * set wide sync ranges to make sure all modes are
416	 * handed to mode_valid() for checking.
417	 */
418 connector->display_info.min_vfreq = 0;
419 connector->display_info.max_vfreq = 200;
420 connector->display_info.min_hfreq = 0;
421 connector->display_info.max_hfreq = 200;
422 if (mode_dev->panel_fixed_mode != NULL) {
423 struct drm_display_mode *mode =
424 drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
425 drm_mode_probed_add(connector, mode);
426 return 1;
427 }
428
429 return 0;
430}
431
432/**
433 * cdv_intel_lvds_destroy - unregister and free LVDS structures
434 * @connector: connector to free
435 *
436 * Unregister the DDC bus for this connector then free the driver private
437 * structure.
438 */
439void cdv_intel_lvds_destroy(struct drm_connector *connector)
440{
441 struct psb_intel_encoder *psb_intel_encoder =
442 psb_intel_attached_encoder(connector);
443
444 if (psb_intel_encoder->i2c_bus)
445 psb_intel_i2c_destroy(psb_intel_encoder->i2c_bus);
446 drm_sysfs_connector_remove(connector);
447 drm_connector_cleanup(connector);
448 kfree(connector);
449}
450
451int cdv_intel_lvds_set_property(struct drm_connector *connector,
452 struct drm_property *property,
453 uint64_t value)
454{
455 struct drm_encoder *encoder = connector->encoder;
456
457 if (!strcmp(property->name, "scaling mode") && encoder) {
458 struct psb_intel_crtc *crtc =
459 to_psb_intel_crtc(encoder->crtc);
460 uint64_t curValue;
461
462 if (!crtc)
463 return -1;
464
465 switch (value) {
466 case DRM_MODE_SCALE_FULLSCREEN:
467 break;
468 case DRM_MODE_SCALE_NO_SCALE:
469 break;
470 case DRM_MODE_SCALE_ASPECT:
471 break;
472 default:
473 return -1;
474 }
475
476 if (drm_connector_property_get_value(connector,
477 property,
478 &curValue))
479 return -1;
480
481 if (curValue == value)
482 return 0;
483
484 if (drm_connector_property_set_value(connector,
485 property,
486 value))
487 return -1;
488
489 if (crtc->saved_mode.hdisplay != 0 &&
490 crtc->saved_mode.vdisplay != 0) {
491 if (!drm_crtc_helper_set_mode(encoder->crtc,
492 &crtc->saved_mode,
493 encoder->crtc->x,
494 encoder->crtc->y,
495 encoder->crtc->fb))
496 return -1;
497 }
498 } else if (!strcmp(property->name, "backlight") && encoder) {
499 if (drm_connector_property_set_value(connector,
500 property,
501 value))
502 return -1;
503 else {
504#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
505 struct drm_psb_private *dev_priv =
506 encoder->dev->dev_private;
507 struct backlight_device *bd =
508 dev_priv->backlight_device;
509 bd->props.brightness = value;
510 backlight_update_status(bd);
511#endif
512 }
513 } else if (!strcmp(property->name, "DPMS") && encoder) {
514 struct drm_encoder_helper_funcs *helpers =
515 encoder->helper_private;
516 helpers->dpms(encoder, value);
517 }
518 return 0;
519}
520
521static const struct drm_encoder_helper_funcs
522 cdv_intel_lvds_helper_funcs = {
523 .dpms = cdv_intel_lvds_encoder_dpms,
524 .mode_fixup = cdv_intel_lvds_mode_fixup,
525 .prepare = cdv_intel_lvds_prepare,
526 .mode_set = cdv_intel_lvds_mode_set,
527 .commit = cdv_intel_lvds_commit,
528};
529
530static const struct drm_connector_helper_funcs
531 cdv_intel_lvds_connector_helper_funcs = {
532 .get_modes = cdv_intel_lvds_get_modes,
533 .mode_valid = cdv_intel_lvds_mode_valid,
534 .best_encoder = psb_intel_best_encoder,
535};
536
537static const struct drm_connector_funcs cdv_intel_lvds_connector_funcs = {
538 .dpms = drm_helper_connector_dpms,
539 .save = cdv_intel_lvds_save,
540 .restore = cdv_intel_lvds_restore,
541 .detect = cdv_intel_lvds_detect,
542 .fill_modes = drm_helper_probe_single_connector_modes,
543 .set_property = cdv_intel_lvds_set_property,
544 .destroy = cdv_intel_lvds_destroy,
545};
546
547
548static void cdv_intel_lvds_enc_destroy(struct drm_encoder *encoder)
549{
550 drm_encoder_cleanup(encoder);
551}
552
553const struct drm_encoder_funcs cdv_intel_lvds_enc_funcs = {
554 .destroy = cdv_intel_lvds_enc_destroy,
555};
556
557/**
558 * cdv_intel_lvds_init - setup LVDS connectors on this device
559 * @dev: drm device
560 *
561 * Create the connector, register the LVDS DDC bus, and try to figure out what
562 * modes we can display on the LVDS panel (if present).
563 */
564void cdv_intel_lvds_init(struct drm_device *dev,
565 struct psb_intel_mode_device *mode_dev)
566{
567 struct psb_intel_encoder *psb_intel_encoder;
568 struct psb_intel_connector *psb_intel_connector;
569 struct cdv_intel_lvds_priv *lvds_priv;
570 struct drm_connector *connector;
571 struct drm_encoder *encoder;
572 struct drm_display_mode *scan;
573 struct drm_crtc *crtc;
574 struct drm_psb_private *dev_priv = dev->dev_private;
575 u32 lvds;
576 int pipe;
577
578 psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder),
579 GFP_KERNEL);
580 if (!psb_intel_encoder)
581 return;
582
583 psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector),
584 GFP_KERNEL);
585 if (!psb_intel_connector)
586 goto failed_connector;
587
588 lvds_priv = kzalloc(sizeof(struct cdv_intel_lvds_priv), GFP_KERNEL);
589 if (!lvds_priv)
590 goto failed_lvds_priv;
591
592 psb_intel_encoder->dev_priv = lvds_priv;
593
594 connector = &psb_intel_connector->base;
595 encoder = &psb_intel_encoder->base;
596
597
598 drm_connector_init(dev, connector,
599 &cdv_intel_lvds_connector_funcs,
600 DRM_MODE_CONNECTOR_LVDS);
601
602 drm_encoder_init(dev, encoder,
603 &cdv_intel_lvds_enc_funcs,
604 DRM_MODE_ENCODER_LVDS);
605
606
607 psb_intel_connector_attach_encoder(psb_intel_connector,
608 psb_intel_encoder);
609 psb_intel_encoder->type = INTEL_OUTPUT_LVDS;
610
611 drm_encoder_helper_add(encoder, &cdv_intel_lvds_helper_funcs);
612 drm_connector_helper_add(connector,
613 &cdv_intel_lvds_connector_helper_funcs);
614 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
615 connector->interlace_allowed = false;
616 connector->doublescan_allowed = false;
617
618	/* Attach connector properties */
619 drm_connector_attach_property(connector,
620 dev->mode_config.scaling_mode_property,
621 DRM_MODE_SCALE_FULLSCREEN);
622 drm_connector_attach_property(connector,
623 dev_priv->backlight_property,
624 BRIGHTNESS_MAX_LEVEL);
625
626 /**
627 * Set up I2C bus
628	 * FIXME: destroy i2c_bus on exit
629 */
630 psb_intel_encoder->i2c_bus = psb_intel_i2c_create(dev,
631 GPIOB,
632 "LVDSBLC_B");
633 if (!psb_intel_encoder->i2c_bus) {
634 dev_printk(KERN_ERR,
635 &dev->pdev->dev, "I2C bus registration failed.\n");
636 goto failed_blc_i2c;
637 }
638 psb_intel_encoder->i2c_bus->slave_addr = 0x2C;
639 dev_priv->lvds_i2c_bus = psb_intel_encoder->i2c_bus;
640
641 /*
642 * LVDS discovery:
643 * 1) check for EDID on DDC
644 * 2) check for VBT data
645 * 3) check to see if LVDS is already on
646 * if none of the above, no panel
647 * 4) make sure lid is open
648 * if closed, act like it's not there for now
649 */
650
651 /* Set up the DDC bus. */
652 psb_intel_encoder->ddc_bus = psb_intel_i2c_create(dev,
653 GPIOC,
654 "LVDSDDC_C");
655 if (!psb_intel_encoder->ddc_bus) {
656 dev_printk(KERN_ERR, &dev->pdev->dev,
657		   "DDC bus registration failed.\n");
658 goto failed_ddc;
659 }
660
661 /*
662 * Attempt to get the fixed panel mode from DDC. Assume that the
663 * preferred mode is the right one.
664 */
665 psb_intel_ddc_get_modes(connector,
666 &psb_intel_encoder->ddc_bus->adapter);
667 list_for_each_entry(scan, &connector->probed_modes, head) {
668 if (scan->type & DRM_MODE_TYPE_PREFERRED) {
669 mode_dev->panel_fixed_mode =
670 drm_mode_duplicate(dev, scan);
671 goto out; /* FIXME: check for quirks */
672 }
673 }
674
675	/* Failed to get EDID, what about VBT? Do we need this? */
676 if (dev_priv->lfp_lvds_vbt_mode) {
677 mode_dev->panel_fixed_mode =
678 drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
679 if (mode_dev->panel_fixed_mode) {
680 mode_dev->panel_fixed_mode->type |=
681 DRM_MODE_TYPE_PREFERRED;
682 goto out; /* FIXME: check for quirks */
683 }
684 }
685 /*
686 * If we didn't get EDID, try checking if the panel is already turned
687 * on. If so, assume that whatever is currently programmed is the
688 * correct mode.
689 */
690 lvds = REG_READ(LVDS);
691 pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
692 crtc = psb_intel_get_crtc_from_pipe(dev, pipe);
693
694 if (crtc && (lvds & LVDS_PORT_EN)) {
695 mode_dev->panel_fixed_mode =
696 cdv_intel_crtc_mode_get(dev, crtc);
697 if (mode_dev->panel_fixed_mode) {
698 mode_dev->panel_fixed_mode->type |=
699 DRM_MODE_TYPE_PREFERRED;
700 goto out; /* FIXME: check for quirks */
701 }
702 }
703
704 /* If we still don't have a mode after all that, give up. */
705 if (!mode_dev->panel_fixed_mode) {
706		DRM_DEBUG
707		    ("Found no modes on the LVDS, ignoring it\n");
708 goto failed_find;
709 }
710
711out:
712 drm_sysfs_connector_add(connector);
713 return;
714
715failed_find:
716 printk(KERN_ERR "Failed find\n");
717 if (psb_intel_encoder->ddc_bus)
718 psb_intel_i2c_destroy(psb_intel_encoder->ddc_bus);
719failed_ddc:
720 printk(KERN_ERR "Failed DDC\n");
721 if (psb_intel_encoder->i2c_bus)
722 psb_intel_i2c_destroy(psb_intel_encoder->i2c_bus);
723failed_blc_i2c:
724 printk(KERN_ERR "Failed BLC\n");
725 drm_encoder_cleanup(encoder);
726 drm_connector_cleanup(connector);
727 kfree(lvds_priv);
728failed_lvds_priv:
729 kfree(psb_intel_connector);
730failed_connector:
731 kfree(psb_intel_encoder);
732}
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
new file mode 100644
index 00000000000..791c0ef1a65
--- /dev/null
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -0,0 +1,831 @@
1/**************************************************************************
2 * Copyright (c) 2007-2011, Intel Corporation.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 **************************************************************************/
19
20#include <linux/module.h>
21#include <linux/kernel.h>
22#include <linux/errno.h>
23#include <linux/string.h>
24#include <linux/mm.h>
25#include <linux/tty.h>
26#include <linux/slab.h>
27#include <linux/delay.h>
28#include <linux/fb.h>
29#include <linux/init.h>
30#include <linux/console.h>
31
32#include <drm/drmP.h>
33#include <drm/drm.h>
34#include <drm/drm_crtc.h>
35#include <drm/drm_fb_helper.h>
36
37#include "psb_drv.h"
38#include "psb_intel_reg.h"
39#include "psb_intel_drv.h"
40#include "framebuffer.h"
41#include "gtt.h"
42
43static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb);
44static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
45 struct drm_file *file_priv,
46 unsigned int *handle);
47
48static const struct drm_framebuffer_funcs psb_fb_funcs = {
49 .destroy = psb_user_framebuffer_destroy,
50 .create_handle = psb_user_framebuffer_create_handle,
51};
52
53#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)
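/*
 * Editor's note: CMAP_TOHW rescales a 16-bit fbdev colour component to the
 * hardware field width with rounding rather than truncation. A quick worked
 * example for a 5-bit field: CMAP_TOHW(0xFFFF, 5) =
 * ((0xFFFF << 5) + 0x7FFF - 0xFFFF) >> 16 = 31, the maximum 5-bit value,
 * while CMAP_TOHW(0, 5) = 0x7FFF >> 16 = 0.
 */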
54
55static int psbfb_setcolreg(unsigned regno, unsigned red, unsigned green,
56 unsigned blue, unsigned transp,
57 struct fb_info *info)
58{
59 struct psb_fbdev *fbdev = info->par;
60 struct drm_framebuffer *fb = fbdev->psb_fb_helper.fb;
61 uint32_t v;
62
63 if (!fb)
64 return -ENOMEM;
65
66 if (regno > 255)
67 return 1;
68
69 red = CMAP_TOHW(red, info->var.red.length);
70 blue = CMAP_TOHW(blue, info->var.blue.length);
71 green = CMAP_TOHW(green, info->var.green.length);
72 transp = CMAP_TOHW(transp, info->var.transp.length);
73
74 v = (red << info->var.red.offset) |
75 (green << info->var.green.offset) |
76 (blue << info->var.blue.offset) |
77 (transp << info->var.transp.offset);
78
79 if (regno < 16) {
80 switch (fb->bits_per_pixel) {
81 case 16:
82 ((uint32_t *) info->pseudo_palette)[regno] = v;
83 break;
84 case 24:
85 case 32:
86 ((uint32_t *) info->pseudo_palette)[regno] = v;
87 break;
88 }
89 }
90
91 return 0;
92}
93
94static int psbfb_pan(struct fb_var_screeninfo *var, struct fb_info *info)
95{
96 struct psb_fbdev *fbdev = info->par;
97 struct psb_framebuffer *psbfb = &fbdev->pfb;
98 struct drm_device *dev = psbfb->base.dev;
99
100 /*
101 * We have to poke our nose in here. The core fb code assumes
102 * panning is part of the hardware that can be invoked before
103 * the actual fb is mapped. In our case that isn't quite true.
104 */
105 if (psbfb->gtt->npage) {
106 /* GTT roll shifts in 4K pages, we need to shift the right
107 number of pages */
108 int pages = info->fix.line_length >> 12;
109 psb_gtt_roll(dev, psbfb->gtt, var->yoffset * pages);
110 }
111 return 0;
112}
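/*
 * Editor's note: a worked example of the GTT-roll panning above, assuming a
 * pitch (info->fix.line_length) of 8192 bytes: line_length >> 12 gives two
 * 4K GTT pages per scanline, so panning to var->yoffset = 100 rolls the
 * mapping by 200 pages. Only the GTT entries are rewritten by
 * psb_gtt_roll(); no pixel data is copied.
 */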
113
114void psbfb_suspend(struct drm_device *dev)
115{
116	struct drm_framebuffer *fb;
117
118	console_lock();
119	mutex_lock(&dev->mode_config.mutex);
120	list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
121		struct psb_framebuffer *psbfb = to_psb_fb(fb);
122		struct fb_info *info = psbfb->fbdev;
123 fb_set_suspend(info, 1);
124 drm_fb_helper_blank(FB_BLANK_POWERDOWN, info);
125 }
126 mutex_unlock(&dev->mode_config.mutex);
127 console_unlock();
128}
129
130void psbfb_resume(struct drm_device *dev)
131{
132	struct drm_framebuffer *fb;
133
134	console_lock();
135	mutex_lock(&dev->mode_config.mutex);
136	list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
137		struct psb_framebuffer *psbfb = to_psb_fb(fb);
138		struct fb_info *info = psbfb->fbdev;
139 fb_set_suspend(info, 0);
140 drm_fb_helper_blank(FB_BLANK_UNBLANK, info);
141 }
142 mutex_unlock(&dev->mode_config.mutex);
143 console_unlock();
144 drm_helper_disable_unused_functions(dev);
145}
146
147static int psbfb_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
148{
149 struct psb_framebuffer *psbfb = vma->vm_private_data;
150 struct drm_device *dev = psbfb->base.dev;
151 struct drm_psb_private *dev_priv = dev->dev_private;
152 int page_num;
153 int i;
154 unsigned long address;
155 int ret;
156 unsigned long pfn;
157 /* FIXME: assumes fb at stolen base which may not be true */
158 unsigned long phys_addr = (unsigned long)dev_priv->stolen_base;
159
160 page_num = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
161 address = (unsigned long)vmf->virtual_address;
162
163 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
164
165 for (i = 0; i < page_num; i++) {
166 pfn = (phys_addr >> PAGE_SHIFT);
167
168 ret = vm_insert_mixed(vma, address, pfn);
169 if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
170 break;
171 else if (unlikely(ret != 0)) {
172 ret = (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
173 return ret;
174 }
175 address += PAGE_SIZE;
176 phys_addr += PAGE_SIZE;
177 }
178 return VM_FAULT_NOPAGE;
179}
180
181static void psbfb_vm_open(struct vm_area_struct *vma)
182{
183}
184
185static void psbfb_vm_close(struct vm_area_struct *vma)
186{
187}
188
189static struct vm_operations_struct psbfb_vm_ops = {
190 .fault = psbfb_vm_fault,
191 .open = psbfb_vm_open,
192 .close = psbfb_vm_close
193};
194
195static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
196{
197 struct psb_fbdev *fbdev = info->par;
198 struct psb_framebuffer *psbfb = &fbdev->pfb;
199
200 if (vma->vm_pgoff != 0)
201 return -EINVAL;
202 if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
203 return -EINVAL;
204
205 if (!psbfb->addr_space)
206 psbfb->addr_space = vma->vm_file->f_mapping;
207 /*
208 * If this is a GEM object then info->screen_base is the virtual
209 * kernel remapping of the object. FIXME: Review if this is
210 * suitable for our mmap work
211 */
212 vma->vm_ops = &psbfb_vm_ops;
213 vma->vm_private_data = (void *)psbfb;
214 vma->vm_flags |= VM_RESERVED | VM_IO |
215 VM_MIXEDMAP | VM_DONTEXPAND;
216 return 0;
217}
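/*
 * Editor's note (hypothetical user-space sketch, not part of the patch):
 * this handler is reached by mapping the fbdev node with a zero offset,
 * which is all the vm_pgoff checks above permit:
 *
 *	int fd = open("/dev/fb0", O_RDWR);
 *	struct fb_fix_screeninfo fix;
 *	ioctl(fd, FBIOGET_FSCREENINFO, &fix);
 *	void *p = mmap(NULL, fix.smem_len, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *
 * The pages are then populated on access by psbfb_vm_fault().
 */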
218
219static int psbfb_ioctl(struct fb_info *info, unsigned int cmd,
220 unsigned long arg)
221{
222 return -ENOTTY;
223}
224
225static struct fb_ops psbfb_ops = {
226 .owner = THIS_MODULE,
227 .fb_check_var = drm_fb_helper_check_var,
228 .fb_set_par = drm_fb_helper_set_par,
229 .fb_blank = drm_fb_helper_blank,
230 .fb_setcolreg = psbfb_setcolreg,
231 .fb_fillrect = cfb_fillrect,
232 .fb_copyarea = psbfb_copyarea,
233 .fb_imageblit = cfb_imageblit,
234 .fb_mmap = psbfb_mmap,
235 .fb_sync = psbfb_sync,
236 .fb_ioctl = psbfb_ioctl,
237};
238
239static struct fb_ops psbfb_roll_ops = {
240 .owner = THIS_MODULE,
241 .fb_check_var = drm_fb_helper_check_var,
242 .fb_set_par = drm_fb_helper_set_par,
243 .fb_blank = drm_fb_helper_blank,
244 .fb_setcolreg = psbfb_setcolreg,
245 .fb_fillrect = cfb_fillrect,
246 .fb_copyarea = cfb_copyarea,
247 .fb_imageblit = cfb_imageblit,
248 .fb_pan_display = psbfb_pan,
249 .fb_mmap = psbfb_mmap,
250 .fb_sync = psbfb_sync,
251 .fb_ioctl = psbfb_ioctl,
252};
253
254static struct fb_ops psbfb_unaccel_ops = {
255 .owner = THIS_MODULE,
256 .fb_check_var = drm_fb_helper_check_var,
257 .fb_set_par = drm_fb_helper_set_par,
258 .fb_blank = drm_fb_helper_blank,
259 .fb_setcolreg = psbfb_setcolreg,
260 .fb_fillrect = cfb_fillrect,
261 .fb_copyarea = cfb_copyarea,
262 .fb_imageblit = cfb_imageblit,
263 .fb_mmap = psbfb_mmap,
264 .fb_ioctl = psbfb_ioctl,
265};
266
267/**
268 * psb_framebuffer_init - initialize a framebuffer
269 * @dev: our DRM device
270 * @fb: framebuffer to set up
271 * @mode_cmd: mode description
272 * @gt: backing object
273 *
274 * Configure and fill in the boilerplate for our frame buffer. Return
275 * 0 on success or an error code if we fail.
276 */
277static int psb_framebuffer_init(struct drm_device *dev,
278 struct psb_framebuffer *fb,
279 struct drm_mode_fb_cmd2 *mode_cmd,
280 struct gtt_range *gt)
281{
282 u32 bpp, depth;
283 int ret;
284
285 drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
286
287 if (mode_cmd->pitches[0] & 63)
288 return -EINVAL;
289 switch (bpp) {
290 case 8:
291 case 16:
292 case 24:
293 case 32:
294 break;
295 default:
296 return -EINVAL;
297 }
298 ret = drm_framebuffer_init(dev, &fb->base, &psb_fb_funcs);
299 if (ret) {
300 dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
301 return ret;
302 }
303 drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
304 fb->gtt = gt;
305 return 0;
306}
307
308/**
309 * psb_framebuffer_create - create a framebuffer backed by gt
310 * @dev: our DRM device
311 * @mode_cmd: the description of the requested mode
312 * @gt: the backing object
313 *
314 * Create a framebuffer object backed by the gt, and fill in the
315 * boilerplate required
316 *
317 * TODO: review object references
318 */
319
320static struct drm_framebuffer *psb_framebuffer_create
321 (struct drm_device *dev,
322 struct drm_mode_fb_cmd2 *mode_cmd,
323 struct gtt_range *gt)
324{
325 struct psb_framebuffer *fb;
326 int ret;
327
328 fb = kzalloc(sizeof(*fb), GFP_KERNEL);
329 if (!fb)
330 return ERR_PTR(-ENOMEM);
331
332 ret = psb_framebuffer_init(dev, fb, mode_cmd, gt);
333 if (ret) {
334 kfree(fb);
335 return ERR_PTR(ret);
336 }
337 return &fb->base;
338}
339
340/**
341 * psbfb_alloc - allocate frame buffer memory
342 * @dev: the DRM device
343 * @aligned_size: space needed
344 *
345 * Allocate the frame buffer. In the usual case we get a GTT range that
346 * is stolen memory backed and life is simple. If there isn't sufficient
347 * stolen memory we fail, as we don't have the virtual mapping space to
348 * really vmap the object and the kernel console code can't handle
349 * non-linear framebuffers.
350 *
351 * Re-address this as and if the framebuffer layer grows this ability.
352 */
353static struct gtt_range *psbfb_alloc(struct drm_device *dev, int aligned_size)
354{
355 struct gtt_range *backing;
356 /* Begin by trying to use stolen memory backing */
357 backing = psb_gtt_alloc_range(dev, aligned_size, "fb", 1);
358 if (backing) {
359 if (drm_gem_private_object_init(dev,
360 &backing->gem, aligned_size) == 0)
361 return backing;
362 psb_gtt_free_range(dev, backing);
363 }
364 return NULL;
365}
366
367/**
368 * psbfb_create - create a framebuffer
369 * @fbdev: the framebuffer device
370 * @sizes: specification of the layout
371 *
372 * Create a framebuffer to the specifications provided
373 */
374static int psbfb_create(struct psb_fbdev *fbdev,
375 struct drm_fb_helper_surface_size *sizes)
376{
377 struct drm_device *dev = fbdev->psb_fb_helper.dev;
378 struct drm_psb_private *dev_priv = dev->dev_private;
379 struct fb_info *info;
380 struct drm_framebuffer *fb;
381 struct psb_framebuffer *psbfb = &fbdev->pfb;
382 struct drm_mode_fb_cmd2 mode_cmd;
383 struct device *device = &dev->pdev->dev;
384 int size;
385 int ret;
386 struct gtt_range *backing;
387 u32 bpp, depth;
388 int gtt_roll = 0;
389 int pitch_lines = 0;
390
391 mode_cmd.width = sizes->surface_width;
392 mode_cmd.height = sizes->surface_height;
393 bpp = sizes->surface_bpp;
394
395 /* No 24bit packed */
396 if (bpp == 24)
397 bpp = 32;
398
399 do {
400 /*
401 * Acceleration via the GTT requires pitch to be
402 * power of two aligned. Preferably page but less
403 * is ok with some fonts
404 */
405 mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 7) / 8), 4096 >> pitch_lines);
406 depth = sizes->surface_depth;
407
408 size = mode_cmd.pitches[0] * mode_cmd.height;
409 size = ALIGN(size, PAGE_SIZE);
410
411 /* Allocate the fb in the GTT with stolen page backing */
412 backing = psbfb_alloc(dev, size);
413
414 if (pitch_lines)
415 pitch_lines *= 2;
416 else
417 pitch_lines = 1;
418 gtt_roll++;
419 } while (backing == NULL && pitch_lines <= 16);
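	/*
	 * Editor's note: a worked example of the search above. For a
	 * 1024x768, 32bpp surface the first pass aligns the pitch to
	 * 4096 bytes: 1024 * 4 is already 4096, so the buffer needs
	 * 4096 * 768 = 3 MiB of stolen memory. If that allocation fails
	 * the alignment requirement is relaxed (2048, 1024, 256, then 16
	 * bytes) until an allocation succeeds or the loop gives up and
	 * the 64-byte aligned fallback below is used instead.
	 */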
420
421 /* The final pitch we accepted if we succeeded */
422 pitch_lines /= 2;
423
424 if (backing == NULL) {
425 /*
426 * We couldn't get the space we wanted, fall back to the
427 * display engine requirement instead. The HW requires
428 * the pitch to be 64 byte aligned
429 */
430
431 gtt_roll = 0; /* Don't use GTT accelerated scrolling */
432 pitch_lines = 64;
433
434 mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 7) / 8), 64);
435
436 size = mode_cmd.pitches[0] * mode_cmd.height;
437 size = ALIGN(size, PAGE_SIZE);
438
439 /* Allocate the framebuffer in the GTT with stolen page backing */
440 backing = psbfb_alloc(dev, size);
441 if (backing == NULL)
442 return -ENOMEM;
443 }
444
445 mutex_lock(&dev->struct_mutex);
446
447 info = framebuffer_alloc(0, device);
448 if (!info) {
449 ret = -ENOMEM;
450 goto out_err1;
451 }
452 info->par = fbdev;
453
454 mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
455
456 ret = psb_framebuffer_init(dev, psbfb, &mode_cmd, backing);
457 if (ret)
458 goto out_unref;
459
460 fb = &psbfb->base;
461 psbfb->fbdev = info;
462
463 fbdev->psb_fb_helper.fb = fb;
464 fbdev->psb_fb_helper.fbdev = info;
465
466 strcpy(info->fix.id, "psbfb");
467
468 info->flags = FBINFO_DEFAULT;
469 if (dev_priv->ops->accel_2d && pitch_lines > 8) /* 2D engine */
470 info->fbops = &psbfb_ops;
471 else if (gtt_roll) { /* GTT rolling seems best */
472 info->fbops = &psbfb_roll_ops;
473 info->flags |= FBINFO_HWACCEL_YPAN;
474 } else /* Software */
475 info->fbops = &psbfb_unaccel_ops;
476
477 ret = fb_alloc_cmap(&info->cmap, 256, 0);
478 if (ret) {
479 ret = -ENOMEM;
480 goto out_unref;
481 }
482
483 info->fix.smem_start = dev->mode_config.fb_base;
484 info->fix.smem_len = size;
485 info->fix.ywrapstep = gtt_roll;
486 info->fix.ypanstep = 0;
487
488	/* Access stolen memory directly */
489 info->screen_base = (char *)dev_priv->vram_addr +
490 backing->offset;
491 info->screen_size = size;
492
493 if (dev_priv->gtt.stolen_size) {
494 info->apertures = alloc_apertures(1);
495 if (!info->apertures) {
496 ret = -ENOMEM;
497 goto out_unref;
498 }
499 info->apertures->ranges[0].base = dev->mode_config.fb_base;
500 info->apertures->ranges[0].size = dev_priv->gtt.stolen_size;
501 }
502
503 drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
504 drm_fb_helper_fill_var(info, &fbdev->psb_fb_helper,
505 sizes->fb_width, sizes->fb_height);
506
507 info->fix.mmio_start = pci_resource_start(dev->pdev, 0);
508 info->fix.mmio_len = pci_resource_len(dev->pdev, 0);
509
510 info->pixmap.size = 64 * 1024;
511 info->pixmap.buf_align = 8;
512 info->pixmap.access_align = 32;
513 info->pixmap.flags = FB_PIXMAP_SYSTEM;
514 info->pixmap.scan_align = 1;
515
516 dev_info(dev->dev, "allocated %dx%d fb\n",
517 psbfb->base.width, psbfb->base.height);
518
519 mutex_unlock(&dev->struct_mutex);
520 return 0;
521out_unref:
522	if (backing->stolen)
523		psb_gtt_free_range(dev, backing);
524	else
525		drm_gem_object_unreference(&backing->gem);
526	mutex_unlock(&dev->struct_mutex);
527	return ret;
528out_err1:
529	mutex_unlock(&dev->struct_mutex);
530	psb_gtt_free_range(dev, backing);
531	return ret;
530}
531
532/**
533 * psb_user_framebuffer_create - create framebuffer
534 * @dev: our DRM device
535 * @filp: client file
536 * @cmd: mode request
537 *
538 * Create a new framebuffer backed by a userspace GEM object
539 */
540static struct drm_framebuffer *psb_user_framebuffer_create
541 (struct drm_device *dev, struct drm_file *filp,
542 struct drm_mode_fb_cmd2 *cmd)
543{
544 struct gtt_range *r;
545 struct drm_gem_object *obj;
546
547 /*
548 * Find the GEM object and thus the gtt range object that is
549 * to back this space
550 */
551 obj = drm_gem_object_lookup(dev, filp, cmd->handles[0]);
552 if (obj == NULL)
553 return ERR_PTR(-ENOENT);
554
555 /* Let the core code do all the work */
556 r = container_of(obj, struct gtt_range, gem);
557 return psb_framebuffer_create(dev, cmd, r);
558}
559
560static void psbfb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
561 u16 blue, int regno)
562{
563}
564
565static void psbfb_gamma_get(struct drm_crtc *crtc, u16 *red,
566 u16 *green, u16 *blue, int regno)
567{
568}
569
570static int psbfb_probe(struct drm_fb_helper *helper,
571 struct drm_fb_helper_surface_size *sizes)
572{
573 struct psb_fbdev *psb_fbdev = (struct psb_fbdev *)helper;
574 int new_fb = 0;
575 int ret;
576
577 if (!helper->fb) {
578 ret = psbfb_create(psb_fbdev, sizes);
579 if (ret)
580 return ret;
581 new_fb = 1;
582 }
583 return new_fb;
584}
585
586struct drm_fb_helper_funcs psb_fb_helper_funcs = {
587 .gamma_set = psbfb_gamma_set,
588 .gamma_get = psbfb_gamma_get,
589 .fb_probe = psbfb_probe,
590};
591
592int psb_fbdev_destroy(struct drm_device *dev, struct psb_fbdev *fbdev)
593{
594 struct fb_info *info;
595 struct psb_framebuffer *psbfb = &fbdev->pfb;
596
597 if (fbdev->psb_fb_helper.fbdev) {
598 info = fbdev->psb_fb_helper.fbdev;
599 unregister_framebuffer(info);
600 if (info->cmap.len)
601 fb_dealloc_cmap(&info->cmap);
602 framebuffer_release(info);
603 }
604 drm_fb_helper_fini(&fbdev->psb_fb_helper);
605 drm_framebuffer_cleanup(&psbfb->base);
606
607 if (psbfb->gtt)
608 drm_gem_object_unreference(&psbfb->gtt->gem);
609 return 0;
610}
611
612int psb_fbdev_init(struct drm_device *dev)
613{
614 struct psb_fbdev *fbdev;
615 struct drm_psb_private *dev_priv = dev->dev_private;
616
617 fbdev = kzalloc(sizeof(struct psb_fbdev), GFP_KERNEL);
618 if (!fbdev) {
619 dev_err(dev->dev, "no memory\n");
620 return -ENOMEM;
621 }
622
623 dev_priv->fbdev = fbdev;
624 fbdev->psb_fb_helper.funcs = &psb_fb_helper_funcs;
625
626 drm_fb_helper_init(dev, &fbdev->psb_fb_helper, dev_priv->ops->crtcs,
627 INTELFB_CONN_LIMIT);
628
629 drm_fb_helper_single_add_all_connectors(&fbdev->psb_fb_helper);
630 drm_fb_helper_initial_config(&fbdev->psb_fb_helper, 32);
631 return 0;
632}
633
634void psb_fbdev_fini(struct drm_device *dev)
635{
636 struct drm_psb_private *dev_priv = dev->dev_private;
637
638 if (!dev_priv->fbdev)
639 return;
640
641 psb_fbdev_destroy(dev, dev_priv->fbdev);
642 kfree(dev_priv->fbdev);
643 dev_priv->fbdev = NULL;
644}
645
646static void psbfb_output_poll_changed(struct drm_device *dev)
647{
648 struct drm_psb_private *dev_priv = dev->dev_private;
649 struct psb_fbdev *fbdev = (struct psb_fbdev *)dev_priv->fbdev;
650 drm_fb_helper_hotplug_event(&fbdev->psb_fb_helper);
651}
652
653/**
654 * psb_user_framebuffer_create_handle - add a handle to a framebuffer
655 * @fb: framebuffer
656 * @file_priv: our DRM file
657 * @handle: returned handle
658 *
659 * Our framebuffer object is a GTT range which also contains a GEM
660 * object. We need to turn it into a handle for userspace. GEM will do
661 * the work for us
662 */
663static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
664 struct drm_file *file_priv,
665 unsigned int *handle)
666{
667 struct psb_framebuffer *psbfb = to_psb_fb(fb);
668 struct gtt_range *r = psbfb->gtt;
669 return drm_gem_handle_create(file_priv, &r->gem, handle);
670}
671
672/**
673 * psb_user_framebuffer_destroy - destruct user created fb
674 * @fb: framebuffer
675 *
676 * User framebuffers are backed by GEM objects so all we have to do is
677 * clean up a bit and drop the reference, GEM will handle the fallout
678 */
679static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb)
680{
681 struct psb_framebuffer *psbfb = to_psb_fb(fb);
682 struct gtt_range *r = psbfb->gtt;
683 struct drm_device *dev = fb->dev;
684 struct drm_psb_private *dev_priv = dev->dev_private;
685 struct psb_fbdev *fbdev = dev_priv->fbdev;
686 struct drm_crtc *crtc;
687 int reset = 0;
688
689 /* Should never get stolen memory for a user fb */
690 WARN_ON(r->stolen);
691
692 /* Check if we are erroneously live */
693 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
694 if (crtc->fb == fb)
695 reset = 1;
696
697	/*
698	 * Now force a sane response before we permit the DRM CRTC
699	 * layer to do stupid things like blank the display. Instead
700	 * we reset this framebuffer as if the user had forced a reset.
701	 * We must do this before the cleanup so that the DRM layer
702	 * doesn't get a chance to stick its oar in where it isn't
703	 * wanted.
704	 */
705	if (reset)
706		drm_fb_helper_restore_fbdev_mode(&fbdev->psb_fb_helper);
707
708 /* Let DRM do its clean up */
709 drm_framebuffer_cleanup(fb);
710 /* We are no longer using the resource in GEM */
711 drm_gem_object_unreference_unlocked(&r->gem);
712 kfree(fb);
713}
714
715static const struct drm_mode_config_funcs psb_mode_funcs = {
716 .fb_create = psb_user_framebuffer_create,
717 .output_poll_changed = psbfb_output_poll_changed,
718};
719
720static int psb_create_backlight_property(struct drm_device *dev)
721{
722 struct drm_psb_private *dev_priv = dev->dev_private;
723 struct drm_property *backlight;
724
725 if (dev_priv->backlight_property)
726 return 0;
727
728	backlight = drm_property_create(dev, DRM_MODE_PROP_RANGE,
729					"backlight", 2);
730	if (!backlight)
731		return -ENOMEM;
732	backlight->values[0] = 0;
733	backlight->values[1] = 100;
732
733 dev_priv->backlight_property = backlight;
734
735 return 0;
736}
737
738static void psb_setup_outputs(struct drm_device *dev)
739{
740 struct drm_psb_private *dev_priv = dev->dev_private;
741 struct drm_connector *connector;
742
743 drm_mode_create_scaling_mode_property(dev);
744 psb_create_backlight_property(dev);
745
746 dev_priv->ops->output_init(dev);
747
748 list_for_each_entry(connector, &dev->mode_config.connector_list,
749 head) {
750 struct psb_intel_encoder *psb_intel_encoder =
751 psb_intel_attached_encoder(connector);
752 struct drm_encoder *encoder = &psb_intel_encoder->base;
753 int crtc_mask = 0, clone_mask = 0;
754
755 /* valid crtcs */
756 switch (psb_intel_encoder->type) {
757 case INTEL_OUTPUT_ANALOG:
758 crtc_mask = (1 << 0);
759 clone_mask = (1 << INTEL_OUTPUT_ANALOG);
760 break;
761 case INTEL_OUTPUT_SDVO:
762 crtc_mask = ((1 << 0) | (1 << 1));
763 clone_mask = (1 << INTEL_OUTPUT_SDVO);
764 break;
765 case INTEL_OUTPUT_LVDS:
766 if (IS_MRST(dev))
767 crtc_mask = (1 << 0);
768 else
769 crtc_mask = (1 << 1);
770 clone_mask = (1 << INTEL_OUTPUT_LVDS);
771 break;
772 case INTEL_OUTPUT_MIPI:
773 crtc_mask = (1 << 0);
774 clone_mask = (1 << INTEL_OUTPUT_MIPI);
775 break;
776 case INTEL_OUTPUT_MIPI2:
777 crtc_mask = (1 << 2);
778 clone_mask = (1 << INTEL_OUTPUT_MIPI2);
779 break;
780 case INTEL_OUTPUT_HDMI:
781 if (IS_MFLD(dev))
782 crtc_mask = (1 << 1);
783 else
784 crtc_mask = (1 << 0);
785 clone_mask = (1 << INTEL_OUTPUT_HDMI);
786 break;
787 }
788 encoder->possible_crtcs = crtc_mask;
789 encoder->possible_clones =
790 psb_intel_connector_clones(dev, clone_mask);
791 }
792}
793
794void psb_modeset_init(struct drm_device *dev)
795{
796 struct drm_psb_private *dev_priv = dev->dev_private;
797 struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
798 int i;
799
800 drm_mode_config_init(dev);
801
802 dev->mode_config.min_width = 0;
803 dev->mode_config.min_height = 0;
804
805 dev->mode_config.funcs = (void *) &psb_mode_funcs;
806
807 /* set memory base */
808	/* Oaktrail and Poulsbo should use BAR 2 */
809 pci_read_config_dword(dev->pdev, PSB_BSM, (u32 *)
810 &(dev->mode_config.fb_base));
811
812 /* num pipes is 2 for PSB but 1 for Mrst */
813 for (i = 0; i < dev_priv->num_pipe; i++)
814 psb_intel_crtc_init(dev, i, mode_dev);
815
816 dev->mode_config.max_width = 2048;
817 dev->mode_config.max_height = 2048;
818
819 psb_setup_outputs(dev);
820}
821
822void psb_modeset_cleanup(struct drm_device *dev)
823{
824 mutex_lock(&dev->struct_mutex);
825
826 drm_kms_helper_poll_fini(dev);
827 psb_fbdev_fini(dev);
828 drm_mode_config_cleanup(dev);
829
830 mutex_unlock(&dev->struct_mutex);
831}
diff --git a/drivers/gpu/drm/gma500/framebuffer.h b/drivers/gpu/drm/gma500/framebuffer.h
new file mode 100644
index 00000000000..989558a9e6e
--- /dev/null
+++ b/drivers/gpu/drm/gma500/framebuffer.h
@@ -0,0 +1,47 @@
1/*
2 * Copyright (c) 2008-2011, Intel Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Authors:
18 * Eric Anholt <eric@anholt.net>
19 *
20 */
21
22#ifndef _FRAMEBUFFER_H_
23#define _FRAMEBUFFER_H_
24
25#include <drm/drmP.h>
26#include <drm/drm_fb_helper.h>
27
28#include "psb_drv.h"
29
30struct psb_framebuffer {
31 struct drm_framebuffer base;
32 struct address_space *addr_space;
33 struct fb_info *fbdev;
34 struct gtt_range *gtt;
35};
36
37struct psb_fbdev {
38 struct drm_fb_helper psb_fb_helper;
39 struct psb_framebuffer pfb;
40};
41
42#define to_psb_fb(x) container_of(x, struct psb_framebuffer, base)
43
44extern int psb_intel_connector_clones(struct drm_device *dev, int type_mask);
45
46#endif
47
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
new file mode 100644
index 00000000000..9fbb86868e2
--- /dev/null
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -0,0 +1,292 @@
1/*
2 * psb GEM interface
3 *
4 * Copyright (c) 2011, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Authors: Alan Cox
20 *
21 * TODO:
22 * - we need to work out if the MMU is relevant (eg for
23 * accelerated operations on a GEM object)
24 */
25
26#include <drm/drmP.h>
27#include <drm/drm.h>
28#include "gma_drm.h"
29#include "psb_drv.h"
30
31int psb_gem_init_object(struct drm_gem_object *obj)
32{
33 return -EINVAL;
34}
35
36void psb_gem_free_object(struct drm_gem_object *obj)
37{
38 struct gtt_range *gtt = container_of(obj, struct gtt_range, gem);
39 drm_gem_object_release_wrap(obj);
40 /* This must occur last as it frees up the memory of the GEM object */
41 psb_gtt_free_range(obj->dev, gtt);
42}
43
44int psb_gem_get_aperture(struct drm_device *dev, void *data,
45 struct drm_file *file)
46{
47 return -EINVAL;
48}
49
50/**
51 * psb_gem_dumb_map_gtt - buffer mapping for dumb interface
52 * @file: our drm client file
53 * @dev: drm device
54 * @handle: GEM handle to the object (from dumb_create)
55 *
56 * Do the necessary setup to allow the mapping of the frame buffer
57 * into user memory. We don't have to do much here at the moment.
58 */
59int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
60 uint32_t handle, uint64_t *offset)
61{
62 int ret = 0;
63 struct drm_gem_object *obj;
64
65 if (!(dev->driver->driver_features & DRIVER_GEM))
66 return -ENODEV;
67
68 mutex_lock(&dev->struct_mutex);
69
70 /* GEM does all our handle to object mapping */
71 obj = drm_gem_object_lookup(dev, file, handle);
72 if (obj == NULL) {
73 ret = -ENOENT;
74 goto unlock;
75 }
76 /* What validation is needed here ? */
77
78 /* Make it mmapable */
79 if (!obj->map_list.map) {
80 ret = gem_create_mmap_offset(obj);
81 if (ret)
82 goto out;
83 }
84 /* GEM should really work out the hash offsets for us */
85 *offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
86out:
87 drm_gem_object_unreference(obj);
88unlock:
89 mutex_unlock(&dev->struct_mutex);
90 return ret;
91}
92
93/**
94 * psb_gem_create - create a mappable object
95 * @file: the DRM file of the client
96 * @dev: our device
97 * @size: the size requested
98 * @handlep: returned handle (opaque number)
99 *
100 * Create a GEM object, fill in the boilerplate and attach a handle to
101 * it so that userspace can speak about it. This does the core work
102 * for the various methods that do/will create GEM objects for things
103 */
104static int psb_gem_create(struct drm_file *file,
105 struct drm_device *dev, uint64_t size, uint32_t *handlep)
106{
107 struct gtt_range *r;
108 int ret;
109 u32 handle;
110
111 size = roundup(size, PAGE_SIZE);
112
113 /* Allocate our object - for now a direct gtt range which is not
114 stolen memory backed */
115 r = psb_gtt_alloc_range(dev, size, "gem", 0);
116 if (r == NULL) {
117		dev_err(dev->dev, "no memory for %llu byte GEM object\n", size);
118 return -ENOSPC;
119 }
120 /* Initialize the extra goodies GEM needs to do all the hard work */
121 if (drm_gem_object_init(dev, &r->gem, size) != 0) {
122 psb_gtt_free_range(dev, r);
123 /* GEM doesn't give an error code so use -ENOMEM */
124		dev_err(dev->dev, "GEM init failed for %llu\n", size);
125 return -ENOMEM;
126 }
127 /* Give the object a handle so we can carry it more easily */
128 ret = drm_gem_handle_create(file, &r->gem, &handle);
129 if (ret) {
130		dev_err(dev->dev, "GEM handle failed for %p, %llu\n",
131			&r->gem, size);
132 drm_gem_object_release(&r->gem);
133 psb_gtt_free_range(dev, r);
134 return ret;
135 }
136 /* We have the initial and handle reference but need only one now */
137 drm_gem_object_unreference(&r->gem);
138 *handlep = handle;
139 return 0;
140}
141
142/**
143 * psb_gem_dumb_create - create a dumb buffer
144 * @drm_file: our client file
145 * @dev: our device
146 * @args: the requested arguments copied from userspace
147 *
148 * Allocate a buffer suitable for use for a frame buffer of the
149 * form described by user space. Give userspace a handle by which
150 * to reference it.
151 */
152int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
153 struct drm_mode_create_dumb *args)
154{
155 args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
156 args->size = args->pitch * args->height;
157 return psb_gem_create(file, dev, args->size, &args->handle);
158}
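/*
 * Editor's note: a worked example of the sizing above. A 640x480, 32bpp
 * request gives pitch = ALIGN(640 * 4, 64) = 2560 bytes and
 * size = 2560 * 480 = 1228800 bytes, which psb_gem_create() then rounds up
 * to whole pages (here exactly 300 pages).
 */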
159
160/**
161 * psb_gem_dumb_destroy - destroy a dumb buffer
162 * @file: client file
163 * @dev: our DRM device
164 * @handle: the object handle
165 *
166 * Destroy a handle that was created via psb_gem_dumb_create, at least
167 * we hope it was created that way. i915 seems to assume the caller
168 * does the checking but that might be worth review ! FIXME
169 */
170int psb_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
171 uint32_t handle)
172{
173 /* No special work needed, drop the reference and see what falls out */
174 return drm_gem_handle_delete(file, handle);
175}
176
177/**
178 * psb_gem_fault - pagefault handler for GEM objects
179 * @vma: the VMA of the GEM object
180 * @vmf: fault detail
181 *
182 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
183 * does most of the work for us including the actual map/unmap calls
184 * but we need to do the actual page work.
185 *
186 * This code eventually needs to handle faulting objects in and out
187 * of the GTT and repacking it when we run out of space. We can put
188 * that off for now and for our simple uses
189 *
190 * The VMA was set up by GEM. In doing so it also ensured that the
191 * vma->vm_private_data points to the GEM object that is backing this
192 * mapping.
193 */
194int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
195{
196 struct drm_gem_object *obj;
197 struct gtt_range *r;
198 int ret;
199 unsigned long pfn;
200 pgoff_t page_offset;
201 struct drm_device *dev;
202 struct drm_psb_private *dev_priv;
203
204 obj = vma->vm_private_data; /* GEM object */
205 dev = obj->dev;
206 dev_priv = dev->dev_private;
207
208 r = container_of(obj, struct gtt_range, gem); /* Get the gtt range */
209
210 /* Make sure we don't parallel update on a fault, nor move or remove
211 something from beneath our feet */
212 mutex_lock(&dev->struct_mutex);
213
214 /* For now the mmap pins the object and it stays pinned. As things
215 stand that will do us no harm */
216 if (r->mmapping == 0) {
217 ret = psb_gtt_pin(r);
218 if (ret < 0) {
219 dev_err(dev->dev, "gma500: pin failed: %d\n", ret);
220 goto fail;
221 }
222 r->mmapping = 1;
223 }
224
225 /* Page relative to the VMA start - we must calculate this ourselves
226 because vmf->pgoff is the fake GEM offset */
227 page_offset = ((unsigned long) vmf->virtual_address - vma->vm_start)
228 >> PAGE_SHIFT;
229
230 /* CPU view of the page, don't go via the GART for CPU writes */
231 if (r->stolen)
232 pfn = (dev_priv->stolen_base + r->offset) >> PAGE_SHIFT;
233 else
234 pfn = page_to_pfn(r->pages[page_offset]);
235 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
236
237fail:
238 mutex_unlock(&dev->struct_mutex);
239 switch (ret) {
240 case 0:
241 case -ERESTARTSYS:
242 case -EINTR:
243 return VM_FAULT_NOPAGE;
244 case -ENOMEM:
245 return VM_FAULT_OOM;
246 default:
247 return VM_FAULT_SIGBUS;
248 }
249}
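/*
 * Editor's note: the pfn selection above means a stolen-memory object is
 * faulted straight from physical stolen RAM (stolen_base + r->offset),
 * while a shmem-backed GEM object maps the individual pages pinned by
 * psb_gtt_pin(). For example, a fault one page into the mapping yields
 * page_offset 1 and hence r->pages[1].
 */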
250
251static int psb_gem_create_stolen(struct drm_file *file, struct drm_device *dev,
252 int size, u32 *handle)
253{
254 struct gtt_range *gtt = psb_gtt_alloc_range(dev, size, "gem", 1);
255 if (gtt == NULL)
256 return -ENOMEM;
257 if (drm_gem_private_object_init(dev, &gtt->gem, size) != 0)
258 goto free_gtt;
259 if (drm_gem_handle_create(file, &gtt->gem, handle) == 0)
260 return 0;
261free_gtt:
262 psb_gtt_free_range(dev, gtt);
263 return -ENOMEM;
264}
265
266/*
267 * GEM interfaces for our specific client
268 */
269int psb_gem_create_ioctl(struct drm_device *dev, void *data,
270 struct drm_file *file)
271{
272 struct drm_psb_gem_create *args = data;
273 int ret;
274 if (args->flags & GMA_GEM_CREATE_STOLEN) {
275 ret = psb_gem_create_stolen(file, dev, args->size,
276 &args->handle);
277 if (ret == 0)
278 return 0;
279		/* Fall through */
280 args->flags &= ~GMA_GEM_CREATE_STOLEN;
281 }
282 return psb_gem_create(file, dev, args->size, &args->handle);
283}
284
285int psb_gem_mmap_ioctl(struct drm_device *dev, void *data,
286 struct drm_file *file)
287{
288 struct drm_psb_gem_mmap *args = data;
289 return dev->driver->dumb_map_offset(file, dev,
290 args->handle, &args->offset);
291}
292
diff --git a/drivers/gpu/drm/gma500/gem_glue.c b/drivers/gpu/drm/gma500/gem_glue.c
new file mode 100644
index 00000000000..daac1212065
--- /dev/null
+++ b/drivers/gpu/drm/gma500/gem_glue.c
@@ -0,0 +1,89 @@
1/**************************************************************************
2 * Copyright (c) 2011, Intel Corporation.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 **************************************************************************/
19
20#include <drm/drmP.h>
21#include <drm/drm.h>
22
23void drm_gem_object_release_wrap(struct drm_gem_object *obj)
24{
25 /* Remove the list map if one is present */
26 if (obj->map_list.map) {
27 struct drm_gem_mm *mm = obj->dev->mm_private;
28 struct drm_map_list *list = &obj->map_list;
29 drm_ht_remove_item(&mm->offset_hash, &list->hash);
30 drm_mm_put_block(list->file_offset_node);
31 kfree(list->map);
32 list->map = NULL;
33 }
34 drm_gem_object_release(obj);
35}
36
37/**
38 * gem_create_mmap_offset - invent an mmap offset
39 * @obj: our object
40 *
41 * Standard implementation of offset generation for mmap as is
42 * duplicated in several drivers. This belongs in GEM.
43 */
44int gem_create_mmap_offset(struct drm_gem_object *obj)
45{
46 struct drm_device *dev = obj->dev;
47 struct drm_gem_mm *mm = dev->mm_private;
48 struct drm_map_list *list;
49 struct drm_local_map *map;
50 int ret;
51
52 list = &obj->map_list;
53 list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
54 if (list->map == NULL)
55 return -ENOMEM;
56 map = list->map;
57 map->type = _DRM_GEM;
58 map->size = obj->size;
59 map->handle = obj;
60
61 list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
62 obj->size / PAGE_SIZE, 0, 0);
63 if (!list->file_offset_node) {
64 dev_err(dev->dev, "failed to allocate offset for bo %d\n",
65 obj->name);
66 ret = -ENOSPC;
67 goto free_it;
68 }
69 list->file_offset_node = drm_mm_get_block(list->file_offset_node,
70 obj->size / PAGE_SIZE, 0);
71 if (!list->file_offset_node) {
72 ret = -ENOMEM;
73 goto free_it;
74 }
75 list->hash.key = list->file_offset_node->start;
76 ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
77 if (ret) {
78 dev_err(dev->dev, "failed to add to map hash\n");
79 goto free_mm;
80 }
81 return 0;
82
83free_mm:
84 drm_mm_put_block(list->file_offset_node);
85free_it:
86 kfree(list->map);
87 list->map = NULL;
88 return ret;
89}
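/*
 * Editor's note: the offset minted here is a "fake" file offset. The hash
 * key set above (file_offset_node->start) is what psb_gem_dumb_map_gtt()
 * later shifts by PAGE_SHIFT and hands back to user space, which passes it
 * as the offset argument of mmap() on the DRM file descriptor.
 */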
diff --git a/drivers/gpu/drm/gma500/gem_glue.h b/drivers/gpu/drm/gma500/gem_glue.h
new file mode 100644
index 00000000000..ce5ce30f74d
--- /dev/null
+++ b/drivers/gpu/drm/gma500/gem_glue.h
@@ -0,0 +1,2 @@
1extern void drm_gem_object_release_wrap(struct drm_gem_object *obj);
2extern int gem_create_mmap_offset(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
new file mode 100644
index 00000000000..e770bd190a5
--- /dev/null
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -0,0 +1,553 @@
1/*
2 * Copyright (c) 2007, Intel Corporation.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
19 * Alan Cox <alan@linux.intel.com>
20 */
21
22#include <drm/drmP.h>
23#include "psb_drv.h"
24
25
26/*
27 * GTT resource allocator - manage page mappings in GTT space
28 */
29
30/**
31 * psb_gtt_mask_pte - generate GTT pte entry
32 * @pfn: page number to encode
33 * @type: type of memory in the GTT
34 *
35 * Set the GTT entry for the appropriate memory type.
36 */
37static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
38{
39 uint32_t mask = PSB_PTE_VALID;
40
41 if (type & PSB_MMU_CACHED_MEMORY)
42 mask |= PSB_PTE_CACHED;
43 if (type & PSB_MMU_RO_MEMORY)
44 mask |= PSB_PTE_RO;
45 if (type & PSB_MMU_WO_MEMORY)
46 mask |= PSB_PTE_WO;
47
48 return (pfn << PAGE_SHIFT) | mask;
49}
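/*
 * Editor's note: a worked example of the PTE encoding above. For pfn 0x1234
 * of cached memory the result is (0x1234 << PAGE_SHIFT) | PSB_PTE_VALID |
 * PSB_PTE_CACHED: the page address occupies the high bits while the status
 * flags sit in the low bits freed up by the 4K page alignment.
 */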
50
51/**
52 * psb_gtt_entry - find the GTT entries for a gtt_range
53 * @dev: our DRM device
54 * @r: our GTT range
55 *
56 * Given a gtt_range object return the GTT offset of the page table
57 * entries for this gtt_range
58 */
59u32 *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
60{
61 struct drm_psb_private *dev_priv = dev->dev_private;
62 unsigned long offset;
63
64 offset = r->resource.start - dev_priv->gtt_mem->start;
65
66 return dev_priv->gtt_map + (offset >> PAGE_SHIFT);
67}
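/*
 * Editor's note: the arithmetic above maps a GTT address-space resource to
 * its page-table slot. For example, a range whose resource starts 64K into
 * gtt_mem has offset 0x10000 and therefore its first PTE at gtt_map + 16.
 */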
68
69/**
70 * psb_gtt_insert - put an object into the GTT
71 * @dev: our DRM device
72 * @r: our GTT range
73 *
74 * Take our preallocated GTT range and insert the GEM object into
75 * the GTT. This is protected via the gtt mutex which the caller
76 * must hold.
77 */
78static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r)
79{
80 u32 *gtt_slot, pte;
81 struct page **pages;
82 int i;
83
84 if (r->pages == NULL) {
85 WARN_ON(1);
86 return -EINVAL;
87 }
88
89 WARN_ON(r->stolen); /* refcount these maybe ? */
90
91 gtt_slot = psb_gtt_entry(dev, r);
92 pages = r->pages;
93
94 /* Make sure changes are visible to the GPU */
95 set_pages_array_uc(pages, r->npage);
96
97 /* Write our page entries into the GTT itself */
98 for (i = r->roll; i < r->npage; i++) {
99 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
100 iowrite32(pte, gtt_slot++);
101 }
102 for (i = 0; i < r->roll; i++) {
103 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
104 iowrite32(pte, gtt_slot++);
105 }
106 /* Make sure all the entries are set before we return */
107 ioread32(gtt_slot - 1);
108
109 return 0;
110}
111
112/**
113 * psb_gtt_remove - remove an object from the GTT
114 * @dev: our DRM device
115 * @r: our GTT range
116 *
117 * Remove a preallocated GTT range from the GTT. Overwrite all the
118 * page table entries with the dummy page. This is protected via the gtt
119 * mutex which the caller must hold.
120 */
121static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
122{
123 struct drm_psb_private *dev_priv = dev->dev_private;
124 u32 *gtt_slot, pte;
125 int i;
126
127 WARN_ON(r->stolen);
128
129 gtt_slot = psb_gtt_entry(dev, r);
130 pte = psb_gtt_mask_pte(page_to_pfn(dev_priv->scratch_page), 0);
131
132 for (i = 0; i < r->npage; i++)
133 iowrite32(pte, gtt_slot++);
134 ioread32(gtt_slot - 1);
135 set_pages_array_wb(r->pages, r->npage);
136}
137
138/**
139 * psb_gtt_roll - set scrolling position
140 * @dev: our DRM device
141 * @r: the gtt mapping we are using
142 * @roll: roll offset
143 *
144 * Roll an existing pinned mapping by moving the pages through the GTT.
145 * This allows us to implement hardware scrolling on the consoles without
146 * a 2D engine
147 */
148void psb_gtt_roll(struct drm_device *dev, struct gtt_range *r, int roll)
149{
150 u32 *gtt_slot, pte;
151 int i;
152
153 if (roll >= r->npage) {
154 WARN_ON(1);
155 return;
156 }
157
158 r->roll = roll;
159
160	/* Not currently in the GTT - no worry, we will write the mapping at
161	   the right position when it gets pinned */
162 if (!r->stolen && !r->in_gart)
163 return;
164
165 gtt_slot = psb_gtt_entry(dev, r);
166
167 for (i = r->roll; i < r->npage; i++) {
168 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
169 iowrite32(pte, gtt_slot++);
170 }
171 for (i = 0; i < r->roll; i++) {
172 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
173 iowrite32(pte, gtt_slot++);
174 }
175 ioread32(gtt_slot - 1);
176}
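/*
 * Editor's note: a small example of the rotation above. Rolling a three
 * page mapping by one rewrites the GTT slots in the order page 1, page 2,
 * page 0, so scanout of the (linear) GTT range now starts one page into
 * the buffer without any pixel data being copied.
 */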
177
178/**
179 * psb_gtt_attach_pages - attach and pin GEM pages
180 * @gt: the gtt range
181 *
182 * Pin and build an in kernel list of the pages that back our GEM object.
183 * While we hold this the pages cannot be swapped out. This is protected
184 * via the gtt mutex which the caller must hold.
185 */
186static int psb_gtt_attach_pages(struct gtt_range *gt)
187{
188 struct inode *inode;
189 struct address_space *mapping;
190 int i;
191 struct page *p;
192 int pages = gt->gem.size / PAGE_SIZE;
193
194 WARN_ON(gt->pages);
195
196 /* This is the shared memory object that backs the GEM resource */
197 inode = gt->gem.filp->f_path.dentry->d_inode;
198 mapping = inode->i_mapping;
199
200 gt->pages = kmalloc(pages * sizeof(struct page *), GFP_KERNEL);
201 if (gt->pages == NULL)
202 return -ENOMEM;
203 gt->npage = pages;
204
205 for (i = 0; i < pages; i++) {
206 /* FIXME: needs updating as per mail from Hugh Dickins */
207 p = read_cache_page_gfp(mapping, i,
208 __GFP_COLD | GFP_KERNEL);
209 if (IS_ERR(p))
210 goto err;
211 gt->pages[i] = p;
212 }
213 return 0;
214
215err:
216 while (i--)
217 page_cache_release(gt->pages[i]);
218 kfree(gt->pages);
219 gt->pages = NULL;
220 return PTR_ERR(p);
221}
222
223/**
224 * psb_gtt_detach_pages - release GEM pages
225 * @gt: the gtt range
226 *
227 * Undo the effect of psb_gtt_attach_pages. At this point the pages
228 * must have been removed from the GTT, as they could now be paged out
229 * and their bus addresses would change. This is protected via the gtt
230 * mutex which the caller must hold.
231 */
232static void psb_gtt_detach_pages(struct gtt_range *gt)
233{
234 int i;
235 for (i = 0; i < gt->npage; i++) {
236 /* FIXME: do we need to force dirty */
237 set_page_dirty(gt->pages[i]);
238 page_cache_release(gt->pages[i]);
239 }
240 kfree(gt->pages);
241 gt->pages = NULL;
242}
243
244/**
245 * psb_gtt_pin - pin pages into the GTT
246 * @gt: range to pin
247 *
248 * Pin a set of pages into the GTT. The pins are refcounted so that
249 * multiple pins need multiple unpins to undo.
250 *
251 * Non GEM backed objects treat this as a no-op as they are always GTT
252 * backed objects.
253 */
254int psb_gtt_pin(struct gtt_range *gt)
255{
256 int ret = 0;
257 struct drm_device *dev = gt->gem.dev;
258 struct drm_psb_private *dev_priv = dev->dev_private;
259
260 mutex_lock(&dev_priv->gtt_mutex);
261
262 if (gt->in_gart == 0 && gt->stolen == 0) {
263 ret = psb_gtt_attach_pages(gt);
264 if (ret < 0)
265 goto out;
266 ret = psb_gtt_insert(dev, gt);
267 if (ret < 0) {
268 psb_gtt_detach_pages(gt);
269 goto out;
270 }
271 }
272 gt->in_gart++;
273out:
274 mutex_unlock(&dev_priv->gtt_mutex);
275 return ret;
276}
277
278/**
279 * psb_gtt_unpin - Drop a GTT pin requirement
280 * @gt: range to unpin
281 *
282 * Undoes the effect of psb_gtt_pin. On the last drop the GEM object
283 * will be removed from the GTT, which will also drop the page references
284 * and allow the VM to clean up or page the memory out.
285 *
286 * Non GEM backed objects treat this as a no-op as they are always GTT
287 * backed objects.
288 */
289void psb_gtt_unpin(struct gtt_range *gt)
290{
291 struct drm_device *dev = gt->gem.dev;
292 struct drm_psb_private *dev_priv = dev->dev_private;
293
294 mutex_lock(&dev_priv->gtt_mutex);
295
296 WARN_ON(!gt->in_gart);
297
298 gt->in_gart--;
299 if (gt->in_gart == 0 && gt->stolen == 0) {
300 psb_gtt_remove(dev, gt);
301 psb_gtt_detach_pages(gt);
302 }
303 mutex_unlock(&dev_priv->gtt_mutex);
304}
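/*
 * Editor's note: a minimal usage sketch for the pin API above (hypothetical
 * caller, not part of the patch):
 *
 *	ret = psb_gtt_pin(gt);
 *	if (ret == 0) {
 *		... program the hardware using gt->offset ...
 *		psb_gtt_unpin(gt);
 *	}
 *
 * Pins are refcounted, so every successful psb_gtt_pin() must be balanced
 * by exactly one psb_gtt_unpin().
 */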
305
306/*
307 * GTT resource allocator - allocate and manage GTT address space
308 */
309
310/**
311 * psb_gtt_alloc_range - allocate GTT address space
312 * @dev: Our DRM device
313 * @len: length (bytes) of address space required
314 * @name: resource name
315 * @backed: resource should be backed by stolen pages
316 *
317 * Ask the kernel core to find us a suitable range of addresses
318 * to use for a GTT mapping.
319 *
320 * Returns a gtt_range structure describing the object, or NULL on
321 * error. On successful return the resource is both allocated and marked
322 * as in use.
323 */
324struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
325 const char *name, int backed)
326{
327 struct drm_psb_private *dev_priv = dev->dev_private;
328 struct gtt_range *gt;
329 struct resource *r = dev_priv->gtt_mem;
330 int ret;
331 unsigned long start, end;
332
333 if (backed) {
334 /* The start of the GTT is the stolen pages */
335 start = r->start;
336 end = r->start + dev_priv->gtt.stolen_size - 1;
337 } else {
338 /* The rest we will use for GEM backed objects */
339 start = r->start + dev_priv->gtt.stolen_size;
340 end = r->end;
341 }
342
343 gt = kzalloc(sizeof(struct gtt_range), GFP_KERNEL);
344 if (gt == NULL)
345 return NULL;
346 gt->resource.name = name;
347 gt->stolen = backed;
348 gt->in_gart = backed;
349 gt->roll = 0;
350 /* Ensure this is set for non GEM objects */
351 gt->gem.dev = dev;
352 ret = allocate_resource(dev_priv->gtt_mem, &gt->resource,
353 len, start, end, PAGE_SIZE, NULL, NULL);
354 if (ret == 0) {
355 gt->offset = gt->resource.start - r->start;
356 return gt;
357 }
358 kfree(gt);
359 return NULL;
360}
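/*
 * Editor's note: a minimal usage sketch for the allocator above
 * (hypothetical caller, not part of the patch):
 *
 *	struct gtt_range *gt;
 *
 *	gt = psb_gtt_alloc_range(dev, 16 * PAGE_SIZE, "cursor", 0);
 *	if (gt == NULL)
 *		return -ENOMEM;
 *	... use the range, e.g. via psb_gtt_pin()/psb_gtt_unpin() ...
 *	psb_gtt_free_range(dev, gt);
 */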
361
362/**
363 * psb_gtt_free_range - release GTT address space
364 * @dev: our DRM device
365 * @gt: a mapping created with psb_gtt_alloc_range
366 *
367 * Release a resource that was allocated with psb_gtt_alloc_range. If the
368 * object has been pinned by mmap users we clean this up here currently.
369 */
370void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt)
371{
372 /* Undo the mmap pin if we are destroying the object */
373 if (gt->mmapping) {
374 psb_gtt_unpin(gt);
375 gt->mmapping = 0;
376 }
377 WARN_ON(gt->in_gart && !gt->stolen);
378 release_resource(&gt->resource);
379 kfree(gt);
380}
381
382void psb_gtt_alloc(struct drm_device *dev)
383{
384 struct drm_psb_private *dev_priv = dev->dev_private;
385 init_rwsem(&dev_priv->gtt.sem);
386}
387
388void psb_gtt_takedown(struct drm_device *dev)
389{
390 struct drm_psb_private *dev_priv = dev->dev_private;
391
392 if (dev_priv->gtt_map) {
393 iounmap(dev_priv->gtt_map);
394 dev_priv->gtt_map = NULL;
395 }
396 if (dev_priv->gtt_initialized) {
397 pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
398 dev_priv->gmch_ctrl);
399 PSB_WVDC32(dev_priv->pge_ctl, PSB_PGETBL_CTL);
400 (void) PSB_RVDC32(PSB_PGETBL_CTL);
401 }
402 if (dev_priv->vram_addr)
403 iounmap(dev_priv->gtt_map);
404}
405
406int psb_gtt_init(struct drm_device *dev, int resume)
407{
408 struct drm_psb_private *dev_priv = dev->dev_private;
409 unsigned gtt_pages;
410 unsigned long stolen_size, vram_stolen_size;
411 unsigned i, num_pages;
412 unsigned pfn_base;
413 uint32_t vram_pages;
414 uint32_t dvmt_mode = 0;
415 struct psb_gtt *pg;
416
417 int ret = 0;
418 uint32_t pte;
419
420 mutex_init(&dev_priv->gtt_mutex);
421
422 psb_gtt_alloc(dev);
423 pg = &dev_priv->gtt;
424
425 /* Enable the GTT */
426 pci_read_config_word(dev->pdev, PSB_GMCH_CTRL, &dev_priv->gmch_ctrl);
427 pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
428 dev_priv->gmch_ctrl | _PSB_GMCH_ENABLED);
429
430 dev_priv->pge_ctl = PSB_RVDC32(PSB_PGETBL_CTL);
431 PSB_WVDC32(dev_priv->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
432 (void) PSB_RVDC32(PSB_PGETBL_CTL);
433
434 /* The root resource we allocate address space from */
435 dev_priv->gtt_initialized = 1;
436
437 pg->gtt_phys_start = dev_priv->pge_ctl & PAGE_MASK;
438
439 /*
440	 * The video MMU has a hardware bug when accessing 0xD0000000.
441	 * Make the GATT start at 0xE0000000. This doesn't actually
442	 * matter for us now, but may do if the video acceleration is
443	 * ever opened up.
444 */
445 pg->mmu_gatt_start = 0xE0000000;
446
447 pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE);
448 gtt_pages = pci_resource_len(dev->pdev, PSB_GTT_RESOURCE)
449 >> PAGE_SHIFT;
450	/* Some CDV firmware doesn't report this currently, in which case
451	   the system has 64 GTT pages */
452 if (pg->gtt_start == 0 || gtt_pages == 0) {
453 dev_err(dev->dev, "GTT PCI BAR not initialized.\n");
454 gtt_pages = 64;
455 pg->gtt_start = dev_priv->pge_ctl;
456 }
457
458 pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE);
459 pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE)
460 >> PAGE_SHIFT;
461 dev_priv->gtt_mem = &dev->pdev->resource[PSB_GATT_RESOURCE];
462
463 if (pg->gatt_pages == 0 || pg->gatt_start == 0) {
464 static struct resource fudge; /* Preferably peppermint */
465 /* This can occur on CDV SDV systems. Fudge it in this case.
466 We really don't care what imaginary space is being allocated
467 at this point */
468 dev_err(dev->dev, "GATT PCI BAR not initialized.\n");
469 pg->gatt_start = 0x40000000;
470 pg->gatt_pages = (128 * 1024 * 1024) >> PAGE_SHIFT;
471 /* This is a little confusing but in fact the GTT is providing
472 a view from the GPU into memory and not vice versa. As such
473 this is really allocating space that is not the same as the
474 CPU address space on CDV */
475 fudge.start = 0x40000000;
476 fudge.end = 0x40000000 + 128 * 1024 * 1024 - 1;
477 fudge.name = "fudge";
478 fudge.flags = IORESOURCE_MEM;
479 dev_priv->gtt_mem = &fudge;
480 }
481
482 pci_read_config_dword(dev->pdev, PSB_BSM, &dev_priv->stolen_base);
483 vram_stolen_size = pg->gtt_phys_start - dev_priv->stolen_base
484 - PAGE_SIZE;
485
486 stolen_size = vram_stolen_size;
487
488 printk(KERN_INFO "Stolen memory information\n");
489 printk(KERN_INFO " base in RAM: 0x%x\n", dev_priv->stolen_base);
490 printk(KERN_INFO " size: %luK, calculated by (GTT RAM base) - (Stolen base), seems wrong\n",
491 vram_stolen_size/1024);
492 dvmt_mode = (dev_priv->gmch_ctrl >> 4) & 0x7;
493 printk(KERN_INFO " the correct size should be: %dM(dvmt mode=%d)\n",
494 (dvmt_mode == 1) ? 1 : (2 << (dvmt_mode - 1)), dvmt_mode);
495
496 if (resume && (gtt_pages != pg->gtt_pages) &&
497 (stolen_size != pg->stolen_size)) {
498 dev_err(dev->dev, "GTT resume error.\n");
499 ret = -EINVAL;
500 goto out_err;
501 }
502
503 pg->gtt_pages = gtt_pages;
504 pg->stolen_size = stolen_size;
505 dev_priv->vram_stolen_size = vram_stolen_size;
506
507 /*
508 * Map the GTT and the stolen memory area
509 */
510 dev_priv->gtt_map = ioremap_nocache(pg->gtt_phys_start,
511 gtt_pages << PAGE_SHIFT);
512 if (!dev_priv->gtt_map) {
513 dev_err(dev->dev, "Failure to map gtt.\n");
514 ret = -ENOMEM;
515 goto out_err;
516 }
517
518 dev_priv->vram_addr = ioremap_wc(dev_priv->stolen_base, stolen_size);
519 if (!dev_priv->vram_addr) {
520 dev_err(dev->dev, "Failure to map stolen base.\n");
521 ret = -ENOMEM;
522 goto out_err;
523 }
524
525 /*
526 * Insert vram stolen pages into the GTT
527 */
528
529 pfn_base = dev_priv->stolen_base >> PAGE_SHIFT;
530 vram_pages = num_pages = vram_stolen_size >> PAGE_SHIFT;
531	printk(KERN_INFO "Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
532 num_pages, pfn_base << PAGE_SHIFT, 0);
533 for (i = 0; i < num_pages; ++i) {
534 pte = psb_gtt_mask_pte(pfn_base + i, 0);
535 iowrite32(pte, dev_priv->gtt_map + i);
536 }
537
538 /*
539 * Init rest of GTT to the scratch page to avoid accidents or scribbles
540 */
541
542 pfn_base = page_to_pfn(dev_priv->scratch_page);
543 pte = psb_gtt_mask_pte(pfn_base, 0);
544 for (; i < gtt_pages; ++i)
545 iowrite32(pte, dev_priv->gtt_map + i);
546
547 (void) ioread32(dev_priv->gtt_map + i - 1);
548 return 0;
549
550out_err:
551 psb_gtt_takedown(dev);
552 return ret;
553}
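
/*
 * Illustrative sketch (not part of this patch): the PTE layout that
 * psb_gtt_mask_pte() above is assumed to produce -- the page frame
 * number shifted into place, OR'd with a low "valid" flag bit. The
 * flag value here is an assumption for illustration only.
 */
static inline u32 example_gtt_pte(unsigned long pfn)
{
	return ((u32)pfn << PAGE_SHIFT) | 0x1;	/* bit 0: assumed valid bit */
}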
diff --git a/drivers/gpu/drm/gma500/gtt.h b/drivers/gpu/drm/gma500/gtt.h
new file mode 100644
index 00000000000..aa1742387f5
--- /dev/null
+++ b/drivers/gpu/drm/gma500/gtt.h
@@ -0,0 +1,64 @@
1/**************************************************************************
2 * Copyright (c) 2007-2008, Intel Corporation.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 **************************************************************************/
19
20#ifndef _PSB_GTT_H_
21#define _PSB_GTT_H_
22
23#include <drm/drmP.h>
24
25/* This wants cleaning up with respect to the psb_dev and unneeded stuff */
26struct psb_gtt {
27 uint32_t gatt_start;
28 uint32_t mmu_gatt_start;
29 uint32_t gtt_start;
30 uint32_t gtt_phys_start;
31 unsigned gtt_pages;
32 unsigned gatt_pages;
33 unsigned long stolen_size;
34 unsigned long vram_stolen_size;
35 struct rw_semaphore sem;
36};
37
38/* Exported functions */
39extern int psb_gtt_init(struct drm_device *dev, int resume);
40extern void psb_gtt_takedown(struct drm_device *dev);
41
42/* Each gtt_range describes an allocation in the GTT area */
43struct gtt_range {
44 struct resource resource; /* Resource for our allocation */
45 u32 offset; /* GTT offset of our object */
46 struct drm_gem_object gem; /* GEM high level stuff */
47 int in_gart; /* Currently in the GART (ref ct) */
48 bool stolen; /* Backed from stolen RAM */
49 bool mmapping; /* Is mmappable */
50 struct page **pages; /* Backing pages if present */
51 int npage; /* Number of backing pages */
52 int roll; /* Roll applied to the GTT entries */
53};
54
55extern struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
56 const char *name, int backed);
57extern void psb_gtt_kref_put(struct gtt_range *gt);
58extern void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt);
59extern int psb_gtt_pin(struct gtt_range *gt);
60extern void psb_gtt_unpin(struct gtt_range *gt);
61extern void psb_gtt_roll(struct drm_device *dev,
62 struct gtt_range *gt, int roll);
63
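/*
 * Usage sketch (illustrative only): allocate a page-sized range in the
 * GTT aperture, pin it so its backing pages are wired into the GTT,
 * then release it. Error handling is elided; the last argument of
 * psb_gtt_alloc_range() selects stolen-memory backing.
 */
static inline void example_gtt_usage(struct drm_device *dev)
{
	struct gtt_range *gt = psb_gtt_alloc_range(dev, 4096, "example", 0);

	if (gt && psb_gtt_pin(gt) == 0) {
		/* ... point scanout or blit hardware at gt->offset ... */
		psb_gtt_unpin(gt);
	}
	if (gt)
		psb_gtt_free_range(dev, gt);
}
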
64#endif
diff --git a/drivers/gpu/drm/gma500/intel_bios.c b/drivers/gpu/drm/gma500/intel_bios.c
new file mode 100644
index 00000000000..d4d0c5b8bf9
--- /dev/null
+++ b/drivers/gpu/drm/gma500/intel_bios.c
@@ -0,0 +1,303 @@
1/*
2 * Copyright (c) 2006 Intel Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Authors:
18 * Eric Anholt <eric@anholt.net>
19 *
20 */
21#include <drm/drmP.h>
22#include <drm/drm.h>
23#include "gma_drm.h"
24#include "psb_drv.h"
25#include "psb_intel_drv.h"
26#include "psb_intel_reg.h"
27#include "intel_bios.h"
28
29
30static void *find_section(struct bdb_header *bdb, int section_id)
31{
32 u8 *base = (u8 *)bdb;
33 int index = 0;
34 u16 total, current_size;
35 u8 current_id;
36
37 /* skip to first section */
38 index += bdb->header_size;
39 total = bdb->bdb_size;
40
41 /* walk the sections looking for section_id */
42 while (index < total) {
43 current_id = *(base + index);
44 index++;
45 current_size = *((u16 *)(base + index));
46 index += 2;
47 if (current_id == section_id)
48 return base + index;
49 index += current_size;
50 }
51
52 return NULL;
53}
54
55static void fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
56 struct lvds_dvo_timing *dvo_timing)
57{
58 panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) |
59 dvo_timing->hactive_lo;
60 panel_fixed_mode->hsync_start = panel_fixed_mode->hdisplay +
61 ((dvo_timing->hsync_off_hi << 8) | dvo_timing->hsync_off_lo);
62 panel_fixed_mode->hsync_end = panel_fixed_mode->hsync_start +
63 dvo_timing->hsync_pulse_width;
64 panel_fixed_mode->htotal = panel_fixed_mode->hdisplay +
65 ((dvo_timing->hblank_hi << 8) | dvo_timing->hblank_lo);
66
67 panel_fixed_mode->vdisplay = (dvo_timing->vactive_hi << 8) |
68 dvo_timing->vactive_lo;
69 panel_fixed_mode->vsync_start = panel_fixed_mode->vdisplay +
70 dvo_timing->vsync_off;
71 panel_fixed_mode->vsync_end = panel_fixed_mode->vsync_start +
72 dvo_timing->vsync_pulse_width;
73 panel_fixed_mode->vtotal = panel_fixed_mode->vdisplay +
74 ((dvo_timing->vblank_hi << 8) | dvo_timing->vblank_lo);
75 panel_fixed_mode->clock = dvo_timing->clock * 10;
76 panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED;
77
78 /* Some VBTs have bogus h/vtotal values */
79 if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal)
80 panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
81 if (panel_fixed_mode->vsync_end > panel_fixed_mode->vtotal)
82 panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end + 1;
83
84 drm_mode_set_name(panel_fixed_mode);
85}
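
/*
 * Worked example (illustrative): the DVO timing fields are split into
 * hi/lo parts; a 1366 pixel wide panel is stored as hactive_hi = 0x5,
 * hactive_lo = 0x56 and reassembled above as (0x5 << 8) | 0x56 == 1366.
 */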
86
87static void parse_backlight_data(struct drm_psb_private *dev_priv,
88 struct bdb_header *bdb)
89{
90 struct bdb_lvds_backlight *vbt_lvds_bl = NULL;
91 struct bdb_lvds_backlight *lvds_bl;
92 u8 p_type = 0;
93 void *bl_start = NULL;
94 struct bdb_lvds_options *lvds_opts
95 = find_section(bdb, BDB_LVDS_OPTIONS);
96
97 dev_priv->lvds_bl = NULL;
98
99 if (lvds_opts)
100 p_type = lvds_opts->panel_type;
101 else
102 return;
103
104	bl_start = find_section(bdb, BDB_LVDS_BACKLIGHT);
	if (!bl_start)			/* no backlight block in this VBT */
		return;
105	vbt_lvds_bl = (struct bdb_lvds_backlight *)(bl_start + 1) + p_type;
106
107 lvds_bl = kzalloc(sizeof(*vbt_lvds_bl), GFP_KERNEL);
108 if (!lvds_bl) {
109 dev_err(dev_priv->dev->dev, "out of memory for backlight data\n");
110 return;
111 }
112 memcpy(lvds_bl, vbt_lvds_bl, sizeof(*vbt_lvds_bl));
113 dev_priv->lvds_bl = lvds_bl;
114}
115
116/* Try to find integrated panel data */
117static void parse_lfp_panel_data(struct drm_psb_private *dev_priv,
118 struct bdb_header *bdb)
119{
120 struct bdb_lvds_options *lvds_options;
121 struct bdb_lvds_lfp_data *lvds_lfp_data;
122 struct bdb_lvds_lfp_data_entry *entry;
123 struct lvds_dvo_timing *dvo_timing;
124 struct drm_display_mode *panel_fixed_mode;
125
126 /* Defaults if we can't find VBT info */
127 dev_priv->lvds_dither = 0;
128 dev_priv->lvds_vbt = 0;
129
130 lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
131 if (!lvds_options)
132 return;
133
134 dev_priv->lvds_dither = lvds_options->pixel_dither;
135 if (lvds_options->panel_type == 0xff)
136 return;
137
138 lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
139 if (!lvds_lfp_data)
140 return;
141
142
143 entry = &lvds_lfp_data->data[lvds_options->panel_type];
144 dvo_timing = &entry->dvo_timing;
145
146 panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode),
147 GFP_KERNEL);
148 if (panel_fixed_mode == NULL) {
149 dev_err(dev_priv->dev->dev, "out of memory for fixed panel mode\n");
150 return;
151 }
152
153 dev_priv->lvds_vbt = 1;
154 fill_detail_timing_data(panel_fixed_mode, dvo_timing);
155
156 if (panel_fixed_mode->htotal > 0 && panel_fixed_mode->vtotal > 0) {
157 dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode;
158 drm_mode_debug_printmodeline(panel_fixed_mode);
159 } else {
160 dev_dbg(dev_priv->dev->dev, "ignoring invalid LVDS VBT\n");
161 dev_priv->lvds_vbt = 0;
162 kfree(panel_fixed_mode);
163 }
164 return;
165}
166
167/* Try to find sdvo panel data */
168static void parse_sdvo_panel_data(struct drm_psb_private *dev_priv,
169 struct bdb_header *bdb)
170{
171 struct bdb_sdvo_lvds_options *sdvo_lvds_options;
172 struct lvds_dvo_timing *dvo_timing;
173 struct drm_display_mode *panel_fixed_mode;
174
175 dev_priv->sdvo_lvds_vbt_mode = NULL;
176
177 sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
178 if (!sdvo_lvds_options)
179 return;
180
181 dvo_timing = find_section(bdb, BDB_SDVO_PANEL_DTDS);
182 if (!dvo_timing)
183 return;
184
185 panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
186
187 if (!panel_fixed_mode)
188 return;
189
190 fill_detail_timing_data(panel_fixed_mode,
191 dvo_timing + sdvo_lvds_options->panel_type);
192
193 dev_priv->sdvo_lvds_vbt_mode = panel_fixed_mode;
194
195 return;
196}
197
198static void parse_general_features(struct drm_psb_private *dev_priv,
199 struct bdb_header *bdb)
200{
201 struct bdb_general_features *general;
202
203 /* Set sensible defaults in case we can't find the general block */
204 dev_priv->int_tv_support = 1;
205 dev_priv->int_crt_support = 1;
206
207 general = find_section(bdb, BDB_GENERAL_FEATURES);
208 if (general) {
209 dev_priv->int_tv_support = general->int_tv_support;
210 dev_priv->int_crt_support = general->int_crt_support;
211 dev_priv->lvds_use_ssc = general->enable_ssc;
212
213 if (dev_priv->lvds_use_ssc) {
214 dev_priv->lvds_ssc_freq
215 = general->ssc_freq ? 100 : 96;
216 }
217 }
218}
219
220/**
221 * psb_intel_init_bios - initialize VBIOS settings & find VBT
222 * @dev: DRM device
223 *
224 * Loads the Video BIOS and checks that the VBT exists. Sets scratch registers
225 * to appropriate values.
226 *
227 * VBT existence is a sanity check that other code in this file relies on.
228 * Note that it would be better to use a BIOS call to get the VBT, as BIOSes may
229 * feed an updated VBT back through that, compared to what we'll fetch using
230 * this method of groping around in the BIOS data.
231 *
232 * Returns 0 on success, nonzero on failure.
233 */
234int psb_intel_init_bios(struct drm_device *dev)
235{
236 struct drm_psb_private *dev_priv = dev->dev_private;
237 struct pci_dev *pdev = dev->pdev;
238 struct vbt_header *vbt = NULL;
239 struct bdb_header *bdb;
240 u8 __iomem *bios;
241 size_t size;
242 int i;
243
244 bios = pci_map_rom(pdev, &size);
245 if (!bios)
246 return -1;
247
248 /* Scour memory looking for the VBT signature */
249 for (i = 0; i + 4 < size; i++) {
250 if (!memcmp(bios + i, "$VBT", 4)) {
251 vbt = (struct vbt_header *)(bios + i);
252 break;
253 }
254 }
255
256 if (!vbt) {
257 dev_err(dev->dev, "VBT signature missing\n");
258 pci_unmap_rom(pdev, bios);
259 return -1;
260 }
261
262 bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
263
264 /* Grab useful general definitions */
265 parse_general_features(dev_priv, bdb);
266 parse_lfp_panel_data(dev_priv, bdb);
267 parse_sdvo_panel_data(dev_priv, bdb);
268 parse_backlight_data(dev_priv, bdb);
269
270 pci_unmap_rom(pdev, bios);
271
272 return 0;
273}
274
275/**
276 * Destroy and free VBT data
277 */
278void psb_intel_destroy_bios(struct drm_device *dev)
279{
280 struct drm_psb_private *dev_priv = dev->dev_private;
281 struct drm_display_mode *sdvo_lvds_vbt_mode =
282 dev_priv->sdvo_lvds_vbt_mode;
283 struct drm_display_mode *lfp_lvds_vbt_mode =
284 dev_priv->lfp_lvds_vbt_mode;
285 struct bdb_lvds_backlight *lvds_bl =
286 dev_priv->lvds_bl;
287
288 /*free sdvo panel mode*/
289 if (sdvo_lvds_vbt_mode) {
290 dev_priv->sdvo_lvds_vbt_mode = NULL;
291 kfree(sdvo_lvds_vbt_mode);
292 }
293
294 if (lfp_lvds_vbt_mode) {
295 dev_priv->lfp_lvds_vbt_mode = NULL;
296 kfree(lfp_lvds_vbt_mode);
297 }
298
299 if (lvds_bl) {
300 dev_priv->lvds_bl = NULL;
301 kfree(lvds_bl);
302 }
303}
diff --git a/drivers/gpu/drm/gma500/intel_bios.h b/drivers/gpu/drm/gma500/intel_bios.h
new file mode 100644
index 00000000000..70f1bf01818
--- /dev/null
+++ b/drivers/gpu/drm/gma500/intel_bios.h
@@ -0,0 +1,430 @@
1/*
2 * Copyright (c) 2006 Intel Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Authors:
18 * Eric Anholt <eric@anholt.net>
19 *
20 */
21
22#ifndef _I830_BIOS_H_
23#define _I830_BIOS_H_
24
25#include <drm/drmP.h>
26
27struct vbt_header {
28	u8 signature[20];		/**< Always starts with '$VBT' */
29 u16 version; /**< decimal */
30 u16 header_size; /**< in bytes */
31 u16 vbt_size; /**< in bytes */
32 u8 vbt_checksum;
33 u8 reserved0;
34 u32 bdb_offset; /**< from beginning of VBT */
35 u32 aim_offset[4]; /**< from beginning of VBT */
36} __attribute__((packed));
37
38
39struct bdb_header {
40 u8 signature[16]; /**< Always 'BIOS_DATA_BLOCK' */
41 u16 version; /**< decimal */
42 u16 header_size; /**< in bytes */
43 u16 bdb_size; /**< in bytes */
44};
45
46/* strictly speaking, this is a "skip" block, but it has interesting info */
47struct vbios_data {
48 u8 type; /* 0 == desktop, 1 == mobile */
49 u8 relstage;
50 u8 chipset;
51 u8 lvds_present:1;
52 u8 tv_present:1;
53 u8 rsvd2:6; /* finish byte */
54 u8 rsvd3[4];
55 u8 signon[155];
56 u8 copyright[61];
57 u16 code_segment;
58 u8 dos_boot_mode;
59 u8 bandwidth_percent;
60 u8 rsvd4; /* popup memory size */
61 u8 resize_pci_bios;
62 u8 rsvd5; /* is crt already on ddc2 */
63} __attribute__((packed));
64
65/*
66 * There are several types of BIOS data blocks (BDBs), each block has
67 * an ID and size in the first 3 bytes (ID in first, size in next 2).
68 * Known types are listed below.
69 */
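/*
 * Illustrative only: the 3-byte block header described above, written
 * out as a packed struct. find_section() in intel_bios.c walks these
 * headers with raw pointer arithmetic instead of using such a type.
 */
struct bdb_block_header_example {
	u8 id;		/* one of the BDB_* ids below */
	u16 size;	/* payload bytes following this header */
} __attribute__((packed));
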
70#define BDB_GENERAL_FEATURES 1
71#define BDB_GENERAL_DEFINITIONS 2
72#define BDB_OLD_TOGGLE_LIST 3
73#define BDB_MODE_SUPPORT_LIST 4
74#define BDB_GENERIC_MODE_TABLE 5
75#define BDB_EXT_MMIO_REGS 6
76#define BDB_SWF_IO 7
77#define BDB_SWF_MMIO 8
78#define BDB_DOT_CLOCK_TABLE 9
79#define BDB_MODE_REMOVAL_TABLE 10
80#define BDB_CHILD_DEVICE_TABLE 11
81#define BDB_DRIVER_FEATURES 12
82#define BDB_DRIVER_PERSISTENCE 13
83#define BDB_EXT_TABLE_PTRS 14
84#define BDB_DOT_CLOCK_OVERRIDE 15
85#define BDB_DISPLAY_SELECT 16
86/* 17 rsvd */
87#define BDB_DRIVER_ROTATION 18
88#define BDB_DISPLAY_REMOVE 19
89#define BDB_OEM_CUSTOM 20
90#define BDB_EFP_LIST 21 /* workarounds for VGA hsync/vsync */
91#define BDB_SDVO_LVDS_OPTIONS 22
92#define BDB_SDVO_PANEL_DTDS 23
93#define BDB_SDVO_LVDS_PNP_IDS 24
94#define BDB_SDVO_LVDS_POWER_SEQ 25
95#define BDB_TV_OPTIONS 26
96#define BDB_LVDS_OPTIONS 40
97#define BDB_LVDS_LFP_DATA_PTRS 41
98#define BDB_LVDS_LFP_DATA 42
99#define BDB_LVDS_BACKLIGHT 43
100#define BDB_LVDS_POWER 44
101#define BDB_SKIP 254 /* VBIOS private block, ignore */
102
103struct bdb_general_features {
104 /* bits 1 */
105 u8 panel_fitting:2;
106 u8 flexaim:1;
107 u8 msg_enable:1;
108 u8 clear_screen:3;
109 u8 color_flip:1;
110
111 /* bits 2 */
112 u8 download_ext_vbt:1;
113 u8 enable_ssc:1;
114 u8 ssc_freq:1;
115 u8 enable_lfp_on_override:1;
116 u8 disable_ssc_ddt:1;
117 u8 rsvd8:3; /* finish byte */
118
119 /* bits 3 */
120 u8 disable_smooth_vision:1;
121 u8 single_dvi:1;
122 u8 rsvd9:6; /* finish byte */
123
124 /* bits 4 */
125 u8 legacy_monitor_detect;
126
127 /* bits 5 */
128 u8 int_crt_support:1;
129 u8 int_tv_support:1;
130 u8 rsvd11:6; /* finish byte */
131} __attribute__((packed));
132
133struct bdb_general_definitions {
134 /* DDC GPIO */
135 u8 crt_ddc_gmbus_pin;
136
137 /* DPMS bits */
138 u8 dpms_acpi:1;
139 u8 skip_boot_crt_detect:1;
140 u8 dpms_aim:1;
141 u8 rsvd1:5; /* finish byte */
142
143 /* boot device bits */
144 u8 boot_display[2];
145 u8 child_dev_size;
146
147 /* device info */
148 u8 tv_or_lvds_info[33];
149 u8 dev1[33];
150 u8 dev2[33];
151 u8 dev3[33];
152 u8 dev4[33];
153 /* may be another device block here on some platforms */
154};
155
156struct bdb_lvds_options {
157 u8 panel_type;
158 u8 rsvd1;
159 /* LVDS capabilities, stored in a dword */
160 u8 pfit_mode:2;
161 u8 pfit_text_mode_enhanced:1;
162 u8 pfit_gfx_mode_enhanced:1;
163 u8 pfit_ratio_auto:1;
164 u8 pixel_dither:1;
165 u8 lvds_edid:1;
166 u8 rsvd2:1;
167 u8 rsvd4;
168} __attribute__((packed));
169
170struct bdb_lvds_backlight {
171 u8 type:2;
172 u8 pol:1;
173 u8 gpio:3;
174 u8 gmbus:2;
175 u16 freq;
176 u8 minbrightness;
177 u8 i2caddr;
178 u8 brightnesscmd;
179 /*FIXME: more...*/
180} __attribute__((packed));
181
182/* LFP pointer table contains entries to the struct below */
183struct bdb_lvds_lfp_data_ptr {
184 u16 fp_timing_offset; /* offsets are from start of bdb */
185 u8 fp_table_size;
186 u16 dvo_timing_offset;
187 u8 dvo_table_size;
188 u16 panel_pnp_id_offset;
189 u8 pnp_table_size;
190} __attribute__((packed));
191
192struct bdb_lvds_lfp_data_ptrs {
193 u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */
194 struct bdb_lvds_lfp_data_ptr ptr[16];
195} __attribute__((packed));
196
197/* LFP data has 3 blocks per entry */
198struct lvds_fp_timing {
199 u16 x_res;
200 u16 y_res;
201 u32 lvds_reg;
202 u32 lvds_reg_val;
203 u32 pp_on_reg;
204 u32 pp_on_reg_val;
205 u32 pp_off_reg;
206 u32 pp_off_reg_val;
207 u32 pp_cycle_reg;
208 u32 pp_cycle_reg_val;
209 u32 pfit_reg;
210 u32 pfit_reg_val;
211 u16 terminator;
212} __attribute__((packed));
213
214struct lvds_dvo_timing {
215 u16 clock; /**< In 10khz */
216 u8 hactive_lo;
217 u8 hblank_lo;
218 u8 hblank_hi:4;
219 u8 hactive_hi:4;
220 u8 vactive_lo;
221 u8 vblank_lo;
222 u8 vblank_hi:4;
223 u8 vactive_hi:4;
224 u8 hsync_off_lo;
225 u8 hsync_pulse_width;
226 u8 vsync_pulse_width:4;
227 u8 vsync_off:4;
228 u8 rsvd0:6;
229 u8 hsync_off_hi:2;
230 u8 h_image;
231 u8 v_image;
232 u8 max_hv;
233 u8 h_border;
234 u8 v_border;
235 u8 rsvd1:3;
236 u8 digital:2;
237 u8 vsync_positive:1;
238 u8 hsync_positive:1;
239 u8 rsvd2:1;
240} __attribute__((packed));
241
242struct lvds_pnp_id {
243 u16 mfg_name;
244 u16 product_code;
245 u32 serial;
246 u8 mfg_week;
247 u8 mfg_year;
248} __attribute__((packed));
249
250struct bdb_lvds_lfp_data_entry {
251 struct lvds_fp_timing fp_timing;
252 struct lvds_dvo_timing dvo_timing;
253 struct lvds_pnp_id pnp_id;
254} __attribute__((packed));
255
256struct bdb_lvds_lfp_data {
257 struct bdb_lvds_lfp_data_entry data[16];
258} __attribute__((packed));
259
260struct aimdb_header {
261 char signature[16];
262 char oem_device[20];
263 u16 aimdb_version;
264 u16 aimdb_header_size;
265 u16 aimdb_size;
266} __attribute__((packed));
267
268struct aimdb_block {
269 u8 aimdb_id;
270 u16 aimdb_size;
271} __attribute__((packed));
272
273struct vch_panel_data {
274 u16 fp_timing_offset;
275 u8 fp_timing_size;
276 u16 dvo_timing_offset;
277 u8 dvo_timing_size;
278 u16 text_fitting_offset;
279 u8 text_fitting_size;
280 u16 graphics_fitting_offset;
281 u8 graphics_fitting_size;
282} __attribute__((packed));
283
284struct vch_bdb_22 {
285 struct aimdb_block aimdb_block;
286 struct vch_panel_data panels[16];
287} __attribute__((packed));
288
289struct bdb_sdvo_lvds_options {
290 u8 panel_backlight;
291 u8 h40_set_panel_type;
292 u8 panel_type;
293 u8 ssc_clk_freq;
294 u16 als_low_trip;
295 u16 als_high_trip;
296 u8 sclalarcoeff_tab_row_num;
297 u8 sclalarcoeff_tab_row_size;
298 u8 coefficient[8];
299 u8 panel_misc_bits_1;
300 u8 panel_misc_bits_2;
301 u8 panel_misc_bits_3;
302 u8 panel_misc_bits_4;
303} __attribute__((packed));
304
305
306extern int psb_intel_init_bios(struct drm_device *dev);
307extern void psb_intel_destroy_bios(struct drm_device *dev);
308
309/*
310 * Driver<->VBIOS interaction occurs through scratch bits in
311 * GR18 & SWF*.
312 */
313
314/* GR18 bits are set on display switch and hotkey events */
315#define GR18_DRIVER_SWITCH_EN (1<<7) /* 0: VBIOS control, 1: driver control */
316#define GR18_HOTKEY_MASK 0x78 /* See also SWF4 15:0 */
317#define GR18_HK_NONE (0x0<<3)
318#define GR18_HK_LFP_STRETCH (0x1<<3)
319#define GR18_HK_TOGGLE_DISP (0x2<<3)
320#define GR18_HK_DISP_SWITCH (0x4<<3) /* see SWF14 15:0 for what to enable */
321#define GR18_HK_POPUP_DISABLED (0x6<<3)
322#define GR18_HK_POPUP_ENABLED (0x7<<3)
323#define GR18_HK_PFIT (0x8<<3)
324#define GR18_HK_APM_CHANGE (0xa<<3)
325#define GR18_HK_MULTIPLE (0xc<<3)
326#define GR18_USER_INT_EN (1<<2)
327#define GR18_A0000_FLUSH_EN (1<<1)
328#define GR18_SMM_EN (1<<0)
329
330/* Set by driver, cleared by VBIOS */
331#define SWF00_YRES_SHIFT 16
332#define SWF00_XRES_SHIFT 0
333#define SWF00_RES_MASK 0xffff
334
335/* Set by VBIOS at boot time and driver at runtime */
336#define SWF01_TV2_FORMAT_SHIFT 8
337#define SWF01_TV1_FORMAT_SHIFT 0
338#define SWF01_TV_FORMAT_MASK 0xffff
339
340#define SWF10_VBIOS_BLC_I2C_EN (1<<29)
341#define SWF10_GTT_OVERRIDE_EN (1<<28)
342#define SWF10_LFP_DPMS_OVR (1<<27) /* override DPMS on display switch */
343#define SWF10_ACTIVE_TOGGLE_LIST_MASK (7<<24)
344#define SWF10_OLD_TOGGLE 0x0
345#define SWF10_TOGGLE_LIST_1 0x1
346#define SWF10_TOGGLE_LIST_2 0x2
347#define SWF10_TOGGLE_LIST_3 0x3
348#define SWF10_TOGGLE_LIST_4 0x4
349#define SWF10_PANNING_EN (1<<23)
350#define SWF10_DRIVER_LOADED (1<<22)
351#define SWF10_EXTENDED_DESKTOP (1<<21)
352#define SWF10_EXCLUSIVE_MODE (1<<20)
353#define SWF10_OVERLAY_EN (1<<19)
354#define SWF10_PLANEB_HOLDOFF (1<<18)
355#define SWF10_PLANEA_HOLDOFF (1<<17)
356#define SWF10_VGA_HOLDOFF (1<<16)
357#define SWF10_ACTIVE_DISP_MASK 0xffff
358#define SWF10_PIPEB_LFP2 (1<<15)
359#define SWF10_PIPEB_EFP2 (1<<14)
360#define SWF10_PIPEB_TV2 (1<<13)
361#define SWF10_PIPEB_CRT2 (1<<12)
362#define SWF10_PIPEB_LFP (1<<11)
363#define SWF10_PIPEB_EFP (1<<10)
364#define SWF10_PIPEB_TV (1<<9)
365#define SWF10_PIPEB_CRT (1<<8)
366#define SWF10_PIPEA_LFP2 (1<<7)
367#define SWF10_PIPEA_EFP2 (1<<6)
368#define SWF10_PIPEA_TV2 (1<<5)
369#define SWF10_PIPEA_CRT2 (1<<4)
370#define SWF10_PIPEA_LFP (1<<3)
371#define SWF10_PIPEA_EFP (1<<2)
372#define SWF10_PIPEA_TV (1<<1)
373#define SWF10_PIPEA_CRT (1<<0)
374
375#define SWF11_MEMORY_SIZE_SHIFT 16
376#define SWF11_SV_TEST_EN (1<<15)
377#define SWF11_IS_AGP (1<<14)
378#define SWF11_DISPLAY_HOLDOFF (1<<13)
379#define SWF11_DPMS_REDUCED (1<<12)
380#define SWF11_IS_VBE_MODE (1<<11)
381#define SWF11_PIPEB_ACCESS (1<<10) /* 0 here means pipe a */
382#define SWF11_DPMS_MASK 0x07
383#define SWF11_DPMS_OFF (1<<2)
384#define SWF11_DPMS_SUSPEND (1<<1)
385#define SWF11_DPMS_STANDBY (1<<0)
386#define SWF11_DPMS_ON 0
387
388#define SWF14_GFX_PFIT_EN (1<<31)
389#define SWF14_TEXT_PFIT_EN (1<<30)
390#define SWF14_LID_STATUS_CLOSED (1<<29) /* 0 here means open */
391#define SWF14_POPUP_EN (1<<28)
392#define SWF14_DISPLAY_HOLDOFF (1<<27)
393#define SWF14_DISP_DETECT_EN (1<<26)
394#define SWF14_DOCKING_STATUS_DOCKED (1<<25) /* 0 here means undocked */
395#define SWF14_DRIVER_STATUS (1<<24)
396#define SWF14_OS_TYPE_WIN9X (1<<23)
397#define SWF14_OS_TYPE_WINNT (1<<22)
398/* 21:19 rsvd */
399#define SWF14_PM_TYPE_MASK 0x00070000
400#define SWF14_PM_ACPI_VIDEO (0x4 << 16)
401#define SWF14_PM_ACPI (0x3 << 16)
402#define SWF14_PM_APM_12 (0x2 << 16)
403#define SWF14_PM_APM_11 (0x1 << 16)
404#define SWF14_HK_REQUEST_MASK 0x0000ffff /* see GR18 6:3 for event type */
405 /* if GR18 indicates a display switch */
406#define SWF14_DS_PIPEB_LFP2_EN (1<<15)
407#define SWF14_DS_PIPEB_EFP2_EN (1<<14)
408#define SWF14_DS_PIPEB_TV2_EN (1<<13)
409#define SWF14_DS_PIPEB_CRT2_EN (1<<12)
410#define SWF14_DS_PIPEB_LFP_EN (1<<11)
411#define SWF14_DS_PIPEB_EFP_EN (1<<10)
412#define SWF14_DS_PIPEB_TV_EN (1<<9)
413#define SWF14_DS_PIPEB_CRT_EN (1<<8)
414#define SWF14_DS_PIPEA_LFP2_EN (1<<7)
415#define SWF14_DS_PIPEA_EFP2_EN (1<<6)
416#define SWF14_DS_PIPEA_TV2_EN (1<<5)
417#define SWF14_DS_PIPEA_CRT2_EN (1<<4)
418#define SWF14_DS_PIPEA_LFP_EN (1<<3)
419#define SWF14_DS_PIPEA_EFP_EN (1<<2)
420#define SWF14_DS_PIPEA_TV_EN (1<<1)
421#define SWF14_DS_PIPEA_CRT_EN (1<<0)
422 /* if GR18 indicates a panel fitting request */
423#define SWF14_PFIT_EN (1<<0) /* 0 means disable */
424 /* if GR18 indicates an APM change request */
425#define SWF14_APM_HIBERNATE 0x4
426#define SWF14_APM_SUSPEND 0x3
427#define SWF14_APM_STANDBY 0x1
428#define SWF14_APM_RESTORE 0x0
429
430#endif /* _I830_BIOS_H_ */
diff --git a/drivers/gpu/drm/gma500/intel_gmbus.c b/drivers/gpu/drm/gma500/intel_gmbus.c
new file mode 100644
index 00000000000..147584ac8d0
--- /dev/null
+++ b/drivers/gpu/drm/gma500/intel_gmbus.c
@@ -0,0 +1,493 @@
1/*
2 * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
3 * Copyright © 2006-2008,2010 Intel Corporation
4 * Jesse Barnes <jesse.barnes@intel.com>
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 * DEALINGS IN THE SOFTWARE.
24 *
25 * Authors:
26 * Eric Anholt <eric@anholt.net>
27 * Chris Wilson <chris@chris-wilson.co.uk>
28 */
29#include <linux/module.h>
30#include <linux/i2c.h>
31#include <linux/i2c-algo-bit.h>
32#include <drm/drmP.h>
33#include <drm/drm.h>
34#include "psb_intel_drv.h"
35#include "gma_drm.h"
36#include "psb_drv.h"
37#include "psb_intel_reg.h"
38
39#define _wait_for(COND, MS, W) ({ \
40 unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \
41 int ret__ = 0; \
42	while (!(COND)) { \
43 if (time_after(jiffies, timeout__)) { \
44 ret__ = -ETIMEDOUT; \
45 break; \
46 } \
47 if (W && !(in_atomic() || in_dbg_master())) msleep(W); \
48 } \
49 ret__; \
50})
51
52#define wait_for(COND, MS) _wait_for(COND, MS, 1)
53#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
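
/*
 * Usage sketch (illustrative): poll a GMBUS status bit for up to 50ms,
 * sleeping ~1ms between reads; this evaluates to 0 on success or
 * -ETIMEDOUT, which is exactly how gmbus_xfer() below consumes it. The
 * local 'dev' is needed by the REG_READ() macro, as in the code below.
 */
static inline int example_wait_hw_ready(struct drm_device *dev)
{
	return wait_for(REG_READ(GMBUS2) & GMBUS_HW_RDY, 50);
}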
54
55/* Intel GPIO access functions */
56
57#define I2C_RISEFALL_TIME 20
58
59static inline struct intel_gmbus *
60to_intel_gmbus(struct i2c_adapter *i2c)
61{
62 return container_of(i2c, struct intel_gmbus, adapter);
63}
64
65struct intel_gpio {
66 struct i2c_adapter adapter;
67 struct i2c_algo_bit_data algo;
68 struct drm_psb_private *dev_priv;
69 u32 reg;
70};
71
72void
73gma_intel_i2c_reset(struct drm_device *dev)
74{
75 REG_WRITE(GMBUS0, 0);
76}
77
78static void intel_i2c_quirk_set(struct drm_psb_private *dev_priv, bool enable)
79{
80 /* When using bit bashing for I2C, this bit needs to be set to 1 */
81 /* FIXME: We are never Pineview, right?
82
83 u32 val;
84
85 if (!IS_PINEVIEW(dev_priv->dev))
86 return;
87
88 val = REG_READ(DSPCLK_GATE_D);
89 if (enable)
90 val |= DPCUNIT_CLOCK_GATE_DISABLE;
91 else
92 val &= ~DPCUNIT_CLOCK_GATE_DISABLE;
93 REG_WRITE(DSPCLK_GATE_D, val);
94
95 return;
96 */
97}
98
99static u32 get_reserved(struct intel_gpio *gpio)
100{
101 struct drm_psb_private *dev_priv = gpio->dev_priv;
102 struct drm_device *dev = dev_priv->dev;
103 u32 reserved = 0;
104
105 /* On most chips, these bits must be preserved in software. */
106 reserved = REG_READ(gpio->reg) &
107 (GPIO_DATA_PULLUP_DISABLE |
108 GPIO_CLOCK_PULLUP_DISABLE);
109
110 return reserved;
111}
112
113static int get_clock(void *data)
114{
115 struct intel_gpio *gpio = data;
116 struct drm_psb_private *dev_priv = gpio->dev_priv;
117 struct drm_device *dev = dev_priv->dev;
118 u32 reserved = get_reserved(gpio);
119 REG_WRITE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
120 REG_WRITE(gpio->reg, reserved);
121 return (REG_READ(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
122}
123
124static int get_data(void *data)
125{
126 struct intel_gpio *gpio = data;
127 struct drm_psb_private *dev_priv = gpio->dev_priv;
128 struct drm_device *dev = dev_priv->dev;
129 u32 reserved = get_reserved(gpio);
130 REG_WRITE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
131 REG_WRITE(gpio->reg, reserved);
132 return (REG_READ(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
133}
134
135static void set_clock(void *data, int state_high)
136{
137 struct intel_gpio *gpio = data;
138 struct drm_psb_private *dev_priv = gpio->dev_priv;
139 struct drm_device *dev = dev_priv->dev;
140 u32 reserved = get_reserved(gpio);
141 u32 clock_bits;
142
143 if (state_high)
144 clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
145 else
146 clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
147 GPIO_CLOCK_VAL_MASK;
148
149 REG_WRITE(gpio->reg, reserved | clock_bits);
150 REG_READ(gpio->reg); /* Posting */
151}
152
153static void set_data(void *data, int state_high)
154{
155 struct intel_gpio *gpio = data;
156 struct drm_psb_private *dev_priv = gpio->dev_priv;
157 struct drm_device *dev = dev_priv->dev;
158 u32 reserved = get_reserved(gpio);
159 u32 data_bits;
160
161 if (state_high)
162 data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
163 else
164 data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
165 GPIO_DATA_VAL_MASK;
166
167 REG_WRITE(gpio->reg, reserved | data_bits);
168 REG_READ(gpio->reg);
169}
170
171static struct i2c_adapter *
172intel_gpio_create(struct drm_psb_private *dev_priv, u32 pin)
173{
174 static const int map_pin_to_reg[] = {
175 0,
176 GPIOB,
177 GPIOA,
178 GPIOC,
179 GPIOD,
180 GPIOE,
181 0,
182 GPIOF,
183 };
184 struct intel_gpio *gpio;
185
186 if (pin >= ARRAY_SIZE(map_pin_to_reg) || !map_pin_to_reg[pin])
187 return NULL;
188
189 gpio = kzalloc(sizeof(struct intel_gpio), GFP_KERNEL);
190 if (gpio == NULL)
191 return NULL;
192
193 gpio->reg = map_pin_to_reg[pin];
194 gpio->dev_priv = dev_priv;
195
196 snprintf(gpio->adapter.name, sizeof(gpio->adapter.name),
197 "gma500 GPIO%c", "?BACDE?F"[pin]);
198 gpio->adapter.owner = THIS_MODULE;
199 gpio->adapter.algo_data = &gpio->algo;
200 gpio->adapter.dev.parent = &dev_priv->dev->pdev->dev;
201 gpio->algo.setsda = set_data;
202 gpio->algo.setscl = set_clock;
203 gpio->algo.getsda = get_data;
204 gpio->algo.getscl = get_clock;
205 gpio->algo.udelay = I2C_RISEFALL_TIME;
206 gpio->algo.timeout = usecs_to_jiffies(2200);
207 gpio->algo.data = gpio;
208
209 if (i2c_bit_add_bus(&gpio->adapter))
210 goto out_free;
211
212 return &gpio->adapter;
213
214out_free:
215 kfree(gpio);
216 return NULL;
217}
218
219static int
220intel_i2c_quirk_xfer(struct drm_psb_private *dev_priv,
221 struct i2c_adapter *adapter,
222 struct i2c_msg *msgs,
223 int num)
224{
225 struct intel_gpio *gpio = container_of(adapter,
226 struct intel_gpio,
227 adapter);
228 int ret;
229
230 gma_intel_i2c_reset(dev_priv->dev);
231
232 intel_i2c_quirk_set(dev_priv, true);
233 set_data(gpio, 1);
234 set_clock(gpio, 1);
235 udelay(I2C_RISEFALL_TIME);
236
237 ret = adapter->algo->master_xfer(adapter, msgs, num);
238
239 set_data(gpio, 1);
240 set_clock(gpio, 1);
241 intel_i2c_quirk_set(dev_priv, false);
242
243 return ret;
244}
245
246static int
247gmbus_xfer(struct i2c_adapter *adapter,
248 struct i2c_msg *msgs,
249 int num)
250{
251 struct intel_gmbus *bus = container_of(adapter,
252 struct intel_gmbus,
253 adapter);
254 struct drm_psb_private *dev_priv = adapter->algo_data;
255 struct drm_device *dev = dev_priv->dev;
256 int i, reg_offset;
257
258 if (bus->force_bit)
259 return intel_i2c_quirk_xfer(dev_priv,
260 bus->force_bit, msgs, num);
261
262 reg_offset = 0;
263
264 REG_WRITE(GMBUS0 + reg_offset, bus->reg0);
265
266 for (i = 0; i < num; i++) {
267 u16 len = msgs[i].len;
268 u8 *buf = msgs[i].buf;
269
270 if (msgs[i].flags & I2C_M_RD) {
271 REG_WRITE(GMBUS1 + reg_offset,
272 GMBUS_CYCLE_WAIT | (i + 1 == num ? GMBUS_CYCLE_STOP : 0) |
273 (len << GMBUS_BYTE_COUNT_SHIFT) |
274 (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
275 GMBUS_SLAVE_READ | GMBUS_SW_RDY);
276 REG_READ(GMBUS2+reg_offset);
277 do {
278 u32 val, loop = 0;
279
280 if (wait_for(REG_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
281 goto timeout;
282 if (REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
283 goto clear_err;
284
285 val = REG_READ(GMBUS3 + reg_offset);
286 do {
287 *buf++ = val & 0xff;
288 val >>= 8;
289 } while (--len && ++loop < 4);
290 } while (len);
291 } else {
292 u32 val, loop;
293
294 val = loop = 0;
295 do {
296 val |= *buf++ << (8 * loop);
297 } while (--len && ++loop < 4);
298
299 REG_WRITE(GMBUS3 + reg_offset, val);
300 REG_WRITE(GMBUS1 + reg_offset,
301 (i + 1 == num ? GMBUS_CYCLE_STOP : GMBUS_CYCLE_WAIT) |
302 (msgs[i].len << GMBUS_BYTE_COUNT_SHIFT) |
303 (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
304 GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
305 REG_READ(GMBUS2+reg_offset);
306
307 while (len) {
308 if (wait_for(REG_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
309 goto timeout;
310 if (REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
311 goto clear_err;
312
313 val = loop = 0;
314 do {
315 val |= *buf++ << (8 * loop);
316 } while (--len && ++loop < 4);
317
318 REG_WRITE(GMBUS3 + reg_offset, val);
319 REG_READ(GMBUS2+reg_offset);
320 }
321 }
322
323 if (i + 1 < num && wait_for(REG_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), 50))
324 goto timeout;
325 if (REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
326 goto clear_err;
327 }
328
329 goto done;
330
331clear_err:
332 /* Toggle the Software Clear Interrupt bit. This has the effect
333 * of resetting the GMBUS controller and so clearing the
334 * BUS_ERROR raised by the slave's NAK.
335 */
336 REG_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT);
337 REG_WRITE(GMBUS1 + reg_offset, 0);
338
339done:
340 /* Mark the GMBUS interface as disabled. We will re-enable it at the
341 * start of the next xfer, till then let it sleep.
342 */
343 REG_WRITE(GMBUS0 + reg_offset, 0);
344 return i;
345
346timeout:
347 DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d [%s]\n",
348 bus->reg0 & 0xff, bus->adapter.name);
349 REG_WRITE(GMBUS0 + reg_offset, 0);
350
351 /* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
352 bus->force_bit = intel_gpio_create(dev_priv, bus->reg0 & 0xff);
353 if (!bus->force_bit)
354 return -ENOMEM;
355
356 return intel_i2c_quirk_xfer(dev_priv, bus->force_bit, msgs, num);
357}
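
/*
 * Usage sketch (illustrative): a one-byte register read as the i2c
 * core would drive it through this adapter; i2c_transfer() ends up in
 * gmbus_xfer() above. The register/address values are placeholders.
 */
static inline int example_read_byte(struct i2c_adapter *adapter,
				    u8 addr, u8 *val)
{
	u8 reg = 0;
	struct i2c_msg msgs[] = {
		{ .addr = addr, .flags = 0, .len = 1, .buf = &reg },
		{ .addr = addr, .flags = I2C_M_RD, .len = 1, .buf = val },
	};

	return i2c_transfer(adapter, msgs, 2) == 2 ? 0 : -EIO;
}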
358
359static u32 gmbus_func(struct i2c_adapter *adapter)
360{
361 struct intel_gmbus *bus = container_of(adapter,
362 struct intel_gmbus,
363 adapter);
364
365 if (bus->force_bit)
366 bus->force_bit->algo->functionality(bus->force_bit);
367
368 return (I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
369 /* I2C_FUNC_10BIT_ADDR | */
370 I2C_FUNC_SMBUS_READ_BLOCK_DATA |
371 I2C_FUNC_SMBUS_BLOCK_PROC_CALL);
372}
373
374static const struct i2c_algorithm gmbus_algorithm = {
375 .master_xfer = gmbus_xfer,
376 .functionality = gmbus_func
377};
378
379/**
380 * gma_intel_setup_gmbus - instantiate all Intel i2c GMBuses
381 * @dev: DRM device
382 */
383int gma_intel_setup_gmbus(struct drm_device *dev)
384{
385 static const char *names[GMBUS_NUM_PORTS] = {
386 "disabled",
387 "ssc",
388 "vga",
389 "panel",
390 "dpc",
391 "dpb",
392 "reserved",
393 "dpd",
394 };
395 struct drm_psb_private *dev_priv = dev->dev_private;
396 int ret, i;
397
398	dev_priv->gmbus = kcalloc(GMBUS_NUM_PORTS, sizeof(struct intel_gmbus),
399			      GFP_KERNEL);
400 if (dev_priv->gmbus == NULL)
401 return -ENOMEM;
402
403 for (i = 0; i < GMBUS_NUM_PORTS; i++) {
404 struct intel_gmbus *bus = &dev_priv->gmbus[i];
405
406 bus->adapter.owner = THIS_MODULE;
407 bus->adapter.class = I2C_CLASS_DDC;
408 snprintf(bus->adapter.name,
409 sizeof(bus->adapter.name),
410 "gma500 gmbus %s",
411 names[i]);
412
413 bus->adapter.dev.parent = &dev->pdev->dev;
414 bus->adapter.algo_data = dev_priv;
415
416 bus->adapter.algo = &gmbus_algorithm;
417 ret = i2c_add_adapter(&bus->adapter);
418 if (ret)
419 goto err;
420
421 /* By default use a conservative clock rate */
422 bus->reg0 = i | GMBUS_RATE_100KHZ;
423
424 /* XXX force bit banging until GMBUS is fully debugged */
425 bus->force_bit = intel_gpio_create(dev_priv, i);
426 }
427
428 gma_intel_i2c_reset(dev_priv->dev);
429
430 return 0;
431
432err:
433	while (i--) {	/* unwind all registered adapters, including bus 0 */
434 struct intel_gmbus *bus = &dev_priv->gmbus[i];
435 i2c_del_adapter(&bus->adapter);
436 }
437 kfree(dev_priv->gmbus);
438 dev_priv->gmbus = NULL;
439 return ret;
440}
441
442void gma_intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed)
443{
444 struct intel_gmbus *bus = to_intel_gmbus(adapter);
445
446 /* speed:
447 * 0x0 = 100 KHz
448 * 0x1 = 50 KHz
449 * 0x2 = 400 KHz
450	 * 0x3 = 1000 KHz
451 */
452 bus->reg0 = (bus->reg0 & ~(0x3 << 8)) | (speed << 8);
453}
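
/*
 * Usage sketch (illustrative): select 400KHz before a DDC transaction.
 * Index 3 is the "panel" port per the names table in
 * gma_intel_setup_gmbus() above; 0x2 is the 400KHz encoding from the
 * table above.
 */
static inline void example_set_ddc_speed(struct drm_psb_private *dev_priv)
{
	gma_intel_gmbus_set_speed(&dev_priv->gmbus[3].adapter, 0x2);
}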
454
455void gma_intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
456{
457 struct intel_gmbus *bus = to_intel_gmbus(adapter);
458
459 if (force_bit) {
460 if (bus->force_bit == NULL) {
461 struct drm_psb_private *dev_priv = adapter->algo_data;
462 bus->force_bit = intel_gpio_create(dev_priv,
463 bus->reg0 & 0xff);
464 }
465 } else {
466 if (bus->force_bit) {
467 i2c_del_adapter(bus->force_bit);
468 kfree(bus->force_bit);
469 bus->force_bit = NULL;
470 }
471 }
472}
473
474void gma_intel_teardown_gmbus(struct drm_device *dev)
475{
476 struct drm_psb_private *dev_priv = dev->dev_private;
477 int i;
478
479 if (dev_priv->gmbus == NULL)
480 return;
481
482 for (i = 0; i < GMBUS_NUM_PORTS; i++) {
483 struct intel_gmbus *bus = &dev_priv->gmbus[i];
484 if (bus->force_bit) {
485 i2c_del_adapter(bus->force_bit);
486 kfree(bus->force_bit);
487 }
488 i2c_del_adapter(&bus->adapter);
489 }
490
491 kfree(dev_priv->gmbus);
492 dev_priv->gmbus = NULL;
493}
diff --git a/drivers/gpu/drm/gma500/intel_i2c.c b/drivers/gpu/drm/gma500/intel_i2c.c
new file mode 100644
index 00000000000..98a28c20955
--- /dev/null
+++ b/drivers/gpu/drm/gma500/intel_i2c.c
@@ -0,0 +1,169 @@
1/*
2 * Copyright © 2006-2007 Intel Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Authors:
18 * Eric Anholt <eric@anholt.net>
19 */
20#include <linux/export.h>
21#include <linux/i2c.h>
22#include <linux/i2c-algo-bit.h>
23
24#include "psb_drv.h"
25#include "psb_intel_reg.h"
26
27/*
28 * Intel GPIO access functions
29 */
30
31#define I2C_RISEFALL_TIME 20
32
33static int get_clock(void *data)
34{
35 struct psb_intel_i2c_chan *chan = data;
36 struct drm_device *dev = chan->drm_dev;
37 u32 val;
38
39 val = REG_READ(chan->reg);
40 return (val & GPIO_CLOCK_VAL_IN) != 0;
41}
42
43static int get_data(void *data)
44{
45 struct psb_intel_i2c_chan *chan = data;
46 struct drm_device *dev = chan->drm_dev;
47 u32 val;
48
49 val = REG_READ(chan->reg);
50 return (val & GPIO_DATA_VAL_IN) != 0;
51}
52
53static void set_clock(void *data, int state_high)
54{
55 struct psb_intel_i2c_chan *chan = data;
56 struct drm_device *dev = chan->drm_dev;
57 u32 reserved = 0, clock_bits;
58
59 /* On most chips, these bits must be preserved in software. */
60 reserved =
61 REG_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
62 GPIO_CLOCK_PULLUP_DISABLE);
63
64 if (state_high)
65 clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
66 else
67 clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
68 GPIO_CLOCK_VAL_MASK;
69 REG_WRITE(chan->reg, reserved | clock_bits);
70 udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
71}
72
73static void set_data(void *data, int state_high)
74{
75 struct psb_intel_i2c_chan *chan = data;
76 struct drm_device *dev = chan->drm_dev;
77 u32 reserved = 0, data_bits;
78
79 /* On most chips, these bits must be preserved in software. */
80 reserved =
81 REG_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
82 GPIO_CLOCK_PULLUP_DISABLE);
83
84 if (state_high)
85 data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
86 else
87 data_bits =
88 GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
89 GPIO_DATA_VAL_MASK;
90
91 REG_WRITE(chan->reg, reserved | data_bits);
92 udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
93}
94
95/**
96 * psb_intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg
97 * @dev: DRM device
99 * @reg: GPIO reg to use
100 * @name: name for this bus
101 *
102 * Creates and registers a new i2c bus with the Linux i2c layer, for use
103 * in output probing and control (e.g. DDC or SDVO control functions).
104 *
105 * Possible values for @reg include:
106 * %GPIOA
107 * %GPIOB
108 * %GPIOC
109 * %GPIOD
110 * %GPIOE
111 * %GPIOF
112 * %GPIOG
113 * %GPIOH
114 * see PRM for details on how these different busses are used.
115 */
116struct psb_intel_i2c_chan *psb_intel_i2c_create(struct drm_device *dev,
117 const u32 reg, const char *name)
118{
119 struct psb_intel_i2c_chan *chan;
120
121 chan = kzalloc(sizeof(struct psb_intel_i2c_chan), GFP_KERNEL);
122 if (!chan)
123 goto out_free;
124
125 chan->drm_dev = dev;
126 chan->reg = reg;
127 snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name);
128 chan->adapter.owner = THIS_MODULE;
129 chan->adapter.algo_data = &chan->algo;
130 chan->adapter.dev.parent = &dev->pdev->dev;
131 chan->algo.setsda = set_data;
132 chan->algo.setscl = set_clock;
133 chan->algo.getsda = get_data;
134 chan->algo.getscl = get_clock;
135 chan->algo.udelay = 20;
136 chan->algo.timeout = usecs_to_jiffies(2200);
137 chan->algo.data = chan;
138
139 i2c_set_adapdata(&chan->adapter, chan);
140
141 if (i2c_bit_add_bus(&chan->adapter))
142 goto out_free;
143
144 /* JJJ: raise SCL and SDA? */
145 set_data(chan, 1);
146 set_clock(chan, 1);
147 udelay(20);
148
149 return chan;
150
151out_free:
152 kfree(chan);
153 return NULL;
154}
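
/*
 * Usage sketch (illustrative): create a bit-banged DDC bus on GPIOA
 * and hand it to the DRM EDID helper, then tear it down again.
 */
static inline struct edid *example_probe_edid(struct drm_device *dev,
					struct drm_connector *connector)
{
	struct psb_intel_i2c_chan *ddc;
	struct edid *edid = NULL;

	ddc = psb_intel_i2c_create(dev, GPIOA, "DDC example");
	if (ddc) {
		edid = drm_get_edid(connector, &ddc->adapter);
		psb_intel_i2c_destroy(ddc);
	}
	return edid;
}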
155
156/**
157 * psb_intel_i2c_destroy - unregister and free i2c bus resources
158 * @chan: channel to free
159 *
160 * Unregister the adapter from the i2c layer, then free the structure.
161 */
162void psb_intel_i2c_destroy(struct psb_intel_i2c_chan *chan)
163{
164 if (!chan)
165 return;
166
167 i2c_del_adapter(&chan->adapter);
168 kfree(chan);
169}
diff --git a/drivers/gpu/drm/gma500/intel_opregion.c b/drivers/gpu/drm/gma500/intel_opregion.c
new file mode 100644
index 00000000000..d946bc1b17b
--- /dev/null
+++ b/drivers/gpu/drm/gma500/intel_opregion.c
@@ -0,0 +1,81 @@
1/*
2 * Copyright 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * FIXME: resolve with the i915 version
24 */
25
26#include "psb_drv.h"
27
28struct opregion_header {
29 u8 signature[16];
30 u32 size;
31 u32 opregion_ver;
32 u8 bios_ver[32];
33 u8 vbios_ver[16];
34 u8 driver_ver[16];
35 u32 mboxes;
36 u8 reserved[164];
37} __packed;
38
43struct opregion_swsci {
44 /*FIXME: add it later*/
45} __packed;
46
47struct opregion_acpi {
48 /*FIXME: add it later*/
49} __packed;
50
51int gma_intel_opregion_init(struct drm_device *dev)
52{
53 struct drm_psb_private *dev_priv = dev->dev_private;
54 u32 opregion_phy;
55 void *base;
56 u32 *lid_state;
57
58 dev_priv->lid_state = NULL;
59
60 pci_read_config_dword(dev->pdev, 0xfc, &opregion_phy);
61 if (opregion_phy == 0)
62 return -ENOTSUPP;
63
64 base = ioremap(opregion_phy, 8*1024);
65 if (!base)
66 return -ENOMEM;
67
68 lid_state = base + 0x01ac;
69
70 dev_priv->lid_state = lid_state;
71 dev_priv->lid_last_state = readl(lid_state);
72 return 0;
73}
74
75int gma_intel_opregion_exit(struct drm_device *dev)
76{
77 struct drm_psb_private *dev_priv = dev->dev_private;
78 if (dev_priv->lid_state)
79 iounmap(dev_priv->lid_state);
80 return 0;
81}
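
/*
 * Usage sketch (illustrative): the mapped lid-state word can be
 * re-read later and compared with lid_last_state to notice lid
 * open/close transitions; the driver only ever compares raw values.
 */
static inline bool example_lid_changed(struct drm_psb_private *dev_priv)
{
	u32 lid = readl(dev_priv->lid_state);

	if (lid == dev_priv->lid_last_state)
		return false;
	dev_priv->lid_last_state = lid;
	return true;
}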
diff --git a/drivers/gpu/drm/gma500/mid_bios.c b/drivers/gpu/drm/gma500/mid_bios.c
new file mode 100644
index 00000000000..5eee9ad80da
--- /dev/null
+++ b/drivers/gpu/drm/gma500/mid_bios.c
@@ -0,0 +1,263 @@
1/**************************************************************************
2 * Copyright (c) 2011, Intel Corporation.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 **************************************************************************/
19
20/* TODO
21 * - Split functions by vbt type
22 * - Make them all take drm_device
23 * - Check ioremap failures
24 */
25
26#include <drm/drmP.h>
27#include <drm/drm.h>
28#include "gma_drm.h"
29#include "psb_drv.h"
30#include "mid_bios.h"
31
32static void mid_get_fuse_settings(struct drm_device *dev)
33{
34 struct drm_psb_private *dev_priv = dev->dev_private;
35 struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
36 uint32_t fuse_value = 0;
37 uint32_t fuse_value_tmp = 0;
38
39#define FB_REG06 0xD0810600
40#define FB_MIPI_DISABLE (1 << 11)
41#define FB_REG09 0xD0810900
43#define FB_SKU_MASK 0x7000
44#define FB_SKU_SHIFT 12
45#define FB_SKU_100 0
46#define FB_SKU_100L 1
47#define FB_SKU_83 2
48 if (pci_root == NULL) {
49 WARN_ON(1);
50 return;
51 }
52
53
54 pci_write_config_dword(pci_root, 0xD0, FB_REG06);
55 pci_read_config_dword(pci_root, 0xD4, &fuse_value);
56
57 /* FB_MIPI_DISABLE doesn't mean LVDS on with Medfield */
58 if (IS_MRST(dev))
59 dev_priv->iLVDS_enable = fuse_value & FB_MIPI_DISABLE;
60
61 DRM_INFO("internal display is %s\n",
62 dev_priv->iLVDS_enable ? "LVDS display" : "MIPI display");
63
64 /* Prevent runtime suspend at start*/
65 if (dev_priv->iLVDS_enable) {
66 dev_priv->is_lvds_on = true;
67 dev_priv->is_mipi_on = false;
68 } else {
69 dev_priv->is_mipi_on = true;
70 dev_priv->is_lvds_on = false;
71 }
72
73 dev_priv->video_device_fuse = fuse_value;
74
75 pci_write_config_dword(pci_root, 0xD0, FB_REG09);
76 pci_read_config_dword(pci_root, 0xD4, &fuse_value);
77
78	dev_dbg(dev->dev, "SKU value is 0x%x.\n", fuse_value);
79 fuse_value_tmp = (fuse_value & FB_SKU_MASK) >> FB_SKU_SHIFT;
80
81 dev_priv->fuse_reg_value = fuse_value;
82
83 switch (fuse_value_tmp) {
84 case FB_SKU_100:
85 dev_priv->core_freq = 200;
86 break;
87 case FB_SKU_100L:
88 dev_priv->core_freq = 100;
89 break;
90 case FB_SKU_83:
91 dev_priv->core_freq = 166;
92 break;
93 default:
94 dev_warn(dev->dev, "Invalid SKU values, SKU value = 0x%08x\n",
95 fuse_value_tmp);
96 dev_priv->core_freq = 0;
97 }
98 dev_dbg(dev->dev, "LNC core clk is %dMHz.\n", dev_priv->core_freq);
99 pci_dev_put(pci_root);
100}
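
/*
 * Note (illustrative): 0xD0/0xD4 on the PCI root form an indirect
 * register window -- the encoded address (e.g. FB_REG06) is written
 * to 0xD0 and the value is then read back through 0xD4, as done above
 * for both the MIPI-disable and SKU fuses.
 */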
101
102/*
103 * Get the revision ID, B0:D2:F0;0x08
104 */
105static void mid_get_pci_revID(struct drm_psb_private *dev_priv)
106{
107 uint32_t platform_rev_id = 0;
108 struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
109
110 if (pci_gfx_root == NULL) {
111 WARN_ON(1);
112 return;
113 }
114 pci_read_config_dword(pci_gfx_root, 0x08, &platform_rev_id);
115 dev_priv->platform_rev_id = (uint8_t) platform_rev_id;
116 pci_dev_put(pci_gfx_root);
117 dev_dbg(dev_priv->dev->dev, "platform_rev_id is %x\n",
118 dev_priv->platform_rev_id);
119}
120
121static void mid_get_vbt_data(struct drm_psb_private *dev_priv)
122{
123 struct drm_device *dev = dev_priv->dev;
124 struct oaktrail_vbt *vbt = &dev_priv->vbt_data;
125 u32 addr;
126 u16 new_size;
127 u8 *vbt_virtual;
128 u8 bpi;
129 u8 number_desc = 0;
130 struct oaktrail_timing_info *dp_ti = &dev_priv->gct_data.DTD;
131 struct gct_r10_timing_info ti;
132 void *pGCT;
133 struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
134
135 /* Get the address of the platform config vbt, B0:D2:F0;0xFC */
136 pci_read_config_dword(pci_gfx_root, 0xFC, &addr);
137 pci_dev_put(pci_gfx_root);
138
139 dev_dbg(dev->dev, "drm platform config address is %x\n", addr);
140
141	/* A platform config address of zero means the firmware
142	   doesn't support a VBT */
143
144 if (addr == 0) {
145 vbt->size = 0;
146 return;
147 }
148
149 /* get the virtual address of the vbt */
150 vbt_virtual = ioremap(addr, sizeof(*vbt));
151 if (vbt_virtual == NULL) {
152 vbt->size = 0;
153 return;
154 }
155
156 memcpy(vbt, vbt_virtual, sizeof(*vbt));
157 iounmap(vbt_virtual); /* Free virtual address space */
158
159	/* No matching signature, don't process the data */
160 if (memcmp(vbt->signature, "$GCT", 4)) {
161 vbt->size = 0;
162 return;
163 }
164
165 dev_dbg(dev->dev, "GCT revision is %x\n", vbt->revision);
166
167 switch (vbt->revision) {
168 case 0:
169 vbt->oaktrail_gct = ioremap(addr + sizeof(*vbt) - 4,
170 vbt->size - sizeof(*vbt) + 4);
171 pGCT = vbt->oaktrail_gct;
172 bpi = ((struct oaktrail_gct_v1 *)pGCT)->PD.BootPanelIndex;
173 dev_priv->gct_data.bpi = bpi;
174 dev_priv->gct_data.pt =
175 ((struct oaktrail_gct_v1 *)pGCT)->PD.PanelType;
176 memcpy(&dev_priv->gct_data.DTD,
177 &((struct oaktrail_gct_v1 *)pGCT)->panel[bpi].DTD,
178 sizeof(struct oaktrail_timing_info));
179 dev_priv->gct_data.Panel_Port_Control =
180 ((struct oaktrail_gct_v1 *)pGCT)->panel[bpi].Panel_Port_Control;
181 dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
182 ((struct oaktrail_gct_v1 *)pGCT)->panel[bpi].Panel_MIPI_Display_Descriptor;
183 break;
184 case 1:
185 vbt->oaktrail_gct = ioremap(addr + sizeof(*vbt) - 4,
186 vbt->size - sizeof(*vbt) + 4);
187 pGCT = vbt->oaktrail_gct;
188 bpi = ((struct oaktrail_gct_v2 *)pGCT)->PD.BootPanelIndex;
189 dev_priv->gct_data.bpi = bpi;
190 dev_priv->gct_data.pt =
191 ((struct oaktrail_gct_v2 *)pGCT)->PD.PanelType;
192 memcpy(&dev_priv->gct_data.DTD,
193 &((struct oaktrail_gct_v2 *)pGCT)->panel[bpi].DTD,
194 sizeof(struct oaktrail_timing_info));
195 dev_priv->gct_data.Panel_Port_Control =
196 ((struct oaktrail_gct_v2 *)pGCT)->panel[bpi].Panel_Port_Control;
197 dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
198 ((struct oaktrail_gct_v2 *)pGCT)->panel[bpi].Panel_MIPI_Display_Descriptor;
199 break;
200 case 0x10:
201		/* Header definition changed from rev 01 (v2) to rev 10h, */
202		/* so some values have changed location. */
203 new_size = vbt->checksum; /*checksum contains lo size byte*/
204 /*LSB of oaktrail_gct contains hi size byte*/
205 new_size |= ((0xff & (unsigned int)(long)vbt->oaktrail_gct)) << 8;
206
207 vbt->checksum = vbt->size; /*size contains the checksum*/
208 if (new_size > 0xff)
209 vbt->size = 0xff; /*restrict size to 255*/
210 else
211 vbt->size = new_size;
212
213 /* number of descriptors defined in the GCT */
214 number_desc = ((0xff00 & (unsigned int)(long)vbt->oaktrail_gct)) >> 8;
215 bpi = ((0xff0000 & (unsigned int)(long)vbt->oaktrail_gct)) >> 16;
216 vbt->oaktrail_gct = ioremap(addr + GCT_R10_HEADER_SIZE,
217 GCT_R10_DISPLAY_DESC_SIZE * number_desc);
218 pGCT = vbt->oaktrail_gct;
219 pGCT = (u8 *)pGCT + (bpi*GCT_R10_DISPLAY_DESC_SIZE);
220 dev_priv->gct_data.bpi = bpi; /*save boot panel id*/
221
222 /*copy the GCT display timings into a temp structure*/
223 memcpy(&ti, pGCT, sizeof(struct gct_r10_timing_info));
224
225 /*now copy the temp struct into the dev_priv->gct_data*/
226 dp_ti->pixel_clock = ti.pixel_clock;
227 dp_ti->hactive_hi = ti.hactive_hi;
228 dp_ti->hactive_lo = ti.hactive_lo;
229 dp_ti->hblank_hi = ti.hblank_hi;
230 dp_ti->hblank_lo = ti.hblank_lo;
231 dp_ti->hsync_offset_hi = ti.hsync_offset_hi;
232 dp_ti->hsync_offset_lo = ti.hsync_offset_lo;
233 dp_ti->hsync_pulse_width_hi = ti.hsync_pulse_width_hi;
234 dp_ti->hsync_pulse_width_lo = ti.hsync_pulse_width_lo;
235 dp_ti->vactive_hi = ti.vactive_hi;
236 dp_ti->vactive_lo = ti.vactive_lo;
237 dp_ti->vblank_hi = ti.vblank_hi;
238 dp_ti->vblank_lo = ti.vblank_lo;
239 dp_ti->vsync_offset_hi = ti.vsync_offset_hi;
240 dp_ti->vsync_offset_lo = ti.vsync_offset_lo;
241 dp_ti->vsync_pulse_width_hi = ti.vsync_pulse_width_hi;
242 dp_ti->vsync_pulse_width_lo = ti.vsync_pulse_width_lo;
243
244 /* Move the MIPI_Display_Descriptor data from GCT to dev priv */
245 dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
246 *((u8 *)pGCT + 0x0d);
247 dev_priv->gct_data.Panel_MIPI_Display_Descriptor |=
248 (*((u8 *)pGCT + 0x0e)) << 8;
249 break;
250 default:
251 dev_err(dev->dev, "Unknown revision of GCT!\n");
252 vbt->size = 0;
253 }
254}
255
256int mid_chip_setup(struct drm_device *dev)
257{
258 struct drm_psb_private *dev_priv = dev->dev_private;
259 mid_get_fuse_settings(dev);
260 mid_get_vbt_data(dev_priv);
261 mid_get_pci_revID(dev_priv);
262 return 0;
263}
diff --git a/drivers/gpu/drm/gma500/mid_bios.h b/drivers/gpu/drm/gma500/mid_bios.h
new file mode 100644
index 00000000000..00e7d564b7e
--- /dev/null
+++ b/drivers/gpu/drm/gma500/mid_bios.h
@@ -0,0 +1,21 @@
1/**************************************************************************
2 * Copyright (c) 2011, Intel Corporation.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 **************************************************************************/
19
20extern int mid_chip_setup(struct drm_device *dev);
21
diff --git a/drivers/gpu/drm/gma500/mmu.c b/drivers/gpu/drm/gma500/mmu.c
new file mode 100644
index 00000000000..c904d73b1de
--- /dev/null
+++ b/drivers/gpu/drm/gma500/mmu.c
@@ -0,0 +1,858 @@
1/**************************************************************************
2 * Copyright (c) 2007, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 **************************************************************************/
18#include <drm/drmP.h>
19#include "psb_drv.h"
20#include "psb_reg.h"
21
22/*
23 * Code for the SGX MMU:
24 */
25
26/*
27 * clflush on one processor only:
28 * clflush should apparently flush the cache line on all processors in an
29 * SMP system.
30 */
31
32/*
33 * kmap atomic:
34 * The usage of the slots must be completely encapsulated within a spinlock, and
35 * no other functions that may be using the locks for other purposes may be
36 * called from within the locked region.
37 * Since the slots are per processor, this will guarantee that we are the only
38 * user.
39 */
40
41/*
42 * TODO: Inserting ptes from an interrupt handler:
43 * This may be desirable for some SGX functionality where the GPU can fault in
44 * needed pages. For that, we need to make an atomic insert_pages function, that
45 * may fail.
46 * If it fails, the caller needs to insert the page using a workqueue function,
47 * but on average it should be fast.
48 */
49
50struct psb_mmu_driver {
51 /* protects driver- and pd structures. Always take in read mode
52 * before taking the page table spinlock.
53 */
54 struct rw_semaphore sem;
55
56 /* protects page tables, directory tables
57 * and pt structures.
58 */
59 spinlock_t lock;
60
61 atomic_t needs_tlbflush;
62
63 uint8_t __iomem *register_map;
64 struct psb_mmu_pd *default_pd;
65 /*uint32_t bif_ctrl;*/
66 int has_clflush;
67 int clflush_add;
68 unsigned long clflush_mask;
69
70 struct drm_psb_private *dev_priv;
71};
72
73struct psb_mmu_pd;
74
75struct psb_mmu_pt {
76 struct psb_mmu_pd *pd;
77 uint32_t index;
78 uint32_t count;
79 struct page *p;
80 uint32_t *v;
81};
82
83struct psb_mmu_pd {
84 struct psb_mmu_driver *driver;
85 int hw_context;
86 struct psb_mmu_pt **tables;
87 struct page *p;
88 struct page *dummy_pt;
89 struct page *dummy_page;
90 uint32_t pd_mask;
91 uint32_t invalid_pde;
92 uint32_t invalid_pte;
93};
94
95static inline uint32_t psb_mmu_pt_index(uint32_t offset)
96{
97 return (offset >> PSB_PTE_SHIFT) & 0x3FF;
98}
99
100static inline uint32_t psb_mmu_pd_index(uint32_t offset)
101{
102 return offset >> PSB_PDE_SHIFT;
103}
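
These two helpers split a linear offset into the classic two-level page-table
indices: the page directory index in the high bits, a 10-bit page table index
below it. A standalone sketch, assuming the usual x86-style values
PSB_PTE_SHIFT == 12 and PSB_PDE_SHIFT == 22 (defined in psb_reg.h, which is
not part of this hunk):

#include <stdint.h>
#include <stdio.h>

#define PSB_PTE_SHIFT 12	/* assumed; see psb_reg.h */
#define PSB_PDE_SHIFT 22	/* assumed; see psb_reg.h */

int main(void)
{
	uint32_t offset = 0x12345678;
	uint32_t pd = offset >> PSB_PDE_SHIFT;
	uint32_t pt = (offset >> PSB_PTE_SHIFT) & 0x3FF;

	printf("0x%08x -> pd index %u, pt index %u\n", offset, pd, pt);
	return 0;
}
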
104
105static inline void psb_clflush(void *addr)
106{
107 __asm__ __volatile__("clflush (%0)\n" : : "r"(addr) : "memory");
108}
109
110static inline void psb_mmu_clflush(struct psb_mmu_driver *driver,
111 void *addr)
112{
113 if (!driver->has_clflush)
114 return;
115
116 mb();
117 psb_clflush(addr);
118 mb();
119}
120
121static void psb_page_clflush(struct psb_mmu_driver *driver, struct page *page)
122{
123 uint32_t clflush_add = driver->clflush_add >> PAGE_SHIFT;
124 uint32_t clflush_count = PAGE_SIZE / clflush_add;
125 int i;
126 uint8_t *clf;
127
128 clf = kmap_atomic(page, KM_USER0);
129 mb();
130 for (i = 0; i < clflush_count; ++i) {
131 psb_clflush(clf);
132 clf += clflush_add;
133 }
134 mb();
135 kunmap_atomic(clf, KM_USER0);
136}
137
138static void psb_pages_clflush(struct psb_mmu_driver *driver,
139 struct page *page[], unsigned long num_pages)
140{
141 int i;
142
143 if (!driver->has_clflush)
144		return;
145
146 for (i = 0; i < num_pages; i++)
147 psb_page_clflush(driver, *page++);
148}
149
150static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver,
151 int force)
152{
153 atomic_set(&driver->needs_tlbflush, 0);
154}
155
156static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force)
157{
158 down_write(&driver->sem);
159 psb_mmu_flush_pd_locked(driver, force);
160 up_write(&driver->sem);
161}
162
163void psb_mmu_flush(struct psb_mmu_driver *driver, int rc_prot)
164{
165 if (rc_prot)
166 down_write(&driver->sem);
167 if (rc_prot)
168 up_write(&driver->sem);
169}
170
171void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
172{
173 /*ttm_tt_cache_flush(&pd->p, 1);*/
174 psb_pages_clflush(pd->driver, &pd->p, 1);
175 down_write(&pd->driver->sem);
176 wmb();
177 psb_mmu_flush_pd_locked(pd->driver, 1);
178 pd->hw_context = hw_context;
179 up_write(&pd->driver->sem);
180
181}
182
183static inline unsigned long psb_pd_addr_end(unsigned long addr,
184 unsigned long end)
185{
186
187 addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK;
188 return (addr < end) ? addr : end;
189}
190
191static inline uint32_t psb_mmu_mask_pte(uint32_t pfn, int type)
192{
193 uint32_t mask = PSB_PTE_VALID;
194
195 if (type & PSB_MMU_CACHED_MEMORY)
196 mask |= PSB_PTE_CACHED;
197 if (type & PSB_MMU_RO_MEMORY)
198 mask |= PSB_PTE_RO;
199 if (type & PSB_MMU_WO_MEMORY)
200 mask |= PSB_PTE_WO;
201
202 return (pfn << PAGE_SHIFT) | mask;
203}
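
psb_mmu_mask_pte() composes a hardware PTE from a page frame number shifted
into the upper bits plus access-type flag bits in the low bits. A sketch of
that layout with placeholder flag values (the real ones live in psb_reg.h):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PSB_PTE_VALID	0x0001	/* placeholder flag values */
#define PSB_PTE_CACHED	0x0002

int main(void)
{
	uint32_t pfn = 0x1a2b;
	uint32_t pte = (pfn << PAGE_SHIFT) | PSB_PTE_VALID | PSB_PTE_CACHED;

	printf("pfn 0x%x -> pte 0x%08x, recovered pfn 0x%x\n",
	       pfn, pte, pte >> PAGE_SHIFT);
	return 0;
}
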
204
205struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
206 int trap_pagefaults, int invalid_type)
207{
208 struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL);
209 uint32_t *v;
210 int i;
211
212 if (!pd)
213 return NULL;
214
215 pd->p = alloc_page(GFP_DMA32);
216 if (!pd->p)
217 goto out_err1;
218 pd->dummy_pt = alloc_page(GFP_DMA32);
219 if (!pd->dummy_pt)
220 goto out_err2;
221 pd->dummy_page = alloc_page(GFP_DMA32);
222 if (!pd->dummy_page)
223 goto out_err3;
224
225 if (!trap_pagefaults) {
226 pd->invalid_pde =
227 psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
228 invalid_type);
229 pd->invalid_pte =
230 psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
231 invalid_type);
232 } else {
233 pd->invalid_pde = 0;
234 pd->invalid_pte = 0;
235 }
236
237 v = kmap(pd->dummy_pt);
238 for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
239 v[i] = pd->invalid_pte;
240
241 kunmap(pd->dummy_pt);
242
243 v = kmap(pd->p);
244 for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
245 v[i] = pd->invalid_pde;
246
247 kunmap(pd->p);
248
249 clear_page(kmap(pd->dummy_page));
250 kunmap(pd->dummy_page);
251
252 pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024);
253 if (!pd->tables)
254 goto out_err4;
255
256 pd->hw_context = -1;
257 pd->pd_mask = PSB_PTE_VALID;
258 pd->driver = driver;
259
260 return pd;
261
262out_err4:
263 __free_page(pd->dummy_page);
264out_err3:
265 __free_page(pd->dummy_pt);
266out_err2:
267 __free_page(pd->p);
268out_err1:
269 kfree(pd);
270 return NULL;
271}
272
273void psb_mmu_free_pt(struct psb_mmu_pt *pt)
274{
275 __free_page(pt->p);
276 kfree(pt);
277}
278
279void psb_mmu_free_pagedir(struct psb_mmu_pd *pd)
280{
281 struct psb_mmu_driver *driver = pd->driver;
282 struct psb_mmu_pt *pt;
283 int i;
284
285 down_write(&driver->sem);
286 if (pd->hw_context != -1)
287 psb_mmu_flush_pd_locked(driver, 1);
288
289 /* Should take the spinlock here, but we don't need to do that
290 since we have the semaphore in write mode. */
291
292 for (i = 0; i < 1024; ++i) {
293 pt = pd->tables[i];
294 if (pt)
295 psb_mmu_free_pt(pt);
296 }
297
298 vfree(pd->tables);
299 __free_page(pd->dummy_page);
300 __free_page(pd->dummy_pt);
301 __free_page(pd->p);
302 kfree(pd);
303 up_write(&driver->sem);
304}
305
306static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
307{
308 struct psb_mmu_pt *pt = kmalloc(sizeof(*pt), GFP_KERNEL);
309 void *v;
310 uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT;
311 uint32_t clflush_count = PAGE_SIZE / clflush_add;
312 spinlock_t *lock = &pd->driver->lock;
313 uint8_t *clf;
314 uint32_t *ptes;
315 int i;
316
317 if (!pt)
318 return NULL;
319
320 pt->p = alloc_page(GFP_DMA32);
321 if (!pt->p) {
322 kfree(pt);
323 return NULL;
324 }
325
326 spin_lock(lock);
327
328 v = kmap_atomic(pt->p, KM_USER0);
329 clf = (uint8_t *) v;
330 ptes = (uint32_t *) v;
331 for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
332 *ptes++ = pd->invalid_pte;
333
334
335 if (pd->driver->has_clflush && pd->hw_context != -1) {
336 mb();
337 for (i = 0; i < clflush_count; ++i) {
338 psb_clflush(clf);
339 clf += clflush_add;
340 }
341 mb();
342 }
343
344 kunmap_atomic(v, KM_USER0);
345 spin_unlock(lock);
346
347 pt->count = 0;
348 pt->pd = pd;
349 pt->index = 0;
350
351 return pt;
352}
353
354struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
355 unsigned long addr)
356{
357 uint32_t index = psb_mmu_pd_index(addr);
358 struct psb_mmu_pt *pt;
359 uint32_t *v;
360 spinlock_t *lock = &pd->driver->lock;
361
362 spin_lock(lock);
363 pt = pd->tables[index];
364 while (!pt) {
365 spin_unlock(lock);
366 pt = psb_mmu_alloc_pt(pd);
367 if (!pt)
368 return NULL;
369 spin_lock(lock);
370
371 if (pd->tables[index]) {
372 spin_unlock(lock);
373 psb_mmu_free_pt(pt);
374 spin_lock(lock);
375 pt = pd->tables[index];
376 continue;
377 }
378
379 v = kmap_atomic(pd->p, KM_USER0);
380 pd->tables[index] = pt;
381 v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
382 pt->index = index;
383 kunmap_atomic((void *) v, KM_USER0);
384
385 if (pd->hw_context != -1) {
386 psb_mmu_clflush(pd->driver, (void *) &v[index]);
387 atomic_set(&pd->driver->needs_tlbflush, 1);
388 }
389 }
390 pt->v = kmap_atomic(pt->p, KM_USER0);
391 return pt;
392}
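
psb_mmu_pt_alloc_map_lock() cannot allocate while holding the spinlock, so it
drops the lock, allocates, retakes the lock and rechecks the slot, discarding
its allocation if another thread raced in first. The same pattern in a
standalone sketch, with pthreads standing in for the kernel spinlock:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *slot;

void *get_or_create(void)
{
	void *obj;

	pthread_mutex_lock(&lock);
	while (!slot) {
		pthread_mutex_unlock(&lock);
		obj = malloc(64);	/* may sleep: lock is dropped */
		if (!obj)
			return NULL;
		pthread_mutex_lock(&lock);
		if (slot) {		/* lost the race: discard ours */
			free(obj);
			continue;
		}
		slot = obj;
	}
	obj = slot;
	pthread_mutex_unlock(&lock);
	return obj;
}

int main(void)
{
	return get_or_create() ? 0 : 1;
}
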
393
394static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd,
395 unsigned long addr)
396{
397 uint32_t index = psb_mmu_pd_index(addr);
398 struct psb_mmu_pt *pt;
399 spinlock_t *lock = &pd->driver->lock;
400
401 spin_lock(lock);
402 pt = pd->tables[index];
403 if (!pt) {
404 spin_unlock(lock);
405 return NULL;
406 }
407 pt->v = kmap_atomic(pt->p, KM_USER0);
408 return pt;
409}
410
411static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
412{
413 struct psb_mmu_pd *pd = pt->pd;
414 uint32_t *v;
415
416 kunmap_atomic(pt->v, KM_USER0);
417 if (pt->count == 0) {
418 v = kmap_atomic(pd->p, KM_USER0);
419 v[pt->index] = pd->invalid_pde;
420 pd->tables[pt->index] = NULL;
421
422 if (pd->hw_context != -1) {
423 psb_mmu_clflush(pd->driver,
424 (void *) &v[pt->index]);
425 atomic_set(&pd->driver->needs_tlbflush, 1);
426 }
427 kunmap_atomic(pt->v, KM_USER0);
428 spin_unlock(&pd->driver->lock);
429 psb_mmu_free_pt(pt);
430 return;
431 }
432 spin_unlock(&pd->driver->lock);
433}
434
435static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt,
436 unsigned long addr, uint32_t pte)
437{
438 pt->v[psb_mmu_pt_index(addr)] = pte;
439}
440
441static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt,
442 unsigned long addr)
443{
444 pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte;
445}
446
447
448void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd,
449 uint32_t mmu_offset, uint32_t gtt_start,
450 uint32_t gtt_pages)
451{
452 uint32_t *v;
453 uint32_t start = psb_mmu_pd_index(mmu_offset);
454 struct psb_mmu_driver *driver = pd->driver;
455 int num_pages = gtt_pages;
456
457 down_read(&driver->sem);
458 spin_lock(&driver->lock);
459
460 v = kmap_atomic(pd->p, KM_USER0);
461 v += start;
462
463 while (gtt_pages--) {
464 *v++ = gtt_start | pd->pd_mask;
465 gtt_start += PAGE_SIZE;
466 }
467
468 /*ttm_tt_cache_flush(&pd->p, num_pages);*/
469 psb_pages_clflush(pd->driver, &pd->p, num_pages);
470 kunmap_atomic(v, KM_USER0);
471 spin_unlock(&driver->lock);
472
473 if (pd->hw_context != -1)
474 atomic_set(&pd->driver->needs_tlbflush, 1);
475
476 up_read(&pd->driver->sem);
477 psb_mmu_flush_pd(pd->driver, 0);
478}
479
480struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
481{
482 struct psb_mmu_pd *pd;
483
484 /* down_read(&driver->sem); */
485 pd = driver->default_pd;
486 /* up_read(&driver->sem); */
487
488 return pd;
489}
490
491/* Returns the physical address of the PD shared by sgx/msvdx */
492uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver)
493{
494 struct psb_mmu_pd *pd;
495
496 pd = psb_mmu_get_default_pd(driver);
497 return page_to_pfn(pd->p) << PAGE_SHIFT;
498}
499
500void psb_mmu_driver_takedown(struct psb_mmu_driver *driver)
501{
502 psb_mmu_free_pagedir(driver->default_pd);
503 kfree(driver);
504}
505
506struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
507 int trap_pagefaults,
508 int invalid_type,
509 struct drm_psb_private *dev_priv)
510{
511 struct psb_mmu_driver *driver;
512
513 driver = kmalloc(sizeof(*driver), GFP_KERNEL);
514
515 if (!driver)
516 return NULL;
517 driver->dev_priv = dev_priv;
518
519 driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults,
520 invalid_type);
521 if (!driver->default_pd)
522 goto out_err1;
523
524 spin_lock_init(&driver->lock);
525 init_rwsem(&driver->sem);
526 down_write(&driver->sem);
527 driver->register_map = registers;
528 atomic_set(&driver->needs_tlbflush, 1);
529
530 driver->has_clflush = 0;
531
532 if (boot_cpu_has(X86_FEATURE_CLFLSH)) {
533 uint32_t tfms, misc, cap0, cap4, clflush_size;
534
535 /*
536 * clflush size is determined at kernel setup for x86_64
537 * but not for i386. We have to do it here.
538 */
539
540 cpuid(0x00000001, &tfms, &misc, &cap0, &cap4);
541 clflush_size = ((misc >> 8) & 0xff) * 8;
542 driver->has_clflush = 1;
543 driver->clflush_add =
544 PAGE_SIZE * clflush_size / sizeof(uint32_t);
545 driver->clflush_mask = driver->clflush_add - 1;
546 driver->clflush_mask = ~driver->clflush_mask;
547 }
548
549 up_write(&driver->sem);
550 return driver;
551
552out_err1:
553 kfree(driver);
554 return NULL;
555}
556
557static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd,
558 unsigned long address, uint32_t num_pages,
559 uint32_t desired_tile_stride,
560 uint32_t hw_tile_stride)
561{
562 struct psb_mmu_pt *pt;
563 uint32_t rows = 1;
564 uint32_t i;
565 unsigned long addr;
566 unsigned long end;
567 unsigned long next;
568 unsigned long add;
569 unsigned long row_add;
570 unsigned long clflush_add = pd->driver->clflush_add;
571 unsigned long clflush_mask = pd->driver->clflush_mask;
572
573 if (!pd->driver->has_clflush) {
574 /*ttm_tt_cache_flush(&pd->p, num_pages);*/
575 psb_pages_clflush(pd->driver, &pd->p, num_pages);
576 return;
577 }
578
579 if (hw_tile_stride)
580 rows = num_pages / desired_tile_stride;
581 else
582 desired_tile_stride = num_pages;
583
584 add = desired_tile_stride << PAGE_SHIFT;
585 row_add = hw_tile_stride << PAGE_SHIFT;
586 mb();
587 for (i = 0; i < rows; ++i) {
588
589 addr = address;
590 end = addr + add;
591
592 do {
593 next = psb_pd_addr_end(addr, end);
594 pt = psb_mmu_pt_map_lock(pd, addr);
595 if (!pt)
596 continue;
597 do {
598 psb_clflush(&pt->v
599 [psb_mmu_pt_index(addr)]);
600 } while (addr +=
601 clflush_add,
602 (addr & clflush_mask) < next);
603
604 psb_mmu_pt_unmap_unlock(pt);
605 } while (addr = next, next != end);
606 address += row_add;
607 }
608 mb();
609}
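
Each PTE maps one page, so one cache line of 4-byte PTEs covers many pages of
address space; that is why psb_mmu_driver_init() above computes clflush_add as
PAGE_SIZE * clflush_size / sizeof(uint32_t), the virtual-address stride
between flushes used here. A worked example for a 64-byte cache line:

#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096, clflush_size = 64;
	unsigned long ptes_per_line = clflush_size / sizeof(unsigned int);
	unsigned long stride = page_size * clflush_size / sizeof(unsigned int);

	printf("%lu PTEs per cache line, %lu bytes of address per flush\n",
	       ptes_per_line, stride);	/* 16 and 65536 */
	return 0;
}
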
610
611void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
612 unsigned long address, uint32_t num_pages)
613{
614 struct psb_mmu_pt *pt;
615 unsigned long addr;
616 unsigned long end;
617 unsigned long next;
618 unsigned long f_address = address;
619
620 down_read(&pd->driver->sem);
621
622 addr = address;
623 end = addr + (num_pages << PAGE_SHIFT);
624
625 do {
626 next = psb_pd_addr_end(addr, end);
627 pt = psb_mmu_pt_alloc_map_lock(pd, addr);
628 if (!pt)
629 goto out;
630 do {
631 psb_mmu_invalidate_pte(pt, addr);
632 --pt->count;
633 } while (addr += PAGE_SIZE, addr < next);
634 psb_mmu_pt_unmap_unlock(pt);
635
636 } while (addr = next, next != end);
637
638out:
639 if (pd->hw_context != -1)
640 psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
641
642 up_read(&pd->driver->sem);
643
644 if (pd->hw_context != -1)
645 psb_mmu_flush(pd->driver, 0);
646
647 return;
648}
649
650void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
651 uint32_t num_pages, uint32_t desired_tile_stride,
652 uint32_t hw_tile_stride)
653{
654 struct psb_mmu_pt *pt;
655 uint32_t rows = 1;
656 uint32_t i;
657 unsigned long addr;
658 unsigned long end;
659 unsigned long next;
660 unsigned long add;
661 unsigned long row_add;
662 unsigned long f_address = address;
663
664 if (hw_tile_stride)
665 rows = num_pages / desired_tile_stride;
666 else
667 desired_tile_stride = num_pages;
668
669 add = desired_tile_stride << PAGE_SHIFT;
670 row_add = hw_tile_stride << PAGE_SHIFT;
671
672 /* down_read(&pd->driver->sem); */
673
674 /* Make sure we only need to flush this processor's cache */
675
676 for (i = 0; i < rows; ++i) {
677
678 addr = address;
679 end = addr + add;
680
681 do {
682 next = psb_pd_addr_end(addr, end);
683 pt = psb_mmu_pt_map_lock(pd, addr);
684 if (!pt)
685 continue;
686 do {
687 psb_mmu_invalidate_pte(pt, addr);
688 --pt->count;
689
690 } while (addr += PAGE_SIZE, addr < next);
691 psb_mmu_pt_unmap_unlock(pt);
692
693 } while (addr = next, next != end);
694 address += row_add;
695 }
696 if (pd->hw_context != -1)
697 psb_mmu_flush_ptes(pd, f_address, num_pages,
698 desired_tile_stride, hw_tile_stride);
699
700 /* up_read(&pd->driver->sem); */
701
702 if (pd->hw_context != -1)
703 psb_mmu_flush(pd->driver, 0);
704}
705
706int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
707 unsigned long address, uint32_t num_pages,
708 int type)
709{
710 struct psb_mmu_pt *pt;
711 uint32_t pte;
712 unsigned long addr;
713 unsigned long end;
714 unsigned long next;
715 unsigned long f_address = address;
716 int ret = 0;
717
718 down_read(&pd->driver->sem);
719
720 addr = address;
721 end = addr + (num_pages << PAGE_SHIFT);
722
723 do {
724 next = psb_pd_addr_end(addr, end);
725 pt = psb_mmu_pt_alloc_map_lock(pd, addr);
726 if (!pt) {
727 ret = -ENOMEM;
728 goto out;
729 }
730 do {
731 pte = psb_mmu_mask_pte(start_pfn++, type);
732 psb_mmu_set_pte(pt, addr, pte);
733 pt->count++;
734 } while (addr += PAGE_SIZE, addr < next);
735 psb_mmu_pt_unmap_unlock(pt);
736
737 } while (addr = next, next != end);
738
739out:
740 if (pd->hw_context != -1)
741 psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
742
743 up_read(&pd->driver->sem);
744
745 if (pd->hw_context != -1)
746 psb_mmu_flush(pd->driver, 1);
747
748 return ret;
749}
750
751int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
752 unsigned long address, uint32_t num_pages,
753 uint32_t desired_tile_stride,
754 uint32_t hw_tile_stride, int type)
755{
756 struct psb_mmu_pt *pt;
757 uint32_t rows = 1;
758 uint32_t i;
759 uint32_t pte;
760 unsigned long addr;
761 unsigned long end;
762 unsigned long next;
763 unsigned long add;
764 unsigned long row_add;
765 unsigned long f_address = address;
766 int ret = 0;
767
768 if (hw_tile_stride) {
769 if (num_pages % desired_tile_stride != 0)
770 return -EINVAL;
771 rows = num_pages / desired_tile_stride;
772 } else {
773 desired_tile_stride = num_pages;
774 }
775
776 add = desired_tile_stride << PAGE_SHIFT;
777 row_add = hw_tile_stride << PAGE_SHIFT;
778
779 down_read(&pd->driver->sem);
780
781 for (i = 0; i < rows; ++i) {
782
783 addr = address;
784 end = addr + add;
785
786 do {
787 next = psb_pd_addr_end(addr, end);
788 pt = psb_mmu_pt_alloc_map_lock(pd, addr);
789 if (!pt) {
790 ret = -ENOMEM;
791 goto out;
792 }
793 do {
794 pte =
795 psb_mmu_mask_pte(page_to_pfn(*pages++),
796 type);
797 psb_mmu_set_pte(pt, addr, pte);
798 pt->count++;
799 } while (addr += PAGE_SIZE, addr < next);
800 psb_mmu_pt_unmap_unlock(pt);
801
802 } while (addr = next, next != end);
803
804 address += row_add;
805 }
806out:
807 if (pd->hw_context != -1)
808 psb_mmu_flush_ptes(pd, f_address, num_pages,
809 desired_tile_stride, hw_tile_stride);
810
811 up_read(&pd->driver->sem);
812
813 if (pd->hw_context != -1)
814 psb_mmu_flush(pd->driver, 1);
815
816 return ret;
817}
818
819int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
820 unsigned long *pfn)
821{
822 int ret;
823 struct psb_mmu_pt *pt;
824 uint32_t tmp;
825 spinlock_t *lock = &pd->driver->lock;
826
827 down_read(&pd->driver->sem);
828 pt = psb_mmu_pt_map_lock(pd, virtual);
829 if (!pt) {
830 uint32_t *v;
831
832 spin_lock(lock);
833 v = kmap_atomic(pd->p, KM_USER0);
834 tmp = v[psb_mmu_pd_index(virtual)];
835 kunmap_atomic(v, KM_USER0);
836 spin_unlock(lock);
837
838 if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
839 !(pd->invalid_pte & PSB_PTE_VALID)) {
840 ret = -EINVAL;
841 goto out;
842 }
843 ret = 0;
844 *pfn = pd->invalid_pte >> PAGE_SHIFT;
845 goto out;
846 }
847 tmp = pt->v[psb_mmu_pt_index(virtual)];
848 if (!(tmp & PSB_PTE_VALID)) {
849 ret = -EINVAL;
850 } else {
851 ret = 0;
852 *pfn = tmp >> PAGE_SHIFT;
853 }
854 psb_mmu_pt_unmap_unlock(pt);
855out:
856 up_read(&pd->driver->sem);
857 return ret;
858}
diff --git a/drivers/gpu/drm/gma500/oaktrail.h b/drivers/gpu/drm/gma500/oaktrail.h
new file mode 100644
index 00000000000..2da1f368f14
--- /dev/null
+++ b/drivers/gpu/drm/gma500/oaktrail.h
@@ -0,0 +1,252 @@
1/**************************************************************************
2 * Copyright (c) 2007-2011, Intel Corporation.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 **************************************************************************/
19
20/* MID device specific descriptors */
21
22struct oaktrail_vbt {
23 s8 signature[4]; /*4 bytes,"$GCT" */
24 u8 revision;
25 u8 size;
26 u8 checksum;
27 void *oaktrail_gct;
28} __packed;
29
30struct oaktrail_timing_info {
31 u16 pixel_clock;
32 u8 hactive_lo;
33 u8 hblank_lo;
34 u8 hblank_hi:4;
35 u8 hactive_hi:4;
36 u8 vactive_lo;
37 u8 vblank_lo;
38 u8 vblank_hi:4;
39 u8 vactive_hi:4;
40 u8 hsync_offset_lo;
41 u8 hsync_pulse_width_lo;
42 u8 vsync_pulse_width_lo:4;
43 u8 vsync_offset_lo:4;
44 u8 vsync_pulse_width_hi:2;
45 u8 vsync_offset_hi:2;
46 u8 hsync_pulse_width_hi:2;
47 u8 hsync_offset_hi:2;
48 u8 width_mm_lo;
49 u8 height_mm_lo;
50 u8 height_mm_hi:4;
51 u8 width_mm_hi:4;
52 u8 hborder;
53 u8 vborder;
54 u8 unknown0:1;
55 u8 hsync_positive:1;
56 u8 vsync_positive:1;
57 u8 separate_sync:2;
58 u8 stereo:1;
59 u8 unknown6:1;
60 u8 interlaced:1;
61} __packed;
62
63struct gct_r10_timing_info {
64 u16 pixel_clock;
65 u32 hactive_lo:8;
66 u32 hactive_hi:4;
67 u32 hblank_lo:8;
68 u32 hblank_hi:4;
69 u32 hsync_offset_lo:8;
70 u16 hsync_offset_hi:2;
71 u16 hsync_pulse_width_lo:8;
72 u16 hsync_pulse_width_hi:2;
73 u16 hsync_positive:1;
74 u16 rsvd_1:3;
75 u8 vactive_lo:8;
76 u16 vactive_hi:4;
77 u16 vblank_lo:8;
78 u16 vblank_hi:4;
79 u16 vsync_offset_lo:4;
80 u16 vsync_offset_hi:2;
81 u16 vsync_pulse_width_lo:4;
82 u16 vsync_pulse_width_hi:2;
83 u16 vsync_positive:1;
84 u16 rsvd_2:3;
85} __packed;
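
The split hi/lo bitfields in these descriptors recombine into full timing
values, e.g. hactive = (hactive_hi << 8) | hactive_lo, giving a 12-bit range.
A small sketch of that recombination:

#include <stdio.h>

int main(void)
{
	unsigned int hactive_hi = 0x5, hactive_lo = 0x00;	/* 1280 */
	unsigned int hactive = (hactive_hi << 8) | hactive_lo;

	printf("hactive = %u (max representable %u)\n",
	       hactive, (0xf << 8) | 0xff);	/* 1280, 4095 */
	return 0;
}
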
86
87struct oaktrail_panel_descriptor_v1 {
88 u32 Panel_Port_Control; /* 1 dword, Register 0x61180 if LVDS */
89 /* 0x61190 if MIPI */
90 u32 Panel_Power_On_Sequencing;/*1 dword,Register 0x61208,*/
91 u32 Panel_Power_Off_Sequencing;/*1 dword,Register 0x6120C,*/
92 u32 Panel_Power_Cycle_Delay_and_Reference_Divisor;/* 1 dword */
93 /* Register 0x61210 */
94 struct oaktrail_timing_info DTD;/*18 bytes, Standard definition */
95 u16 Panel_Backlight_Inverter_Descriptor;/* 16 bits, as follows */
96 /* Bit 0, Frequency, 15 bits,0 - 32767Hz */
97 /* Bit 15, Polarity, 1 bit, 0: Normal, 1: Inverted */
98 u16 Panel_MIPI_Display_Descriptor;
99 /*16 bits, Defined as follows: */
100 /* if MIPI, 0x0000 if LVDS */
101 /* Bit 0, Type, 2 bits, */
102 /* 0: Type-1, */
103 /* 1: Type-2, */
104 /* 2: Type-3, */
105 /* 3: Type-4 */
106 /* Bit 2, Pixel Format, 4 bits */
107 /* Bit0: 16bpp (not supported in LNC), */
108 /* Bit1: 18bpp loosely packed, */
109 /* Bit2: 18bpp packed, */
110 /* Bit3: 24bpp */
111 /* Bit 6, Reserved, 2 bits, 00b */
112 /* Bit 8, Minimum Supported Frame Rate, 6 bits, 0 - 63Hz */
113 /* Bit 14, Reserved, 2 bits, 00b */
114} __packed;
115
116struct oaktrail_panel_descriptor_v2 {
117 u32 Panel_Port_Control; /* 1 dword, Register 0x61180 if LVDS */
118 /* 0x61190 if MIPI */
119 u32 Panel_Power_On_Sequencing;/*1 dword,Register 0x61208,*/
120 u32 Panel_Power_Off_Sequencing;/*1 dword,Register 0x6120C,*/
121 u8 Panel_Power_Cycle_Delay_and_Reference_Divisor;/* 1 byte */
122 /* Register 0x61210 */
123 struct oaktrail_timing_info DTD;/*18 bytes, Standard definition */
124 u16 Panel_Backlight_Inverter_Descriptor;/*16 bits, as follows*/
125 /*Bit 0, Frequency, 16 bits, 0 - 32767Hz*/
126 u8 Panel_Initial_Brightness;/* [7:0] 0 - 100% */
127 /*Bit 7, Polarity, 1 bit,0: Normal, 1: Inverted*/
128 u16 Panel_MIPI_Display_Descriptor;
129 /*16 bits, Defined as follows: */
130 /* if MIPI, 0x0000 if LVDS */
131 /* Bit 0, Type, 2 bits, */
132 /* 0: Type-1, */
133 /* 1: Type-2, */
134 /* 2: Type-3, */
135 /* 3: Type-4 */
136 /* Bit 2, Pixel Format, 4 bits */
137 /* Bit0: 16bpp (not supported in LNC), */
138 /* Bit1: 18bpp loosely packed, */
139 /* Bit2: 18bpp packed, */
140 /* Bit3: 24bpp */
141 /* Bit 6, Reserved, 2 bits, 00b */
142 /* Bit 8, Minimum Supported Frame Rate, 6 bits, 0 - 63Hz */
143 /* Bit 14, Reserved, 2 bits, 00b */
144} __packed;
145
146union oaktrail_panel_rx {
147 struct {
148 u16 NumberOfLanes:2; /*Num of Lanes, 2 bits,0 = 1 lane,*/
149 /* 1 = 2 lanes, 2 = 3 lanes, 3 = 4 lanes. */
150 u16 MaxLaneFreq:3; /* 0: 100MHz, 1: 200MHz, 2: 300MHz, */
151 /*3: 400MHz, 4: 500MHz, 5: 600MHz, 6: 700MHz, 7: 800MHz.*/
152 u16 SupportedVideoTransferMode:2; /*0: Non-burst only */
153 /* 1: Burst and non-burst */
154 /* 2/3: Reserved */
155 u16 HSClkBehavior:1; /*0: Continuous, 1: Non-continuous*/
156 u16 DuoDisplaySupport:1; /*1 bit,0: No, 1: Yes*/
157 u16 ECC_ChecksumCapabilities:1;/*1 bit,0: No, 1: Yes*/
158 u16 BidirectionalCommunication:1;/*1 bit,0: No, 1: Yes */
159 u16 Rsvd:5;/*5 bits,00000b */
160 } panelrx;
161 u16 panel_receiver;
162} __packed;
163
164struct oaktrail_gct_v1 {
165 union { /*8 bits,Defined as follows: */
166 struct {
167 u8 PanelType:4; /*4 bits, Bit field for panels*/
168 /* 0 - 3: 0 = LVDS, 1 = MIPI*/
169 /*2 bits,Specifies which of the*/
170 u8 BootPanelIndex:2;
171 /* 4 panels to use by default*/
172 u8 BootMIPI_DSI_RxIndex:2;/*Specifies which of*/
173 /* the 4 MIPI DSI receivers to use*/
174 } PD;
175 u8 PanelDescriptor;
176 };
177 struct oaktrail_panel_descriptor_v1 panel[4];/*panel descrs,38 bytes each*/
178 union oaktrail_panel_rx panelrx[4]; /* panel receivers*/
179} __packed;
180
181struct oaktrail_gct_v2 {
182 union { /*8 bits,Defined as follows: */
183 struct {
184 u8 PanelType:4; /*4 bits, Bit field for panels*/
185 /* 0 - 3: 0 = LVDS, 1 = MIPI*/
186 /*2 bits,Specifies which of the*/
187 u8 BootPanelIndex:2;
188 /* 4 panels to use by default*/
189 u8 BootMIPI_DSI_RxIndex:2;/*Specifies which of*/
190 /* the 4 MIPI DSI receivers to use*/
191 } PD;
192 u8 PanelDescriptor;
193 };
194 struct oaktrail_panel_descriptor_v2 panel[4];/*panel descrs,38 bytes each*/
195 union oaktrail_panel_rx panelrx[4]; /* panel receivers*/
196} __packed;
197
198struct oaktrail_gct_data {
199 u8 bpi; /* boot panel index, number of panel used during boot */
200 u8 pt; /* panel type, 4 bit field, 0=lvds, 1=mipi */
201 struct oaktrail_timing_info DTD; /* timing info for the selected panel */
202 u32 Panel_Port_Control;
203 u32 PP_On_Sequencing;/*1 dword,Register 0x61208,*/
204 u32 PP_Off_Sequencing;/*1 dword,Register 0x6120C,*/
205 u32 PP_Cycle_Delay;
206 u16 Panel_Backlight_Inverter_Descriptor;
207 u16 Panel_MIPI_Display_Descriptor;
208} __packed;
209
210#define MODE_SETTING_IN_CRTC 0x1
211#define MODE_SETTING_IN_ENCODER 0x2
212#define MODE_SETTING_ON_GOING 0x3
213#define MODE_SETTING_IN_DSR 0x4
214#define MODE_SETTING_ENCODER_DONE 0x8
215
216#define GCT_R10_HEADER_SIZE 16
217#define GCT_R10_DISPLAY_DESC_SIZE 28
218
219/*
220 * Moorestown HDMI interfaces
221 */
222
223struct oaktrail_hdmi_dev {
224 struct pci_dev *dev;
225 void __iomem *regs;
226 unsigned int mmio, mmio_len;
227 int dpms_mode;
228 struct hdmi_i2c_dev *i2c_dev;
229
230 /* register state */
231 u32 saveDPLL_CTRL;
232 u32 saveDPLL_DIV_CTRL;
233 u32 saveDPLL_ADJUST;
234 u32 saveDPLL_UPDATE;
235 u32 saveDPLL_CLK_ENABLE;
236 u32 savePCH_HTOTAL_B;
237 u32 savePCH_HBLANK_B;
238 u32 savePCH_HSYNC_B;
239 u32 savePCH_VTOTAL_B;
240 u32 savePCH_VBLANK_B;
241 u32 savePCH_VSYNC_B;
242 u32 savePCH_PIPEBCONF;
243 u32 savePCH_PIPEBSRC;
244};
245
246extern void oaktrail_hdmi_setup(struct drm_device *dev);
247extern void oaktrail_hdmi_teardown(struct drm_device *dev);
248extern int oaktrail_hdmi_i2c_init(struct pci_dev *dev);
249extern void oaktrail_hdmi_i2c_exit(struct pci_dev *dev);
250extern void oaktrail_hdmi_save(struct drm_device *dev);
251extern void oaktrail_hdmi_restore(struct drm_device *dev);
252extern void oaktrail_hdmi_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev);
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
new file mode 100644
index 00000000000..9d12a3ee160
--- /dev/null
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -0,0 +1,604 @@
1/*
2 * Copyright © 2009 Intel Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 */
17
18#include <linux/i2c.h>
19#include <linux/pm_runtime.h>
20
21#include <drm/drmP.h>
22#include "framebuffer.h"
23#include "psb_drv.h"
24#include "psb_intel_drv.h"
25#include "psb_intel_reg.h"
26#include "psb_intel_display.h"
27#include "power.h"
28
29struct psb_intel_range_t {
30 int min, max;
31};
32
33struct oaktrail_limit_t {
34 struct psb_intel_range_t dot, m, p1;
35};
36
37struct oaktrail_clock_t {
38 /* derived values */
39 int dot;
40 int m;
41 int p1;
42};
43
44#define MRST_LIMIT_LVDS_100L 0
45#define MRST_LIMIT_LVDS_83 1
46#define MRST_LIMIT_LVDS_100 2
47
48#define MRST_DOT_MIN 19750
49#define MRST_DOT_MAX 120000
50#define MRST_M_MIN_100L 20
51#define MRST_M_MIN_100 10
52#define MRST_M_MIN_83 12
53#define MRST_M_MAX_100L 34
54#define MRST_M_MAX_100 17
55#define MRST_M_MAX_83 20
56#define MRST_P1_MIN 2
57#define MRST_P1_MAX_0 7
58#define MRST_P1_MAX_1 8
59
60static const struct oaktrail_limit_t oaktrail_limits[] = {
61 { /* MRST_LIMIT_LVDS_100L */
62 .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
63 .m = {.min = MRST_M_MIN_100L, .max = MRST_M_MAX_100L},
64 .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
65 },
66	{ /* MRST_LIMIT_LVDS_83 */
67 .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
68 .m = {.min = MRST_M_MIN_83, .max = MRST_M_MAX_83},
69 .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_0},
70 },
71 { /* MRST_LIMIT_LVDS_100 */
72 .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
73 .m = {.min = MRST_M_MIN_100, .max = MRST_M_MAX_100},
74 .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
75 },
76};
77
78#define MRST_M_MIN 10
79static const u32 oaktrail_m_converts[] = {
80 0x2B, 0x15, 0x2A, 0x35, 0x1A, 0x0D, 0x26, 0x33, 0x19, 0x2C,
81 0x36, 0x3B, 0x1D, 0x2E, 0x37, 0x1B, 0x2D, 0x16, 0x0B, 0x25,
82 0x12, 0x09, 0x24, 0x32, 0x39, 0x1c,
83};
84
85static const struct oaktrail_limit_t *oaktrail_limit(struct drm_crtc *crtc)
86{
87 const struct oaktrail_limit_t *limit = NULL;
88 struct drm_device *dev = crtc->dev;
89 struct drm_psb_private *dev_priv = dev->dev_private;
90
91 if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
92 || psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)) {
93 switch (dev_priv->core_freq) {
94 case 100:
95 limit = &oaktrail_limits[MRST_LIMIT_LVDS_100L];
96 break;
97 case 166:
98 limit = &oaktrail_limits[MRST_LIMIT_LVDS_83];
99 break;
100 case 200:
101 limit = &oaktrail_limits[MRST_LIMIT_LVDS_100];
102 break;
103 }
104 } else {
105 limit = NULL;
106 dev_err(dev->dev, "oaktrail_limit Wrong display type.\n");
107 }
108
109 return limit;
110}
111
112/** Derive the pixel clock for the given refclk and divisors. */
113static void oaktrail_clock(int refclk, struct oaktrail_clock_t *clock)
114{
115 clock->dot = (refclk * clock->m) / (14 * clock->p1);
116}
117
118void mrstPrintPll(char *prefix, struct oaktrail_clock_t *clock)
119{
120 pr_debug("%s: dotclock = %d, m = %d, p1 = %d.\n",
121 prefix, clock->dot, clock->m, clock->p1);
122}
123
124/**
125 * Returns a set of divisors for the desired target clock with the given refclk,
126 * or FALSE. Divisor values are the actual divisors for the target clock.
127 */
128static bool
129mrstFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
130 struct oaktrail_clock_t *best_clock)
131{
132 struct oaktrail_clock_t clock;
133 const struct oaktrail_limit_t *limit = oaktrail_limit(crtc);
134 int err = target;
135
136 memset(best_clock, 0, sizeof(*best_clock));
137
138 for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
139 for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max;
140 clock.p1++) {
141 int this_err;
142
143 oaktrail_clock(refclk, &clock);
144
145 this_err = abs(clock.dot - target);
146 if (this_err < err) {
147 *best_clock = clock;
148 err = this_err;
149 }
150 }
151 }
152 dev_dbg(crtc->dev->dev, "mrstFindBestPLL err = %d.\n", err);
153 return err != target;
154}
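
The search is exhaustive over the two divisor ranges, scoring each candidate
by its distance from the target dot clock via the equation in
oaktrail_clock(): dot = refclk * m / (14 * p1). The same search in a
standalone form, using the MRST_LIMIT_LVDS_100 ranges (m 10..17, p1 2..8),
a 200000 kHz reference and a hypothetical 65000 kHz target:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	int refclk = 200000, target = 65000;	/* kHz */
	int best_m = 0, best_p1 = 0, err = target;
	int m, p1;

	for (m = 10; m <= 17; m++) {
		for (p1 = 2; p1 <= 8; p1++) {
			int dot = (refclk * m) / (14 * p1);
			int this_err = abs(dot - target);

			if (this_err < err) {
				err = this_err;
				best_m = m;
				best_p1 = p1;
			}
		}
	}
	printf("best m=%d p1=%d err=%d kHz\n", best_m, best_p1, err);
	return 0;
}
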
155
156/**
157 * Sets the power management mode of the pipe and plane.
158 *
159 * This code should probably grow support for turning the cursor off and back
160 * on appropriately at the same time as we're turning the pipe off/on.
161 */
162static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
163{
164 struct drm_device *dev = crtc->dev;
165 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
166 int pipe = psb_intel_crtc->pipe;
167 int dpll_reg = (pipe == 0) ? MRST_DPLL_A : DPLL_B;
168 int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
169 int dspbase_reg = (pipe == 0) ? MRST_DSPABASE : DSPBBASE;
170 int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
171 u32 temp;
172 bool enabled;
173
174 if (!gma_power_begin(dev, true))
175 return;
176
177 /* XXX: When our outputs are all unaware of DPMS modes other than off
178 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
179 */
180 switch (mode) {
181 case DRM_MODE_DPMS_ON:
182 case DRM_MODE_DPMS_STANDBY:
183 case DRM_MODE_DPMS_SUSPEND:
184 /* Enable the DPLL */
185 temp = REG_READ(dpll_reg);
186 if ((temp & DPLL_VCO_ENABLE) == 0) {
187 REG_WRITE(dpll_reg, temp);
188 REG_READ(dpll_reg);
189 /* Wait for the clocks to stabilize. */
190 udelay(150);
191 REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
192 REG_READ(dpll_reg);
193 /* Wait for the clocks to stabilize. */
194 udelay(150);
195 REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
196 REG_READ(dpll_reg);
197 /* Wait for the clocks to stabilize. */
198 udelay(150);
199 }
200 /* Enable the pipe */
201 temp = REG_READ(pipeconf_reg);
202 if ((temp & PIPEACONF_ENABLE) == 0)
203 REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
204 /* Enable the plane */
205 temp = REG_READ(dspcntr_reg);
206 if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
207 REG_WRITE(dspcntr_reg,
208 temp | DISPLAY_PLANE_ENABLE);
209 /* Flush the plane changes */
210 REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
211 }
212
213 psb_intel_crtc_load_lut(crtc);
214
215 /* Give the overlay scaler a chance to enable
216 if it's on this pipe */
217 /* psb_intel_crtc_dpms_video(crtc, true); TODO */
218 break;
219 case DRM_MODE_DPMS_OFF:
220 /* Give the overlay scaler a chance to disable
221 * if it's on this pipe */
222 /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
223
224 /* Disable the VGA plane that we never use */
225 REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
226 /* Disable display plane */
227 temp = REG_READ(dspcntr_reg);
228 if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
229 REG_WRITE(dspcntr_reg,
230 temp & ~DISPLAY_PLANE_ENABLE);
231 /* Flush the plane changes */
232 REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
233 REG_READ(dspbase_reg);
234 }
235
236 /* Next, disable display pipes */
237 temp = REG_READ(pipeconf_reg);
238 if ((temp & PIPEACONF_ENABLE) != 0) {
239 REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
240 REG_READ(pipeconf_reg);
241 }
242		/* Wait for the pipe disable to take effect. */
243 psb_intel_wait_for_vblank(dev);
244
245 temp = REG_READ(dpll_reg);
246 if ((temp & DPLL_VCO_ENABLE) != 0) {
247 REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
248 REG_READ(dpll_reg);
249 }
250
251 /* Wait for the clocks to turn off. */
252 udelay(150);
253 break;
254 }
255
256 enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
257
258 /*Set FIFO Watermarks*/
259 REG_WRITE(DSPARB, 0x3FFF);
260 REG_WRITE(DSPFW1, 0x3F88080A);
261 REG_WRITE(DSPFW2, 0x0b060808);
262 REG_WRITE(DSPFW3, 0x0);
263 REG_WRITE(DSPFW4, 0x08030404);
264 REG_WRITE(DSPFW5, 0x04040404);
265 REG_WRITE(DSPFW6, 0x78);
266 REG_WRITE(0x70400, REG_READ(0x70400) | 0x4000);
267 /* Must write Bit 14 of the Chicken Bit Register */
268
269 gma_power_end(dev);
270}
271
272/**
273 * Return the pipe currently connected to the panel fitter,
274 * or -1 if the panel fitter is not present or not in use
275 */
276static int oaktrail_panel_fitter_pipe(struct drm_device *dev)
277{
278 u32 pfit_control;
279
280 pfit_control = REG_READ(PFIT_CONTROL);
281
282 /* See if the panel fitter is in use */
283 if ((pfit_control & PFIT_ENABLE) == 0)
284 return -1;
285 return (pfit_control >> 29) & 3;
286}
287
288static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
289 struct drm_display_mode *mode,
290 struct drm_display_mode *adjusted_mode,
291 int x, int y,
292 struct drm_framebuffer *old_fb)
293{
294 struct drm_device *dev = crtc->dev;
295 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
296 struct drm_psb_private *dev_priv = dev->dev_private;
297 int pipe = psb_intel_crtc->pipe;
298 int fp_reg = (pipe == 0) ? MRST_FPA0 : FPB0;
299 int dpll_reg = (pipe == 0) ? MRST_DPLL_A : DPLL_B;
300 int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
301 int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
302 int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
303 int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
304 int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
305 int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
306 int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
307 int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
308 int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
309 int refclk = 0;
310 struct oaktrail_clock_t clock;
311 u32 dpll = 0, fp = 0, dspcntr, pipeconf;
312 bool ok, is_sdvo = false;
313 bool is_crt = false, is_lvds = false, is_tv = false;
314 bool is_mipi = false;
315 struct drm_mode_config *mode_config = &dev->mode_config;
316 struct psb_intel_encoder *psb_intel_encoder = NULL;
317 uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
318 struct drm_connector *connector;
319
320 if (!gma_power_begin(dev, true))
321 return 0;
322
323 memcpy(&psb_intel_crtc->saved_mode,
324 mode,
325 sizeof(struct drm_display_mode));
326 memcpy(&psb_intel_crtc->saved_adjusted_mode,
327 adjusted_mode,
328 sizeof(struct drm_display_mode));
329
330 list_for_each_entry(connector, &mode_config->connector_list, head) {
331 if (!connector->encoder || connector->encoder->crtc != crtc)
332 continue;
333
334 psb_intel_encoder = psb_intel_attached_encoder(connector);
335
336 switch (psb_intel_encoder->type) {
337 case INTEL_OUTPUT_LVDS:
338 is_lvds = true;
339 break;
340 case INTEL_OUTPUT_SDVO:
341 is_sdvo = true;
342 break;
343 case INTEL_OUTPUT_TVOUT:
344 is_tv = true;
345 break;
346 case INTEL_OUTPUT_ANALOG:
347 is_crt = true;
348 break;
349 case INTEL_OUTPUT_MIPI:
350 is_mipi = true;
351 break;
352 }
353 }
354
355 /* Disable the VGA plane that we never use */
356 REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
357
358 /* Disable the panel fitter if it was on our pipe */
359 if (oaktrail_panel_fitter_pipe(dev) == pipe)
360 REG_WRITE(PFIT_CONTROL, 0);
361
362 REG_WRITE(pipesrc_reg,
363 ((mode->crtc_hdisplay - 1) << 16) |
364 (mode->crtc_vdisplay - 1));
365
366 if (psb_intel_encoder)
367 drm_connector_property_get_value(connector,
368 dev->mode_config.scaling_mode_property, &scalingType);
369
370 if (scalingType == DRM_MODE_SCALE_NO_SCALE) {
371 /* Moorestown doesn't have register support for centering so
372 * we need to mess with the h/vblank and h/vsync start and
373 * ends to get centering */
374 int offsetX = 0, offsetY = 0;
375
376 offsetX = (adjusted_mode->crtc_hdisplay -
377 mode->crtc_hdisplay) / 2;
378 offsetY = (adjusted_mode->crtc_vdisplay -
379 mode->crtc_vdisplay) / 2;
380
381 REG_WRITE(htot_reg, (mode->crtc_hdisplay - 1) |
382 ((adjusted_mode->crtc_htotal - 1) << 16));
383 REG_WRITE(vtot_reg, (mode->crtc_vdisplay - 1) |
384 ((adjusted_mode->crtc_vtotal - 1) << 16));
385 REG_WRITE(hblank_reg,
386 (adjusted_mode->crtc_hblank_start - offsetX - 1) |
387 ((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16));
388 REG_WRITE(hsync_reg,
389 (adjusted_mode->crtc_hsync_start - offsetX - 1) |
390 ((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16));
391 REG_WRITE(vblank_reg,
392 (adjusted_mode->crtc_vblank_start - offsetY - 1) |
393 ((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16));
394 REG_WRITE(vsync_reg,
395 (adjusted_mode->crtc_vsync_start - offsetY - 1) |
396 ((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16));
397 } else {
398 REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
399 ((adjusted_mode->crtc_htotal - 1) << 16));
400 REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
401 ((adjusted_mode->crtc_vtotal - 1) << 16));
402 REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
403 ((adjusted_mode->crtc_hblank_end - 1) << 16));
404 REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
405 ((adjusted_mode->crtc_hsync_end - 1) << 16));
406 REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
407 ((adjusted_mode->crtc_vblank_end - 1) << 16));
408 REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
409 ((adjusted_mode->crtc_vsync_end - 1) << 16));
410 }
411
412 /* Flush the plane changes */
413 {
414 struct drm_crtc_helper_funcs *crtc_funcs =
415 crtc->helper_private;
416 crtc_funcs->mode_set_base(crtc, x, y, old_fb);
417 }
418
419 /* setup pipeconf */
420 pipeconf = REG_READ(pipeconf_reg);
421
422 /* Set up the display plane register */
423 dspcntr = REG_READ(dspcntr_reg);
424 dspcntr |= DISPPLANE_GAMMA_ENABLE;
425
426 if (pipe == 0)
427 dspcntr |= DISPPLANE_SEL_PIPE_A;
428 else
429 dspcntr |= DISPPLANE_SEL_PIPE_B;
430
431 dev_priv->dspcntr = dspcntr |= DISPLAY_PLANE_ENABLE;
432 dev_priv->pipeconf = pipeconf |= PIPEACONF_ENABLE;
433
434 if (is_mipi)
435 goto oaktrail_crtc_mode_set_exit;
436
437 refclk = dev_priv->core_freq * 1000;
438
439 dpll = 0; /*BIT16 = 0 for 100MHz reference */
440
441 ok = mrstFindBestPLL(crtc, adjusted_mode->clock, refclk, &clock);
442
443 if (!ok) {
444 dev_dbg(dev->dev, "mrstFindBestPLL fail in oaktrail_crtc_mode_set.\n");
445 } else {
446		dev_dbg(dev->dev, "oaktrail_crtc_mode_set pixel clock = %d, "
447 "m = %x, p1 = %x.\n", clock.dot, clock.m,
448 clock.p1);
449 }
450
451 fp = oaktrail_m_converts[(clock.m - MRST_M_MIN)] << 8;
452
453 dpll |= DPLL_VGA_MODE_DIS;
454
455
456 dpll |= DPLL_VCO_ENABLE;
457
458 if (is_lvds)
459 dpll |= DPLLA_MODE_LVDS;
460 else
461 dpll |= DPLLB_MODE_DAC_SERIAL;
462
463 if (is_sdvo) {
464 int sdvo_pixel_multiply =
465 adjusted_mode->clock / mode->clock;
466
467 dpll |= DPLL_DVO_HIGH_SPEED;
468 dpll |=
469 (sdvo_pixel_multiply -
470 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
471 }
472
473
474 /* compute bitmask from p1 value */
475 dpll |= (1 << (clock.p1 - 2)) << 17;
476
477 dpll |= DPLL_VCO_ENABLE;
478
479 mrstPrintPll("chosen", &clock);
480
481 if (dpll & DPLL_VCO_ENABLE) {
482 REG_WRITE(fp_reg, fp);
483 REG_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
484 REG_READ(dpll_reg);
485 /* Check the DPLLA lock bit PIPEACONF[29] */
486 udelay(150);
487 }
488
489 REG_WRITE(fp_reg, fp);
490 REG_WRITE(dpll_reg, dpll);
491 REG_READ(dpll_reg);
492 /* Wait for the clocks to stabilize. */
493 udelay(150);
494
495 /* write it again -- the BIOS does, after all */
496 REG_WRITE(dpll_reg, dpll);
497 REG_READ(dpll_reg);
498 /* Wait for the clocks to stabilize. */
499 udelay(150);
500
501 REG_WRITE(pipeconf_reg, pipeconf);
502 REG_READ(pipeconf_reg);
503 psb_intel_wait_for_vblank(dev);
504
505 REG_WRITE(dspcntr_reg, dspcntr);
506 psb_intel_wait_for_vblank(dev);
507
508oaktrail_crtc_mode_set_exit:
509 gma_power_end(dev);
510 return 0;
511}
512
513static bool oaktrail_crtc_mode_fixup(struct drm_crtc *crtc,
514 struct drm_display_mode *mode,
515 struct drm_display_mode *adjusted_mode)
516{
517 return true;
518}
519
520int oaktrail_pipe_set_base(struct drm_crtc *crtc,
521 int x, int y, struct drm_framebuffer *old_fb)
522{
523 struct drm_device *dev = crtc->dev;
524 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
525 struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
526 int pipe = psb_intel_crtc->pipe;
527 unsigned long start, offset;
528
529 int dspbase = (pipe == 0 ? DSPALINOFF : DSPBBASE);
530 int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
531 int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
532 int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
533 u32 dspcntr;
534 int ret = 0;
535
536 /* no fb bound */
537 if (!crtc->fb) {
538 dev_dbg(dev->dev, "No FB bound\n");
539 return 0;
540 }
541
542 if (!gma_power_begin(dev, true))
543 return 0;
544
545 start = psbfb->gtt->offset;
546 offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
547
548 REG_WRITE(dspstride, crtc->fb->pitches[0]);
549
550 dspcntr = REG_READ(dspcntr_reg);
551 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
552
553 switch (crtc->fb->bits_per_pixel) {
554 case 8:
555 dspcntr |= DISPPLANE_8BPP;
556 break;
557 case 16:
558 if (crtc->fb->depth == 15)
559 dspcntr |= DISPPLANE_15_16BPP;
560 else
561 dspcntr |= DISPPLANE_16BPP;
562 break;
563 case 24:
564 case 32:
565 dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
566 break;
567 default:
568 dev_err(dev->dev, "Unknown color depth\n");
569 ret = -EINVAL;
570 goto pipe_set_base_exit;
571 }
572 REG_WRITE(dspcntr_reg, dspcntr);
573
574 REG_WRITE(dspbase, offset);
575 REG_READ(dspbase);
576 REG_WRITE(dspsurf, start);
577 REG_READ(dspsurf);
578
579pipe_set_base_exit:
580 gma_power_end(dev);
581 return ret;
582}
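
The base/offset programming above boils down to linear framebuffer
addressing: the byte offset of pixel (x, y) is y * pitch + x * bytes-per-pixel.
For instance, with a hypothetical 4096-byte pitch at 32 bpp:

#include <stdio.h>

int main(void)
{
	unsigned int pitch = 4096;	/* bytes per scanline */
	unsigned int bpp = 32, x = 100, y = 50;

	unsigned long offset = (unsigned long)y * pitch + x * (bpp / 8);
	printf("pixel (%u,%u) -> byte offset %lu\n", x, y, offset);
	return 0;
}
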
583
584static void oaktrail_crtc_prepare(struct drm_crtc *crtc)
585{
586 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
587 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
588}
589
590static void oaktrail_crtc_commit(struct drm_crtc *crtc)
591{
592 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
593 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
594}
595
596const struct drm_crtc_helper_funcs oaktrail_helper_funcs = {
597 .dpms = oaktrail_crtc_dpms,
598 .mode_fixup = oaktrail_crtc_mode_fixup,
599 .mode_set = oaktrail_crtc_mode_set,
600 .mode_set_base = oaktrail_pipe_set_base,
601 .prepare = oaktrail_crtc_prepare,
602 .commit = oaktrail_crtc_commit,
603};
604
diff --git a/drivers/gpu/drm/gma500/oaktrail_device.c b/drivers/gpu/drm/gma500/oaktrail_device.c
new file mode 100644
index 00000000000..63aea2f010d
--- /dev/null
+++ b/drivers/gpu/drm/gma500/oaktrail_device.c
@@ -0,0 +1,512 @@
1/**************************************************************************
2 * Copyright (c) 2011, Intel Corporation.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 **************************************************************************/
19
20#include <linux/backlight.h>
21#include <linux/module.h>
22#include <linux/dmi.h>
23#include <drm/drmP.h>
24#include <drm/drm.h>
25#include "gma_drm.h"
26#include "psb_drv.h"
27#include "psb_reg.h"
28#include "psb_intel_reg.h"
29#include <asm/mrst.h>
30#include <asm/intel_scu_ipc.h>
31#include "mid_bios.h"
32#include "intel_bios.h"
33
34static int oaktrail_output_init(struct drm_device *dev)
35{
36 struct drm_psb_private *dev_priv = dev->dev_private;
37 if (dev_priv->iLVDS_enable)
38 oaktrail_lvds_init(dev, &dev_priv->mode_dev);
39 else
40 dev_err(dev->dev, "DSI is not supported\n");
41 if (dev_priv->hdmi_priv)
42 oaktrail_hdmi_init(dev, &dev_priv->mode_dev);
43 return 0;
44}
45
46/*
47 * Provide the low level interfaces for the Moorestown backlight
48 */
49
50#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
51
52#define MRST_BLC_MAX_PWM_REG_FREQ 0xFFFF
53#define BLC_PWM_PRECISION_FACTOR 100 /* 10000000 */
54#define BLC_PWM_FREQ_CALC_CONSTANT 32
55#define MHz 1000000
56#define BLC_ADJUSTMENT_MAX 100
57
58static struct backlight_device *oaktrail_backlight_device;
59static int oaktrail_brightness;
60
61static int oaktrail_set_brightness(struct backlight_device *bd)
62{
63 struct drm_device *dev = bl_get_data(oaktrail_backlight_device);
64 struct drm_psb_private *dev_priv = dev->dev_private;
65 int level = bd->props.brightness;
66 u32 blc_pwm_ctl;
67 u32 max_pwm_blc;
68
69	/* Only percentages from 1 to 100 are valid */
70 if (level < 1)
71 level = 1;
72
73 if (gma_power_begin(dev, 0)) {
74 /* Calculate and set the brightness value */
75 max_pwm_blc = REG_READ(BLC_PWM_CTL) >> 16;
76 blc_pwm_ctl = level * max_pwm_blc / 100;
77
78 /* Adjust the backlight level with the percent in
79 * dev_priv->blc_adj1;
80 */
81 blc_pwm_ctl = blc_pwm_ctl * dev_priv->blc_adj1;
82 blc_pwm_ctl = blc_pwm_ctl / 100;
83
84 /* Adjust the backlight level with the percent in
85 * dev_priv->blc_adj2;
86 */
87 blc_pwm_ctl = blc_pwm_ctl * dev_priv->blc_adj2;
88 blc_pwm_ctl = blc_pwm_ctl / 100;
89
90 /* force PWM bit on */
91 REG_WRITE(BLC_PWM_CTL2, (0x80000000 | REG_READ(BLC_PWM_CTL2)));
92 REG_WRITE(BLC_PWM_CTL, (max_pwm_blc << 16) | blc_pwm_ctl);
93 gma_power_end(dev);
94 }
95 oaktrail_brightness = level;
96 return 0;
97}
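
The duty-cycle math above chains three percentage scalings: the requested
level against the PWM maximum, then the two adjustment factors blc_adj1 and
blc_adj2. A sketch with hypothetical adjustment values:

#include <stdio.h>

int main(void)
{
	unsigned int max_pwm = 0x1000, level = 75;
	unsigned int adj1 = 100, adj2 = 80;	/* hypothetical values */

	unsigned int duty = level * max_pwm / 100;
	duty = duty * adj1 / 100;
	duty = duty * adj2 / 100;
	printf("duty = 0x%x of 0x%x\n", duty, max_pwm);	/* 0x999 of 0x1000 */
	return 0;
}
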
98
99static int oaktrail_get_brightness(struct backlight_device *bd)
100{
101 /* return locally cached var instead of HW read (due to DPST etc.) */
102 /* FIXME: ideally return actual value in case firmware fiddled with
103 it */
104 return oaktrail_brightness;
105}
106
107static int device_backlight_init(struct drm_device *dev)
108{
109 struct drm_psb_private *dev_priv = dev->dev_private;
110 unsigned long core_clock;
111 u16 bl_max_freq;
112 uint32_t value;
113 uint32_t blc_pwm_precision_factor;
114
115 dev_priv->blc_adj1 = BLC_ADJUSTMENT_MAX;
116 dev_priv->blc_adj2 = BLC_ADJUSTMENT_MAX;
117 bl_max_freq = 256;
118 /* this needs to be set elsewhere */
119 blc_pwm_precision_factor = BLC_PWM_PRECISION_FACTOR;
120
121 core_clock = dev_priv->core_freq;
122
123 value = (core_clock * MHz) / BLC_PWM_FREQ_CALC_CONSTANT;
124 value *= blc_pwm_precision_factor;
125 value /= bl_max_freq;
126 value /= blc_pwm_precision_factor;
127
128 if (value > (unsigned long long)MRST_BLC_MAX_PWM_REG_FREQ)
129 return -ERANGE;
130
131 if (gma_power_begin(dev, false)) {
132 REG_WRITE(BLC_PWM_CTL2, (0x80000000 | REG_READ(BLC_PWM_CTL2)));
133 REG_WRITE(BLC_PWM_CTL, value | (value << 16));
134 gma_power_end(dev);
135 }
136 return 0;
137}
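
For a 200 MHz core clock the computation above yields a value well under
MRST_BLC_MAX_PWM_REG_FREQ; note that the precision factor is multiplied in
and divided back out, so it only affects intermediate rounding. Worked
through numerically:

#include <stdio.h>

int main(void)
{
	unsigned long core_clock = 200;			/* MHz */
	unsigned long value = core_clock * 1000000 / 32;

	value = value * 100;	/* BLC_PWM_PRECISION_FACTOR in */
	value = value / 256;	/* bl_max_freq */
	value = value / 100;	/* BLC_PWM_PRECISION_FACTOR out */
	printf("BLC_PWM_CTL base = %lu (0x%lx)\n", value, value);	/* 24414 */
	return 0;
}
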
138
139static const struct backlight_ops oaktrail_ops = {
140 .get_brightness = oaktrail_get_brightness,
141 .update_status = oaktrail_set_brightness,
142};
143
144int oaktrail_backlight_init(struct drm_device *dev)
145{
146 struct drm_psb_private *dev_priv = dev->dev_private;
147 int ret;
148 struct backlight_properties props;
149
150 memset(&props, 0, sizeof(struct backlight_properties));
151 props.max_brightness = 100;
152 props.type = BACKLIGHT_PLATFORM;
153
154 oaktrail_backlight_device = backlight_device_register("oaktrail-bl",
155 NULL, (void *)dev, &oaktrail_ops, &props);
156
157 if (IS_ERR(oaktrail_backlight_device))
158 return PTR_ERR(oaktrail_backlight_device);
159
160 ret = device_backlight_init(dev);
161 if (ret < 0) {
162 backlight_device_unregister(oaktrail_backlight_device);
163 return ret;
164 }
165 oaktrail_backlight_device->props.brightness = 100;
166 oaktrail_backlight_device->props.max_brightness = 100;
167 backlight_update_status(oaktrail_backlight_device);
168 dev_priv->backlight_device = oaktrail_backlight_device;
169 return 0;
170}
171
172#endif
173
174/*
175 * Provide the Moorestown specific chip logic and low level methods
176 * for power management
177 */
178
179static void oaktrail_init_pm(struct drm_device *dev)
180{
181}
182
183/**
184 * oaktrail_save_display_registers - save registers lost on suspend
185 * @dev: our DRM device
186 *
187 * Save the state we need in order to be able to restore the interface
188 * upon resume from suspend
189 */
190static int oaktrail_save_display_registers(struct drm_device *dev)
191{
192 struct drm_psb_private *dev_priv = dev->dev_private;
193 int i;
194 u32 pp_stat;
195
196 /* Display arbitration control + watermarks */
197 dev_priv->saveDSPARB = PSB_RVDC32(DSPARB);
198 dev_priv->saveDSPFW1 = PSB_RVDC32(DSPFW1);
199 dev_priv->saveDSPFW2 = PSB_RVDC32(DSPFW2);
200 dev_priv->saveDSPFW3 = PSB_RVDC32(DSPFW3);
201 dev_priv->saveDSPFW4 = PSB_RVDC32(DSPFW4);
202 dev_priv->saveDSPFW5 = PSB_RVDC32(DSPFW5);
203 dev_priv->saveDSPFW6 = PSB_RVDC32(DSPFW6);
204 dev_priv->saveCHICKENBIT = PSB_RVDC32(DSPCHICKENBIT);
205
206 /* Pipe & plane A info */
207 dev_priv->savePIPEACONF = PSB_RVDC32(PIPEACONF);
208 dev_priv->savePIPEASRC = PSB_RVDC32(PIPEASRC);
209 dev_priv->saveFPA0 = PSB_RVDC32(MRST_FPA0);
210 dev_priv->saveFPA1 = PSB_RVDC32(MRST_FPA1);
211 dev_priv->saveDPLL_A = PSB_RVDC32(MRST_DPLL_A);
212 dev_priv->saveHTOTAL_A = PSB_RVDC32(HTOTAL_A);
213 dev_priv->saveHBLANK_A = PSB_RVDC32(HBLANK_A);
214 dev_priv->saveHSYNC_A = PSB_RVDC32(HSYNC_A);
215 dev_priv->saveVTOTAL_A = PSB_RVDC32(VTOTAL_A);
216 dev_priv->saveVBLANK_A = PSB_RVDC32(VBLANK_A);
217 dev_priv->saveVSYNC_A = PSB_RVDC32(VSYNC_A);
218 dev_priv->saveBCLRPAT_A = PSB_RVDC32(BCLRPAT_A);
219 dev_priv->saveDSPACNTR = PSB_RVDC32(DSPACNTR);
220 dev_priv->saveDSPASTRIDE = PSB_RVDC32(DSPASTRIDE);
221 dev_priv->saveDSPAADDR = PSB_RVDC32(DSPABASE);
222 dev_priv->saveDSPASURF = PSB_RVDC32(DSPASURF);
223 dev_priv->saveDSPALINOFF = PSB_RVDC32(DSPALINOFF);
224 dev_priv->saveDSPATILEOFF = PSB_RVDC32(DSPATILEOFF);
225
226 /* Save cursor regs */
227 dev_priv->saveDSPACURSOR_CTRL = PSB_RVDC32(CURACNTR);
228 dev_priv->saveDSPACURSOR_BASE = PSB_RVDC32(CURABASE);
229 dev_priv->saveDSPACURSOR_POS = PSB_RVDC32(CURAPOS);
230
231 /* Save palette (gamma) */
232 for (i = 0; i < 256; i++)
233 dev_priv->save_palette_a[i] = PSB_RVDC32(PALETTE_A + (i << 2));
234
235 if (dev_priv->hdmi_priv)
236 oaktrail_hdmi_save(dev);
237
238 /* Save performance state */
239 dev_priv->savePERF_MODE = PSB_RVDC32(MRST_PERF_MODE);
240
241 /* LVDS state */
242 dev_priv->savePP_CONTROL = PSB_RVDC32(PP_CONTROL);
243 dev_priv->savePFIT_PGM_RATIOS = PSB_RVDC32(PFIT_PGM_RATIOS);
244 dev_priv->savePFIT_AUTO_RATIOS = PSB_RVDC32(PFIT_AUTO_RATIOS);
245 dev_priv->saveBLC_PWM_CTL = PSB_RVDC32(BLC_PWM_CTL);
246 dev_priv->saveBLC_PWM_CTL2 = PSB_RVDC32(BLC_PWM_CTL2);
247 dev_priv->saveLVDS = PSB_RVDC32(LVDS);
248 dev_priv->savePFIT_CONTROL = PSB_RVDC32(PFIT_CONTROL);
249 dev_priv->savePP_ON_DELAYS = PSB_RVDC32(LVDSPP_ON);
250 dev_priv->savePP_OFF_DELAYS = PSB_RVDC32(LVDSPP_OFF);
251 dev_priv->savePP_DIVISOR = PSB_RVDC32(PP_CYCLE);
252
253 /* HW overlay */
254 dev_priv->saveOV_OVADD = PSB_RVDC32(OV_OVADD);
255 dev_priv->saveOV_OGAMC0 = PSB_RVDC32(OV_OGAMC0);
256 dev_priv->saveOV_OGAMC1 = PSB_RVDC32(OV_OGAMC1);
257 dev_priv->saveOV_OGAMC2 = PSB_RVDC32(OV_OGAMC2);
258 dev_priv->saveOV_OGAMC3 = PSB_RVDC32(OV_OGAMC3);
259 dev_priv->saveOV_OGAMC4 = PSB_RVDC32(OV_OGAMC4);
260 dev_priv->saveOV_OGAMC5 = PSB_RVDC32(OV_OGAMC5);
261
262 /* DPST registers */
263 dev_priv->saveHISTOGRAM_INT_CONTROL_REG =
264 PSB_RVDC32(HISTOGRAM_INT_CONTROL);
265 dev_priv->saveHISTOGRAM_LOGIC_CONTROL_REG =
266 PSB_RVDC32(HISTOGRAM_LOGIC_CONTROL);
267 dev_priv->savePWM_CONTROL_LOGIC = PSB_RVDC32(PWM_CONTROL_LOGIC);
268
269 if (dev_priv->iLVDS_enable) {
270 /* Shut down the panel */
271 PSB_WVDC32(0, PP_CONTROL);
272
273 do {
274 pp_stat = PSB_RVDC32(PP_STATUS);
275 } while (pp_stat & 0x80000000);
276
277 /* Turn off the plane */
278 PSB_WVDC32(0x58000000, DSPACNTR);
279 /* Trigger the plane disable */
280 PSB_WVDC32(0, DSPASURF);
281
282 /* Wait ~4 ms */
283 msleep(4);
284
285 /* Turn off pipe */
286 PSB_WVDC32(0x0, PIPEACONF);
287 /* Wait ~8 ms */
288 msleep(8);
289
290 /* Turn off PLLs */
291 PSB_WVDC32(0, MRST_DPLL_A);
292 }
293 return 0;
294}
295
296/**
297 * oaktrail_restore_display_registers - restore lost register state
298 * @dev: our DRM device
299 *
300 * Restore register state that was lost during suspend and resume.
301 */
302static int oaktrail_restore_display_registers(struct drm_device *dev)
303{
304 struct drm_psb_private *dev_priv = dev->dev_private;
305 u32 pp_stat;
306 int i;
307
308 /* Display arbitration + watermarks */
309 PSB_WVDC32(dev_priv->saveDSPARB, DSPARB);
310 PSB_WVDC32(dev_priv->saveDSPFW1, DSPFW1);
311 PSB_WVDC32(dev_priv->saveDSPFW2, DSPFW2);
312 PSB_WVDC32(dev_priv->saveDSPFW3, DSPFW3);
313 PSB_WVDC32(dev_priv->saveDSPFW4, DSPFW4);
314 PSB_WVDC32(dev_priv->saveDSPFW5, DSPFW5);
315 PSB_WVDC32(dev_priv->saveDSPFW6, DSPFW6);
316 PSB_WVDC32(dev_priv->saveCHICKENBIT, DSPCHICKENBIT);
317
318 /* Make sure the VGA plane is off; it initializes to on after reset! */
319 PSB_WVDC32(0x80000000, VGACNTRL);
320
321 /* set the plls */
322 PSB_WVDC32(dev_priv->saveFPA0, MRST_FPA0);
323 PSB_WVDC32(dev_priv->saveFPA1, MRST_FPA1);
324
325 /* Actually enable it */
326 PSB_WVDC32(dev_priv->saveDPLL_A, MRST_DPLL_A);
327 DRM_UDELAY(150);
328
329 /* Restore mode */
330 PSB_WVDC32(dev_priv->saveHTOTAL_A, HTOTAL_A);
331 PSB_WVDC32(dev_priv->saveHBLANK_A, HBLANK_A);
332 PSB_WVDC32(dev_priv->saveHSYNC_A, HSYNC_A);
333 PSB_WVDC32(dev_priv->saveVTOTAL_A, VTOTAL_A);
334 PSB_WVDC32(dev_priv->saveVBLANK_A, VBLANK_A);
335 PSB_WVDC32(dev_priv->saveVSYNC_A, VSYNC_A);
336 PSB_WVDC32(dev_priv->savePIPEASRC, PIPEASRC);
337 PSB_WVDC32(dev_priv->saveBCLRPAT_A, BCLRPAT_A);
338
339 /* Restore performance mode*/
340 PSB_WVDC32(dev_priv->savePERF_MODE, MRST_PERF_MODE);
341
342 /* Enable the pipe*/
343 if (dev_priv->iLVDS_enable)
344 PSB_WVDC32(dev_priv->savePIPEACONF, PIPEACONF);
345
346 /* Set up the plane*/
347 PSB_WVDC32(dev_priv->saveDSPALINOFF, DSPALINOFF);
348 PSB_WVDC32(dev_priv->saveDSPASTRIDE, DSPASTRIDE);
349 PSB_WVDC32(dev_priv->saveDSPATILEOFF, DSPATILEOFF);
350
351 /* Enable the plane */
352 PSB_WVDC32(dev_priv->saveDSPACNTR, DSPACNTR);
353 PSB_WVDC32(dev_priv->saveDSPASURF, DSPASURF);
354
355 /* Enable Cursor A */
356 PSB_WVDC32(dev_priv->saveDSPACURSOR_CTRL, CURACNTR);
357 PSB_WVDC32(dev_priv->saveDSPACURSOR_POS, CURAPOS);
358 PSB_WVDC32(dev_priv->saveDSPACURSOR_BASE, CURABASE);
359
360 /* Restore palette (gamma) */
361 for (i = 0; i < 256; i++)
362 PSB_WVDC32(dev_priv->save_palette_a[i], PALETTE_A + (i << 2));
363
364 if (dev_priv->hdmi_priv)
365 oaktrail_hdmi_restore(dev);
366
367 if (dev_priv->iLVDS_enable) {
368 PSB_WVDC32(dev_priv->saveBLC_PWM_CTL2, BLC_PWM_CTL2);
369 PSB_WVDC32(dev_priv->saveLVDS, LVDS); /*port 61180h*/
370 PSB_WVDC32(dev_priv->savePFIT_CONTROL, PFIT_CONTROL);
371 PSB_WVDC32(dev_priv->savePFIT_PGM_RATIOS, PFIT_PGM_RATIOS);
372 PSB_WVDC32(dev_priv->savePFIT_AUTO_RATIOS, PFIT_AUTO_RATIOS);
373 PSB_WVDC32(dev_priv->saveBLC_PWM_CTL, BLC_PWM_CTL);
374 PSB_WVDC32(dev_priv->savePP_ON_DELAYS, LVDSPP_ON);
375 PSB_WVDC32(dev_priv->savePP_OFF_DELAYS, LVDSPP_OFF);
376 PSB_WVDC32(dev_priv->savePP_DIVISOR, PP_CYCLE);
377 PSB_WVDC32(dev_priv->savePP_CONTROL, PP_CONTROL);
378 }
379
380 /* Wait for cycle delay */
381 do {
382 pp_stat = PSB_RVDC32(PP_STATUS);
383 } while (pp_stat & 0x08000000);
384
385 /* Wait for panel power up */
386 do {
387 pp_stat = PSB_RVDC32(PP_STATUS);
388 } while (pp_stat & 0x10000000);
389
390 /* Restore HW overlay */
391 PSB_WVDC32(dev_priv->saveOV_OVADD, OV_OVADD);
392 PSB_WVDC32(dev_priv->saveOV_OGAMC0, OV_OGAMC0);
393 PSB_WVDC32(dev_priv->saveOV_OGAMC1, OV_OGAMC1);
394 PSB_WVDC32(dev_priv->saveOV_OGAMC2, OV_OGAMC2);
395 PSB_WVDC32(dev_priv->saveOV_OGAMC3, OV_OGAMC3);
396 PSB_WVDC32(dev_priv->saveOV_OGAMC4, OV_OGAMC4);
397 PSB_WVDC32(dev_priv->saveOV_OGAMC5, OV_OGAMC5);
398
399 /* DPST registers */
400 PSB_WVDC32(dev_priv->saveHISTOGRAM_INT_CONTROL_REG,
401 HISTOGRAM_INT_CONTROL);
402 PSB_WVDC32(dev_priv->saveHISTOGRAM_LOGIC_CONTROL_REG,
403 HISTOGRAM_LOGIC_CONTROL);
404 PSB_WVDC32(dev_priv->savePWM_CONTROL_LOGIC, PWM_CONTROL_LOGIC);
405
406 return 0;
407}
408
409/**
410 * oaktrail_power_down - power down the display island
411 * @dev: our DRM device
412 *
413 * Power down the display interface of our device
414 */
415static int oaktrail_power_down(struct drm_device *dev)
416{
417 struct drm_psb_private *dev_priv = dev->dev_private;
418 u32 pwr_mask;
419 u32 pwr_sts;
420
421 pwr_mask = PSB_PWRGT_DISPLAY_MASK;
422 outl(pwr_mask, dev_priv->ospm_base + PSB_PM_SSC);
423
424 while (true) {
425 pwr_sts = inl(dev_priv->ospm_base + PSB_PM_SSS);
426 if ((pwr_sts & pwr_mask) == pwr_mask)
427 break;
428 else
429 udelay(10);
430 }
431 return 0;
432}
433
434/*
435 * oaktrail_power_up - restore power to the display island
436 *
437 * Reverse the power gating applied by oaktrail_power_down()
438 */
439static int oaktrail_power_up(struct drm_device *dev)
440{
441 struct drm_psb_private *dev_priv = dev->dev_private;
442 u32 pwr_mask = PSB_PWRGT_DISPLAY_MASK;
443 u32 pwr_sts, pwr_cnt;
444
445 pwr_cnt = inl(dev_priv->ospm_base + PSB_PM_SSC);
446 pwr_cnt &= ~pwr_mask;
447 outl(pwr_cnt, (dev_priv->ospm_base + PSB_PM_SSC));
448
449 while (true) {
450 pwr_sts = inl(dev_priv->ospm_base + PSB_PM_SSS);
451 if ((pwr_sts & pwr_mask) == 0)
452 break;
453 else
454 udelay(10);
455 }
456 return 0;
457}
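
Both transitions above poll PSB_PM_SSS with no upper bound, so a power island
that never acknowledges the request hangs the caller. A minimal bounded
variant might look like the sketch below; the retry cap and the -ETIMEDOUT
return are illustrative assumptions, not part of this driver. power_down
would call it with expected == pwr_mask, power_up with expected == 0.

static int oaktrail_poll_power_state(struct drm_psb_private *dev_priv,
				     u32 pwr_mask, u32 expected)
{
	int retries = 10000;	/* ~100 ms at 10 us per step (assumed cap) */
	u32 pwr_sts;

	while (retries--) {
		pwr_sts = inl(dev_priv->ospm_base + PSB_PM_SSS);
		if ((pwr_sts & pwr_mask) == expected)
			return 0;
		udelay(10);
	}
	return -ETIMEDOUT;
}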
458
459
460static int oaktrail_chip_setup(struct drm_device *dev)
461{
462 struct drm_psb_private *dev_priv = dev->dev_private;
463 struct oaktrail_vbt *vbt = &dev_priv->vbt_data;
464 int ret;
465
466 ret = mid_chip_setup(dev);
467 if (ret < 0)
468 return ret;
469 if (vbt->size == 0) {
470 /* Now pull the BIOS data */
471 gma_intel_opregion_init(dev);
472 psb_intel_init_bios(dev);
473 }
474 return 0;
475}
476
477static void oaktrail_teardown(struct drm_device *dev)
478{
479 struct drm_psb_private *dev_priv = dev->dev_private;
480 struct oaktrail_vbt *vbt = &dev_priv->vbt_data;
481
482 oaktrail_hdmi_teardown(dev);
483 if (vbt->size == 0)
484 psb_intel_destroy_bios(dev);
485}
486
487const struct psb_ops oaktrail_chip_ops = {
488 .name = "Oaktrail",
489 .accel_2d = 1,
490 .pipes = 2,
491 .crtcs = 2,
492 .sgx_offset = MRST_SGX_OFFSET,
493
494 .chip_setup = oaktrail_chip_setup,
495 .chip_teardown = oaktrail_teardown,
496 .crtc_helper = &oaktrail_helper_funcs,
497 .crtc_funcs = &psb_intel_crtc_funcs,
498
499 .output_init = oaktrail_output_init,
500
501#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
502 .backlight_init = oaktrail_backlight_init,
503#endif
504
505 .init_pm = oaktrail_init_pm,
506 .save_regs = oaktrail_save_display_registers,
507 .restore_regs = oaktrail_restore_display_registers,
508 .power_down = oaktrail_power_down,
509 .power_up = oaktrail_power_up,
510
511 .i2c_bus = 1,
512};
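
For orientation, a sketch of the order in which the gma500 core presumably
drives these hooks across a suspend/resume cycle; the core code lives outside
this file, so the sequence is inferred from the hook names, not quoted.

	/* inferred call order, not code from the gma500 core */
	dev_priv->ops->save_regs(dev);		/* oaktrail_save_display_registers */
	dev_priv->ops->power_down(dev);		/* oaktrail_power_down */
	/* ... system is suspended ... */
	dev_priv->ops->power_up(dev);		/* oaktrail_power_up */
	dev_priv->ops->restore_regs(dev);	/* oaktrail_restore_display_registers */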
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
new file mode 100644
index 00000000000..36878a60080
--- /dev/null
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -0,0 +1,859 @@
1/*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Li Peng <peng.li@intel.com>
25 */
26
27#include <drm/drmP.h>
28#include <drm/drm.h>
29#include "psb_intel_drv.h"
30#include "psb_intel_reg.h"
31#include "psb_drv.h"
32
33#define HDMI_READ(reg) readl(hdmi_dev->regs + (reg))
34#define HDMI_WRITE(reg, val) writel(val, hdmi_dev->regs + (reg))
35
36#define HDMI_HCR 0x1000
37#define HCR_ENABLE_HDCP (1 << 5)
38#define HCR_ENABLE_AUDIO (1 << 2)
39#define HCR_ENABLE_PIXEL (1 << 1)
40#define HCR_ENABLE_TMDS (1 << 0)
41
42#define HDMI_HICR 0x1004
43#define HDMI_HSR 0x1008
44#define HDMI_HISR 0x100C
45#define HDMI_DETECT_HDP (1 << 0)
46
47#define HDMI_VIDEO_REG 0x3000
48#define HDMI_UNIT_EN (1 << 7)
49#define HDMI_MODE_OUTPUT (1 << 0)
50#define HDMI_HBLANK_A 0x3100
51
52#define HDMI_AUDIO_CTRL 0x4000
53#define HDMI_ENABLE_AUDIO (1 << 0)
54
55#define PCH_HTOTAL_B 0x3100
56#define PCH_HBLANK_B 0x3104
57#define PCH_HSYNC_B 0x3108
58#define PCH_VTOTAL_B 0x310C
59#define PCH_VBLANK_B 0x3110
60#define PCH_VSYNC_B 0x3114
61#define PCH_PIPEBSRC 0x311C
62
63#define PCH_PIPEB_DSL 0x3800
64#define PCH_PIPEB_SLC 0x3804
65#define PCH_PIPEBCONF 0x3808
66#define PCH_PIPEBSTAT 0x3824
67
68#define CDVO_DFT 0x5000
69#define CDVO_SLEWRATE 0x5004
70#define CDVO_STRENGTH 0x5008
71#define CDVO_RCOMP 0x500C
72
73#define DPLL_CTRL 0x6000
74#define DPLL_PDIV_SHIFT 16
75#define DPLL_PDIV_MASK (0xf << 16)
76#define DPLL_PWRDN (1 << 4)
77#define DPLL_RESET (1 << 3)
78#define DPLL_FASTEN (1 << 2)
79#define DPLL_ENSTAT (1 << 1)
80#define DPLL_DITHEN (1 << 0)
81
82#define DPLL_DIV_CTRL 0x6004
83#define DPLL_CLKF_MASK 0xffffffc0
84#define DPLL_CLKR_MASK (0x3f)
85
86#define DPLL_CLK_ENABLE 0x6008
87#define DPLL_EN_DISP (1 << 31)
88#define DPLL_SEL_HDMI (1 << 8)
89#define DPLL_EN_HDMI (1 << 1)
90#define DPLL_EN_VGA (1 << 0)
91
92#define DPLL_ADJUST 0x600C
93#define DPLL_STATUS 0x6010
94#define DPLL_UPDATE 0x6014
95#define DPLL_DFT 0x6020
96
97struct intel_range {
98 int min, max;
99};
100
101struct oaktrail_hdmi_limit {
102 struct intel_range vco, np, nr, nf;
103};
104
105struct oaktrail_hdmi_clock {
106 int np;
107 int nr;
108 int nf;
109 int dot;
110};
111
112#define VCO_MIN 320000
113#define VCO_MAX 1650000
114#define NP_MIN 1
115#define NP_MAX 15
116#define NR_MIN 1
117#define NR_MAX 64
118#define NF_MIN 2
119#define NF_MAX 4095
120
121static const struct oaktrail_hdmi_limit oaktrail_hdmi_limit = {
122 .vco = { .min = VCO_MIN, .max = VCO_MAX },
123 .np = { .min = NP_MIN, .max = NP_MAX },
124 .nr = { .min = NR_MIN, .max = NR_MAX },
125 .nf = { .min = NF_MIN, .max = NF_MAX },
126};
127
128static void wait_for_vblank(struct drm_device *dev)
129{
130 /* FIXME: Can we do this as a sleep ? */
131 /* Wait for 20ms, i.e. one frame at 50Hz. */
132 mdelay(20);
133}
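
On the FIXME above: assuming every caller of wait_for_vblank() may sleep
(mode-set and DPMS paths normally can), the busy-wait could become a sleep.
A sketch under that assumption:

static void wait_for_vblank_sleeping(struct drm_device *dev)
{
	/* assumes no caller holds a spinlock or runs in atomic context */
	msleep(20);	/* one frame at 50 Hz */
}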
134
135static void scu_busy_loop(void *scu_base)
136{
137 u32 status = 0;
138 u32 loop_count = 0;
139
140 status = readl(scu_base + 0x04);
141 while (status & 1) {
142 udelay(1); /* SCU processing time is a few microseconds */
143 status = readl(scu_base + 0x04);
144 loop_count++;
145 /* bail out if the SCU doesn't clear the busy bit after many retries */
146 if (loop_count > 1000) {
147 DRM_DEBUG_KMS("SCU IPC timed out");
148 return;
149 }
150 }
151}
152
153static void oaktrail_hdmi_reset(struct drm_device *dev)
154{
155 void *base;
156 /* FIXME: at least make these defines */
157 unsigned int scu_ipc_mmio = 0xff11c000;
158 int scu_len = 1024;
159
160 base = ioremap((resource_size_t)scu_ipc_mmio, scu_len);
161 if (base == NULL) {
162 DRM_ERROR("failed to map SCU mmio\n");
163 return;
164 }
165
166 /* scu ipc: assert hdmi controller reset */
167 writel(0xff11d118, base + 0x0c);
168 writel(0x7fffffdf, base + 0x80);
169 writel(0x42005, base + 0x0);
170 scu_busy_loop(base);
171
172 /* scu ipc: de-assert hdmi controller reset */
173 writel(0xff11d118, base + 0x0c);
174 writel(0x7fffffff, base + 0x80);
175 writel(0x42005, base + 0x0);
176 scu_busy_loop(base);
177
178 iounmap(base);
179}
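
On the FIXME above, a sketch of the defines it asks for. The numeric values
are taken straight from this function; the names, and the reading of the
register offsets (target pointer, data, doorbell, status), are inferred from
how the function uses them, not from documentation.

#define SCU_IPC_MMIO_BASE	0xff11c000
#define SCU_IPC_MMIO_LEN	1024
#define SCU_IPC_SPTR		0x0c	/* target register address (inferred) */
#define SCU_IPC_DATA		0x80	/* value/mask to write (inferred) */
#define SCU_IPC_CMD		0x00	/* command doorbell (inferred) */
#define SCU_IPC_STATUS		0x04	/* bit 0 = busy (see scu_busy_loop) */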
180
181static void oaktrail_hdmi_audio_enable(struct drm_device *dev)
182{
183 struct drm_psb_private *dev_priv = dev->dev_private;
184 struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
185
186 HDMI_WRITE(HDMI_HCR, 0x67);
187 HDMI_READ(HDMI_HCR);
188
189 HDMI_WRITE(0x51a8, 0x10);
190 HDMI_READ(0x51a8);
191
192 HDMI_WRITE(HDMI_AUDIO_CTRL, 0x1);
193 HDMI_READ(HDMI_AUDIO_CTRL);
194}
195
196static void oaktrail_hdmi_audio_disable(struct drm_device *dev)
197{
198 struct drm_psb_private *dev_priv = dev->dev_private;
199 struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
200
201 HDMI_WRITE(0x51a8, 0x0);
202 HDMI_READ(0x51a8);
203
204 HDMI_WRITE(HDMI_AUDIO_CTRL, 0x0);
205 HDMI_READ(HDMI_AUDIO_CTRL);
206
207 HDMI_WRITE(HDMI_HCR, 0x47);
208 HDMI_READ(HDMI_HCR);
209}
210
211void oaktrail_crtc_hdmi_dpms(struct drm_crtc *crtc, int mode)
212{
213 struct drm_device *dev = crtc->dev;
214 u32 temp;
215
216 switch (mode) {
217 case DRM_MODE_DPMS_OFF:
218 /* Disable VGACNTRL */
219 REG_WRITE(VGACNTRL, 0x80000000);
220
221 /* Disable plane */
222 temp = REG_READ(DSPBCNTR);
223 if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
224 REG_WRITE(DSPBCNTR, temp & ~DISPLAY_PLANE_ENABLE);
225 REG_READ(DSPBCNTR);
226 /* Flush the plane changes */
227 REG_WRITE(DSPBSURF, REG_READ(DSPBSURF));
228 REG_READ(DSPBSURF);
229 }
230
231 /* Disable pipe B */
232 temp = REG_READ(PIPEBCONF);
233 if ((temp & PIPEACONF_ENABLE) != 0) {
234 REG_WRITE(PIPEBCONF, temp & ~PIPEACONF_ENABLE);
235 REG_READ(PIPEBCONF);
236 }
237
238 /* Disable LNW Pipes, etc */
239 temp = REG_READ(PCH_PIPEBCONF);
240 if ((temp & PIPEACONF_ENABLE) != 0) {
241 REG_WRITE(PCH_PIPEBCONF, temp & ~PIPEACONF_ENABLE);
242 REG_READ(PCH_PIPEBCONF);
243 }
244 /* wait for pipe off */
245 udelay(150);
246 /* Disable dpll */
247 temp = REG_READ(DPLL_CTRL);
248 if ((temp & DPLL_PWRDN) == 0) {
249 REG_WRITE(DPLL_CTRL, temp | (DPLL_PWRDN | DPLL_RESET));
250 REG_WRITE(DPLL_STATUS, 0x1);
251 }
252 /* wait for dpll off */
253 udelay(150);
254 break;
255 case DRM_MODE_DPMS_ON:
256 case DRM_MODE_DPMS_STANDBY:
257 case DRM_MODE_DPMS_SUSPEND:
258 /* Enable dpll */
259 temp = REG_READ(DPLL_CTRL);
260 if ((temp & DPLL_PWRDN) != 0) {
261 REG_WRITE(DPLL_CTRL, temp & ~(DPLL_PWRDN | DPLL_RESET));
262 temp = REG_READ(DPLL_CLK_ENABLE);
263 REG_WRITE(DPLL_CLK_ENABLE, temp | DPLL_EN_DISP | DPLL_SEL_HDMI | DPLL_EN_HDMI);
264 REG_READ(DPLL_CLK_ENABLE);
265 }
266 /* wait for dpll warm up */
267 udelay(150);
268
269 /* Enable pipe B */
270 temp = REG_READ(PIPEBCONF);
271 if ((temp & PIPEACONF_ENABLE) == 0) {
272 REG_WRITE(PIPEBCONF, temp | PIPEACONF_ENABLE);
273 REG_READ(PIPEBCONF);
274 }
275
276 /* Enable LNW Pipe B */
277 temp = REG_READ(PCH_PIPEBCONF);
278 if ((temp & PIPEACONF_ENABLE) == 0) {
279 REG_WRITE(PCH_PIPEBCONF, temp | PIPEACONF_ENABLE);
280 REG_READ(PCH_PIPEBCONF);
281 }
282 wait_for_vblank(dev);
283
284 /* Enable plane */
285 temp = REG_READ(DSPBCNTR);
286 if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
287 REG_WRITE(DSPBCNTR, temp | DISPLAY_PLANE_ENABLE);
288 /* Flush the plane changes */
289 REG_WRITE(DSPBSURF, REG_READ(DSPBSURF));
290 REG_READ(DSPBSURF);
291 }
292 psb_intel_crtc_load_lut(crtc);
293 }
294 /* DSPARB */
295 REG_WRITE(DSPARB, 0x00003fbf);
296 /* FW1 */
297 REG_WRITE(0x70034, 0x3f880a0a);
298 /* FW2 */
299 REG_WRITE(0x70038, 0x0b060808);
300 /* FW4 */
301 REG_WRITE(0x70050, 0x08030404);
302 /* FW5 */
303 REG_WRITE(0x70054, 0x04040404);
304 /* LNC Chicken Bits */
305 REG_WRITE(0x70400, 0x4000);
306}
307
308
309static void oaktrail_hdmi_dpms(struct drm_encoder *encoder, int mode)
310{
311 static int dpms_mode = -1; /* cached last DPMS mode; function-local static, so shared by all encoders */
312
313 struct drm_device *dev = encoder->dev;
314 struct drm_psb_private *dev_priv = dev->dev_private;
315 struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
316 u32 temp;
317
318 if (dpms_mode == mode)
319 return;
320
321 if (mode != DRM_MODE_DPMS_ON)
322 temp = 0x0;
323 else
324 temp = 0x99;
325
326 dpms_mode = mode;
327 HDMI_WRITE(HDMI_VIDEO_REG, temp);
328}
329
330static unsigned int htotal_calculate(struct drm_display_mode *mode)
331{
332 u32 htotal, new_crtc_htotal;
333
334 htotal = (mode->crtc_hdisplay - 1) | ((mode->crtc_htotal - 1) << 16);
335
336 /*
337 * 1024 x 768 new_crtc_htotal = 0x1024;
338 * 1280 x 1024 new_crtc_htotal = 0x0c34;
339 */
340 new_crtc_htotal = (mode->crtc_htotal - 1) * 200 * 1000 / mode->clock;
341
342 return (mode->crtc_hdisplay - 1) | (new_crtc_htotal << 16);
343}
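
The magic scaling above is easier to trust with numbers plugged in; the
figures below use the standard VESA 1024x768@60 timing, the first case
quoted in the function's own comment.

	/*
	 * Worked example (1024x768@60: crtc_htotal = 1344, clock = 65000 kHz):
	 *
	 *	(1344 - 1) * 200 * 1000 / 65000 = 4132 = 0x1024
	 *
	 * matching the "new_crtc_htotal = 0x1024" note above. The local
	 * 'htotal' computed first is a dead store; only the recomputed
	 * new_crtc_htotal reaches the return value.
	 */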
344
345static void oaktrail_hdmi_find_dpll(struct drm_crtc *crtc, int target,
346 int refclk, struct oaktrail_hdmi_clock *best_clock)
347{
348 int np_min, np_max, nr_min, nr_max;
349 int np, nr, nf;
350
351 np_min = DIV_ROUND_UP(oaktrail_hdmi_limit.vco.min, target * 10);
352 np_max = oaktrail_hdmi_limit.vco.max / (target * 10);
353 if (np_min < oaktrail_hdmi_limit.np.min)
354 np_min = oaktrail_hdmi_limit.np.min;
355 if (np_max > oaktrail_hdmi_limit.np.max)
356 np_max = oaktrail_hdmi_limit.np.max;
357
358 nr_min = DIV_ROUND_UP((refclk * 1000), (target * 10 * np_max));
359 nr_max = DIV_ROUND_UP((refclk * 1000), (target * 10 * np_min));
360 if (nr_min < oaktrail_hdmi_limit.nr.min)
361 nr_min = oaktrail_hdmi_limit.nr.min;
362 if (nr_max > oaktrail_hdmi_limit.nr.max)
363 nr_max = oaktrail_hdmi_limit.nr.max;
364
365 np = DIV_ROUND_UP((refclk * 1000), (target * 10 * nr_max));
366 nr = DIV_ROUND_UP((refclk * 1000), (target * 10 * np));
367 nf = DIV_ROUND_CLOSEST((target * 10 * np * nr), refclk);
368 DRM_DEBUG_KMS("np, nr, nf %d %d %d\n", np, nr, nf);
369
370 /*
371 * 1024 x 768 np = 1; nr = 0x26; nf = 0x0fd8000;
372 * 1280 x 1024 np = 1; nr = 0x17; nf = 0x1034000;
373 */
374 best_clock->np = np;
375 best_clock->nr = nr - 1;
376 best_clock->nf = (nf << 14);
377}
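
The nf computation implies the usual PLL relation
dot = refclk * nf / (10 * np * nr); plugging in the 1024x768 case from the
comment checks out against the stored values:

	/*
	 * target = 65000 kHz, refclk = 25000:
	 *	np = 1, nr = 39, nf = 65000 * 10 * 1 * 39 / 25000 = 1014
	 * stored as nr - 1 = 38 = 0x26 and nf << 14 = 0x0fd8000,
	 * exactly the values quoted in the comment above.
	 */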
378
379int oaktrail_crtc_hdmi_mode_set(struct drm_crtc *crtc,
380 struct drm_display_mode *mode,
381 struct drm_display_mode *adjusted_mode,
382 int x, int y,
383 struct drm_framebuffer *old_fb)
384{
385 struct drm_device *dev = crtc->dev;
386 struct drm_psb_private *dev_priv = dev->dev_private;
387 struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
388 int pipe = 1;
389 int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
390 int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
391 int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
392 int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
393 int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
394 int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
395 int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
396 int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
397 int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
398 int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
399 int refclk;
400 struct oaktrail_hdmi_clock clock;
401 u32 dspcntr, pipeconf, dpll, temp;
402 int dspcntr_reg = DSPBCNTR;
403
404 /* Disable the VGA plane that we never use */
405 REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
406
407 /* XXX: Disable the panel fitter if it was on our pipe */
408
409 /* Disable dpll if necessary */
410 dpll = REG_READ(DPLL_CTRL);
411 if ((dpll & DPLL_PWRDN) == 0) {
412 REG_WRITE(DPLL_CTRL, dpll | (DPLL_PWRDN | DPLL_RESET));
413 REG_WRITE(DPLL_DIV_CTRL, 0x00000000);
414 REG_WRITE(DPLL_STATUS, 0x1);
415 }
416 udelay(150);
417
418 /* reset controller: FIXME - can we sort out the ioremap mess ? */
419 iounmap(hdmi_dev->regs);
420 oaktrail_hdmi_reset(dev);
421
422 /* program and enable dpll */
423 refclk = 25000;
424 oaktrail_hdmi_find_dpll(crtc, adjusted_mode->clock, refclk, &clock);
425
426 /* Setting DPLL */
427 dpll = REG_READ(DPLL_CTRL);
428 dpll &= ~DPLL_PDIV_MASK;
429 dpll &= ~(DPLL_PWRDN | DPLL_RESET);
430 REG_WRITE(DPLL_CTRL, 0x00000008);
431 REG_WRITE(DPLL_DIV_CTRL, ((clock.nf << 6) | clock.nr));
432 REG_WRITE(DPLL_ADJUST, ((clock.nf >> 14) - 1));
433 REG_WRITE(DPLL_CTRL, (dpll | (clock.np << DPLL_PDIV_SHIFT) | DPLL_ENSTAT | DPLL_DITHEN));
434 REG_WRITE(DPLL_UPDATE, 0x80000000);
435 REG_WRITE(DPLL_CLK_ENABLE, 0x80050102);
436 udelay(150);
437
438 hdmi_dev->regs = ioremap(hdmi_dev->mmio, hdmi_dev->mmio_len);
439 if (hdmi_dev->regs == NULL) {
440 DRM_ERROR("failed to do hdmi mmio mapping\n");
441 return -ENOMEM;
442 }
443
444 /* configure HDMI */
445 HDMI_WRITE(0x1004, 0x1fd);
446 HDMI_WRITE(0x2000, 0x1);
447 HDMI_WRITE(0x2008, 0x0);
448 HDMI_WRITE(0x3130, 0x8);
449 HDMI_WRITE(0x101c, 0x1800810);
450
451 temp = htotal_calculate(adjusted_mode);
452 REG_WRITE(htot_reg, temp);
453 REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
454 REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
455 REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) | ((adjusted_mode->crtc_vtotal - 1) << 16));
456 REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) | ((adjusted_mode->crtc_vblank_end - 1) << 16));
457 REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
458 REG_WRITE(pipesrc_reg,
459 ((mode->crtc_hdisplay - 1) << 16) | (mode->crtc_vdisplay - 1));
460
461 REG_WRITE(PCH_HTOTAL_B, (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
462 REG_WRITE(PCH_HBLANK_B, (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
463 REG_WRITE(PCH_HSYNC_B, (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
464 REG_WRITE(PCH_VTOTAL_B, (adjusted_mode->crtc_vdisplay - 1) | ((adjusted_mode->crtc_vtotal - 1) << 16));
465 REG_WRITE(PCH_VBLANK_B, (adjusted_mode->crtc_vblank_start - 1) | ((adjusted_mode->crtc_vblank_end - 1) << 16));
466 REG_WRITE(PCH_VSYNC_B, (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
467 REG_WRITE(PCH_PIPEBSRC,
468 ((mode->crtc_hdisplay - 1) << 16) | (mode->crtc_vdisplay - 1));
469
470 temp = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
471 HDMI_WRITE(HDMI_HBLANK_A, ((adjusted_mode->crtc_hdisplay - 1) << 16) | temp);
472
473 REG_WRITE(dspsize_reg,
474 ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
475 REG_WRITE(dsppos_reg, 0);
476
477 /* Flush the plane changes */
478 {
479 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
480 crtc_funcs->mode_set_base(crtc, x, y, old_fb);
481 }
482
483 /* Set up the display plane register */
484 dspcntr = REG_READ(dspcntr_reg);
485 dspcntr |= DISPPLANE_GAMMA_ENABLE;
486 dspcntr |= DISPPLANE_SEL_PIPE_B;
487 dspcntr |= DISPLAY_PLANE_ENABLE;
488
489 /* setup pipeconf */
490 pipeconf = REG_READ(pipeconf_reg);
491 pipeconf |= PIPEACONF_ENABLE;
492
493 REG_WRITE(pipeconf_reg, pipeconf);
494 REG_READ(pipeconf_reg);
495
496 REG_WRITE(PCH_PIPEBCONF, pipeconf);
497 REG_READ(PCH_PIPEBCONF);
498 wait_for_vblank(dev);
499
500 REG_WRITE(dspcntr_reg, dspcntr);
501 wait_for_vblank(dev);
502
503 return 0;
504}
505
506static int oaktrail_hdmi_mode_valid(struct drm_connector *connector,
507 struct drm_display_mode *mode)
508{
509 if (mode->clock > 165000)
510 return MODE_CLOCK_HIGH;
511 if (mode->clock < 20000)
512 return MODE_CLOCK_LOW;
513
514 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
515 return MODE_NO_DBLESCAN;
516
517 return MODE_OK;
518}
519
520static bool oaktrail_hdmi_mode_fixup(struct drm_encoder *encoder,
521 struct drm_display_mode *mode,
522 struct drm_display_mode *adjusted_mode)
523{
524 return true;
525}
526
527static enum drm_connector_status
528oaktrail_hdmi_detect(struct drm_connector *connector, bool force)
529{
530 enum drm_connector_status status;
531 struct drm_device *dev = connector->dev;
532 struct drm_psb_private *dev_priv = dev->dev_private;
533 struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
534 u32 temp;
535
536 temp = HDMI_READ(HDMI_HSR);
537 DRM_DEBUG_KMS("HDMI_HSR %x\n", temp);
538
539 if ((temp & HDMI_DETECT_HDP) != 0)
540 status = connector_status_connected;
541 else
542 status = connector_status_disconnected;
543
544 return status;
545}
546
547static const unsigned char raw_edid[] = {
548 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x10, 0xac, 0x2f, 0xa0,
549 0x53, 0x55, 0x33, 0x30, 0x16, 0x13, 0x01, 0x03, 0x0e, 0x3a, 0x24, 0x78,
550 0xea, 0xe9, 0xf5, 0xac, 0x51, 0x30, 0xb4, 0x25, 0x11, 0x50, 0x54, 0xa5,
551 0x4b, 0x00, 0x81, 0x80, 0xa9, 0x40, 0x71, 0x4f, 0xb3, 0x00, 0x01, 0x01,
552 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x28, 0x3c, 0x80, 0xa0, 0x70, 0xb0,
553 0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x46, 0x6c, 0x21, 0x00, 0x00, 0x1a,
554 0x00, 0x00, 0x00, 0xff, 0x00, 0x47, 0x4e, 0x37, 0x32, 0x31, 0x39, 0x35,
555 0x52, 0x30, 0x33, 0x55, 0x53, 0x0a, 0x00, 0x00, 0x00, 0xfc, 0x00, 0x44,
556 0x45, 0x4c, 0x4c, 0x20, 0x32, 0x37, 0x30, 0x39, 0x57, 0x0a, 0x20, 0x20,
557 0x00, 0x00, 0x00, 0xfd, 0x00, 0x38, 0x4c, 0x1e, 0x53, 0x11, 0x00, 0x0a,
558 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00, 0x8d
559};
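
Since the driver can end up trusting this hard-coded block, a cheap sanity
check is the EDID block checksum: all 128 bytes of a well-formed base block
sum to 0 modulo 256 (the trailing 0x8d is the checksum byte). A sketch only,
not part of the driver:

static bool edid_block_sum_ok(const unsigned char *blk)
{
	unsigned int i, sum = 0;

	for (i = 0; i < 128; i++)
		sum += blk[i];
	return (sum & 0xff) == 0;	/* true for any well-formed base block */
}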
560
561static int oaktrail_hdmi_get_modes(struct drm_connector *connector)
562{
563 struct drm_device *dev = connector->dev;
564 struct drm_psb_private *dev_priv = dev->dev_private;
565 struct i2c_adapter *i2c_adap;
566 struct edid *edid;
567 struct drm_display_mode *mode, *t;
568 int i = 0, ret = 0;
569
570 i2c_adap = i2c_get_adapter(3);
571 if (i2c_adap == NULL) {
572 DRM_ERROR("No ddc adapter available!\n");
573 edid = (struct edid *)raw_edid;
574 } else {
575 edid = (struct edid *)raw_edid;
576 /* FIXME ? edid = drm_get_edid(connector, i2c_adap); */
577 }
578
579 if (edid) {
580 drm_mode_connector_update_edid_property(connector, edid);
581 ret = drm_add_edid_modes(connector, edid);
582 connector->display_info.raw_edid = NULL;
583 }
584
585 /*
586 * prune modes that require frame buffer bigger than stolen mem
587 */
588 list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
589 if ((mode->hdisplay * mode->vdisplay * 4) >= dev_priv->vram_stolen_size) {
590 i++;
591 drm_mode_remove(connector, mode);
592 }
593 }
594 return ret - i;
595}
596
597static void oaktrail_hdmi_mode_set(struct drm_encoder *encoder,
598 struct drm_display_mode *mode,
599 struct drm_display_mode *adjusted_mode)
600{
601 struct drm_device *dev = encoder->dev;
602
603 oaktrail_hdmi_audio_enable(dev);
604 return;
605}
606
607static void oaktrail_hdmi_destroy(struct drm_connector *connector)
608{
609 return;
610}
611
612static const struct drm_encoder_helper_funcs oaktrail_hdmi_helper_funcs = {
613 .dpms = oaktrail_hdmi_dpms,
614 .mode_fixup = oaktrail_hdmi_mode_fixup,
615 .prepare = psb_intel_encoder_prepare,
616 .mode_set = oaktrail_hdmi_mode_set,
617 .commit = psb_intel_encoder_commit,
618};
619
620static const struct drm_connector_helper_funcs
621 oaktrail_hdmi_connector_helper_funcs = {
622 .get_modes = oaktrail_hdmi_get_modes,
623 .mode_valid = oaktrail_hdmi_mode_valid,
624 .best_encoder = psb_intel_best_encoder,
625};
626
627static const struct drm_connector_funcs oaktrail_hdmi_connector_funcs = {
628 .dpms = drm_helper_connector_dpms,
629 .detect = oaktrail_hdmi_detect,
630 .fill_modes = drm_helper_probe_single_connector_modes,
631 .destroy = oaktrail_hdmi_destroy,
632};
633
634static void oaktrail_hdmi_enc_destroy(struct drm_encoder *encoder)
635{
636 drm_encoder_cleanup(encoder);
637}
638
639static const struct drm_encoder_funcs oaktrail_hdmi_enc_funcs = {
640 .destroy = oaktrail_hdmi_enc_destroy,
641};
642
643void oaktrail_hdmi_init(struct drm_device *dev,
644 struct psb_intel_mode_device *mode_dev)
645{
646 struct psb_intel_encoder *psb_intel_encoder;
647 struct psb_intel_connector *psb_intel_connector;
648 struct drm_connector *connector;
649 struct drm_encoder *encoder;
650
651 psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL);
652 if (!psb_intel_encoder)
653 return;
654
655 psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL);
656 if (!psb_intel_connector)
657 goto failed_connector;
658
659 connector = &psb_intel_connector->base;
660 encoder = &psb_intel_encoder->base;
661 drm_connector_init(dev, connector,
662 &oaktrail_hdmi_connector_funcs,
663 DRM_MODE_CONNECTOR_DVID);
664
665 drm_encoder_init(dev, encoder,
666 &oaktrail_hdmi_enc_funcs,
667 DRM_MODE_ENCODER_TMDS);
668
669 psb_intel_connector_attach_encoder(psb_intel_connector,
670 psb_intel_encoder);
671
672 psb_intel_encoder->type = INTEL_OUTPUT_HDMI;
673 drm_encoder_helper_add(encoder, &oaktrail_hdmi_helper_funcs);
674 drm_connector_helper_add(connector, &oaktrail_hdmi_connector_helper_funcs);
675
676 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
677 connector->interlace_allowed = false;
678 connector->doublescan_allowed = false;
679 drm_sysfs_connector_add(connector);
680
681 return;
682
683failed_connector:
684 kfree(psb_intel_encoder);
685}
686
687static DEFINE_PCI_DEVICE_TABLE(hdmi_ids) = {
688 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080d) },
689 {}
690};
691
692void oaktrail_hdmi_setup(struct drm_device *dev)
693{
694 struct drm_psb_private *dev_priv = dev->dev_private;
695 struct pci_dev *pdev;
696 struct oaktrail_hdmi_dev *hdmi_dev;
697 int ret;
698
699 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x080d, NULL);
700 if (!pdev)
701 return;
702
703 hdmi_dev = kzalloc(sizeof(struct oaktrail_hdmi_dev), GFP_KERNEL);
704 if (!hdmi_dev) {
705 dev_err(dev->dev, "failed to allocate memory\n");
706 goto out;
707 }
708
709
710 ret = pci_enable_device(pdev);
711 if (ret) {
712 dev_err(dev->dev, "failed to enable hdmi controller\n");
713 goto free;
714 }
715
716 hdmi_dev->mmio = pci_resource_start(pdev, 0);
717 hdmi_dev->mmio_len = pci_resource_len(pdev, 0);
718 hdmi_dev->regs = ioremap(hdmi_dev->mmio, hdmi_dev->mmio_len);
719 if (!hdmi_dev->regs) {
720 dev_err(dev->dev, "failed to map hdmi mmio\n");
721 goto free;
722 }
723
724 hdmi_dev->dev = pdev;
725 pci_set_drvdata(pdev, hdmi_dev);
726
727 /* Initialize i2c controller */
728 ret = oaktrail_hdmi_i2c_init(hdmi_dev->dev);
729 if (ret)
730 dev_err(dev->dev, "HDMI I2C initialization failed\n");
731
732 dev_priv->hdmi_priv = hdmi_dev;
733 oaktrail_hdmi_audio_disable(dev);
734 return;
735
736free:
737 kfree(hdmi_dev);
738out:
739 return;
740}
741
742void oaktrail_hdmi_teardown(struct drm_device *dev)
743{
744 struct drm_psb_private *dev_priv = dev->dev_private;
745 struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
746 struct pci_dev *pdev;
747
748 if (hdmi_dev) {
749 pdev = hdmi_dev->dev;
750 pci_set_drvdata(pdev, NULL);
751 oaktrail_hdmi_i2c_exit(pdev);
752 iounmap(hdmi_dev->regs);
753 kfree(hdmi_dev);
754 pci_dev_put(pdev);
755 }
756}
757
758/* save HDMI register state */
759void oaktrail_hdmi_save(struct drm_device *dev)
760{
761 struct drm_psb_private *dev_priv = dev->dev_private;
762 struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
763 int i;
764
765 /* dpll */
766 hdmi_dev->saveDPLL_CTRL = PSB_RVDC32(DPLL_CTRL);
767 hdmi_dev->saveDPLL_DIV_CTRL = PSB_RVDC32(DPLL_DIV_CTRL);
768 hdmi_dev->saveDPLL_ADJUST = PSB_RVDC32(DPLL_ADJUST);
769 hdmi_dev->saveDPLL_UPDATE = PSB_RVDC32(DPLL_UPDATE);
770 hdmi_dev->saveDPLL_CLK_ENABLE = PSB_RVDC32(DPLL_CLK_ENABLE);
771
772 /* pipe B */
773 dev_priv->savePIPEBCONF = PSB_RVDC32(PIPEBCONF);
774 dev_priv->savePIPEBSRC = PSB_RVDC32(PIPEBSRC);
775 dev_priv->saveHTOTAL_B = PSB_RVDC32(HTOTAL_B);
776 dev_priv->saveHBLANK_B = PSB_RVDC32(HBLANK_B);
777 dev_priv->saveHSYNC_B = PSB_RVDC32(HSYNC_B);
778 dev_priv->saveVTOTAL_B = PSB_RVDC32(VTOTAL_B);
779 dev_priv->saveVBLANK_B = PSB_RVDC32(VBLANK_B);
780 dev_priv->saveVSYNC_B = PSB_RVDC32(VSYNC_B);
781
782 hdmi_dev->savePCH_PIPEBCONF = PSB_RVDC32(PCH_PIPEBCONF);
783 hdmi_dev->savePCH_PIPEBSRC = PSB_RVDC32(PCH_PIPEBSRC);
784 hdmi_dev->savePCH_HTOTAL_B = PSB_RVDC32(PCH_HTOTAL_B);
785 hdmi_dev->savePCH_HBLANK_B = PSB_RVDC32(PCH_HBLANK_B);
786 hdmi_dev->savePCH_HSYNC_B = PSB_RVDC32(PCH_HSYNC_B);
787 hdmi_dev->savePCH_VTOTAL_B = PSB_RVDC32(PCH_VTOTAL_B);
788 hdmi_dev->savePCH_VBLANK_B = PSB_RVDC32(PCH_VBLANK_B);
789 hdmi_dev->savePCH_VSYNC_B = PSB_RVDC32(PCH_VSYNC_B);
790
791 /* plane */
792 dev_priv->saveDSPBCNTR = PSB_RVDC32(DSPBCNTR);
793 dev_priv->saveDSPBSTRIDE = PSB_RVDC32(DSPBSTRIDE);
794 dev_priv->saveDSPBADDR = PSB_RVDC32(DSPBBASE);
795 dev_priv->saveDSPBSURF = PSB_RVDC32(DSPBSURF);
796 dev_priv->saveDSPBLINOFF = PSB_RVDC32(DSPBLINOFF);
797 dev_priv->saveDSPBTILEOFF = PSB_RVDC32(DSPBTILEOFF);
798
799 /* cursor B */
800 dev_priv->saveDSPBCURSOR_CTRL = PSB_RVDC32(CURBCNTR);
801 dev_priv->saveDSPBCURSOR_BASE = PSB_RVDC32(CURBBASE);
802 dev_priv->saveDSPBCURSOR_POS = PSB_RVDC32(CURBPOS);
803
804 /* save palette */
805 for (i = 0; i < 256; i++)
806 dev_priv->save_palette_b[i] = PSB_RVDC32(PALETTE_B + (i << 2));
807}
808
809/* restore HDMI register state */
810void oaktrail_hdmi_restore(struct drm_device *dev)
811{
812 struct drm_psb_private *dev_priv = dev->dev_private;
813 struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
814 int i;
815
816 /* dpll */
817 PSB_WVDC32(hdmi_dev->saveDPLL_CTRL, DPLL_CTRL);
818 PSB_WVDC32(hdmi_dev->saveDPLL_DIV_CTRL, DPLL_DIV_CTRL);
819 PSB_WVDC32(hdmi_dev->saveDPLL_ADJUST, DPLL_ADJUST);
820 PSB_WVDC32(hdmi_dev->saveDPLL_UPDATE, DPLL_UPDATE);
821 PSB_WVDC32(hdmi_dev->saveDPLL_CLK_ENABLE, DPLL_CLK_ENABLE);
822 DRM_UDELAY(150);
823
824 /* pipe */
825 PSB_WVDC32(dev_priv->savePIPEBSRC, PIPEBSRC);
826 PSB_WVDC32(dev_priv->saveHTOTAL_B, HTOTAL_B);
827 PSB_WVDC32(dev_priv->saveHBLANK_B, HBLANK_B);
828 PSB_WVDC32(dev_priv->saveHSYNC_B, HSYNC_B);
829 PSB_WVDC32(dev_priv->saveVTOTAL_B, VTOTAL_B);
830 PSB_WVDC32(dev_priv->saveVBLANK_B, VBLANK_B);
831 PSB_WVDC32(dev_priv->saveVSYNC_B, VSYNC_B);
832
833 PSB_WVDC32(hdmi_dev->savePCH_PIPEBSRC, PCH_PIPEBSRC);
834 PSB_WVDC32(hdmi_dev->savePCH_HTOTAL_B, PCH_HTOTAL_B);
835 PSB_WVDC32(hdmi_dev->savePCH_HBLANK_B, PCH_HBLANK_B);
836 PSB_WVDC32(hdmi_dev->savePCH_HSYNC_B, PCH_HSYNC_B);
837 PSB_WVDC32(hdmi_dev->savePCH_VTOTAL_B, PCH_VTOTAL_B);
838 PSB_WVDC32(hdmi_dev->savePCH_VBLANK_B, PCH_VBLANK_B);
839 PSB_WVDC32(hdmi_dev->savePCH_VSYNC_B, PCH_VSYNC_B);
840
841 PSB_WVDC32(dev_priv->savePIPEBCONF, PIPEBCONF);
842 PSB_WVDC32(hdmi_dev->savePCH_PIPEBCONF, PCH_PIPEBCONF);
843
844 /* plane */
845 PSB_WVDC32(dev_priv->saveDSPBLINOFF, DSPBLINOFF);
846 PSB_WVDC32(dev_priv->saveDSPBSTRIDE, DSPBSTRIDE);
847 PSB_WVDC32(dev_priv->saveDSPBTILEOFF, DSPBTILEOFF);
848 PSB_WVDC32(dev_priv->saveDSPBCNTR, DSPBCNTR);
849 PSB_WVDC32(dev_priv->saveDSPBSURF, DSPBSURF);
850
851 /* cursor B */
852 PSB_WVDC32(dev_priv->saveDSPBCURSOR_CTRL, CURBCNTR);
853 PSB_WVDC32(dev_priv->saveDSPBCURSOR_POS, CURBPOS);
854 PSB_WVDC32(dev_priv->saveDSPBCURSOR_BASE, CURBBASE);
855
856 /* restore palette */
857 for (i = 0; i < 256; i++)
858 PSB_WVDC32(dev_priv->save_palette_b[i], PALETTE_B + (i << 2));
859}
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
new file mode 100644
index 00000000000..705440874ac
--- /dev/null
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
@@ -0,0 +1,328 @@
1/*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Li Peng <peng.li@intel.com>
25 */
26
27#include <linux/export.h>
28#include <linux/mutex.h>
29#include <linux/pci.h>
30#include <linux/i2c.h>
31#include <linux/interrupt.h>
32#include <linux/delay.h>
33#include "psb_drv.h"
34
35#define HDMI_READ(reg) readl(hdmi_dev->regs + (reg))
36#define HDMI_WRITE(reg, val) writel(val, hdmi_dev->regs + (reg))
37
38#define HDMI_HCR 0x1000
39#define HCR_DETECT_HDP (1 << 6)
40#define HCR_ENABLE_HDCP (1 << 5)
41#define HCR_ENABLE_AUDIO (1 << 2)
42#define HCR_ENABLE_PIXEL (1 << 1)
43#define HCR_ENABLE_TMDS (1 << 0)
44#define HDMI_HICR 0x1004
45#define HDMI_INTR_I2C_ERROR (1 << 4)
46#define HDMI_INTR_I2C_FULL (1 << 3)
47#define HDMI_INTR_I2C_DONE (1 << 2)
48#define HDMI_INTR_HPD (1 << 0)
49#define HDMI_HSR 0x1008
50#define HDMI_HISR 0x100C
51#define HDMI_HI2CRDB0 0x1200
52#define HDMI_HI2CHCR 0x1240
53#define HI2C_HDCP_WRITE (0 << 2)
54#define HI2C_HDCP_RI_READ (1 << 2)
55#define HI2C_HDCP_READ (2 << 2)
56#define HI2C_EDID_READ (3 << 2)
57#define HI2C_READ_CONTINUE (1 << 1)
58#define HI2C_ENABLE_TRANSACTION (1 << 0)
59
60#define HDMI_ICRH 0x1100
61#define HDMI_HI2CTDR0 0x1244
62#define HDMI_HI2CTDR1 0x1248
63
64#define I2C_STAT_INIT 0
65#define I2C_READ_DONE 1
66#define I2C_TRANSACTION_DONE 2
67
68struct hdmi_i2c_dev {
69 struct i2c_adapter *adap;
70 struct mutex i2c_lock;
71 struct completion complete;
72 int status;
73 struct i2c_msg *msg;
74 int buf_offset;
75};
76
77static void hdmi_i2c_irq_enable(struct oaktrail_hdmi_dev *hdmi_dev)
78{
79 u32 temp;
80
81 temp = HDMI_READ(HDMI_HICR);
82 temp |= (HDMI_INTR_I2C_ERROR | HDMI_INTR_I2C_FULL | HDMI_INTR_I2C_DONE);
83 HDMI_WRITE(HDMI_HICR, temp);
84 HDMI_READ(HDMI_HICR);
85}
86
87static void hdmi_i2c_irq_disable(struct oaktrail_hdmi_dev *hdmi_dev)
88{
89 HDMI_WRITE(HDMI_HICR, 0x0);
90 HDMI_READ(HDMI_HICR);
91}
92
93static int xfer_read(struct i2c_adapter *adap, struct i2c_msg *pmsg)
94{
95 struct oaktrail_hdmi_dev *hdmi_dev = i2c_get_adapdata(adap);
96 struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
97 u32 temp;
98
99 i2c_dev->status = I2C_STAT_INIT;
100 i2c_dev->msg = pmsg;
101 i2c_dev->buf_offset = 0;
102 INIT_COMPLETION(i2c_dev->complete);
103
104 /* Enable I2C transaction */
105 temp = ((pmsg->len) << 20) | HI2C_EDID_READ | HI2C_ENABLE_TRANSACTION;
106 HDMI_WRITE(HDMI_HI2CHCR, temp);
107 HDMI_READ(HDMI_HI2CHCR);
108
109 while (i2c_dev->status != I2C_TRANSACTION_DONE)
110 wait_for_completion_interruptible_timeout(&i2c_dev->complete,
111 10 * HZ);
112
113 return 0;
114}
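
If the DONE interrupt is lost, the wait loop above never terminates:
wait_for_completion_interruptible_timeout() returning 0 on timeout simply
re-enters the loop. A bounded sketch; the retry cap and the error code are
illustrative assumptions:

static int xfer_read_wait_bounded(struct hdmi_i2c_dev *i2c_dev)
{
	int tries = 3;	/* assumed cap: at most ~30 s in total */

	while (i2c_dev->status != I2C_TRANSACTION_DONE && tries--)
		wait_for_completion_interruptible_timeout(&i2c_dev->complete,
							  10 * HZ);
	return i2c_dev->status == I2C_TRANSACTION_DONE ? 0 : -ETIMEDOUT;
}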
115
116static int xfer_write(struct i2c_adapter *adap, struct i2c_msg *pmsg)
117{
118 /*
119 * XXX: i2c writes don't seem to be needed for the EDID probe, so do nothing
120 */
121 return 0;
122}
123
124static int oaktrail_hdmi_i2c_access(struct i2c_adapter *adap,
125 struct i2c_msg *pmsg,
126 int num)
127{
128 struct oaktrail_hdmi_dev *hdmi_dev = i2c_get_adapdata(adap);
129 struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
130 int i, err = 0;
131
132 mutex_lock(&i2c_dev->i2c_lock);
133
134 /* Enable i2c unit */
135 HDMI_WRITE(HDMI_ICRH, 0x00008760);
136
137 /* Enable irq */
138 hdmi_i2c_irq_enable(hdmi_dev);
139 for (i = 0; i < num; i++) {
140 if (pmsg->len && pmsg->buf) {
141 if (pmsg->flags & I2C_M_RD)
142 err = xfer_read(adap, pmsg);
143 else
144 err = xfer_write(adap, pmsg);
145 }
146 pmsg++; /* next message */
147 }
148
149 /* Disable irq */
150 hdmi_i2c_irq_disable(hdmi_dev);
151
152 mutex_unlock(&i2c_dev->i2c_lock);
153
154 return i;
155}
156
157static u32 oaktrail_hdmi_i2c_func(struct i2c_adapter *adapter)
158{
159 return I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR;
160}
161
162static const struct i2c_algorithm oaktrail_hdmi_i2c_algorithm = {
163 .master_xfer = oaktrail_hdmi_i2c_access,
164 .functionality = oaktrail_hdmi_i2c_func,
165};
166
167static struct i2c_adapter oaktrail_hdmi_i2c_adapter = {
168 .name = "oaktrail_hdmi_i2c",
169 .nr = 3,
170 .owner = THIS_MODULE,
171 .class = I2C_CLASS_DDC,
172 .algo = &oaktrail_hdmi_i2c_algorithm,
173};
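
The fixed .nr = 3 is the contract that lets oaktrail_hdmi_get_modes() in
oaktrail_hdmi.c reach this adapter. A consumer-side sketch; note the matching
i2c_put_adapter() to drop the reference, which the get_modes path omits:

	struct i2c_adapter *adap = i2c_get_adapter(3);	/* same bus number as .nr */
	if (adap) {
		/* ... use the adapter for DDC transfers ... */
		i2c_put_adapter(adap);
	}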
174
175static void hdmi_i2c_read(struct oaktrail_hdmi_dev *hdmi_dev)
176{
177 struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
178 struct i2c_msg *msg = i2c_dev->msg;
179 u8 *buf = msg->buf;
180 u32 temp;
181 int i, offset;
182
183 offset = i2c_dev->buf_offset;
184 for (i = 0; i < 0x10; i++) {
185 temp = HDMI_READ(HDMI_HI2CRDB0 + (i * 4));
186 memcpy(buf + (offset + i * 4), &temp, 4);
187 }
188 i2c_dev->buf_offset += (0x10 * 4);
189
190 /* clearing read buffer full intr */
191 temp = HDMI_READ(HDMI_HISR);
192 HDMI_WRITE(HDMI_HISR, temp | HDMI_INTR_I2C_FULL);
193 HDMI_READ(HDMI_HISR);
194
195 /* continue read transaction */
196 temp = HDMI_READ(HDMI_HI2CHCR);
197 HDMI_WRITE(HDMI_HI2CHCR, temp | HI2C_READ_CONTINUE);
198 HDMI_READ(HDMI_HI2CHCR);
199
200 i2c_dev->status = I2C_READ_DONE;
201 return;
202}
203
204static void hdmi_i2c_transaction_done(struct oaktrail_hdmi_dev *hdmi_dev)
205{
206 struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
207 u32 temp;
208
209 /* clear transaction done intr */
210 temp = HDMI_READ(HDMI_HISR);
211 HDMI_WRITE(HDMI_HISR, temp | HDMI_INTR_I2C_DONE);
212 HDMI_READ(HDMI_HISR);
213
214
215 temp = HDMI_READ(HDMI_HI2CHCR);
216 HDMI_WRITE(HDMI_HI2CHCR, temp & ~HI2C_ENABLE_TRANSACTION);
217 HDMI_READ(HDMI_HI2CHCR);
218
219 i2c_dev->status = I2C_TRANSACTION_DONE;
220 return;
221}
222
223static irqreturn_t oaktrail_hdmi_i2c_handler(int this_irq, void *dev)
224{
225 struct oaktrail_hdmi_dev *hdmi_dev = dev;
226 struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
227 u32 stat;
228
229 stat = HDMI_READ(HDMI_HISR);
230
231 if (stat & HDMI_INTR_HPD) {
232 HDMI_WRITE(HDMI_HISR, stat | HDMI_INTR_HPD);
233 HDMI_READ(HDMI_HISR);
234 }
235
236 if (stat & HDMI_INTR_I2C_FULL)
237 hdmi_i2c_read(hdmi_dev);
238
239 if (stat & HDMI_INTR_I2C_DONE)
240 hdmi_i2c_transaction_done(hdmi_dev);
241
242 complete(&i2c_dev->complete);
243
244 return IRQ_HANDLED;
245}
246
247/*
248 * choose alternate function 2 of GPIO pin 52, 53,
249 * which is used by HDMI I2C logic
250 */
251static void oaktrail_hdmi_i2c_gpio_fix(void)
252{
253 void *base;
254 unsigned int gpio_base = 0xff12c000;
255 int gpio_len = 0x1000;
256 u32 temp;
257
258 base = ioremap((resource_size_t)gpio_base, gpio_len);
259 if (base == NULL) {
260 DRM_ERROR("gpio ioremap fail\n");
261 return;
262 }
263
264 temp = readl(base + 0x44);
265 DRM_DEBUG_DRIVER("old gpio val %x\n", temp);
266 writel((temp | 0x00000a00), (base + 0x44));
267 temp = readl(base + 0x44);
268 DRM_DEBUG_DRIVER("new gpio val %x\n", temp);
269
270 iounmap(base);
271}
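
A possible reading of the magic 0x00000a00 (an inference from the function
comment, not from documentation): if register 0x44 packs one 2-bit mux field
per pin, pin 52 at bits [9:8] and pin 53 at bits [11:10], then OR-ing in bits
9 and 11 drives each field's high bit, selecting alternate function 2 when
the low bits are clear.

	/* 0x00000a00 = (2 << 8) | (2 << 10): AF2 for pins 52 and 53
	 * under the assumed 2-bits-per-pin layout described above.
	 */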
272
273int oaktrail_hdmi_i2c_init(struct pci_dev *dev)
274{
275 struct oaktrail_hdmi_dev *hdmi_dev;
276 struct hdmi_i2c_dev *i2c_dev;
277 int ret;
278
279 hdmi_dev = pci_get_drvdata(dev);
280
281 i2c_dev = kzalloc(sizeof(struct hdmi_i2c_dev), GFP_KERNEL);
282 if (i2c_dev == NULL) {
283 DRM_ERROR("Can't allocate interface\n");
284 ret = -ENOMEM;
285 goto exit;
286 }
287
288 i2c_dev->adap = &oaktrail_hdmi_i2c_adapter;
289 i2c_dev->status = I2C_STAT_INIT;
290 init_completion(&i2c_dev->complete);
291 mutex_init(&i2c_dev->i2c_lock);
292 i2c_set_adapdata(&oaktrail_hdmi_i2c_adapter, hdmi_dev);
293 hdmi_dev->i2c_dev = i2c_dev;
294
295 /* Enable HDMI I2C function on gpio */
296 oaktrail_hdmi_i2c_gpio_fix();
297
298 /* request irq */
299 ret = request_irq(dev->irq, oaktrail_hdmi_i2c_handler, IRQF_SHARED,
300 oaktrail_hdmi_i2c_adapter.name, hdmi_dev);
301 if (ret) {
302 DRM_ERROR("Failed to request IRQ for I2C controller\n");
303 goto err;
304 }
305
306 /* Adapter registration */
307 ret = i2c_add_numbered_adapter(&oaktrail_hdmi_i2c_adapter);
308 return ret;
309
310err:
311 kfree(i2c_dev);
312exit:
313 return ret;
314}
315
316void oaktrail_hdmi_i2c_exit(struct pci_dev *dev)
317{
318 struct oaktrail_hdmi_dev *hdmi_dev;
319 struct hdmi_i2c_dev *i2c_dev;
320
321 hdmi_dev = pci_get_drvdata(dev);
322 if (i2c_del_adapter(&oaktrail_hdmi_i2c_adapter))
323 DRM_DEBUG_DRIVER("Failed to delete hdmi-i2c adapter\n");
324
325 i2c_dev = hdmi_dev->i2c_dev;
326 kfree(i2c_dev);
327 free_irq(dev->irq, hdmi_dev);
328}
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
new file mode 100644
index 00000000000..238bbe10530
--- /dev/null
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -0,0 +1,449 @@
1/*
2 * Copyright © 2006-2009 Intel Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Authors:
18 * Eric Anholt <eric@anholt.net>
19 * Dave Airlie <airlied@linux.ie>
20 * Jesse Barnes <jesse.barnes@intel.com>
21 */
22
23#include <linux/i2c.h>
24#include <drm/drmP.h>
25#include <asm/mrst.h>
26
27#include "intel_bios.h"
28#include "psb_drv.h"
29#include "psb_intel_drv.h"
30#include "psb_intel_reg.h"
31#include "power.h"
32#include <linux/pm_runtime.h>
33
34/*
35 * The max/min PWM frequency in BPCR[31:17]. The smallest value that
36 * fits in this 15-bit field is 1 (not 0); the hardware shifts it left
37 * by one bit to get the actual 16-bit value the field encodes.
38 */
39#define MRST_BLC_MAX_PWM_REG_FREQ 0xFFFF
40#define BRIGHTNESS_MAX_LEVEL 100
41
42/**
43 * Sets the power state for the panel.
44 */
45static void oaktrail_lvds_set_power(struct drm_device *dev,
46 struct psb_intel_encoder *psb_intel_encoder,
47 bool on)
48{
49 u32 pp_status;
50 struct drm_psb_private *dev_priv = dev->dev_private;
51
52 if (!gma_power_begin(dev, true))
53 return;
54
55 if (on) {
56 REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
57 POWER_TARGET_ON);
58 do {
59 pp_status = REG_READ(PP_STATUS);
60 } while ((pp_status & (PP_ON | PP_READY)) == PP_READY);
61 dev_priv->is_lvds_on = true;
62 if (dev_priv->ops->lvds_bl_power)
63 dev_priv->ops->lvds_bl_power(dev, true);
64 } else {
65 if (dev_priv->ops->lvds_bl_power)
66 dev_priv->ops->lvds_bl_power(dev, false);
67 REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
68 ~POWER_TARGET_ON);
69 do {
70 pp_status = REG_READ(PP_STATUS);
71 } while (pp_status & PP_ON);
72 dev_priv->is_lvds_on = false;
73 pm_request_idle(&dev->pdev->dev);
74 }
75 gma_power_end(dev);
76}
77
78static void oaktrail_lvds_dpms(struct drm_encoder *encoder, int mode)
79{
80 struct drm_device *dev = encoder->dev;
81 struct psb_intel_encoder *psb_intel_encoder =
82 to_psb_intel_encoder(encoder);
83
84 if (mode == DRM_MODE_DPMS_ON)
85 oaktrail_lvds_set_power(dev, psb_intel_encoder, true);
86 else
87 oaktrail_lvds_set_power(dev, psb_intel_encoder, false);
88
89 /* XXX: We never power down the LVDS pairs. */
90}
91
92static void oaktrail_lvds_mode_set(struct drm_encoder *encoder,
93 struct drm_display_mode *mode,
94 struct drm_display_mode *adjusted_mode)
95{
96 struct drm_device *dev = encoder->dev;
97 struct drm_psb_private *dev_priv = dev->dev_private;
98 struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
99 struct drm_mode_config *mode_config = &dev->mode_config;
100 struct drm_connector *connector = NULL;
101 struct drm_crtc *crtc = encoder->crtc;
102 u32 lvds_port;
103 uint64_t v = DRM_MODE_SCALE_FULLSCREEN;
104
105 if (!gma_power_begin(dev, true))
106 return;
107
108 /*
109 * The LVDS pin pair will already have been turned on in the
110 * psb_intel_crtc_mode_set since it has a large impact on the DPLL
111 * settings.
112 */
113 lvds_port = (REG_READ(LVDS) &
114 (~LVDS_PIPEB_SELECT)) |
115 LVDS_PORT_EN |
116 LVDS_BORDER_EN;
117
118 /* If the firmware says dither on Moorestown, or the BIOS does
119 on Oaktrail, then enable dithering */
120 if (mode_dev->panel_wants_dither || dev_priv->lvds_dither)
121 lvds_port |= MRST_PANEL_8TO6_DITHER_ENABLE;
122
123 REG_WRITE(LVDS, lvds_port);
124
125 /* Find the connector we're trying to set up */
126 list_for_each_entry(connector, &mode_config->connector_list, head) {
127 if (connector->encoder && connector->encoder->crtc == crtc)
128 break;
129 }
130
131 if (&connector->head == &mode_config->connector_list) {
132 DRM_ERROR("Couldn't find connector when setting mode");
133 return;
134 }
135
136 drm_connector_property_get_value(
137 connector,
138 dev->mode_config.scaling_mode_property,
139 &v);
140
141 if (v == DRM_MODE_SCALE_NO_SCALE)
142 REG_WRITE(PFIT_CONTROL, 0);
143 else if (v == DRM_MODE_SCALE_ASPECT) {
144 if ((mode->vdisplay != adjusted_mode->crtc_vdisplay) ||
145 (mode->hdisplay != adjusted_mode->crtc_hdisplay)) {
146 if ((adjusted_mode->crtc_hdisplay * mode->vdisplay) ==
147 (mode->hdisplay * adjusted_mode->crtc_vdisplay))
148 REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
149 else if ((adjusted_mode->crtc_hdisplay *
150 mode->vdisplay) > (mode->hdisplay *
151 adjusted_mode->crtc_vdisplay))
152 REG_WRITE(PFIT_CONTROL, PFIT_ENABLE |
153 PFIT_SCALING_MODE_PILLARBOX);
154 else
155 REG_WRITE(PFIT_CONTROL, PFIT_ENABLE |
156 PFIT_SCALING_MODE_LETTERBOX);
157 } else
158 REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
159 } else /*(v == DRM_MODE_SCALE_FULLSCREEN)*/
160 REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
161
162 gma_power_end(dev);
163}
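
The aspect branch is easiest to read with numbers; take an 800x600 mode
scaled onto a 1024x600 panel (adjusted crtc_hdisplay = 1024,
crtc_vdisplay = 600):

	/*
	 *	1024 * 600 = 614400  >  800 * 600 = 480000
	 *
	 * so the panel is proportionally wider than the mode and the
	 * fitter is set to pillarbox (black bars left and right).
	 */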
164
165static void oaktrail_lvds_prepare(struct drm_encoder *encoder)
166{
167 struct drm_device *dev = encoder->dev;
168 struct drm_psb_private *dev_priv = dev->dev_private;
169 struct psb_intel_encoder *psb_intel_encoder =
170 to_psb_intel_encoder(encoder);
171 struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
172
173 if (!gma_power_begin(dev, true))
174 return;
175
176 mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
177 mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
178 BACKLIGHT_DUTY_CYCLE_MASK);
179 oaktrail_lvds_set_power(dev, psb_intel_encoder, false);
180 gma_power_end(dev);
181}
182
183static u32 oaktrail_lvds_get_max_backlight(struct drm_device *dev)
184{
185 struct drm_psb_private *dev_priv = dev->dev_private;
186 u32 ret;
187
188 if (gma_power_begin(dev, false)) {
189 ret = ((REG_READ(BLC_PWM_CTL) &
190 BACKLIGHT_MODULATION_FREQ_MASK) >>
191 BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
192
193 gma_power_end(dev);
194 } else
195 ret = ((dev_priv->saveBLC_PWM_CTL &
196 BACKLIGHT_MODULATION_FREQ_MASK) >>
197 BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
198
199 return ret;
200}
201
202static void oaktrail_lvds_commit(struct drm_encoder *encoder)
203{
204 struct drm_device *dev = encoder->dev;
205 struct drm_psb_private *dev_priv = dev->dev_private;
206 struct psb_intel_encoder *psb_intel_encoder =
207 to_psb_intel_encoder(encoder);
208 struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
209
210 if (mode_dev->backlight_duty_cycle == 0)
211 mode_dev->backlight_duty_cycle =
212 oaktrail_lvds_get_max_backlight(dev);
213 oaktrail_lvds_set_power(dev, psb_intel_encoder, true);
214}
215
216static const struct drm_encoder_helper_funcs oaktrail_lvds_helper_funcs = {
217 .dpms = oaktrail_lvds_dpms,
218 .mode_fixup = psb_intel_lvds_mode_fixup,
219 .prepare = oaktrail_lvds_prepare,
220 .mode_set = oaktrail_lvds_mode_set,
221 .commit = oaktrail_lvds_commit,
222};
223
224static struct drm_display_mode lvds_configuration_modes[] = {
225 /* hard coded fixed mode for TPO LTPS LPJ040K001A */
226 { DRM_MODE("800x480", DRM_MODE_TYPE_DRIVER, 33264, 800, 836,
227 846, 1056, 0, 480, 489, 491, 525, 0, 0) },
228 /* hard coded fixed mode for LVDS 800x480 */
229 { DRM_MODE("800x480", DRM_MODE_TYPE_DRIVER, 30994, 800, 801,
230 802, 1024, 0, 480, 481, 482, 525, 0, 0) },
231 /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
232 { DRM_MODE("1024x600", DRM_MODE_TYPE_DRIVER, 53990, 1024, 1072,
233 1104, 1184, 0, 600, 603, 604, 608, 0, 0) },
234 /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
235 { DRM_MODE("1024x600", DRM_MODE_TYPE_DRIVER, 53990, 1024, 1104,
236 1136, 1184, 0, 600, 603, 604, 608, 0, 0) },
237 /* hard coded fixed mode for Sharp wsvga LVDS 1024x600 */
238 { DRM_MODE("1024x600", DRM_MODE_TYPE_DRIVER, 48885, 1024, 1124,
239 1204, 1312, 0, 600, 607, 610, 621, 0, 0) },
240 /* hard coded fixed mode for LVDS 1024x768 */
241 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
242 1184, 1344, 0, 768, 771, 777, 806, 0, 0) },
243 /* hard coded fixed mode for LVDS 1366x768 */
244 { DRM_MODE("1366x768", DRM_MODE_TYPE_DRIVER, 77500, 1366, 1430,
245 1558, 1664, 0, 768, 769, 770, 776, 0, 0) },
246};
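
These fallback timings can be sanity-checked arithmetically, since
refresh = clock / (htotal * vtotal). For the first TPO entry:

	/*
	 *	33264000 / (1056 * 525) = 33264000 / 554400 = 60 Hz
	 *
	 * i.e. an exact 60 Hz 800x480 timing.
	 */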
247
248/* Returns the panel fixed mode from configuration. */
249
250static void oaktrail_lvds_get_configuration_mode(struct drm_device *dev,
251 struct psb_intel_mode_device *mode_dev)
252{
253 struct drm_display_mode *mode = NULL;
254 struct drm_psb_private *dev_priv = dev->dev_private;
255 struct oaktrail_timing_info *ti = &dev_priv->gct_data.DTD;
256
257 mode_dev->panel_fixed_mode = NULL;
258
259 /* Use the firmware provided data on Moorestown */
260 if (dev_priv->vbt_data.size != 0x00) { /*if non-zero, then use vbt*/
261 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
262 if (!mode)
263 return;
264
265 mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo;
266 mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo;
267 mode->hsync_start = mode->hdisplay + \
268 ((ti->hsync_offset_hi << 8) | \
269 ti->hsync_offset_lo);
270 mode->hsync_end = mode->hsync_start + \
271 ((ti->hsync_pulse_width_hi << 8) | \
272 ti->hsync_pulse_width_lo);
273 mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) | \
274 ti->hblank_lo);
275 mode->vsync_start = \
276 mode->vdisplay + ((ti->vsync_offset_hi << 4) | \
277 ti->vsync_offset_lo);
278 mode->vsync_end = \
279 mode->vsync_start + ((ti->vsync_pulse_width_hi << 4) | \
280 ti->vsync_pulse_width_lo);
281 mode->vtotal = mode->vdisplay + \
282 ((ti->vblank_hi << 8) | ti->vblank_lo);
283 mode->clock = ti->pixel_clock * 10;
284#if 0
285 printk(KERN_INFO "hdisplay is %d\n", mode->hdisplay);
286 printk(KERN_INFO "vdisplay is %d\n", mode->vdisplay);
287 printk(KERN_INFO "HSS is %d\n", mode->hsync_start);
288 printk(KERN_INFO "HSE is %d\n", mode->hsync_end);
289 printk(KERN_INFO "htotal is %d\n", mode->htotal);
290 printk(KERN_INFO "VSS is %d\n", mode->vsync_start);
291 printk(KERN_INFO "VSE is %d\n", mode->vsync_end);
292 printk(KERN_INFO "vtotal is %d\n", mode->vtotal);
293 printk(KERN_INFO "clock is %d\n", mode->clock);
294#endif
295 mode_dev->panel_fixed_mode = mode;
296 }
297
298 /* Use the BIOS VBT mode if available */
299 if (mode_dev->panel_fixed_mode == NULL && mode_dev->vbt_mode)
300 mode_dev->panel_fixed_mode = drm_mode_duplicate(dev,
301 mode_dev->vbt_mode);
302
303 /* Then try the LVDS VBT mode */
304 if (mode_dev->panel_fixed_mode == NULL)
305 if (dev_priv->lfp_lvds_vbt_mode)
306 mode_dev->panel_fixed_mode =
307 drm_mode_duplicate(dev,
308 dev_priv->lfp_lvds_vbt_mode);
309 /* Then guess */
310 if (mode_dev->panel_fixed_mode == NULL)
311 mode_dev->panel_fixed_mode
312 = drm_mode_duplicate(dev, &lvds_configuration_modes[2]);
313
314 drm_mode_set_name(mode_dev->panel_fixed_mode);
315 drm_mode_set_crtcinfo(mode_dev->panel_fixed_mode, 0);
316}
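
The hi/lo assembly above is straightforward but worth one hypothetical
example (the field values below are made up for illustration):

	/*
	 * Hypothetical DTD: hactive_hi = 0x04, hactive_lo = 0x00
	 *	mode->hdisplay = (0x04 << 8) | 0x00 = 1024
	 * Note the vsync fields shift by 4 rather than 8: offset and
	 * pulse width apparently share one byte as two nibbles in the
	 * GCT table.
	 */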
317
318/**
319 * oaktrail_lvds_init - setup LVDS connectors on this device
320 * @dev: drm device
321 *
322 * Create the connector, register the LVDS DDC bus, and try to figure out what
323 * modes we can display on the LVDS panel (if present).
324 */
325void oaktrail_lvds_init(struct drm_device *dev,
326 struct psb_intel_mode_device *mode_dev)
327{
328 struct psb_intel_encoder *psb_intel_encoder;
329 struct psb_intel_connector *psb_intel_connector;
330 struct drm_connector *connector;
331 struct drm_encoder *encoder;
332 struct drm_psb_private *dev_priv = dev->dev_private;
333 struct edid *edid;
334 int ret = 0;
335 struct i2c_adapter *i2c_adap;
336 struct drm_display_mode *scan; /* *modes, *bios_mode; */
337
338 psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL);
339 if (!psb_intel_encoder)
340 return;
341
342 psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL);
343 if (!psb_intel_connector)
344 goto failed_connector;
345
346 connector = &psb_intel_connector->base;
347 encoder = &psb_intel_encoder->base;
348 dev_priv->is_lvds_on = true;
349 drm_connector_init(dev, connector,
350 &psb_intel_lvds_connector_funcs,
351 DRM_MODE_CONNECTOR_LVDS);
352
353 drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs,
354 DRM_MODE_ENCODER_LVDS);
355
356 psb_intel_connector_attach_encoder(psb_intel_connector,
357 psb_intel_encoder);
358 psb_intel_encoder->type = INTEL_OUTPUT_LVDS;
359
360 drm_encoder_helper_add(encoder, &oaktrail_lvds_helper_funcs);
361 drm_connector_helper_add(connector,
362 &psb_intel_lvds_connector_helper_funcs);
363 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
364 connector->interlace_allowed = false;
365 connector->doublescan_allowed = false;
366
367 drm_connector_attach_property(connector,
368 dev->mode_config.scaling_mode_property,
369 DRM_MODE_SCALE_FULLSCREEN);
370 drm_connector_attach_property(connector,
371 dev_priv->backlight_property,
372 BRIGHTNESS_MAX_LEVEL);
373
374 mode_dev->panel_wants_dither = false;
375 if (dev_priv->vbt_data.size != 0x00)
376 mode_dev->panel_wants_dither = (dev_priv->gct_data.
377 Panel_Port_Control & MRST_PANEL_8TO6_DITHER_ENABLE);
378 if (dev_priv->lvds_dither)
379 mode_dev->panel_wants_dither = 1;
380
381 /*
382 * LVDS discovery:
383 * 1) check for EDID on DDC
384 * 2) check for VBT data
385 * 3) check to see if LVDS is already on
386 * if none of the above, no panel
387 * 4) make sure lid is open
388 * if closed, act like it's not there for now
389 */
390
391 i2c_adap = i2c_get_adapter(dev_priv->ops->i2c_bus);
392 if (i2c_adap == NULL)
393 dev_err(dev->dev, "No ddc adapter available!\n");
394 /*
395 * Attempt to get the fixed panel mode from DDC. Assume that the
396 * preferred mode is the right one.
397 */
398 if (i2c_adap) {
399 edid = drm_get_edid(connector, i2c_adap);
400 if (edid) {
401 drm_mode_connector_update_edid_property(connector,
402 edid);
403 ret = drm_add_edid_modes(connector, edid);
404 kfree(edid);
405 }
406
407 list_for_each_entry(scan, &connector->probed_modes, head) {
408 if (scan->type & DRM_MODE_TYPE_PREFERRED) {
409 mode_dev->panel_fixed_mode =
410 drm_mode_duplicate(dev, scan);
411 goto out; /* FIXME: check for quirks */
412 }
413 }
414 }
415 /*
416 * If we didn't get EDID, try getting the panel timing
417 * from configuration data
418 */
419 oaktrail_lvds_get_configuration_mode(dev, mode_dev);
420
421 if (mode_dev->panel_fixed_mode) {
422 mode_dev->panel_fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
423 goto out; /* FIXME: check for quirks */
424 }
425
426 /* If we still don't have a mode after all that, give up. */
427 if (!mode_dev->panel_fixed_mode) {
428 dev_err(dev->dev, "Found no modes on the lvds, ignoring the LVDS\n");
429 goto failed_find;
430 }
431
432out:
433 drm_sysfs_connector_add(connector);
434 return;
435
436failed_find:
437 dev_dbg(dev->dev, "No LVDS modes found, disabling.\n");
438 if (psb_intel_encoder->ddc_bus)
439 psb_intel_i2c_destroy(psb_intel_encoder->ddc_bus);
440
441/* failed_ddc: */
442
443 drm_encoder_cleanup(encoder);
444 drm_connector_cleanup(connector);
445 kfree(psb_intel_connector);
446failed_connector:
447 kfree(psb_intel_encoder);
448}
449
diff --git a/drivers/gpu/drm/gma500/power.c b/drivers/gpu/drm/gma500/power.c
new file mode 100644
index 00000000000..94025693bae
--- /dev/null
+++ b/drivers/gpu/drm/gma500/power.c
@@ -0,0 +1,316 @@
1/**************************************************************************
2 * Copyright (c) 2009-2011, Intel Corporation.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 * Authors:
25 * Benjamin Defnet <benjamin.r.defnet@intel.com>
26 * Rajesh Poornachandran <rajesh.poornachandran@intel.com>
27 * Massively reworked
28 * Alan Cox <alan@linux.intel.com>
29 */
30
31#include "power.h"
32#include "psb_drv.h"
33#include "psb_reg.h"
34#include "psb_intel_reg.h"
35#include <linux/mutex.h>
36#include <linux/pm_runtime.h>
37
38static struct mutex power_mutex; /* Serialize power ops */
39static spinlock_t power_ctrl_lock; /* Serialize power claim */
40
41/**
42 * gma_power_init - initialise power manager
43 * @dev: our device
44 *
45 * Set up for power management tracking of our hardware.
46 */
47void gma_power_init(struct drm_device *dev)
48{
49 struct drm_psb_private *dev_priv = dev->dev_private;
50
51 /* FIXME: Move APM/OSPM base into relevant device code */
52 dev_priv->apm_base = dev_priv->apm_reg & 0xffff;
53 dev_priv->ospm_base &= 0xffff;
54
55 dev_priv->display_power = true; /* We start active */
56 dev_priv->display_count = 0; /* Currently no users */
57 dev_priv->suspended = false; /* And not suspended */
58 spin_lock_init(&power_ctrl_lock);
59 mutex_init(&power_mutex);
60
61 dev_priv->ops->init_pm(dev);
62}
63
64/**
65 * gma_power_uninit - end power manager
66 * @dev: device to end for
67 *
68 * Undo the effects of gma_power_init
69 */
70void gma_power_uninit(struct drm_device *dev)
71{
72 pm_runtime_disable(&dev->pdev->dev);
73 pm_runtime_set_suspended(&dev->pdev->dev);
74}
75
76/**
77 * gma_suspend_display - suspend the display logic
78 * @dev: our DRM device
79 *
80 * Suspend the display logic of the graphics interface
81 */
82static void gma_suspend_display(struct drm_device *dev)
83{
84 struct drm_psb_private *dev_priv = dev->dev_private;
85
86 if (dev_priv->suspended)
87 return;
88 dev_priv->ops->save_regs(dev);
89 dev_priv->ops->power_down(dev);
90 dev_priv->display_power = false;
91}
92
93/**
94 * gma_resume_display - resume display side logic
95 * @pdev: PCI device
 *
96 * Resume the display hardware restoring state and enabling
97 * as necessary.
98 */
99static void gma_resume_display(struct pci_dev *pdev)
100{
101 struct drm_device *dev = pci_get_drvdata(pdev);
102 struct drm_psb_private *dev_priv = dev->dev_private;
103
104 if (dev_priv->suspended == false)
105 return;
106
107 /* turn on the display power island */
108 dev_priv->ops->power_up(dev);
109 dev_priv->suspended = false;
110 dev_priv->display_power = true;
111
112 PSB_WVDC32(dev_priv->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
113 pci_write_config_word(pdev, PSB_GMCH_CTRL,
114 dev_priv->gmch_ctrl | _PSB_GMCH_ENABLED);
115 dev_priv->ops->restore_regs(dev);
116}
117
118/**
119 * gma_suspend_pci - suspend PCI side
120 * @pdev: PCI device
121 *
122 * Perform the suspend processing on our PCI device state
123 */
124static void gma_suspend_pci(struct pci_dev *pdev)
125{
126 struct drm_device *dev = pci_get_drvdata(pdev);
127 struct drm_psb_private *dev_priv = dev->dev_private;
128 int bsm, vbt;
129
130 if (dev_priv->suspended)
131 return;
132
133 pci_save_state(pdev);
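	/* BSM (0x5C) and VBT (0xFC) sit outside the standard config header
	   that pci_save_state() snapshots, so cache them by hand as well */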
134 pci_read_config_dword(pdev, 0x5C, &bsm);
135 dev_priv->saveBSM = bsm;
136 pci_read_config_dword(pdev, 0xFC, &vbt);
137 dev_priv->saveVBT = vbt;
138 pci_read_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, &dev_priv->msi_addr);
139 pci_read_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, &dev_priv->msi_data);
140
141 pci_disable_device(pdev);
142 pci_set_power_state(pdev, PCI_D3hot);
143
144 dev_priv->suspended = true;
145}
146
147/**
148 * gma_resume_pci - resume helper
149 * @pdev: PCI device
150 *
151 * Perform the resume processing on our PCI device state - rewrite
152 * register state and re-enable the PCI device
153 */
154static bool gma_resume_pci(struct pci_dev *pdev)
155{
156 struct drm_device *dev = pci_get_drvdata(pdev);
157 struct drm_psb_private *dev_priv = dev->dev_private;
158 int ret;
159
160 if (!dev_priv->suspended)
161 return true;
162
163 pci_set_power_state(pdev, PCI_D0);
164 pci_restore_state(pdev);
165 pci_write_config_dword(pdev, 0x5c, dev_priv->saveBSM);
166 pci_write_config_dword(pdev, 0xFC, dev_priv->saveVBT);
167 /* restoring MSI address and data in PCIx space */
168 pci_write_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, dev_priv->msi_addr);
169 pci_write_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, dev_priv->msi_data);
170 ret = pci_enable_device(pdev);
171
172 if (ret != 0)
173 dev_err(&pdev->dev, "pci_enable failed: %d\n", ret);
174 else
175 dev_priv->suspended = false;
176 return !dev_priv->suspended;
177}
178
179/**
180 * gma_power_suspend - bus callback for suspend
181 * @_dev: our device
183 *
184 * Called back by the PCI layer during a suspend of the system. We
185 * perform the necessary shut down steps and save enough state that
186 * we can undo this when resume is called.
187 */
188int gma_power_suspend(struct device *_dev)
189{
190 struct pci_dev *pdev = container_of(_dev, struct pci_dev, dev);
191 struct drm_device *dev = pci_get_drvdata(pdev);
192 struct drm_psb_private *dev_priv = dev->dev_private;
193
194 mutex_lock(&power_mutex);
195 if (!dev_priv->suspended) {
196 if (dev_priv->display_count) {
197 mutex_unlock(&power_mutex);
198 return -EBUSY;
199 }
200 psb_irq_uninstall(dev);
201 gma_suspend_display(dev);
202 gma_suspend_pci(pdev);
203 }
204 mutex_unlock(&power_mutex);
205 return 0;
206}
207
208/**
209 * gma_power_resume - resume power
210 * @_dev: our device
211 *
212 * Resume the PCI side of the graphics and then the displays
213 */
214int gma_power_resume(struct device *_dev)
215{
216 struct pci_dev *pdev = container_of(_dev, struct pci_dev, dev);
217 struct drm_device *dev = pci_get_drvdata(pdev);
218
219 mutex_lock(&power_mutex);
220 gma_resume_pci(pdev);
221 gma_resume_display(pdev);
222 psb_irq_preinstall(dev);
223 psb_irq_postinstall(dev);
224 mutex_unlock(&power_mutex);
225 return 0;
226}
227
228/**
229 * gma_power_is_on - return true if power is on
230 * @dev: our DRM device
231 *
232 * Returns true if the display island power is on at this moment
233 */
234bool gma_power_is_on(struct drm_device *dev)
235{
236 struct drm_psb_private *dev_priv = dev->dev_private;
237 return dev_priv->display_power;
238}
239
240/**
241 * gma_power_begin - begin requiring power
242 * @dev: our DRM device
243 * @force_on: true to force power on
244 *
245 * Begin an action that requires the display power island is enabled.
246 * We refcount the islands.
247 */
248bool gma_power_begin(struct drm_device *dev, bool force_on)
249{
250 struct drm_psb_private *dev_priv = dev->dev_private;
251 int ret;
252 unsigned long flags;
253
254 spin_lock_irqsave(&power_ctrl_lock, flags);
255 /* Power already on ? */
256 if (dev_priv->display_power) {
257 dev_priv->display_count++;
258 pm_runtime_get(&dev->pdev->dev);
259 spin_unlock_irqrestore(&power_ctrl_lock, flags);
260 return true;
261 }
262 if (force_on == false)
263 goto out_false;
264
265 /* Ok power up needed */
266 ret = gma_resume_pci(dev->pdev);
267 if (ret == 0) {
268 psb_irq_preinstall(dev);
269 psb_irq_postinstall(dev);
270 pm_runtime_get(&dev->pdev->dev);
271 dev_priv->display_count++;
272 spin_unlock_irqrestore(&power_ctrl_lock, flags);
273 return true;
274 }
275out_false:
276 spin_unlock_irqrestore(&power_ctrl_lock, flags);
277 return false;
278}
279
280/**
281 * gma_power_end - end use of power
282 * @dev: Our DRM device
283 *
284 * Indicate that one of our gma_power_begin() requested periods when
285 * the display island power is needed has completed.
286 */
287void gma_power_end(struct drm_device *dev)
288{
289 struct drm_psb_private *dev_priv = dev->dev_private;
290 unsigned long flags;
291 spin_lock_irqsave(&power_ctrl_lock, flags);
292 dev_priv->display_count--;
293 WARN_ON(dev_priv->display_count < 0);
294 spin_unlock_irqrestore(&power_ctrl_lock, flags);
295 pm_runtime_put(&dev->pdev->dev);
296}
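/*
 * Usage sketch (illustrative; PIPEACONF is just an example register):
 * hardware access is expected to be wrapped in begin/end so the display
 * island stays powered for the duration of the access.
 */
static inline u32 gma_pipeaconf_read_example(struct drm_device *dev)
{
	u32 val = 0;

	if (gma_power_begin(dev, true)) {	/* power up if needed */
		val = PSB_RVDC32(PIPEACONF);
		gma_power_end(dev);		/* drop our reference */
	}
	return val;
}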
297
298int psb_runtime_suspend(struct device *dev)
299{
300 return gma_power_suspend(dev);
301}
302
303int psb_runtime_resume(struct device *dev)
304{
305	return gma_power_resume(dev);
306}
307
308int psb_runtime_idle(struct device *dev)
309{
310 struct drm_device *drmdev = pci_get_drvdata(to_pci_dev(dev));
311 struct drm_psb_private *dev_priv = drmdev->dev_private;
312 if (dev_priv->display_count)
313 return 0;
314 else
315 return 1;
316}
diff --git a/drivers/gpu/drm/gma500/power.h b/drivers/gpu/drm/gma500/power.h
new file mode 100644
index 00000000000..1969d2ecb32
--- /dev/null
+++ b/drivers/gpu/drm/gma500/power.h
@@ -0,0 +1,67 @@
1/**************************************************************************
2 * Copyright (c) 2009-2011, Intel Corporation.
3 * All Rights Reserved.
4
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 * Authors:
25 * Benjamin Defnet <benjamin.r.defnet@intel.com>
26 * Rajesh Poornachandran <rajesh.poornachandran@intel.com>
27 * Massively reworked
28 * Alan Cox <alan@linux.intel.com>
29 */
30#ifndef _PSB_POWERMGMT_H_
31#define _PSB_POWERMGMT_H_
32
33#include <linux/pci.h>
34#include <drm/drmP.h>
35
36void gma_power_init(struct drm_device *dev);
37void gma_power_uninit(struct drm_device *dev);
38
39/*
40 * The kernel bus power management will call these functions
41 */
42int gma_power_suspend(struct device *dev);
43int gma_power_resume(struct device *dev);
44
45/*
46 * These are the functions the driver should use to wrap all hw access
47 * (i.e. register reads and writes)
48 */
49bool gma_power_begin(struct drm_device *dev, bool force);
50void gma_power_end(struct drm_device *dev);
51
52/*
53 * Use this function to do an instantaneous check for if the hw is on.
54 * Only use this in cases where you know the mutex is already held such
55 * as in irq install/uninstall and you need to
56 * prevent a deadlock situation. Otherwise use gma_power_begin().
57 */
58bool gma_power_is_on(struct drm_device *dev);
59
60/*
61 * GFX-Runtime PM callbacks
62 */
63int psb_runtime_suspend(struct device *dev);
64int psb_runtime_resume(struct device *dev);
65int psb_runtime_idle(struct device *dev);
66
67#endif /*_PSB_POWERMGMT_H_*/
diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
new file mode 100644
index 00000000000..e5f5906172b
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_device.c
@@ -0,0 +1,328 @@
1/**************************************************************************
2 * Copyright (c) 2011, Intel Corporation.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 **************************************************************************/
19
20#include <linux/backlight.h>
21#include <drm/drmP.h>
22#include <drm/drm.h>
23#include "gma_drm.h"
24#include "psb_drv.h"
25#include "psb_reg.h"
26#include "psb_intel_reg.h"
27#include "intel_bios.h"
28
29
30static int psb_output_init(struct drm_device *dev)
31{
32 struct drm_psb_private *dev_priv = dev->dev_private;
33 psb_intel_lvds_init(dev, &dev_priv->mode_dev);
34 psb_intel_sdvo_init(dev, SDVOB);
35 return 0;
36}
37
38#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
39
40/*
41 * Poulsbo Backlight Interfaces
42 */
43
44#define BLC_PWM_PRECISION_FACTOR 100 /* 10000000 */
45#define BLC_PWM_FREQ_CALC_CONSTANT 32
46#define MHz 1000000
47
48#define PSB_BLC_PWM_PRECISION_FACTOR 10
49#define PSB_BLC_MAX_PWM_REG_FREQ 0xFFFE
50#define PSB_BLC_MIN_PWM_REG_FREQ 0x2
51
52#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
53#define PSB_BACKLIGHT_PWM_CTL_SHIFT (16)
54
55static int psb_brightness;
56static struct backlight_device *psb_backlight_device;
57
58static int psb_get_brightness(struct backlight_device *bd)
59{
60 /* return locally cached var instead of HW read (due to DPST etc.) */
61 /* FIXME: ideally return actual value in case firmware fiddled with
62 it */
63 return psb_brightness;
64}
65
66
67static int psb_backlight_setup(struct drm_device *dev)
68{
69 struct drm_psb_private *dev_priv = dev->dev_private;
70 unsigned long core_clock;
71 /* u32 bl_max_freq; */
72 /* unsigned long value; */
73 u16 bl_max_freq;
74 uint32_t value;
75 uint32_t blc_pwm_precision_factor;
76
77 /* get bl_max_freq and pol from dev_priv*/
78 if (!dev_priv->lvds_bl) {
79		dev_err(dev->dev, "No valid LVDS backlight info\n");
80 return -ENOENT;
81 }
82 bl_max_freq = dev_priv->lvds_bl->freq;
83 blc_pwm_precision_factor = PSB_BLC_PWM_PRECISION_FACTOR;
84
85 core_clock = dev_priv->core_freq;
86
87 value = (core_clock * MHz) / BLC_PWM_FREQ_CALC_CONSTANT;
88 value *= blc_pwm_precision_factor;
89 value /= bl_max_freq;
90 value /= blc_pwm_precision_factor;
91
92	if (value > (unsigned long long)PSB_BLC_MAX_PWM_REG_FREQ ||
93	    value < (unsigned long long)PSB_BLC_MIN_PWM_REG_FREQ)
94		return -ERANGE;
95
96	value &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
97	REG_WRITE(BLC_PWM_CTL,
98		  (value << PSB_BACKLIGHT_PWM_CTL_SHIFT) | (value));
99
100	return 0;
101}
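/*
 * Worked example (illustrative numbers): with core_freq = 200 and a VBT
 * bl_max_freq of 6250, the calculation above gives
 *
 *	value = (200 * 1000000) / 32	= 6250000
 *	value = (6250000 * 10) / 6250	= 10000
 *	value = 10000 / 10		= 1000 (0x3E8)
 *
 * which lies inside [0x2, 0xFFFE] and is programmed into BLC_PWM_CTL.
 */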
102
103static int psb_set_brightness(struct backlight_device *bd)
104{
105 struct drm_device *dev = bl_get_data(psb_backlight_device);
106 int level = bd->props.brightness;
107
108 /* Percentage 1-100% being valid */
109 if (level < 1)
110 level = 1;
111
112 psb_intel_lvds_set_brightness(dev, level);
113 psb_brightness = level;
114 return 0;
115}
116
117static const struct backlight_ops psb_ops = {
118 .get_brightness = psb_get_brightness,
119 .update_status = psb_set_brightness,
120};
121
122static int psb_backlight_init(struct drm_device *dev)
123{
124 struct drm_psb_private *dev_priv = dev->dev_private;
125 int ret;
126 struct backlight_properties props;
127
128 memset(&props, 0, sizeof(struct backlight_properties));
129 props.max_brightness = 100;
130 props.type = BACKLIGHT_PLATFORM;
131
132 psb_backlight_device = backlight_device_register("psb-bl",
133 NULL, (void *)dev, &psb_ops, &props);
134 if (IS_ERR(psb_backlight_device))
135 return PTR_ERR(psb_backlight_device);
136
137 ret = psb_backlight_setup(dev);
138 if (ret < 0) {
139 backlight_device_unregister(psb_backlight_device);
140 psb_backlight_device = NULL;
141 return ret;
142 }
143 psb_backlight_device->props.brightness = 100;
144 psb_backlight_device->props.max_brightness = 100;
145 backlight_update_status(psb_backlight_device);
146 dev_priv->backlight_device = psb_backlight_device;
147 return 0;
148}
149
150#endif
151
152/*
153 * Provide the Poulsbo specific chip logic and low level methods
154 * for power management
155 */
156
157static void psb_init_pm(struct drm_device *dev)
158{
159 struct drm_psb_private *dev_priv = dev->dev_private;
160
161 u32 gating = PSB_RSGX32(PSB_CR_CLKGATECTL);
162 gating &= ~3; /* Disable 2D clock gating */
163 gating |= 1;
164 PSB_WSGX32(gating, PSB_CR_CLKGATECTL);
165 PSB_RSGX32(PSB_CR_CLKGATECTL);
166}
167
168/**
169 * psb_save_display_registers - save registers lost on suspend
170 * @dev: our DRM device
171 *
172 * Save the state we need in order to be able to restore the interface
173 * upon resume from suspend
174 */
175static int psb_save_display_registers(struct drm_device *dev)
176{
177 struct drm_psb_private *dev_priv = dev->dev_private;
178 struct drm_crtc *crtc;
179 struct drm_connector *connector;
180
181 /* Display arbitration control + watermarks */
182 dev_priv->saveDSPARB = PSB_RVDC32(DSPARB);
183 dev_priv->saveDSPFW1 = PSB_RVDC32(DSPFW1);
184 dev_priv->saveDSPFW2 = PSB_RVDC32(DSPFW2);
185 dev_priv->saveDSPFW3 = PSB_RVDC32(DSPFW3);
186 dev_priv->saveDSPFW4 = PSB_RVDC32(DSPFW4);
187 dev_priv->saveDSPFW5 = PSB_RVDC32(DSPFW5);
188 dev_priv->saveDSPFW6 = PSB_RVDC32(DSPFW6);
189 dev_priv->saveCHICKENBIT = PSB_RVDC32(DSPCHICKENBIT);
190
191 /* Save crtc and output state */
192 mutex_lock(&dev->mode_config.mutex);
193 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
194 if (drm_helper_crtc_in_use(crtc))
195 crtc->funcs->save(crtc);
196 }
197
198 list_for_each_entry(connector, &dev->mode_config.connector_list, head)
199 connector->funcs->save(connector);
200
201 mutex_unlock(&dev->mode_config.mutex);
202 return 0;
203}
204
205/**
206 * psb_restore_display_registers - restore lost register state
207 * @dev: our DRM device
208 *
209 * Restore register state that was lost during suspend and resume.
210 */
211static int psb_restore_display_registers(struct drm_device *dev)
212{
213 struct drm_psb_private *dev_priv = dev->dev_private;
214 struct drm_crtc *crtc;
215 struct drm_connector *connector;
216
217 /* Display arbitration + watermarks */
218 PSB_WVDC32(dev_priv->saveDSPARB, DSPARB);
219 PSB_WVDC32(dev_priv->saveDSPFW1, DSPFW1);
220 PSB_WVDC32(dev_priv->saveDSPFW2, DSPFW2);
221 PSB_WVDC32(dev_priv->saveDSPFW3, DSPFW3);
222 PSB_WVDC32(dev_priv->saveDSPFW4, DSPFW4);
223 PSB_WVDC32(dev_priv->saveDSPFW5, DSPFW5);
224 PSB_WVDC32(dev_priv->saveDSPFW6, DSPFW6);
225 PSB_WVDC32(dev_priv->saveCHICKENBIT, DSPCHICKENBIT);
226
227	/* Make sure the VGA plane is off; it initializes to on after reset! */
228 PSB_WVDC32(0x80000000, VGACNTRL);
229
230 mutex_lock(&dev->mode_config.mutex);
231 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
232 if (drm_helper_crtc_in_use(crtc))
233 crtc->funcs->restore(crtc);
234
235 list_for_each_entry(connector, &dev->mode_config.connector_list, head)
236 connector->funcs->restore(connector);
237
238 mutex_unlock(&dev->mode_config.mutex);
239 return 0;
240}
241
242static int psb_power_down(struct drm_device *dev)
243{
244 return 0;
245}
246
247static int psb_power_up(struct drm_device *dev)
248{
249 return 0;
250}
251
252static void psb_get_core_freq(struct drm_device *dev)
253{
254 uint32_t clock;
255 struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
256 struct drm_psb_private *dev_priv = dev->dev_private;
257
258 /*pci_write_config_dword(pci_root, 0xD4, 0x00C32004);*/
259 /*pci_write_config_dword(pci_root, 0xD0, 0xE0033000);*/
260
261 pci_write_config_dword(pci_root, 0xD0, 0xD0050300);
262 pci_read_config_dword(pci_root, 0xD4, &clock);
263 pci_dev_put(pci_root);
264
265 switch (clock & 0x07) {
266 case 0:
267 dev_priv->core_freq = 100;
268 break;
269 case 1:
270 dev_priv->core_freq = 133;
271 break;
272 case 2:
273 dev_priv->core_freq = 150;
274 break;
275 case 3:
276 dev_priv->core_freq = 178;
277 break;
278 case 4:
279 dev_priv->core_freq = 200;
280 break;
281 case 5:
282 case 6:
283 case 7:
284		dev_priv->core_freq = 266;
		break;
285	default:
286 dev_priv->core_freq = 0;
287 }
288}
289
290static int psb_chip_setup(struct drm_device *dev)
291{
292 psb_get_core_freq(dev);
293 gma_intel_setup_gmbus(dev);
294 gma_intel_opregion_init(dev);
295 psb_intel_init_bios(dev);
296 return 0;
297}
298
299static void psb_chip_teardown(struct drm_device *dev)
300{
301 gma_intel_teardown_gmbus(dev);
302}
303
304const struct psb_ops psb_chip_ops = {
305 .name = "Poulsbo",
306 .accel_2d = 1,
307 .pipes = 2,
308 .crtcs = 2,
309 .sgx_offset = PSB_SGX_OFFSET,
310 .chip_setup = psb_chip_setup,
311 .chip_teardown = psb_chip_teardown,
312
313 .crtc_helper = &psb_intel_helper_funcs,
314 .crtc_funcs = &psb_intel_crtc_funcs,
315
316 .output_init = psb_output_init,
317
318#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
319 .backlight_init = psb_backlight_init,
320#endif
321
322 .init_pm = psb_init_pm,
323 .save_regs = psb_save_display_registers,
324 .restore_regs = psb_restore_display_registers,
325 .power_down = psb_power_down,
326 .power_up = psb_power_up,
327};
328
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
new file mode 100644
index 00000000000..f14768f2b36
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -0,0 +1,703 @@
1/**************************************************************************
2 * Copyright (c) 2007-2011, Intel Corporation.
3 * All Rights Reserved.
4 * Copyright (c) 2008, Tungsten Graphics, Inc. Cedar Park, TX., USA.
5 * All Rights Reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 **************************************************************************/
21
22#include <drm/drmP.h>
23#include <drm/drm.h>
24#include "gma_drm.h"
25#include "psb_drv.h"
26#include "framebuffer.h"
27#include "psb_reg.h"
28#include "psb_intel_reg.h"
29#include "intel_bios.h"
30#include "mid_bios.h"
31#include <drm/drm_pciids.h>
32#include "power.h"
33#include <linux/cpu.h>
34#include <linux/notifier.h>
35#include <linux/spinlock.h>
36#include <linux/pm_runtime.h>
37#include <acpi/video.h>
38#include <linux/module.h>
39
40static int drm_psb_trap_pagefaults;
41
42static int psb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
43
44MODULE_PARM_DESC(trap_pagefaults, "Error and reset on MMU pagefaults");
45module_param_named(trap_pagefaults, drm_psb_trap_pagefaults, int, 0600);
46
47
48static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
49 { 0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops },
50 { 0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops },
51#if defined(CONFIG_DRM_GMA600)
52 { 0x8086, 0x4100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
53 { 0x8086, 0x4101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
54 { 0x8086, 0x4102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
55 { 0x8086, 0x4103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
56 { 0x8086, 0x4104, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
57 { 0x8086, 0x4105, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
58 { 0x8086, 0x4106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
59 { 0x8086, 0x4107, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
60 /* Atom E620 */
61 { 0x8086, 0x4108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
62#endif
63#if defined(CONFIG_DRM_GMA3600)
64 { 0x8086, 0x0be0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
65 { 0x8086, 0x0be1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
66 { 0x8086, 0x0be2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
67 { 0x8086, 0x0be3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
68 { 0x8086, 0x0be4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
69 { 0x8086, 0x0be5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
70 { 0x8086, 0x0be6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
71 { 0x8086, 0x0be7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
72#endif
73 { 0, 0, 0}
74};
75MODULE_DEVICE_TABLE(pci, pciidlist);
76
77/*
78 * Standard IOCTLs.
79 */
80
81#define DRM_IOCTL_PSB_ADB \
82 DRM_IOWR(DRM_GMA_ADB + DRM_COMMAND_BASE, uint32_t)
83#define DRM_IOCTL_PSB_MODE_OPERATION \
84 DRM_IOWR(DRM_GMA_MODE_OPERATION + DRM_COMMAND_BASE, \
85 struct drm_psb_mode_operation_arg)
86#define DRM_IOCTL_PSB_STOLEN_MEMORY \
87 DRM_IOWR(DRM_GMA_STOLEN_MEMORY + DRM_COMMAND_BASE, \
88 struct drm_psb_stolen_memory_arg)
89#define DRM_IOCTL_PSB_GAMMA \
90 DRM_IOWR(DRM_GMA_GAMMA + DRM_COMMAND_BASE, \
91 struct drm_psb_dpst_lut_arg)
92#define DRM_IOCTL_PSB_DPST_BL \
93 DRM_IOWR(DRM_GMA_DPST_BL + DRM_COMMAND_BASE, \
94 uint32_t)
95#define DRM_IOCTL_PSB_GET_PIPE_FROM_CRTC_ID \
96 DRM_IOWR(DRM_GMA_GET_PIPE_FROM_CRTC_ID + DRM_COMMAND_BASE, \
97 struct drm_psb_get_pipe_from_crtc_id_arg)
98#define DRM_IOCTL_PSB_GEM_CREATE \
99 DRM_IOWR(DRM_GMA_GEM_CREATE + DRM_COMMAND_BASE, \
100 struct drm_psb_gem_create)
101#define DRM_IOCTL_PSB_GEM_MMAP \
102 DRM_IOWR(DRM_GMA_GEM_MMAP + DRM_COMMAND_BASE, \
103 struct drm_psb_gem_mmap)
104
105static int psb_adb_ioctl(struct drm_device *dev, void *data,
106 struct drm_file *file_priv);
107static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
108 struct drm_file *file_priv);
109static int psb_stolen_memory_ioctl(struct drm_device *dev, void *data,
110 struct drm_file *file_priv);
111static int psb_gamma_ioctl(struct drm_device *dev, void *data,
112 struct drm_file *file_priv);
113static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data,
114 struct drm_file *file_priv);
115
116#define PSB_IOCTL_DEF(ioctl, func, flags) \
117 [DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = {ioctl, flags, func}
118
119static struct drm_ioctl_desc psb_ioctls[] = {
120 PSB_IOCTL_DEF(DRM_IOCTL_PSB_ADB, psb_adb_ioctl, DRM_AUTH),
121 PSB_IOCTL_DEF(DRM_IOCTL_PSB_MODE_OPERATION, psb_mode_operation_ioctl,
122 DRM_AUTH),
123 PSB_IOCTL_DEF(DRM_IOCTL_PSB_STOLEN_MEMORY, psb_stolen_memory_ioctl,
124 DRM_AUTH),
125 PSB_IOCTL_DEF(DRM_IOCTL_PSB_GAMMA, psb_gamma_ioctl, DRM_AUTH),
126 PSB_IOCTL_DEF(DRM_IOCTL_PSB_DPST_BL, psb_dpst_bl_ioctl, DRM_AUTH),
127 PSB_IOCTL_DEF(DRM_IOCTL_PSB_GET_PIPE_FROM_CRTC_ID,
128 psb_intel_get_pipe_from_crtc_id, 0),
129 PSB_IOCTL_DEF(DRM_IOCTL_PSB_GEM_CREATE, psb_gem_create_ioctl,
130 DRM_UNLOCKED | DRM_AUTH),
131 PSB_IOCTL_DEF(DRM_IOCTL_PSB_GEM_MMAP, psb_gem_mmap_ioctl,
132 DRM_UNLOCKED | DRM_AUTH),
133};
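/*
 * Hedged userspace sketch, assuming the ioctl numbers above are
 * mirrored to userspace and fd is an open DRM node:
 *
 *	#include <xf86drm.h>
 *
 *	int query_stolen(int fd, struct drm_psb_stolen_memory_arg *arg)
 *	{
 *		return drmIoctl(fd, DRM_IOCTL_PSB_STOLEN_MEMORY, arg);
 *	}
 */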
134
135static void psb_lastclose(struct drm_device *dev)
136{
137 return;
138}
139
140static void psb_do_takedown(struct drm_device *dev)
141{
142}
143
144static int psb_do_init(struct drm_device *dev)
145{
146 struct drm_psb_private *dev_priv = dev->dev_private;
147 struct psb_gtt *pg = &dev_priv->gtt;
148
149 uint32_t stolen_gtt;
150
151 int ret = -ENOMEM;
152
153 if (pg->mmu_gatt_start & 0x0FFFFFFF) {
154 dev_err(dev->dev, "Gatt must be 256M aligned. This is a bug.\n");
155 ret = -EINVAL;
156 goto out_err;
157 }
158
159
160 stolen_gtt = (pg->stolen_size >> PAGE_SHIFT) * 4;
161 stolen_gtt = (stolen_gtt + PAGE_SIZE - 1) >> PAGE_SHIFT;
162 stolen_gtt =
163 (stolen_gtt < pg->gtt_pages) ? stolen_gtt : pg->gtt_pages;
164
165 dev_priv->gatt_free_offset = pg->mmu_gatt_start +
166 (stolen_gtt << PAGE_SHIFT) * 1024;
167
168 if (1 || drm_debug) {
169 uint32_t core_id = PSB_RSGX32(PSB_CR_CORE_ID);
170 uint32_t core_rev = PSB_RSGX32(PSB_CR_CORE_REVISION);
171 DRM_INFO("SGX core id = 0x%08x\n", core_id);
172 DRM_INFO("SGX core rev major = 0x%02x, minor = 0x%02x\n",
173 (core_rev & _PSB_CC_REVISION_MAJOR_MASK) >>
174 _PSB_CC_REVISION_MAJOR_SHIFT,
175 (core_rev & _PSB_CC_REVISION_MINOR_MASK) >>
176 _PSB_CC_REVISION_MINOR_SHIFT);
177		DRM_INFO("SGX core rev maintenance = 0x%02x, designer = 0x%02x\n",
179 (core_rev & _PSB_CC_REVISION_MAINTENANCE_MASK) >>
180 _PSB_CC_REVISION_MAINTENANCE_SHIFT,
181 (core_rev & _PSB_CC_REVISION_DESIGNER_MASK) >>
182 _PSB_CC_REVISION_DESIGNER_SHIFT);
183 }
184
185
186 spin_lock_init(&dev_priv->irqmask_lock);
187 spin_lock_init(&dev_priv->lock_2d);
188
189 PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK0);
190 PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK1);
191 PSB_RSGX32(PSB_CR_BIF_BANK1);
192 PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) | _PSB_MMU_ER_MASK,
193 PSB_CR_BIF_CTRL);
194 psb_spank(dev_priv);
195
196 /* mmu_gatt ?? */
197 PSB_WSGX32(pg->gatt_start, PSB_CR_BIF_TWOD_REQ_BASE);
198 return 0;
199out_err:
200 psb_do_takedown(dev);
201 return ret;
202}
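/*
 * Worked example (illustrative 32MB of stolen memory): 32MB covers 8192
 * pages needing 8192 * 4 = 32768 bytes of PTEs, which rounds up to 8 GTT
 * pages. Each GTT page maps 1024 * 4K = 4MB, so gatt_free_offset lands
 * 8 * 4MB = 32MB past mmu_gatt_start, just beyond the stolen region.
 */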
203
204static int psb_driver_unload(struct drm_device *dev)
205{
206 struct drm_psb_private *dev_priv = dev->dev_private;
207
208 /* Kill vblank etc here */
209
210 gma_backlight_exit(dev);
211
212 psb_modeset_cleanup(dev);
213
214 if (dev_priv) {
215 psb_lid_timer_takedown(dev_priv);
216 gma_intel_opregion_exit(dev);
217
218 if (dev_priv->ops->chip_teardown)
219 dev_priv->ops->chip_teardown(dev);
220 psb_do_takedown(dev);
221
222
223 if (dev_priv->pf_pd) {
224 psb_mmu_free_pagedir(dev_priv->pf_pd);
225 dev_priv->pf_pd = NULL;
226 }
227 if (dev_priv->mmu) {
228 struct psb_gtt *pg = &dev_priv->gtt;
229
230 down_read(&pg->sem);
231 psb_mmu_remove_pfn_sequence(
232 psb_mmu_get_default_pd
233 (dev_priv->mmu),
234 pg->mmu_gatt_start,
235 dev_priv->vram_stolen_size >> PAGE_SHIFT);
236 up_read(&pg->sem);
237 psb_mmu_driver_takedown(dev_priv->mmu);
238 dev_priv->mmu = NULL;
239 }
240 psb_gtt_takedown(dev);
241 if (dev_priv->scratch_page) {
242 __free_page(dev_priv->scratch_page);
243 dev_priv->scratch_page = NULL;
244 }
245 if (dev_priv->vdc_reg) {
246 iounmap(dev_priv->vdc_reg);
247 dev_priv->vdc_reg = NULL;
248 }
249 if (dev_priv->sgx_reg) {
250 iounmap(dev_priv->sgx_reg);
251 dev_priv->sgx_reg = NULL;
252 }
253
254 kfree(dev_priv);
255 dev->dev_private = NULL;
256
257 /*destroy VBT data*/
258 psb_intel_destroy_bios(dev);
259 }
260
261 gma_power_uninit(dev);
262
263 return 0;
264}
265
266
267static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
268{
269 struct drm_psb_private *dev_priv;
270 unsigned long resource_start;
271 struct psb_gtt *pg;
272 unsigned long irqflags;
273 int ret = -ENOMEM;
274 uint32_t tt_pages;
275 struct drm_connector *connector;
276 struct psb_intel_encoder *psb_intel_encoder;
277
278 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
279 if (dev_priv == NULL)
280 return -ENOMEM;
281
282 dev_priv->ops = (struct psb_ops *)chipset;
283 dev_priv->dev = dev;
284 dev->dev_private = (void *) dev_priv;
285
286 if (!IS_PSB(dev)) {
287 if (pci_enable_msi(dev->pdev))
288 dev_warn(dev->dev, "Enabling MSI failed!\n");
289 }
290
291 dev_priv->num_pipe = dev_priv->ops->pipes;
292
293 resource_start = pci_resource_start(dev->pdev, PSB_MMIO_RESOURCE);
294
295 dev_priv->vdc_reg =
296 ioremap(resource_start + PSB_VDC_OFFSET, PSB_VDC_SIZE);
297 if (!dev_priv->vdc_reg)
298 goto out_err;
299
300 dev_priv->sgx_reg = ioremap(resource_start + dev_priv->ops->sgx_offset,
301 PSB_SGX_SIZE);
302 if (!dev_priv->sgx_reg)
303 goto out_err;
304
305 ret = dev_priv->ops->chip_setup(dev);
306 if (ret)
307 goto out_err;
308
309 /* Init OSPM support */
310 gma_power_init(dev);
311
312 ret = -ENOMEM;
313
314 dev_priv->scratch_page = alloc_page(GFP_DMA32 | __GFP_ZERO);
315 if (!dev_priv->scratch_page)
316 goto out_err;
317
318 set_pages_uc(dev_priv->scratch_page, 1);
319
320 ret = psb_gtt_init(dev, 0);
321 if (ret)
322 goto out_err;
323
324 dev_priv->mmu = psb_mmu_driver_init((void *)0,
325 drm_psb_trap_pagefaults, 0,
326 dev_priv);
327 if (!dev_priv->mmu)
328 goto out_err;
329
330 pg = &dev_priv->gtt;
331
332 tt_pages = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ?
333 (pg->gatt_pages) : PSB_TT_PRIV0_PLIMIT;
334
335
336 dev_priv->pf_pd = psb_mmu_alloc_pd(dev_priv->mmu, 1, 0);
337 if (!dev_priv->pf_pd)
338 goto out_err;
339
340 psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
341 psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
342
343 ret = psb_do_init(dev);
344 if (ret)
345		goto out_err;
346
347 PSB_WSGX32(0x20000000, PSB_CR_PDS_EXEC_BASE);
348 PSB_WSGX32(0x30000000, PSB_CR_BIF_3D_REQ_BASE);
349
350/* igd_opregion_init(&dev_priv->opregion_dev); */
351 acpi_video_register();
352 if (dev_priv->lid_state)
353 psb_lid_timer_init(dev_priv);
354
355 ret = drm_vblank_init(dev, dev_priv->num_pipe);
356 if (ret)
357 goto out_err;
358
359 /*
360 * Install interrupt handlers prior to powering off SGX or else we will
361 * crash.
362 */
363 dev_priv->vdc_irq_mask = 0;
364 dev_priv->pipestat[0] = 0;
365 dev_priv->pipestat[1] = 0;
366 dev_priv->pipestat[2] = 0;
367 spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
368 PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
369 PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R);
370 PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R);
371 spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
372 if (IS_PSB(dev) && drm_core_check_feature(dev, DRIVER_MODESET))
373 drm_irq_install(dev);
374
375 dev->vblank_disable_allowed = 1;
376
377 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
378
379 dev->driver->get_vblank_counter = psb_get_vblank_counter;
380
381 psb_modeset_init(dev);
382 psb_fbdev_init(dev);
383 drm_kms_helper_poll_init(dev);
384
385 /* Only add backlight support if we have LVDS output */
386 list_for_each_entry(connector, &dev->mode_config.connector_list,
387 head) {
388 psb_intel_encoder = psb_intel_attached_encoder(connector);
389
390 switch (psb_intel_encoder->type) {
391 case INTEL_OUTPUT_LVDS:
392 case INTEL_OUTPUT_MIPI:
393 ret = gma_backlight_init(dev);
394 break;
395 }
396 }
397
398 if (ret)
399		goto out_err;
400#if 0
401 /*enable runtime pm at last*/
402 pm_runtime_enable(&dev->pdev->dev);
403 pm_runtime_set_active(&dev->pdev->dev);
404#endif
405 /*Intel drm driver load is done, continue doing pvr load*/
406 return 0;
407out_err:
408 psb_driver_unload(dev);
409 return ret;
410}
411
412int psb_driver_device_is_agp(struct drm_device *dev)
413{
414 return 0;
415}
416
417static inline void get_brightness(struct backlight_device *bd)
418{
419#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
420 if (bd) {
421 bd->props.brightness = bd->ops->get_brightness(bd);
422 backlight_update_status(bd);
423 }
424#endif
425}
426
427static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data,
428 struct drm_file *file_priv)
429{
430 struct drm_psb_private *dev_priv = psb_priv(dev);
431 uint32_t *arg = data;
432
433 dev_priv->blc_adj2 = *arg;
434 get_brightness(dev_priv->backlight_device);
435 return 0;
436}
437
438static int psb_adb_ioctl(struct drm_device *dev, void *data,
439 struct drm_file *file_priv)
440{
441 struct drm_psb_private *dev_priv = psb_priv(dev);
442 uint32_t *arg = data;
443
444 dev_priv->blc_adj1 = *arg;
445 get_brightness(dev_priv->backlight_device);
446 return 0;
447}
448
449static int psb_gamma_ioctl(struct drm_device *dev, void *data,
450 struct drm_file *file_priv)
451{
452 struct drm_psb_dpst_lut_arg *lut_arg = data;
453 struct drm_mode_object *obj;
454 struct drm_crtc *crtc;
455 struct drm_connector *connector;
456 struct psb_intel_crtc *psb_intel_crtc;
457 int i = 0;
458 int32_t obj_id;
459
460 obj_id = lut_arg->output_id;
461 obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_CONNECTOR);
462 if (!obj) {
463 dev_dbg(dev->dev, "Invalid Connector object.\n");
464 return -EINVAL;
465 }
466
467 connector = obj_to_connector(obj);
468 crtc = connector->encoder->crtc;
469 psb_intel_crtc = to_psb_intel_crtc(crtc);
470
471 for (i = 0; i < 256; i++)
472 psb_intel_crtc->lut_adj[i] = lut_arg->lut[i];
473
474 psb_intel_crtc_load_lut(crtc);
475
476 return 0;
477}
478
479static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
480 struct drm_file *file_priv)
481{
482 uint32_t obj_id;
483 uint16_t op;
484 struct drm_mode_modeinfo *umode;
485 struct drm_display_mode *mode = NULL;
486 struct drm_psb_mode_operation_arg *arg;
487 struct drm_mode_object *obj;
488 struct drm_connector *connector;
489 struct drm_connector_helper_funcs *connector_funcs;
490 int ret = 0;
491 int resp = MODE_OK;
492
493 arg = (struct drm_psb_mode_operation_arg *)data;
494 obj_id = arg->obj_id;
495 op = arg->operation;
496
497 switch (op) {
498 case PSB_MODE_OPERATION_MODE_VALID:
499 umode = &arg->mode;
500
501 mutex_lock(&dev->mode_config.mutex);
502
503 obj = drm_mode_object_find(dev, obj_id,
504 DRM_MODE_OBJECT_CONNECTOR);
505 if (!obj) {
506 ret = -EINVAL;
507 goto mode_op_out;
508 }
509
510 connector = obj_to_connector(obj);
511
512 mode = drm_mode_create(dev);
513 if (!mode) {
514 ret = -ENOMEM;
515 goto mode_op_out;
516 }
517
518 /* drm_crtc_convert_umode(mode, umode); */
519 {
520 mode->clock = umode->clock;
521 mode->hdisplay = umode->hdisplay;
522 mode->hsync_start = umode->hsync_start;
523 mode->hsync_end = umode->hsync_end;
524 mode->htotal = umode->htotal;
525 mode->hskew = umode->hskew;
526 mode->vdisplay = umode->vdisplay;
527 mode->vsync_start = umode->vsync_start;
528 mode->vsync_end = umode->vsync_end;
529 mode->vtotal = umode->vtotal;
530 mode->vscan = umode->vscan;
531 mode->vrefresh = umode->vrefresh;
532 mode->flags = umode->flags;
533 mode->type = umode->type;
534 strncpy(mode->name, umode->name, DRM_DISPLAY_MODE_LEN);
535 mode->name[DRM_DISPLAY_MODE_LEN-1] = 0;
536 }
537
538 connector_funcs = (struct drm_connector_helper_funcs *)
539 connector->helper_private;
540
541 if (connector_funcs->mode_valid) {
542 resp = connector_funcs->mode_valid(connector, mode);
543 arg->data = resp;
544 }
545
546 /*do some clean up work*/
547 if (mode)
548 drm_mode_destroy(dev, mode);
549mode_op_out:
550 mutex_unlock(&dev->mode_config.mutex);
551 return ret;
552
553 default:
554 dev_dbg(dev->dev, "Unsupported psb mode operation\n");
555 return -EOPNOTSUPP;
556 }
557
558 return 0;
559}
560
561static int psb_stolen_memory_ioctl(struct drm_device *dev, void *data,
562 struct drm_file *file_priv)
563{
564 struct drm_psb_private *dev_priv = psb_priv(dev);
565 struct drm_psb_stolen_memory_arg *arg = data;
566
567 arg->base = dev_priv->stolen_base;
568 arg->size = dev_priv->vram_stolen_size;
569
570 return 0;
571}
572
573static int psb_driver_open(struct drm_device *dev, struct drm_file *priv)
574{
575 return 0;
576}
577
578static void psb_driver_close(struct drm_device *dev, struct drm_file *priv)
579{
580}
581
582static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd,
583 unsigned long arg)
584{
585 struct drm_file *file_priv = filp->private_data;
586 struct drm_device *dev = file_priv->minor->dev;
587 struct drm_psb_private *dev_priv = dev->dev_private;
588 static unsigned int runtime_allowed;
589
590 if (runtime_allowed == 1 && dev_priv->is_lvds_on) {
591 runtime_allowed++;
592 pm_runtime_allow(&dev->pdev->dev);
593 dev_priv->rpm_enabled = 1;
594 }
595 return drm_ioctl(filp, cmd, arg);
596 /* FIXME: do we need to wrap the other side of this */
597}
598
599
600/* When a client dies:
601 * - Check for and clean up flipped page state
602 */
603void psb_driver_preclose(struct drm_device *dev, struct drm_file *priv)
604{
605}
606
607static void psb_remove(struct pci_dev *pdev)
608{
609 struct drm_device *dev = pci_get_drvdata(pdev);
610 drm_put_dev(dev);
611}
612
613static const struct dev_pm_ops psb_pm_ops = {
614 .resume = gma_power_resume,
615 .suspend = gma_power_suspend,
616 .runtime_suspend = psb_runtime_suspend,
617 .runtime_resume = psb_runtime_resume,
618 .runtime_idle = psb_runtime_idle,
619};
620
621static struct vm_operations_struct psb_gem_vm_ops = {
622 .fault = psb_gem_fault,
623 .open = drm_gem_vm_open,
624 .close = drm_gem_vm_close,
625};
626
627static const struct file_operations psb_gem_fops = {
628 .owner = THIS_MODULE,
629 .open = drm_open,
630 .release = drm_release,
631 .unlocked_ioctl = psb_unlocked_ioctl,
632 .mmap = drm_gem_mmap,
633 .poll = drm_poll,
634 .fasync = drm_fasync,
635 .read = drm_read,
636};
637
638static struct drm_driver driver = {
639	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
640			   DRIVER_IRQ_VBL | DRIVER_MODESET | DRIVER_GEM,
641 .load = psb_driver_load,
642 .unload = psb_driver_unload,
643
644 .ioctls = psb_ioctls,
645 .num_ioctls = DRM_ARRAY_SIZE(psb_ioctls),
646 .device_is_agp = psb_driver_device_is_agp,
647 .irq_preinstall = psb_irq_preinstall,
648 .irq_postinstall = psb_irq_postinstall,
649 .irq_uninstall = psb_irq_uninstall,
650 .irq_handler = psb_irq_handler,
651 .enable_vblank = psb_enable_vblank,
652 .disable_vblank = psb_disable_vblank,
653 .get_vblank_counter = psb_get_vblank_counter,
654 .lastclose = psb_lastclose,
655 .open = psb_driver_open,
656 .preclose = psb_driver_preclose,
657 .postclose = psb_driver_close,
658 .reclaim_buffers = drm_core_reclaim_buffers,
659
660 .gem_init_object = psb_gem_init_object,
661 .gem_free_object = psb_gem_free_object,
662 .gem_vm_ops = &psb_gem_vm_ops,
663 .dumb_create = psb_gem_dumb_create,
664 .dumb_map_offset = psb_gem_dumb_map_gtt,
665 .dumb_destroy = psb_gem_dumb_destroy,
666 .fops = &psb_gem_fops,
667 .name = DRIVER_NAME,
668 .desc = DRIVER_DESC,
669 .date = PSB_DRM_DRIVER_DATE,
670 .major = PSB_DRM_DRIVER_MAJOR,
671 .minor = PSB_DRM_DRIVER_MINOR,
672 .patchlevel = PSB_DRM_DRIVER_PATCHLEVEL
673};
674
675static struct pci_driver psb_pci_driver = {
676 .name = DRIVER_NAME,
677 .id_table = pciidlist,
678 .probe = psb_probe,
679 .remove = psb_remove,
680 .driver.pm = &psb_pm_ops,
681};
682
683static int psb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
684{
685 return drm_get_pci_dev(pdev, ent, &driver);
686}
687
688static int __init psb_init(void)
689{
690 return drm_pci_init(&driver, &psb_pci_driver);
691}
692
693static void __exit psb_exit(void)
694{
695 drm_pci_exit(&driver, &psb_pci_driver);
696}
697
698late_initcall(psb_init);
699module_exit(psb_exit);
700
701MODULE_AUTHOR("Alan Cox <alan@linux.intel.com> and others");
702MODULE_DESCRIPTION(DRIVER_DESC);
703MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
new file mode 100644
index 00000000000..eb1568a0da9
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -0,0 +1,956 @@
1/**************************************************************************
2 * Copyright (c) 2007-2011, Intel Corporation.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 **************************************************************************/
19
20#ifndef _PSB_DRV_H_
21#define _PSB_DRV_H_
22
23#include <linux/kref.h>
24
25#include <drm/drmP.h>
26#include "drm_global.h"
27#include "gem_glue.h"
28#include "gma_drm.h"
29#include "psb_reg.h"
30#include "psb_intel_drv.h"
31#include "gtt.h"
32#include "power.h"
33#include "oaktrail.h"
34
35/* Append new drm mode definition here, align with libdrm definition */
36#define DRM_MODE_SCALE_NO_SCALE 2
37
38enum {
39 CHIP_PSB_8108 = 0, /* Poulsbo */
40 CHIP_PSB_8109 = 1, /* Poulsbo */
41 CHIP_MRST_4100 = 2, /* Moorestown/Oaktrail */
42 CHIP_MFLD_0130 = 3, /* Medfield */
43};
44
45#define IS_PSB(dev) (((dev)->pci_device & 0xfffe) == 0x8108)
46#define IS_MRST(dev) (((dev)->pci_device & 0xfffc) == 0x4100)
47#define IS_MFLD(dev) (((dev)->pci_device & 0xfff8) == 0x0130)
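/* e.g. 0x8108 and 0x8109 differ only in bit 0, so masking with 0xfffe
   matches both Poulsbo IDs; 0xfffc likewise covers 0x4100-0x4103 */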
48
49/*
50 * Driver definitions
51 */
52
53#define DRIVER_NAME "gma500"
54#define DRIVER_DESC "DRM driver for the Intel GMA500"
55
56#define PSB_DRM_DRIVER_DATE "2011-06-06"
57#define PSB_DRM_DRIVER_MAJOR 1
58#define PSB_DRM_DRIVER_MINOR 0
59#define PSB_DRM_DRIVER_PATCHLEVEL 0
60
61/*
62 * Hardware offsets
63 */
64#define PSB_VDC_OFFSET 0x00000000
65#define PSB_VDC_SIZE 0x000080000
66#define MRST_MMIO_SIZE 0x0000C0000
67#define MDFLD_MMIO_SIZE 0x000100000
68#define PSB_SGX_SIZE 0x8000
69#define PSB_SGX_OFFSET 0x00040000
70#define MRST_SGX_OFFSET 0x00080000
71/*
72 * PCI resource identifiers
73 */
74#define PSB_MMIO_RESOURCE 0
75#define PSB_GATT_RESOURCE 2
76#define PSB_GTT_RESOURCE 3
77/*
78 * PCI configuration
79 */
80#define PSB_GMCH_CTRL 0x52
81#define PSB_BSM 0x5C
82#define _PSB_GMCH_ENABLED 0x4
83#define PSB_PGETBL_CTL 0x2020
84#define _PSB_PGETBL_ENABLED 0x00000001
85#define PSB_SGX_2D_SLAVE_PORT 0x4000
86
87/* To get rid of */
88#define PSB_TT_PRIV0_LIMIT (256*1024*1024)
89#define PSB_TT_PRIV0_PLIMIT (PSB_TT_PRIV0_LIMIT >> PAGE_SHIFT)
90
91/*
92 * SGX side MMU definitions (these can probably go)
93 */
94
95/*
96 * Flags for external memory type field.
97 */
98#define PSB_MMU_CACHED_MEMORY 0x0001 /* Bind to MMU only */
99#define PSB_MMU_RO_MEMORY 0x0002 /* MMU RO memory */
100#define PSB_MMU_WO_MEMORY 0x0004 /* MMU WO memory */
101/*
102 * PTE's and PDE's
103 */
104#define PSB_PDE_MASK 0x003FFFFF
105#define PSB_PDE_SHIFT 22
106#define PSB_PTE_SHIFT 12
107/*
108 * Cache control
109 */
110#define PSB_PTE_VALID 0x0001 /* PTE / PDE valid */
111#define PSB_PTE_WO 0x0002 /* Write only */
112#define PSB_PTE_RO 0x0004 /* Read only */
113#define PSB_PTE_CACHED 0x0008 /* CPU cache coherent */
114
115/*
116 * VDC registers and bits
117 */
118#define PSB_MSVDX_CLOCKGATING 0x2064
119#define PSB_TOPAZ_CLOCKGATING 0x2068
120#define PSB_HWSTAM 0x2098
121#define PSB_INSTPM 0x20C0
122#define PSB_INT_IDENTITY_R 0x20A4
123#define _MDFLD_PIPEC_EVENT_FLAG (1<<2)
124#define _MDFLD_PIPEC_VBLANK_FLAG (1<<3)
125#define _PSB_DPST_PIPEB_FLAG (1<<4)
126#define _MDFLD_PIPEB_EVENT_FLAG (1<<4)
127#define _PSB_VSYNC_PIPEB_FLAG (1<<5)
128#define _PSB_DPST_PIPEA_FLAG (1<<6)
129#define _PSB_PIPEA_EVENT_FLAG (1<<6)
130#define _PSB_VSYNC_PIPEA_FLAG (1<<7)
131#define _MDFLD_MIPIA_FLAG (1<<16)
132#define _MDFLD_MIPIC_FLAG (1<<17)
133#define _PSB_IRQ_SGX_FLAG (1<<18)
134#define _PSB_IRQ_MSVDX_FLAG (1<<19)
135#define _LNC_IRQ_TOPAZ_FLAG (1<<20)
136
137#define _PSB_PIPE_EVENT_FLAG (_PSB_VSYNC_PIPEA_FLAG | \
138 _PSB_VSYNC_PIPEB_FLAG)
139
140/* This flag includes all the display IRQ bits except the vblank IRQs. */
141#define _MDFLD_DISP_ALL_IRQ_FLAG (_MDFLD_PIPEC_EVENT_FLAG | \
142 _MDFLD_PIPEB_EVENT_FLAG | \
143 _PSB_PIPEA_EVENT_FLAG | \
144 _PSB_VSYNC_PIPEA_FLAG | \
145 _MDFLD_MIPIA_FLAG | \
146 _MDFLD_MIPIC_FLAG)
148#define PSB_INT_MASK_R 0x20A8
149#define PSB_INT_ENABLE_R 0x20A0
150
151#define _PSB_MMU_ER_MASK 0x0001FF00
152#define _PSB_MMU_ER_HOST (1 << 16)
153#define GPIOA 0x5010
154#define GPIOB 0x5014
155#define GPIOC 0x5018
156#define GPIOD 0x501c
157#define GPIOE 0x5020
158#define GPIOF 0x5024
159#define GPIOG 0x5028
160#define GPIOH 0x502c
161#define GPIO_CLOCK_DIR_MASK (1 << 0)
162#define GPIO_CLOCK_DIR_IN (0 << 1)
163#define GPIO_CLOCK_DIR_OUT (1 << 1)
164#define GPIO_CLOCK_VAL_MASK (1 << 2)
165#define GPIO_CLOCK_VAL_OUT (1 << 3)
166#define GPIO_CLOCK_VAL_IN (1 << 4)
167#define GPIO_CLOCK_PULLUP_DISABLE (1 << 5)
168#define GPIO_DATA_DIR_MASK (1 << 8)
169#define GPIO_DATA_DIR_IN (0 << 9)
170#define GPIO_DATA_DIR_OUT (1 << 9)
171#define GPIO_DATA_VAL_MASK (1 << 10)
172#define GPIO_DATA_VAL_OUT (1 << 11)
173#define GPIO_DATA_VAL_IN (1 << 12)
174#define GPIO_DATA_PULLUP_DISABLE (1 << 13)
175
176#define VCLK_DIVISOR_VGA0 0x6000
177#define VCLK_DIVISOR_VGA1 0x6004
178#define VCLK_POST_DIV 0x6010
179
180#define PSB_COMM_2D (PSB_ENGINE_2D << 4)
181#define PSB_COMM_3D (PSB_ENGINE_3D << 4)
182#define PSB_COMM_TA (PSB_ENGINE_TA << 4)
183#define PSB_COMM_HP (PSB_ENGINE_HP << 4)
184#define PSB_COMM_USER_IRQ (1024 >> 2)
185#define PSB_COMM_USER_IRQ_LOST (PSB_COMM_USER_IRQ + 1)
186#define PSB_COMM_FW (2048 >> 2)
187
188#define PSB_UIRQ_VISTEST 1
189#define PSB_UIRQ_OOM_REPLY 2
190#define PSB_UIRQ_FIRE_TA_REPLY 3
191#define PSB_UIRQ_FIRE_RASTER_REPLY 4
192
193#define PSB_2D_SIZE (256*1024*1024)
194#define PSB_MAX_RELOC_PAGES 1024
195
196#define PSB_LOW_REG_OFFS 0x0204
197#define PSB_HIGH_REG_OFFS 0x0600
198
199#define PSB_NUM_VBLANKS 2
200
209#define PSB_WATCHDOG_DELAY (DRM_HZ * 2)
210#define PSB_LID_DELAY (DRM_HZ / 10)
211
212#define MDFLD_PNW_B0 0x04
213#define MDFLD_PNW_C0 0x08
214
215#define MDFLD_DSR_2D_3D_0 (1 << 0)
216#define MDFLD_DSR_2D_3D_2 (1 << 1)
217#define MDFLD_DSR_CURSOR_0 (1 << 2)
218#define MDFLD_DSR_CURSOR_2 (1 << 3)
219#define MDFLD_DSR_OVERLAY_0 (1 << 4)
220#define MDFLD_DSR_OVERLAY_2 (1 << 5)
221#define MDFLD_DSR_MIPI_CONTROL (1 << 6)
222#define MDFLD_DSR_DAMAGE_MASK_0 ((1 << 0) | (1 << 2) | (1 << 4))
223#define MDFLD_DSR_DAMAGE_MASK_2 ((1 << 1) | (1 << 3) | (1 << 5))
224#define MDFLD_DSR_2D_3D (MDFLD_DSR_2D_3D_0 | MDFLD_DSR_2D_3D_2)
225
226#define MDFLD_DSR_RR 45
227#define MDFLD_DPU_ENABLE (1 << 31)
228#define MDFLD_DSR_FULLSCREEN (1 << 30)
229#define MDFLD_DSR_DELAY (DRM_HZ / MDFLD_DSR_RR)
230
231#define PSB_PWR_STATE_ON 1
232#define PSB_PWR_STATE_OFF 2
233
234#define PSB_PMPOLICY_NOPM 0
235#define PSB_PMPOLICY_CLOCKGATING 1
236#define PSB_PMPOLICY_POWERDOWN 2
237
238#define PSB_PMSTATE_POWERUP 0
239#define PSB_PMSTATE_CLOCKGATED 1
240#define PSB_PMSTATE_POWERDOWN 2
241#define PSB_PCIx_MSI_ADDR_LOC 0x94
242#define PSB_PCIx_MSI_DATA_LOC 0x98
243
244/* Medfield crystal settings */
245#define KSEL_CRYSTAL_19 1
246#define KSEL_BYPASS_19 5
247#define KSEL_BYPASS_25 6
248#define KSEL_BYPASS_83_100 7
249
250struct opregion_header;
251struct opregion_acpi;
252struct opregion_swsci;
253struct opregion_asle;
254
255struct psb_intel_opregion {
256 struct opregion_header *header;
257 struct opregion_acpi *acpi;
258 struct opregion_swsci *swsci;
259 struct opregion_asle *asle;
260 int enabled;
261};
262
263struct sdvo_device_mapping {
264 u8 initialized;
265 u8 dvo_port;
266 u8 slave_addr;
267 u8 dvo_wiring;
268 u8 i2c_pin;
269 u8 i2c_speed;
270 u8 ddc_pin;
271};
272
273struct intel_gmbus {
274 struct i2c_adapter adapter;
275 struct i2c_adapter *force_bit;
276 u32 reg0;
277};
278
279struct psb_ops;
280
281#define PSB_NUM_PIPE 3
282
283struct drm_psb_private {
284 struct drm_device *dev;
285 const struct psb_ops *ops;
286
287 struct psb_gtt gtt;
288
289 /* GTT Memory manager */
290 struct psb_gtt_mm *gtt_mm;
291 struct page *scratch_page;
292 u32 *gtt_map;
293 uint32_t stolen_base;
294 void *vram_addr;
295 unsigned long vram_stolen_size;
296 int gtt_initialized;
297 u16 gmch_ctrl; /* Saved GTT setup */
298 u32 pge_ctl;
299
300 struct mutex gtt_mutex;
301 struct resource *gtt_mem; /* Our PCI resource */
302
303 struct psb_mmu_driver *mmu;
304 struct psb_mmu_pd *pf_pd;
305
306 /*
307 * Register base
308 */
309
310 uint8_t *sgx_reg;
311 uint8_t *vdc_reg;
312 uint32_t gatt_free_offset;
313
314 /*
315 * Fencing / irq.
316 */
317
318 uint32_t vdc_irq_mask;
319 uint32_t pipestat[PSB_NUM_PIPE];
320
321 spinlock_t irqmask_lock;
322
323 /*
324 * Power
325 */
326
327 bool suspended;
328 bool display_power;
329 int display_count;
330
331 /*
332 * Modesetting
333 */
334 struct psb_intel_mode_device mode_dev;
335
336 struct drm_crtc *plane_to_crtc_mapping[PSB_NUM_PIPE];
337 struct drm_crtc *pipe_to_crtc_mapping[PSB_NUM_PIPE];
338 uint32_t num_pipe;
339
340 /*
341 * OSPM info (Power management base) (can go ?)
342 */
343 uint32_t ospm_base;
344
345 /*
346 * Sizes info
347 */
348
349 u32 fuse_reg_value;
350 u32 video_device_fuse;
351
352 /* PCI revision ID for B0:D2:F0 */
353 uint8_t platform_rev_id;
354
355 /* gmbus */
356 struct intel_gmbus *gmbus;
357
358 /* Used by SDVO */
359 int crt_ddc_pin;
360 /* FIXME: The mappings should be parsed from bios but for now we can
361 pretend there are no mappings available */
362 struct sdvo_device_mapping sdvo_mappings[2];
363 u32 hotplug_supported_mask;
364 struct drm_property *broadcast_rgb_property;
365 struct drm_property *force_audio_property;
366
367 /*
368 * LVDS info
369 */
370 int backlight_duty_cycle; /* restore backlight to this value */
371 bool panel_wants_dither;
372 struct drm_display_mode *panel_fixed_mode;
373 struct drm_display_mode *lfp_lvds_vbt_mode;
374 struct drm_display_mode *sdvo_lvds_vbt_mode;
375
376 struct bdb_lvds_backlight *lvds_bl; /* LVDS backlight info from VBT */
377 struct psb_intel_i2c_chan *lvds_i2c_bus; /* FIXME: Remove this? */
378
379 /* Feature bits from the VBIOS */
380 unsigned int int_tv_support:1;
381 unsigned int lvds_dither:1;
382 unsigned int lvds_vbt:1;
383 unsigned int int_crt_support:1;
384 unsigned int lvds_use_ssc:1;
385 int lvds_ssc_freq;
386 bool is_lvds_on;
387 bool is_mipi_on;
388 u32 mipi_ctrl_display;
389
390 unsigned int core_freq;
391 uint32_t iLVDS_enable;
392
393 /* Runtime PM state */
394 int rpm_enabled;
395
396 /* MID specific */
397 struct oaktrail_vbt vbt_data;
398 struct oaktrail_gct_data gct_data;
399
400 /* MIPI Panel type etc */
401 int panel_id;
402 bool dual_mipi; /* dual display - DPI & DBI */
403 bool dpi_panel_on; /* The DPI panel power is on */
404	bool dpi_panel_on2;	/* The second DPI panel power is on */
405	bool dbi_panel_on;	/* The DBI panel power is on */
406	bool dbi_panel_on2;	/* The second DBI panel power is on */
407 u32 dsr_fb_update; /* DSR FB update counter */
408
409 /* Moorestown HDMI state */
410 struct oaktrail_hdmi_dev *hdmi_priv;
411
412 /* Moorestown pipe config register value cache */
413 uint32_t pipeconf;
414 uint32_t pipeconf1;
415 uint32_t pipeconf2;
416
417 /* Moorestown plane control register value cache */
418 uint32_t dspcntr;
419 uint32_t dspcntr1;
420 uint32_t dspcntr2;
421
422 /* Moorestown MM backlight cache */
423 uint8_t saveBKLTCNT;
424 uint8_t saveBKLTREQ;
425 uint8_t saveBKLTBRTL;
426
427 /*
428 * Register state
429 */
430 uint32_t saveDSPACNTR;
431 uint32_t saveDSPBCNTR;
432 uint32_t savePIPEACONF;
433 uint32_t savePIPEBCONF;
434 uint32_t savePIPEASRC;
435 uint32_t savePIPEBSRC;
436 uint32_t saveFPA0;
437 uint32_t saveFPA1;
438 uint32_t saveDPLL_A;
439 uint32_t saveDPLL_A_MD;
440 uint32_t saveHTOTAL_A;
441 uint32_t saveHBLANK_A;
442 uint32_t saveHSYNC_A;
443 uint32_t saveVTOTAL_A;
444 uint32_t saveVBLANK_A;
445 uint32_t saveVSYNC_A;
446 uint32_t saveDSPASTRIDE;
447 uint32_t saveDSPASIZE;
448 uint32_t saveDSPAPOS;
449 uint32_t saveDSPABASE;
450 uint32_t saveDSPASURF;
451 uint32_t saveDSPASTATUS;
452 uint32_t saveFPB0;
453 uint32_t saveFPB1;
454 uint32_t saveDPLL_B;
455 uint32_t saveDPLL_B_MD;
456 uint32_t saveHTOTAL_B;
457 uint32_t saveHBLANK_B;
458 uint32_t saveHSYNC_B;
459 uint32_t saveVTOTAL_B;
460 uint32_t saveVBLANK_B;
461 uint32_t saveVSYNC_B;
462 uint32_t saveDSPBSTRIDE;
463 uint32_t saveDSPBSIZE;
464 uint32_t saveDSPBPOS;
465 uint32_t saveDSPBBASE;
466 uint32_t saveDSPBSURF;
467 uint32_t saveDSPBSTATUS;
468 uint32_t saveVCLK_DIVISOR_VGA0;
469 uint32_t saveVCLK_DIVISOR_VGA1;
470 uint32_t saveVCLK_POST_DIV;
471 uint32_t saveVGACNTRL;
472 uint32_t saveADPA;
473 uint32_t saveLVDS;
474 uint32_t saveDVOA;
475 uint32_t saveDVOB;
476 uint32_t saveDVOC;
477 uint32_t savePP_ON;
478 uint32_t savePP_OFF;
479 uint32_t savePP_CONTROL;
480 uint32_t savePP_CYCLE;
481 uint32_t savePFIT_CONTROL;
482 uint32_t savePaletteA[256];
483 uint32_t savePaletteB[256];
484 uint32_t saveBLC_PWM_CTL2;
485 uint32_t saveBLC_PWM_CTL;
486 uint32_t saveCLOCKGATING;
487 uint32_t saveDSPARB;
488 uint32_t saveDSPATILEOFF;
489 uint32_t saveDSPBTILEOFF;
490 uint32_t saveDSPAADDR;
491 uint32_t saveDSPBADDR;
492 uint32_t savePFIT_AUTO_RATIOS;
493 uint32_t savePFIT_PGM_RATIOS;
494 uint32_t savePP_ON_DELAYS;
495 uint32_t savePP_OFF_DELAYS;
496 uint32_t savePP_DIVISOR;
497 uint32_t saveBSM;
498 uint32_t saveVBT;
499 uint32_t saveBCLRPAT_A;
500 uint32_t saveBCLRPAT_B;
501 uint32_t saveDSPALINOFF;
502 uint32_t saveDSPBLINOFF;
503 uint32_t savePERF_MODE;
504 uint32_t saveDSPFW1;
505 uint32_t saveDSPFW2;
506 uint32_t saveDSPFW3;
507 uint32_t saveDSPFW4;
508 uint32_t saveDSPFW5;
509 uint32_t saveDSPFW6;
510 uint32_t saveCHICKENBIT;
511 uint32_t saveDSPACURSOR_CTRL;
512 uint32_t saveDSPBCURSOR_CTRL;
513 uint32_t saveDSPACURSOR_BASE;
514 uint32_t saveDSPBCURSOR_BASE;
515 uint32_t saveDSPACURSOR_POS;
516 uint32_t saveDSPBCURSOR_POS;
517 uint32_t save_palette_a[256];
518 uint32_t save_palette_b[256];
519 uint32_t saveOV_OVADD;
520 uint32_t saveOV_OGAMC0;
521 uint32_t saveOV_OGAMC1;
522 uint32_t saveOV_OGAMC2;
523 uint32_t saveOV_OGAMC3;
524 uint32_t saveOV_OGAMC4;
525 uint32_t saveOV_OGAMC5;
526 uint32_t saveOVC_OVADD;
527 uint32_t saveOVC_OGAMC0;
528 uint32_t saveOVC_OGAMC1;
529 uint32_t saveOVC_OGAMC2;
530 uint32_t saveOVC_OGAMC3;
531 uint32_t saveOVC_OGAMC4;
532 uint32_t saveOVC_OGAMC5;
533
534 /* MSI reg save */
535 uint32_t msi_addr;
536 uint32_t msi_data;
537
538 /* Medfield specific register save state */
539 uint32_t saveHDMIPHYMISCCTL;
540 uint32_t saveHDMIB_CONTROL;
541 uint32_t saveDSPCCNTR;
542 uint32_t savePIPECCONF;
543 uint32_t savePIPECSRC;
544 uint32_t saveHTOTAL_C;
545 uint32_t saveHBLANK_C;
546 uint32_t saveHSYNC_C;
547 uint32_t saveVTOTAL_C;
548 uint32_t saveVBLANK_C;
549 uint32_t saveVSYNC_C;
550 uint32_t saveDSPCSTRIDE;
551 uint32_t saveDSPCSIZE;
552 uint32_t saveDSPCPOS;
553 uint32_t saveDSPCSURF;
554 uint32_t saveDSPCSTATUS;
555 uint32_t saveDSPCLINOFF;
556 uint32_t saveDSPCTILEOFF;
557 uint32_t saveDSPCCURSOR_CTRL;
558 uint32_t saveDSPCCURSOR_BASE;
559 uint32_t saveDSPCCURSOR_POS;
560 uint32_t save_palette_c[256];
561 uint32_t saveOV_OVADD_C;
562 uint32_t saveOV_OGAMC0_C;
563 uint32_t saveOV_OGAMC1_C;
564 uint32_t saveOV_OGAMC2_C;
565 uint32_t saveOV_OGAMC3_C;
566 uint32_t saveOV_OGAMC4_C;
567 uint32_t saveOV_OGAMC5_C;
568
569 /* DSI register save */
570 uint32_t saveDEVICE_READY_REG;
571 uint32_t saveINTR_EN_REG;
572 uint32_t saveDSI_FUNC_PRG_REG;
573 uint32_t saveHS_TX_TIMEOUT_REG;
574 uint32_t saveLP_RX_TIMEOUT_REG;
575 uint32_t saveTURN_AROUND_TIMEOUT_REG;
576 uint32_t saveDEVICE_RESET_REG;
577 uint32_t saveDPI_RESOLUTION_REG;
578 uint32_t saveHORIZ_SYNC_PAD_COUNT_REG;
579 uint32_t saveHORIZ_BACK_PORCH_COUNT_REG;
580 uint32_t saveHORIZ_FRONT_PORCH_COUNT_REG;
581 uint32_t saveHORIZ_ACTIVE_AREA_COUNT_REG;
582 uint32_t saveVERT_SYNC_PAD_COUNT_REG;
583 uint32_t saveVERT_BACK_PORCH_COUNT_REG;
584 uint32_t saveVERT_FRONT_PORCH_COUNT_REG;
585 uint32_t saveHIGH_LOW_SWITCH_COUNT_REG;
586 uint32_t saveINIT_COUNT_REG;
587 uint32_t saveMAX_RET_PAK_REG;
588 uint32_t saveVIDEO_FMT_REG;
589 uint32_t saveEOT_DISABLE_REG;
590 uint32_t saveLP_BYTECLK_REG;
591 uint32_t saveHS_LS_DBI_ENABLE_REG;
592 uint32_t saveTXCLKESC_REG;
593 uint32_t saveDPHY_PARAM_REG;
594 uint32_t saveMIPI_CONTROL_REG;
595 uint32_t saveMIPI;
596 uint32_t saveMIPI_C;
597
598 /* DPST register save */
599 uint32_t saveHISTOGRAM_INT_CONTROL_REG;
600 uint32_t saveHISTOGRAM_LOGIC_CONTROL_REG;
601 uint32_t savePWM_CONTROL_LOGIC;
602
603 /*
604 * DSI info.
605 */
606	void *dbi_dsr_info;
607	void *dbi_dpu_info;
608	void *dsi_configs[2];
609 /*
610 * LID-Switch
611 */
612 spinlock_t lid_lock;
613 struct timer_list lid_timer;
614 struct psb_intel_opregion opregion;
615 u32 *lid_state;
616 u32 lid_last_state;
617
618 /*
619 * Watchdog
620 */
621
622 uint32_t apm_reg;
623 uint16_t apm_base;
624
625 /*
626 * Used for modifying backlight from
627 * xrandr -- consider removing and using HAL instead
628 */
629 struct backlight_device *backlight_device;
630 struct drm_property *backlight_property;
631 uint32_t blc_adj1;
632 uint32_t blc_adj2;
633
634 void *fbdev;
635
636 /* 2D acceleration */
637 spinlock_t lock_2d;
638};
639
640
641/*
642 * Operations for each board type
643 */
644
645struct psb_ops {
646 const char *name;
647 unsigned int accel_2d:1;
648 int pipes; /* Number of output pipes */
649 int crtcs; /* Number of CRTCs */
650 int sgx_offset; /* Base offset of SGX device */
651
652 /* Sub functions */
653 struct drm_crtc_helper_funcs const *crtc_helper;
654 struct drm_crtc_funcs const *crtc_funcs;
655
656 /* Setup hooks */
657 int (*chip_setup)(struct drm_device *dev);
658 void (*chip_teardown)(struct drm_device *dev);
659
660 /* Display management hooks */
661 int (*output_init)(struct drm_device *dev);
662 /* Power management hooks */
663 void (*init_pm)(struct drm_device *dev);
664 int (*save_regs)(struct drm_device *dev);
665 int (*restore_regs)(struct drm_device *dev);
666 int (*power_up)(struct drm_device *dev);
667 int (*power_down)(struct drm_device *dev);
668
669 void (*lvds_bl_power)(struct drm_device *dev, bool on);
670#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
671 /* Backlight */
672 int (*backlight_init)(struct drm_device *dev);
673#endif
674 int i2c_bus; /* I2C bus identifier for Moorestown */
675};
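
/*
 * Editor's sketch (not in the original patch): roughly how a board file
 * might populate its psb_ops table. The name "example_chip_ops", the
 * numeric values and the two stub hooks are hypothetical; the real
 * instances are psb_chip_ops, oaktrail_chip_ops and cdv_chip_ops, and the
 * psb_intel_* funcs referenced here are declared further down this header.
 */
static int example_chip_setup(struct drm_device *dev)
{
	return 0;	/* map chip specific state, read fuses, etc. */
}

static int example_output_init(struct drm_device *dev)
{
	return 0;	/* probe and register this board's outputs */
}

static const struct psb_ops example_chip_ops = {
	.name		= "Example",
	.accel_2d	= 1,
	.pipes		= 2,
	.crtcs		= 2,
	.sgx_offset	= 0x40000,	/* hypothetical SGX base offset */
	.crtc_helper	= &psb_intel_helper_funcs,
	.crtc_funcs	= &psb_intel_crtc_funcs,
	.chip_setup	= example_chip_setup,
	.output_init	= example_output_init,
};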
676
677
678
679struct psb_mmu_driver;
680
681extern int drm_crtc_probe_output_modes(struct drm_device *dev, int, int);
682extern int drm_pick_crtcs(struct drm_device *dev);
683
684static inline struct drm_psb_private *psb_priv(struct drm_device *dev)
685{
686 return (struct drm_psb_private *) dev->dev_private;
687}
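
/*
 * Editor's sketch: any code holding a drm_device can recover the driver
 * private state through psb_priv(); the helper below is illustrative only.
 */
static inline bool example_is_suspended(struct drm_device *dev)
{
	return psb_priv(dev)->suspended;
}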
688
689/*
690 * MMU stuff.
691 */
692
693extern struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
694 int trap_pagefaults,
695 int invalid_type,
696 struct drm_psb_private *dev_priv);
697extern void psb_mmu_driver_takedown(struct psb_mmu_driver *driver);
698extern struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver
699 *driver);
700extern void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd, uint32_t mmu_offset,
701 uint32_t gtt_start, uint32_t gtt_pages);
702extern struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
703 int trap_pagefaults,
704 int invalid_type);
705extern void psb_mmu_free_pagedir(struct psb_mmu_pd *pd);
706extern void psb_mmu_flush(struct psb_mmu_driver *driver, int rc_prot);
707extern void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
708 unsigned long address,
709 uint32_t num_pages);
710extern int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd,
711 uint32_t start_pfn,
712 unsigned long address,
713 uint32_t num_pages, int type);
714extern int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
715 unsigned long *pfn);
716
717/*
718 * Enable / disable MMU for different requestors.
719 */
720
721
722extern void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context);
723extern int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
724 unsigned long address, uint32_t num_pages,
725 uint32_t desired_tile_stride,
726 uint32_t hw_tile_stride, int type);
727extern void psb_mmu_remove_pages(struct psb_mmu_pd *pd,
728 unsigned long address, uint32_t num_pages,
729 uint32_t desired_tile_stride,
730 uint32_t hw_tile_stride);
731/*
732 * psb_irq.c
733 */
734
735extern irqreturn_t psb_irq_handler(DRM_IRQ_ARGS);
736extern int psb_irq_enable_dpst(struct drm_device *dev);
737extern int psb_irq_disable_dpst(struct drm_device *dev);
738extern void psb_irq_preinstall(struct drm_device *dev);
739extern int psb_irq_postinstall(struct drm_device *dev);
740extern void psb_irq_uninstall(struct drm_device *dev);
741extern void psb_irq_turn_on_dpst(struct drm_device *dev);
742extern void psb_irq_turn_off_dpst(struct drm_device *dev);
743
744extern void psb_irq_uninstall_islands(struct drm_device *dev, int hw_islands);
745extern int psb_vblank_wait2(struct drm_device *dev, unsigned int *sequence);
746extern int psb_vblank_wait(struct drm_device *dev, unsigned int *sequence);
747extern int psb_enable_vblank(struct drm_device *dev, int crtc);
748extern void psb_disable_vblank(struct drm_device *dev, int crtc);
749void
750psb_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
751
752void
753psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
754
755extern u32 psb_get_vblank_counter(struct drm_device *dev, int crtc);
756
757/*
758 * intel_opregion.c
759 */
760extern int gma_intel_opregion_init(struct drm_device *dev);
761extern int gma_intel_opregion_exit(struct drm_device *dev);
762
763/*
764 * framebuffer.c
765 */
766extern int psbfb_probed(struct drm_device *dev);
767extern int psbfb_remove(struct drm_device *dev,
768 struct drm_framebuffer *fb);
769/*
770 * accel_2d.c
771 */
772extern void psbfb_copyarea(struct fb_info *info,
773 const struct fb_copyarea *region);
774extern int psbfb_sync(struct fb_info *info);
775extern void psb_spank(struct drm_psb_private *dev_priv);
776
777/*
778 * psb_reset.c
779 */
780
781extern void psb_lid_timer_init(struct drm_psb_private *dev_priv);
782extern void psb_lid_timer_takedown(struct drm_psb_private *dev_priv);
783extern void psb_print_pagefault(struct drm_psb_private *dev_priv);
784
785/* modesetting */
786extern void psb_modeset_init(struct drm_device *dev);
787extern void psb_modeset_cleanup(struct drm_device *dev);
788extern int psb_fbdev_init(struct drm_device *dev);
789
790/* backlight.c */
791int gma_backlight_init(struct drm_device *dev);
792void gma_backlight_exit(struct drm_device *dev);
793
794/* oaktrail_crtc.c */
795extern const struct drm_crtc_helper_funcs oaktrail_helper_funcs;
796
797/* oaktrail_lvds.c */
798extern void oaktrail_lvds_init(struct drm_device *dev,
799 struct psb_intel_mode_device *mode_dev);
800
801/* psb_intel_display.c */
802extern const struct drm_crtc_helper_funcs psb_intel_helper_funcs;
803extern const struct drm_crtc_funcs psb_intel_crtc_funcs;
804
805/* psb_intel_lvds.c */
806extern const struct drm_connector_helper_funcs
807 psb_intel_lvds_connector_helper_funcs;
808extern const struct drm_connector_funcs psb_intel_lvds_connector_funcs;
809
810/* gem.c */
811extern int psb_gem_init_object(struct drm_gem_object *obj);
812extern void psb_gem_free_object(struct drm_gem_object *obj);
813extern int psb_gem_get_aperture(struct drm_device *dev, void *data,
814 struct drm_file *file);
815extern int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
816 struct drm_mode_create_dumb *args);
817extern int psb_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
818 uint32_t handle);
819extern int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
820 uint32_t handle, uint64_t *offset);
821extern int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
822extern int psb_gem_create_ioctl(struct drm_device *dev, void *data,
823 struct drm_file *file);
824extern int psb_gem_mmap_ioctl(struct drm_device *dev, void *data,
825 struct drm_file *file);
826
827/* psb_device.c */
828extern const struct psb_ops psb_chip_ops;
829
830/* oaktrail_device.c */
831extern const struct psb_ops oaktrail_chip_ops;
832
833/* cdv_device.c */
834extern const struct psb_ops cdv_chip_ops;
835
836/*
837 * Debug print bit settings
838 */
839#define PSB_D_GENERAL (1 << 0)
840#define PSB_D_INIT (1 << 1)
841#define PSB_D_IRQ (1 << 2)
842#define PSB_D_ENTRY (1 << 3)
843/* debug the H/V back/front porch count reads */
844#define PSB_D_HV (1 << 4)
845#define PSB_D_DBI_BF (1 << 5)
846#define PSB_D_PM (1 << 6)
847#define PSB_D_RENDER (1 << 7)
848#define PSB_D_REG (1 << 8)
849#define PSB_D_MSVDX (1 << 9)
850#define PSB_D_TOPAZ (1 << 10)
851
852extern int drm_psb_no_fb;
853extern int drm_idle_check_interval;
854
855/*
856 * Utilities
857 */
858
859static inline u32 MRST_MSG_READ32(uint port, uint offset)
860{
861 int mcr = (0xD0<<24) | (port << 16) | (offset << 8);
862 uint32_t ret_val = 0;
863 struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
864 pci_write_config_dword(pci_root, 0xD0, mcr);
865 pci_read_config_dword(pci_root, 0xD4, &ret_val);
866 pci_dev_put(pci_root);
867 return ret_val;
868}
869static inline void MRST_MSG_WRITE32(uint port, uint offset, u32 value)
870{
871 int mcr = (0xE0<<24) | (port << 16) | (offset << 8) | 0xF0;
872 struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
873 pci_write_config_dword(pci_root, 0xD4, value);
874 pci_write_config_dword(pci_root, 0xD0, mcr);
875 pci_dev_put(pci_root);
876}
877static inline u32 MDFLD_MSG_READ32(uint port, uint offset)
878{
879 int mcr = (0x10<<24) | (port << 16) | (offset << 8);
880 uint32_t ret_val = 0;
881 struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
882 pci_write_config_dword(pci_root, 0xD0, mcr);
883 pci_read_config_dword(pci_root, 0xD4, &ret_val);
884 pci_dev_put(pci_root);
885 return ret_val;
886}
887static inline void MDFLD_MSG_WRITE32(uint port, uint offset, u32 value)
888{
889 int mcr = (0x11<<24) | (port << 16) | (offset << 8) | 0xF0;
890 struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
891 pci_write_config_dword(pci_root, 0xD4, value);
892 pci_write_config_dword(pci_root, 0xD0, mcr);
893 pci_dev_put(pci_root);
894}
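
/*
 * Editor's sketch: the four helpers above go through the PCI root complex
 * message bus (0xD0 = command register, 0xD4 = data register), so a
 * read-modify-write looks like the illustrative helper below. The port and
 * offset a real caller passes are platform specific.
 */
static inline void example_mrst_msg_set_bits(uint port, uint offset, u32 bits)
{
	u32 v = MRST_MSG_READ32(port, offset);		/* latch current value */
	MRST_MSG_WRITE32(port, offset, v | bits);	/* write back with bits set */
}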
895
896static inline uint32_t REGISTER_READ(struct drm_device *dev, uint32_t reg)
897{
898 struct drm_psb_private *dev_priv = dev->dev_private;
899 return ioread32(dev_priv->vdc_reg + reg);
900}
901
902#define REG_READ(reg) REGISTER_READ(dev, (reg))
903
904static inline void REGISTER_WRITE(struct drm_device *dev, uint32_t reg,
905 uint32_t val)
906{
907 struct drm_psb_private *dev_priv = dev->dev_private;
908 iowrite32((val), dev_priv->vdc_reg + (reg));
909}
910
911#define REG_WRITE(reg, val) REGISTER_WRITE(dev, (reg), (val))
912
913static inline void REGISTER_WRITE16(struct drm_device *dev,
914 uint32_t reg, uint32_t val)
915{
916 struct drm_psb_private *dev_priv = dev->dev_private;
917 iowrite16((val), dev_priv->vdc_reg + (reg));
918}
919
920#define REG_WRITE16(reg, val) REGISTER_WRITE16(dev, (reg), (val))
921
922static inline void REGISTER_WRITE8(struct drm_device *dev,
923 uint32_t reg, uint32_t val)
924{
925 struct drm_psb_private *dev_priv = dev->dev_private;
926 iowrite8((val), dev_priv->vdc_reg + (reg));
927}
928
929#define REG_WRITE8(reg, val) REGISTER_WRITE8(dev, (reg), (val))
930
931#define PSB_WVDC32(_val, _offs) iowrite32(_val, dev_priv->vdc_reg + (_offs))
932#define PSB_RVDC32(_offs) ioread32(dev_priv->vdc_reg + (_offs))
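
/*
 * Editor's sketch: a typical read-modify-write using the accessors above.
 * REG_READ/REG_WRITE expect a "dev" in scope while the PSB_*VDC32 forms
 * expect "dev_priv"; the trailing read is a posting read that flushes the
 * write. The helper itself is illustrative only.
 */
static inline void example_vdc_set_bit(struct drm_device *dev,
				       uint32_t reg, uint32_t bit)
{
	uint32_t v = REG_READ(reg);
	REG_WRITE(reg, v | bit);
	(void) REG_READ(reg);	/* posting read */
}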
933
934/* #define TRAP_SGX_PM_FAULT 1 */
935#ifdef TRAP_SGX_PM_FAULT
936#define PSB_RSGX32(_offs) \
937({ \
938 if (inl(dev_priv->apm_base + PSB_APM_STS) & 0x3) { \
939 printk(KERN_ERR \
940 "access sgx when it's off!! (READ) %s, %d\n", \
941 __FILE__, __LINE__); \
942			mdelay(1000); \
943 } \
944 ioread32(dev_priv->sgx_reg + (_offs)); \
945})
946#else
947#define PSB_RSGX32(_offs) ioread32(dev_priv->sgx_reg + (_offs))
948#endif
949#define PSB_WSGX32(_val, _offs) iowrite32(_val, dev_priv->sgx_reg + (_offs))
950
951#define MSVDX_REG_DUMP 0
952
953#define PSB_WMSVDX32(_val, _offs) iowrite32(_val, dev_priv->msvdx_reg + (_offs))
954#define PSB_RMSVDX32(_offs) ioread32(dev_priv->msvdx_reg + (_offs))
955
956#endif
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
new file mode 100644
index 00000000000..49e983508d5
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -0,0 +1,1446 @@
1/*
2 * Copyright © 2006-2011 Intel Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Authors:
18 * Eric Anholt <eric@anholt.net>
19 */
20
21#include <linux/i2c.h>
22#include <linux/pm_runtime.h>
23
24#include <drm/drmP.h>
25#include "framebuffer.h"
26#include "psb_drv.h"
27#include "psb_intel_drv.h"
28#include "psb_intel_reg.h"
29#include "psb_intel_display.h"
30#include "power.h"
31
32struct psb_intel_clock_t {
33 /* given values */
34 int n;
35 int m1, m2;
36 int p1, p2;
37 /* derived values */
38 int dot;
39 int vco;
40 int m;
41 int p;
42};
43
44struct psb_intel_range_t {
45 int min, max;
46};
47
48struct psb_intel_p2_t {
49 int dot_limit;
50 int p2_slow, p2_fast;
51};
52
53#define INTEL_P2_NUM 2
54
55struct psb_intel_limit_t {
56 struct psb_intel_range_t dot, vco, n, m, m1, m2, p, p1;
57 struct psb_intel_p2_t p2;
58};
59
60#define I8XX_DOT_MIN 25000
61#define I8XX_DOT_MAX 350000
62#define I8XX_VCO_MIN 930000
63#define I8XX_VCO_MAX 1400000
64#define I8XX_N_MIN 3
65#define I8XX_N_MAX 16
66#define I8XX_M_MIN 96
67#define I8XX_M_MAX 140
68#define I8XX_M1_MIN 18
69#define I8XX_M1_MAX 26
70#define I8XX_M2_MIN 6
71#define I8XX_M2_MAX 16
72#define I8XX_P_MIN 4
73#define I8XX_P_MAX 128
74#define I8XX_P1_MIN 2
75#define I8XX_P1_MAX 33
76#define I8XX_P1_LVDS_MIN 1
77#define I8XX_P1_LVDS_MAX 6
78#define I8XX_P2_SLOW 4
79#define I8XX_P2_FAST 2
80#define I8XX_P2_LVDS_SLOW 14
81#define I8XX_P2_LVDS_FAST 14 /* No fast option */
82#define I8XX_P2_SLOW_LIMIT 165000
83
84#define I9XX_DOT_MIN 20000
85#define I9XX_DOT_MAX 400000
86#define I9XX_VCO_MIN 1400000
87#define I9XX_VCO_MAX 2800000
88#define I9XX_N_MIN 3
89#define I9XX_N_MAX 8
90#define I9XX_M_MIN 70
91#define I9XX_M_MAX 120
92#define I9XX_M1_MIN 10
93#define I9XX_M1_MAX 20
94#define I9XX_M2_MIN 5
95#define I9XX_M2_MAX 9
96#define I9XX_P_SDVO_DAC_MIN 5
97#define I9XX_P_SDVO_DAC_MAX 80
98#define I9XX_P_LVDS_MIN 7
99#define I9XX_P_LVDS_MAX 98
100#define I9XX_P1_MIN 1
101#define I9XX_P1_MAX 8
102#define I9XX_P2_SDVO_DAC_SLOW 10
103#define I9XX_P2_SDVO_DAC_FAST 5
104#define I9XX_P2_SDVO_DAC_SLOW_LIMIT 200000
105#define I9XX_P2_LVDS_SLOW 14
106#define I9XX_P2_LVDS_FAST 7
107#define I9XX_P2_LVDS_SLOW_LIMIT 112000
108
109#define INTEL_LIMIT_I8XX_DVO_DAC 0
110#define INTEL_LIMIT_I8XX_LVDS 1
111#define INTEL_LIMIT_I9XX_SDVO_DAC 2
112#define INTEL_LIMIT_I9XX_LVDS 3
113
114static const struct psb_intel_limit_t psb_intel_limits[] = {
115 { /* INTEL_LIMIT_I8XX_DVO_DAC */
116 .dot = {.min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX},
117 .vco = {.min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX},
118 .n = {.min = I8XX_N_MIN, .max = I8XX_N_MAX},
119 .m = {.min = I8XX_M_MIN, .max = I8XX_M_MAX},
120 .m1 = {.min = I8XX_M1_MIN, .max = I8XX_M1_MAX},
121 .m2 = {.min = I8XX_M2_MIN, .max = I8XX_M2_MAX},
122 .p = {.min = I8XX_P_MIN, .max = I8XX_P_MAX},
123 .p1 = {.min = I8XX_P1_MIN, .max = I8XX_P1_MAX},
124 .p2 = {.dot_limit = I8XX_P2_SLOW_LIMIT,
125 .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST},
126 },
127 { /* INTEL_LIMIT_I8XX_LVDS */
128 .dot = {.min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX},
129 .vco = {.min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX},
130 .n = {.min = I8XX_N_MIN, .max = I8XX_N_MAX},
131 .m = {.min = I8XX_M_MIN, .max = I8XX_M_MAX},
132 .m1 = {.min = I8XX_M1_MIN, .max = I8XX_M1_MAX},
133 .m2 = {.min = I8XX_M2_MIN, .max = I8XX_M2_MAX},
134 .p = {.min = I8XX_P_MIN, .max = I8XX_P_MAX},
135 .p1 = {.min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX},
136 .p2 = {.dot_limit = I8XX_P2_SLOW_LIMIT,
137 .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST},
138 },
139 { /* INTEL_LIMIT_I9XX_SDVO_DAC */
140 .dot = {.min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
141 .vco = {.min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX},
142 .n = {.min = I9XX_N_MIN, .max = I9XX_N_MAX},
143 .m = {.min = I9XX_M_MIN, .max = I9XX_M_MAX},
144 .m1 = {.min = I9XX_M1_MIN, .max = I9XX_M1_MAX},
145 .m2 = {.min = I9XX_M2_MIN, .max = I9XX_M2_MAX},
146 .p = {.min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX},
147 .p1 = {.min = I9XX_P1_MIN, .max = I9XX_P1_MAX},
148 .p2 = {.dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
149 .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast =
150 I9XX_P2_SDVO_DAC_FAST},
151 },
152 { /* INTEL_LIMIT_I9XX_LVDS */
153 .dot = {.min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
154 .vco = {.min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX},
155 .n = {.min = I9XX_N_MIN, .max = I9XX_N_MAX},
156 .m = {.min = I9XX_M_MIN, .max = I9XX_M_MAX},
157 .m1 = {.min = I9XX_M1_MIN, .max = I9XX_M1_MAX},
158 .m2 = {.min = I9XX_M2_MIN, .max = I9XX_M2_MAX},
159 .p = {.min = I9XX_P_LVDS_MIN, .max = I9XX_P_LVDS_MAX},
160 .p1 = {.min = I9XX_P1_MIN, .max = I9XX_P1_MAX},
161	 /* The single-channel range is 25-112MHz, and dual-channel
162	  * is 80-224MHz. Prefer single channel as much as possible.
163 */
164 .p2 = {.dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
165 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST},
166 },
167};
168
169static const struct psb_intel_limit_t *psb_intel_limit(struct drm_crtc *crtc)
170{
171 const struct psb_intel_limit_t *limit;
172
173 if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
174 limit = &psb_intel_limits[INTEL_LIMIT_I9XX_LVDS];
175 else
176 limit = &psb_intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
177 return limit;
178}
179
180/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
181
182static void i8xx_clock(int refclk, struct psb_intel_clock_t *clock)
183{
184 clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
185 clock->p = clock->p1 * clock->p2;
186 clock->vco = refclk * clock->m / (clock->n + 2);
187 clock->dot = clock->vco / clock->p;
188}
189
190/** Derive the pixel clock for the given refclk and divisors for 9xx chips. */
191
192static void i9xx_clock(int refclk, struct psb_intel_clock_t *clock)
193{
194 clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
195 clock->p = clock->p1 * clock->p2;
196 clock->vco = refclk * clock->m / (clock->n + 2);
197 clock->dot = clock->vco / clock->p;
198}
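
/*
 * Editor's note: a worked instance of the derivation above. With
 * refclk = 96000 kHz and divisors n = 3, m1 = 12, m2 = 7, p1 = 2, p2 = 10:
 *
 *	m   = 5 * (12 + 2) + (7 + 2)	= 79
 *	vco = 96000 * 79 / (3 + 2)	= 1516800 kHz
 *	p   = 2 * 10			= 20
 *	dot = 1516800 / 20		= 75840 kHz (~75.8 MHz)
 *
 * Every value falls inside the I9XX_* limits above, so this combination
 * would be accepted by psb_intel_PLL_is_valid() below.
 */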
199
200static void psb_intel_clock(struct drm_device *dev, int refclk,
201 struct psb_intel_clock_t *clock)
202{
203 return i9xx_clock(refclk, clock);
204}
205
206/**
207 * Returns whether any output on the specified pipe is of the specified type
208 */
209bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type)
210{
211 struct drm_device *dev = crtc->dev;
212 struct drm_mode_config *mode_config = &dev->mode_config;
213 struct drm_connector *l_entry;
214
215 list_for_each_entry(l_entry, &mode_config->connector_list, head) {
216 if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
217 struct psb_intel_encoder *psb_intel_encoder =
218 psb_intel_attached_encoder(l_entry);
219 if (psb_intel_encoder->type == type)
220 return true;
221 }
222 }
223 return false;
224}
225
226#define INTELPllInvalid(s) { /* ErrorF (s) */; return false; }
227/**
228 * Returns whether the given set of divisors are valid for a given refclk with
229 * the given connectors.
230 */
231
232static bool psb_intel_PLL_is_valid(struct drm_crtc *crtc,
233 struct psb_intel_clock_t *clock)
234{
235 const struct psb_intel_limit_t *limit = psb_intel_limit(crtc);
236
237 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
238 INTELPllInvalid("p1 out of range\n");
239 if (clock->p < limit->p.min || limit->p.max < clock->p)
240 INTELPllInvalid("p out of range\n");
241 if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
242 INTELPllInvalid("m2 out of range\n");
243 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
244 INTELPllInvalid("m1 out of range\n");
245 if (clock->m1 <= clock->m2)
246 INTELPllInvalid("m1 <= m2\n");
247 if (clock->m < limit->m.min || limit->m.max < clock->m)
248 INTELPllInvalid("m out of range\n");
249 if (clock->n < limit->n.min || limit->n.max < clock->n)
250 INTELPllInvalid("n out of range\n");
251 if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
252 INTELPllInvalid("vco out of range\n");
253 /* XXX: We may need to be checking "Dot clock"
254 * depending on the multiplier, connector, etc.,
255 * rather than just a single range.
256 */
257 if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
258 INTELPllInvalid("dot out of range\n");
259
260 return true;
261}
262
263/**
264 * Returns a set of divisors for the desired target clock with the given
265 * refclk, or false. The returned values represent the clock equation:
266 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
267 */
268static bool psb_intel_find_best_PLL(struct drm_crtc *crtc, int target,
269 int refclk,
270 struct psb_intel_clock_t *best_clock)
271{
272 struct drm_device *dev = crtc->dev;
273 struct psb_intel_clock_t clock;
274 const struct psb_intel_limit_t *limit = psb_intel_limit(crtc);
275 int err = target;
276
277 if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
278 (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
279 /*
280 * For LVDS, if the panel is on, just rely on its current
281 * settings for dual-channel. We haven't figured out how to
282 * reliably set up different single/dual channel state, if we
283 * even can.
284 */
285 if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
286 LVDS_CLKB_POWER_UP)
287 clock.p2 = limit->p2.p2_fast;
288 else
289 clock.p2 = limit->p2.p2_slow;
290 } else {
291 if (target < limit->p2.dot_limit)
292 clock.p2 = limit->p2.p2_slow;
293 else
294 clock.p2 = limit->p2.p2_fast;
295 }
296
297 memset(best_clock, 0, sizeof(*best_clock));
298
299 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
300 clock.m1++) {
301 for (clock.m2 = limit->m2.min;
302 clock.m2 < clock.m1 && clock.m2 <= limit->m2.max;
303 clock.m2++) {
304 for (clock.n = limit->n.min;
305 clock.n <= limit->n.max; clock.n++) {
306 for (clock.p1 = limit->p1.min;
307 clock.p1 <= limit->p1.max;
308 clock.p1++) {
309 int this_err;
310
311 psb_intel_clock(dev, refclk, &clock);
312
313 if (!psb_intel_PLL_is_valid
314 (crtc, &clock))
315 continue;
316
317 this_err = abs(clock.dot - target);
318 if (this_err < err) {
319 *best_clock = clock;
320 err = this_err;
321 }
322 }
323 }
324 }
325 }
326
327	return err != target;	/* true if some valid set of divisors was found */
328}
329
330void psb_intel_wait_for_vblank(struct drm_device *dev)
331{
332	/* Wait for 20ms, i.e. one cycle at 50Hz. */
333 mdelay(20);
334}
335
336int psb_intel_pipe_set_base(struct drm_crtc *crtc,
337 int x, int y, struct drm_framebuffer *old_fb)
338{
339 struct drm_device *dev = crtc->dev;
340 /* struct drm_i915_master_private *master_priv; */
341 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
342 struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
343 int pipe = psb_intel_crtc->pipe;
344 unsigned long start, offset;
345 int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE);
346 int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
347 int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
348 int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
349 u32 dspcntr;
350 int ret = 0;
351
352 if (!gma_power_begin(dev, true))
353 return 0;
354
355 /* no fb bound */
356 if (!crtc->fb) {
357 dev_dbg(dev->dev, "No FB bound\n");
358 goto psb_intel_pipe_cleaner;
359 }
360
361 /* We are displaying this buffer, make sure it is actually loaded
362 into the GTT */
363 ret = psb_gtt_pin(psbfb->gtt);
364 if (ret < 0)
365 goto psb_intel_pipe_set_base_exit;
366 start = psbfb->gtt->offset;
367
368 offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
369
370 REG_WRITE(dspstride, crtc->fb->pitches[0]);
371
372 dspcntr = REG_READ(dspcntr_reg);
373 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
374
375 switch (crtc->fb->bits_per_pixel) {
376 case 8:
377 dspcntr |= DISPPLANE_8BPP;
378 break;
379 case 16:
380 if (crtc->fb->depth == 15)
381 dspcntr |= DISPPLANE_15_16BPP;
382 else
383 dspcntr |= DISPPLANE_16BPP;
384 break;
385 case 24:
386 case 32:
387 dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
388 break;
389 default:
390 dev_err(dev->dev, "Unknown color depth\n");
391 ret = -EINVAL;
392 psb_gtt_unpin(psbfb->gtt);
393 goto psb_intel_pipe_set_base_exit;
394 }
395 REG_WRITE(dspcntr_reg, dspcntr);
396
397
398 if (0 /* FIXMEAC - check what PSB needs */) {
399 REG_WRITE(dspbase, offset);
400 REG_READ(dspbase);
401 REG_WRITE(dspsurf, start);
402 REG_READ(dspsurf);
403 } else {
404 REG_WRITE(dspbase, start + offset);
405 REG_READ(dspbase);
406 }
407
408psb_intel_pipe_cleaner:
409 /* If there was a previous display we can now unpin it */
410 if (old_fb)
411 psb_gtt_unpin(to_psb_fb(old_fb)->gtt);
412
413psb_intel_pipe_set_base_exit:
414 gma_power_end(dev);
415 return ret;
416}
417
418/**
419 * Sets the power management mode of the pipe and plane.
420 *
421 * This code should probably grow support for turning the cursor off and back
422 * on appropriately at the same time as we're turning the pipe off/on.
423 */
424static void psb_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
425{
426 struct drm_device *dev = crtc->dev;
427 /* struct drm_i915_master_private *master_priv; */
428 /* struct drm_i915_private *dev_priv = dev->dev_private; */
429 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
430 int pipe = psb_intel_crtc->pipe;
431 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
432 int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
433 int dspbase_reg = (pipe == 0) ? DSPABASE : DSPBBASE;
434 int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
435 u32 temp;
436 bool enabled;
437
438 /* XXX: When our outputs are all unaware of DPMS modes other than off
439 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
440 */
441 switch (mode) {
442 case DRM_MODE_DPMS_ON:
443 case DRM_MODE_DPMS_STANDBY:
444 case DRM_MODE_DPMS_SUSPEND:
445 /* Enable the DPLL */
446 temp = REG_READ(dpll_reg);
447 if ((temp & DPLL_VCO_ENABLE) == 0) {
448 REG_WRITE(dpll_reg, temp);
449 REG_READ(dpll_reg);
450 /* Wait for the clocks to stabilize. */
451 udelay(150);
452 REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
453 REG_READ(dpll_reg);
454 /* Wait for the clocks to stabilize. */
455 udelay(150);
456 REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
457 REG_READ(dpll_reg);
458 /* Wait for the clocks to stabilize. */
459 udelay(150);
460 }
461
462 /* Enable the pipe */
463 temp = REG_READ(pipeconf_reg);
464 if ((temp & PIPEACONF_ENABLE) == 0)
465 REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
466
467 /* Enable the plane */
468 temp = REG_READ(dspcntr_reg);
469 if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
470 REG_WRITE(dspcntr_reg,
471 temp | DISPLAY_PLANE_ENABLE);
472 /* Flush the plane changes */
473 REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
474 }
475
476 psb_intel_crtc_load_lut(crtc);
477
478 /* Give the overlay scaler a chance to enable
479 * if it's on this pipe */
480 /* psb_intel_crtc_dpms_video(crtc, true); TODO */
481 break;
482 case DRM_MODE_DPMS_OFF:
483 /* Give the overlay scaler a chance to disable
484 * if it's on this pipe */
485 /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
486
487 /* Disable the VGA plane that we never use */
488 REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
489
490 /* Disable display plane */
491 temp = REG_READ(dspcntr_reg);
492 if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
493 REG_WRITE(dspcntr_reg,
494 temp & ~DISPLAY_PLANE_ENABLE);
495 /* Flush the plane changes */
496 REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
497 REG_READ(dspbase_reg);
498 }
499
500 /* Next, disable display pipes */
501 temp = REG_READ(pipeconf_reg);
502 if ((temp & PIPEACONF_ENABLE) != 0) {
503 REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
504 REG_READ(pipeconf_reg);
505 }
506
507 /* Wait for vblank for the disable to take effect. */
508 psb_intel_wait_for_vblank(dev);
509
510 temp = REG_READ(dpll_reg);
511 if ((temp & DPLL_VCO_ENABLE) != 0) {
512 REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
513 REG_READ(dpll_reg);
514 }
515
516 /* Wait for the clocks to turn off. */
517 udelay(150);
518 break;
519 }
520
521 enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
522
523	/* Set FIFO watermarks */
524 REG_WRITE(DSPARB, 0x3F3E);
525}
526
527static void psb_intel_crtc_prepare(struct drm_crtc *crtc)
528{
529 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
530 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
531}
532
533static void psb_intel_crtc_commit(struct drm_crtc *crtc)
534{
535 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
536 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
537}
538
539void psb_intel_encoder_prepare(struct drm_encoder *encoder)
540{
541 struct drm_encoder_helper_funcs *encoder_funcs =
542 encoder->helper_private;
543 /* lvds has its own version of prepare see psb_intel_lvds_prepare */
544 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
545}
546
547void psb_intel_encoder_commit(struct drm_encoder *encoder)
548{
549 struct drm_encoder_helper_funcs *encoder_funcs =
550 encoder->helper_private;
551 /* lvds has its own version of commit see psb_intel_lvds_commit */
552 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
553}
554
555void psb_intel_encoder_destroy(struct drm_encoder *encoder)
556{
557 struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
558
559 drm_encoder_cleanup(encoder);
560 kfree(intel_encoder);
561}
562
563static bool psb_intel_crtc_mode_fixup(struct drm_crtc *crtc,
564 struct drm_display_mode *mode,
565 struct drm_display_mode *adjusted_mode)
566{
567 return true;
568}
569
570
571/**
572 * Return the pipe currently connected to the panel fitter,
573 * or -1 if the panel fitter is not present or not in use
574 */
575static int psb_intel_panel_fitter_pipe(struct drm_device *dev)
576{
577 u32 pfit_control;
578
579 pfit_control = REG_READ(PFIT_CONTROL);
580
581 /* See if the panel fitter is in use */
582 if ((pfit_control & PFIT_ENABLE) == 0)
583 return -1;
584 /* Must be on PIPE 1 for PSB */
585 return 1;
586}
587
588static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
589 struct drm_display_mode *mode,
590 struct drm_display_mode *adjusted_mode,
591 int x, int y,
592 struct drm_framebuffer *old_fb)
593{
594 struct drm_device *dev = crtc->dev;
595 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
596 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
597 int pipe = psb_intel_crtc->pipe;
598 int fp_reg = (pipe == 0) ? FPA0 : FPB0;
599 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
600 int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
601 int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
602 int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
603 int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
604 int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
605 int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
606 int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
607 int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
608 int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
609 int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
610 int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
611 int refclk;
612 struct psb_intel_clock_t clock;
613 u32 dpll = 0, fp = 0, dspcntr, pipeconf;
614 bool ok, is_sdvo = false, is_dvo = false;
615 bool is_crt = false, is_lvds = false, is_tv = false;
616 struct drm_mode_config *mode_config = &dev->mode_config;
617 struct drm_connector *connector;
618
619 /* No scan out no play */
620 if (crtc->fb == NULL) {
621 crtc_funcs->mode_set_base(crtc, x, y, old_fb);
622 return 0;
623 }
624
625 list_for_each_entry(connector, &mode_config->connector_list, head) {
626 struct psb_intel_encoder *psb_intel_encoder =
627 psb_intel_attached_encoder(connector);
628
629 if (!connector->encoder
630 || connector->encoder->crtc != crtc)
631 continue;
632
633 switch (psb_intel_encoder->type) {
634 case INTEL_OUTPUT_LVDS:
635 is_lvds = true;
636 break;
637 case INTEL_OUTPUT_SDVO:
638 is_sdvo = true;
639 break;
640 case INTEL_OUTPUT_DVO:
641 is_dvo = true;
642 break;
643 case INTEL_OUTPUT_TVOUT:
644 is_tv = true;
645 break;
646 case INTEL_OUTPUT_ANALOG:
647 is_crt = true;
648 break;
649 }
650 }
651
652 refclk = 96000;
653
654 ok = psb_intel_find_best_PLL(crtc, adjusted_mode->clock, refclk,
655 &clock);
656 if (!ok) {
657 dev_err(dev->dev, "Couldn't find PLL settings for mode!\n");
658 return 0;
659 }
660
661 fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
662
663 dpll = DPLL_VGA_MODE_DIS;
664 if (is_lvds) {
665 dpll |= DPLLB_MODE_LVDS;
666 dpll |= DPLL_DVO_HIGH_SPEED;
667 } else
668 dpll |= DPLLB_MODE_DAC_SERIAL;
669 if (is_sdvo) {
670 int sdvo_pixel_multiply =
671 adjusted_mode->clock / mode->clock;
672 dpll |= DPLL_DVO_HIGH_SPEED;
673 dpll |=
674 (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
675 }
676
677 /* compute bitmask from p1 value */
678 dpll |= (1 << (clock.p1 - 1)) << 16;
679 switch (clock.p2) {
680 case 5:
681 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
682 break;
683 case 7:
684 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
685 break;
686 case 10:
687 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
688 break;
689 case 14:
690 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
691 break;
692 }
693
694 if (is_tv) {
695 /* XXX: just matching BIOS for now */
696/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
697 dpll |= 3;
698 }
699 dpll |= PLL_REF_INPUT_DREFCLK;
700
701 /* setup pipeconf */
702 pipeconf = REG_READ(pipeconf_reg);
703
704 /* Set up the display plane register */
705 dspcntr = DISPPLANE_GAMMA_ENABLE;
706
707 if (pipe == 0)
708 dspcntr |= DISPPLANE_SEL_PIPE_A;
709 else
710 dspcntr |= DISPPLANE_SEL_PIPE_B;
711
712 dspcntr |= DISPLAY_PLANE_ENABLE;
713 pipeconf |= PIPEACONF_ENABLE;
714 dpll |= DPLL_VCO_ENABLE;
715
716
717 /* Disable the panel fitter if it was on our pipe */
718 if (psb_intel_panel_fitter_pipe(dev) == pipe)
719 REG_WRITE(PFIT_CONTROL, 0);
720
721 drm_mode_debug_printmodeline(mode);
722
723 if (dpll & DPLL_VCO_ENABLE) {
724 REG_WRITE(fp_reg, fp);
725 REG_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
726 REG_READ(dpll_reg);
727 udelay(150);
728 }
729
730 /* The LVDS pin pair needs to be on before the DPLLs are enabled.
731 * This is an exception to the general rule that mode_set doesn't turn
732 * things on.
733 */
734 if (is_lvds) {
735 u32 lvds = REG_READ(LVDS);
736
737 lvds &= ~LVDS_PIPEB_SELECT;
738 if (pipe == 1)
739 lvds |= LVDS_PIPEB_SELECT;
740
741 lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
742 /* Set the B0-B3 data pairs corresponding to
743 * whether we're going to
744 * set the DPLLs for dual-channel mode or not.
745 */
746 lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
747 if (clock.p2 == 7)
748 lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
749
750 /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
751 * appropriately here, but we need to look more
752 * thoroughly into how panels behave in the two modes.
753 */
754
755 REG_WRITE(LVDS, lvds);
756 REG_READ(LVDS);
757 }
758
759 REG_WRITE(fp_reg, fp);
760 REG_WRITE(dpll_reg, dpll);
761 REG_READ(dpll_reg);
762 /* Wait for the clocks to stabilize. */
763 udelay(150);
764
765 /* write it again -- the BIOS does, after all */
766 REG_WRITE(dpll_reg, dpll);
767
768 REG_READ(dpll_reg);
769 /* Wait for the clocks to stabilize. */
770 udelay(150);
771
772 REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
773 ((adjusted_mode->crtc_htotal - 1) << 16));
774 REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
775 ((adjusted_mode->crtc_hblank_end - 1) << 16));
776 REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
777 ((adjusted_mode->crtc_hsync_end - 1) << 16));
778 REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
779 ((adjusted_mode->crtc_vtotal - 1) << 16));
780 REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
781 ((adjusted_mode->crtc_vblank_end - 1) << 16));
782 REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
783 ((adjusted_mode->crtc_vsync_end - 1) << 16));
784 /* pipesrc and dspsize control the size that is scaled from,
785 * which should always be the user's requested size.
786 */
787 REG_WRITE(dspsize_reg,
788 ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
789 REG_WRITE(dsppos_reg, 0);
790 REG_WRITE(pipesrc_reg,
791 ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
792 REG_WRITE(pipeconf_reg, pipeconf);
793 REG_READ(pipeconf_reg);
794
795 psb_intel_wait_for_vblank(dev);
796
797 REG_WRITE(dspcntr_reg, dspcntr);
798
799 /* Flush the plane changes */
800 crtc_funcs->mode_set_base(crtc, x, y, old_fb);
801
802 psb_intel_wait_for_vblank(dev);
803
804 return 0;
805}
806
807/** Loads the palette/gamma unit for the CRTC with the prepared values */
808void psb_intel_crtc_load_lut(struct drm_crtc *crtc)
809{
810 struct drm_device *dev = crtc->dev;
811 struct drm_psb_private *dev_priv =
812 (struct drm_psb_private *)dev->dev_private;
813 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
814 int palreg = PALETTE_A;
815 int i;
816
817 /* The clocks have to be on to load the palette. */
818 if (!crtc->enabled)
819 return;
820
821 switch (psb_intel_crtc->pipe) {
822 case 0:
823 break;
824 case 1:
825 palreg = PALETTE_B;
826 break;
827 case 2:
828 palreg = PALETTE_C;
829 break;
830 default:
831 dev_err(dev->dev, "Illegal Pipe Number.\n");
832 return;
833 }
834
835 if (gma_power_begin(dev, false)) {
836 for (i = 0; i < 256; i++) {
837 REG_WRITE(palreg + 4 * i,
838 ((psb_intel_crtc->lut_r[i] +
839 psb_intel_crtc->lut_adj[i]) << 16) |
840 ((psb_intel_crtc->lut_g[i] +
841 psb_intel_crtc->lut_adj[i]) << 8) |
842 (psb_intel_crtc->lut_b[i] +
843 psb_intel_crtc->lut_adj[i]));
844 }
845 gma_power_end(dev);
846 } else {
847 for (i = 0; i < 256; i++) {
848 dev_priv->save_palette_a[i] =
849 ((psb_intel_crtc->lut_r[i] +
850 psb_intel_crtc->lut_adj[i]) << 16) |
851 ((psb_intel_crtc->lut_g[i] +
852 psb_intel_crtc->lut_adj[i]) << 8) |
853 (psb_intel_crtc->lut_b[i] +
854 psb_intel_crtc->lut_adj[i]);
855 }
856
857 }
858}
859
860/**
861 * Save the hardware state of the given crtc
862 */
863static void psb_intel_crtc_save(struct drm_crtc *crtc)
864{
865 struct drm_device *dev = crtc->dev;
866 /* struct drm_psb_private *dev_priv =
867 (struct drm_psb_private *)dev->dev_private; */
868 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
869 struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
870 int pipeA = (psb_intel_crtc->pipe == 0);
871 uint32_t paletteReg;
872 int i;
873
874 if (!crtc_state) {
875 dev_err(dev->dev, "No CRTC state found\n");
876 return;
877 }
878
879 crtc_state->saveDSPCNTR = REG_READ(pipeA ? DSPACNTR : DSPBCNTR);
880 crtc_state->savePIPECONF = REG_READ(pipeA ? PIPEACONF : PIPEBCONF);
881 crtc_state->savePIPESRC = REG_READ(pipeA ? PIPEASRC : PIPEBSRC);
882 crtc_state->saveFP0 = REG_READ(pipeA ? FPA0 : FPB0);
883 crtc_state->saveFP1 = REG_READ(pipeA ? FPA1 : FPB1);
884 crtc_state->saveDPLL = REG_READ(pipeA ? DPLL_A : DPLL_B);
885 crtc_state->saveHTOTAL = REG_READ(pipeA ? HTOTAL_A : HTOTAL_B);
886 crtc_state->saveHBLANK = REG_READ(pipeA ? HBLANK_A : HBLANK_B);
887 crtc_state->saveHSYNC = REG_READ(pipeA ? HSYNC_A : HSYNC_B);
888 crtc_state->saveVTOTAL = REG_READ(pipeA ? VTOTAL_A : VTOTAL_B);
889 crtc_state->saveVBLANK = REG_READ(pipeA ? VBLANK_A : VBLANK_B);
890 crtc_state->saveVSYNC = REG_READ(pipeA ? VSYNC_A : VSYNC_B);
891 crtc_state->saveDSPSTRIDE = REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE);
892
893	/* NOTE: DSPSIZE and DSPPOS are only for psb */
894 crtc_state->saveDSPSIZE = REG_READ(pipeA ? DSPASIZE : DSPBSIZE);
895 crtc_state->saveDSPPOS = REG_READ(pipeA ? DSPAPOS : DSPBPOS);
896
897 crtc_state->saveDSPBASE = REG_READ(pipeA ? DSPABASE : DSPBBASE);
898
899 paletteReg = pipeA ? PALETTE_A : PALETTE_B;
900 for (i = 0; i < 256; ++i)
901 crtc_state->savePalette[i] = REG_READ(paletteReg + (i << 2));
902}
903
904/**
905 * Restore the hardware state of the given crtc
906 */
907static void psb_intel_crtc_restore(struct drm_crtc *crtc)
908{
909 struct drm_device *dev = crtc->dev;
910 /* struct drm_psb_private * dev_priv =
911 (struct drm_psb_private *)dev->dev_private; */
912 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
913 struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
914 /* struct drm_crtc_helper_funcs * crtc_funcs = crtc->helper_private; */
915 int pipeA = (psb_intel_crtc->pipe == 0);
916 uint32_t paletteReg;
917 int i;
918
919 if (!crtc_state) {
920 dev_err(dev->dev, "No crtc state\n");
921 return;
922 }
923
924 if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
925 REG_WRITE(pipeA ? DPLL_A : DPLL_B,
926 crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
927 REG_READ(pipeA ? DPLL_A : DPLL_B);
928 udelay(150);
929 }
930
931 REG_WRITE(pipeA ? FPA0 : FPB0, crtc_state->saveFP0);
932 REG_READ(pipeA ? FPA0 : FPB0);
933
934 REG_WRITE(pipeA ? FPA1 : FPB1, crtc_state->saveFP1);
935 REG_READ(pipeA ? FPA1 : FPB1);
936
937 REG_WRITE(pipeA ? DPLL_A : DPLL_B, crtc_state->saveDPLL);
938 REG_READ(pipeA ? DPLL_A : DPLL_B);
939 udelay(150);
940
941 REG_WRITE(pipeA ? HTOTAL_A : HTOTAL_B, crtc_state->saveHTOTAL);
942 REG_WRITE(pipeA ? HBLANK_A : HBLANK_B, crtc_state->saveHBLANK);
943 REG_WRITE(pipeA ? HSYNC_A : HSYNC_B, crtc_state->saveHSYNC);
944 REG_WRITE(pipeA ? VTOTAL_A : VTOTAL_B, crtc_state->saveVTOTAL);
945 REG_WRITE(pipeA ? VBLANK_A : VBLANK_B, crtc_state->saveVBLANK);
946 REG_WRITE(pipeA ? VSYNC_A : VSYNC_B, crtc_state->saveVSYNC);
947 REG_WRITE(pipeA ? DSPASTRIDE : DSPBSTRIDE, crtc_state->saveDSPSTRIDE);
948
949 REG_WRITE(pipeA ? DSPASIZE : DSPBSIZE, crtc_state->saveDSPSIZE);
950 REG_WRITE(pipeA ? DSPAPOS : DSPBPOS, crtc_state->saveDSPPOS);
951
952 REG_WRITE(pipeA ? PIPEASRC : PIPEBSRC, crtc_state->savePIPESRC);
953 REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
954 REG_WRITE(pipeA ? PIPEACONF : PIPEBCONF, crtc_state->savePIPECONF);
955
956 psb_intel_wait_for_vblank(dev);
957
958 REG_WRITE(pipeA ? DSPACNTR : DSPBCNTR, crtc_state->saveDSPCNTR);
959 REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
960
961 psb_intel_wait_for_vblank(dev);
962
963 paletteReg = pipeA ? PALETTE_A : PALETTE_B;
964 for (i = 0; i < 256; ++i)
965 REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]);
966}
967
968static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
969 struct drm_file *file_priv,
970 uint32_t handle,
971 uint32_t width, uint32_t height)
972{
973 struct drm_device *dev = crtc->dev;
974 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
975 int pipe = psb_intel_crtc->pipe;
976 uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
977 uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
978 uint32_t temp;
979 size_t addr = 0;
980 struct gtt_range *gt;
981 struct drm_gem_object *obj;
982 int ret;
983
984	/* If we want to turn off the cursor ignore width and height */
985 if (!handle) {
986 /* turn off the cursor */
987 temp = CURSOR_MODE_DISABLE;
988
989 if (gma_power_begin(dev, false)) {
990 REG_WRITE(control, temp);
991 REG_WRITE(base, 0);
992 gma_power_end(dev);
993 }
994
995 /* Unpin the old GEM object */
996 if (psb_intel_crtc->cursor_obj) {
997 gt = container_of(psb_intel_crtc->cursor_obj,
998 struct gtt_range, gem);
999 psb_gtt_unpin(gt);
1000 drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
1001 psb_intel_crtc->cursor_obj = NULL;
1002 }
1003
1004 return 0;
1005 }
1006
1007 /* Currently we only support 64x64 cursors */
1008 if (width != 64 || height != 64) {
1009 dev_dbg(dev->dev, "we currently only support 64x64 cursors\n");
1010 return -EINVAL;
1011 }
1012
1013 obj = drm_gem_object_lookup(dev, file_priv, handle);
1014 if (!obj)
1015 return -ENOENT;
1016
1017	if (obj->size < width * height * 4) {
1018		dev_dbg(dev->dev, "buffer is too small\n");
		drm_gem_object_unreference_unlocked(obj); /* drop lookup reference */
1019		return -ENOMEM;
1020	}
1021
1022 gt = container_of(obj, struct gtt_range, gem);
1023
1024 /* Pin the memory into the GTT */
1025 ret = psb_gtt_pin(gt);
1026	if (ret) {
1027		dev_err(dev->dev, "Cannot pin down handle 0x%x\n", handle);
		drm_gem_object_unreference_unlocked(obj); /* drop lookup reference */
1028		return ret;
1029	}
1030
1031
1032 addr = gt->offset; /* Or resource.start ??? */
1033
1034 psb_intel_crtc->cursor_addr = addr;
1035
1036 temp = 0;
1037 /* set the pipe for the cursor */
1038 temp |= (pipe << 28);
1039 temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
1040
1041 if (gma_power_begin(dev, false)) {
1042 REG_WRITE(control, temp);
1043 REG_WRITE(base, addr);
1044 gma_power_end(dev);
1045 }
1046
1047	/* Unpin the old bo and always track the new one */
1048	if (psb_intel_crtc->cursor_obj) {
1049		gt = container_of(psb_intel_crtc->cursor_obj,
1050							struct gtt_range, gem);
1051		psb_gtt_unpin(gt);
1052		drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
1053	}
1054	psb_intel_crtc->cursor_obj = obj;
1055	return 0;
1056}
1057
1058static int psb_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
1059{
1060 struct drm_device *dev = crtc->dev;
1061 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
1062 int pipe = psb_intel_crtc->pipe;
1063 uint32_t temp = 0;
1064 uint32_t addr;
1065
1066
1067 if (x < 0) {
1068 temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
1069 x = -x;
1070 }
1071 if (y < 0) {
1072 temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
1073 y = -y;
1074 }
1075
1076 temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
1077 temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
1078
1079 addr = psb_intel_crtc->cursor_addr;
1080
1081 if (gma_power_begin(dev, false)) {
1082 REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
1083 REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, addr);
1084 gma_power_end(dev);
1085 }
1086 return 0;
1087}
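
/*
 * Editor's note: the cursor position registers are sign-magnitude, so
 * e.g. x = -10, y = 20 is programmed as
 *
 *	(CURSOR_POS_SIGN << CURSOR_X_SHIFT) |
 *	((10 & CURSOR_POS_MASK) << CURSOR_X_SHIFT) |
 *	((20 & CURSOR_POS_MASK) << CURSOR_Y_SHIFT)
 *
 * which is exactly the value the function above builds in "temp".
 */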
1088
1089void psb_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
1090 u16 *green, u16 *blue, uint32_t type, uint32_t size)
1091{
1092 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
1093 int i;
1094
1095 if (size != 256)
1096 return;
1097
1098 for (i = 0; i < 256; i++) {
1099 psb_intel_crtc->lut_r[i] = red[i] >> 8;
1100 psb_intel_crtc->lut_g[i] = green[i] >> 8;
1101 psb_intel_crtc->lut_b[i] = blue[i] >> 8;
1102 }
1103
1104 psb_intel_crtc_load_lut(crtc);
1105}
1106
1107static int psb_crtc_set_config(struct drm_mode_set *set)
1108{
1109 int ret;
1110 struct drm_device *dev = set->crtc->dev;
1111 struct drm_psb_private *dev_priv = dev->dev_private;
1112
1113 if (!dev_priv->rpm_enabled)
1114 return drm_crtc_helper_set_config(set);
1115
1116 pm_runtime_forbid(&dev->pdev->dev);
1117 ret = drm_crtc_helper_set_config(set);
1118 pm_runtime_allow(&dev->pdev->dev);
1119 return ret;
1120}
1121
1122/* Returns the clock of the currently programmed mode of the given pipe. */
1123static int psb_intel_crtc_clock_get(struct drm_device *dev,
1124 struct drm_crtc *crtc)
1125{
1126 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
1127 int pipe = psb_intel_crtc->pipe;
1128 u32 dpll;
1129 u32 fp;
1130 struct psb_intel_clock_t clock;
1131 bool is_lvds;
1132 struct drm_psb_private *dev_priv = dev->dev_private;
1133
1134 if (gma_power_begin(dev, false)) {
1135 dpll = REG_READ((pipe == 0) ? DPLL_A : DPLL_B);
1136 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
1137 fp = REG_READ((pipe == 0) ? FPA0 : FPB0);
1138 else
1139 fp = REG_READ((pipe == 0) ? FPA1 : FPB1);
1140 is_lvds = (pipe == 1) && (REG_READ(LVDS) & LVDS_PORT_EN);
1141 gma_power_end(dev);
1142 } else {
1143 dpll = (pipe == 0) ?
1144 dev_priv->saveDPLL_A : dev_priv->saveDPLL_B;
1145
1146 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
1147 fp = (pipe == 0) ?
1148 dev_priv->saveFPA0 :
1149 dev_priv->saveFPB0;
1150 else
1151 fp = (pipe == 0) ?
1152 dev_priv->saveFPA1 :
1153 dev_priv->saveFPB1;
1154
1155 is_lvds = (pipe == 1) && (dev_priv->saveLVDS & LVDS_PORT_EN);
1156 }
1157
1158 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
1159 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
1160 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
1161
1162 if (is_lvds) {
1163 clock.p1 =
1164 ffs((dpll &
1165 DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
1166 DPLL_FPA01_P1_POST_DIV_SHIFT);
1167 clock.p2 = 14;
1168
1169 if ((dpll & PLL_REF_INPUT_MASK) ==
1170 PLLB_REF_INPUT_SPREADSPECTRUMIN) {
1171 /* XXX: might not be 66MHz */
1172 i8xx_clock(66000, &clock);
1173 } else
1174 i8xx_clock(48000, &clock);
1175 } else {
1176 if (dpll & PLL_P1_DIVIDE_BY_TWO)
1177 clock.p1 = 2;
1178 else {
1179 clock.p1 =
1180 ((dpll &
1181 DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
1182 DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
1183 }
1184 if (dpll & PLL_P2_DIVIDE_BY_4)
1185 clock.p2 = 4;
1186 else
1187 clock.p2 = 2;
1188
1189 i8xx_clock(48000, &clock);
1190 }
1191
1192 /* XXX: It would be nice to validate the clocks, but we can't reuse
1193 * i830PllIsValid() because it relies on the xf86_config connector
1194 * configuration being accurate, which it isn't necessarily.
1195 */
1196
1197 return clock.dot;
1198}
1199
1200/** Returns the currently programmed mode of the given pipe. */
1201struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
1202 struct drm_crtc *crtc)
1203{
1204 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
1205 int pipe = psb_intel_crtc->pipe;
1206 struct drm_display_mode *mode;
1207 int htot;
1208 int hsync;
1209 int vtot;
1210 int vsync;
1211 struct drm_psb_private *dev_priv = dev->dev_private;
1212
1213 if (gma_power_begin(dev, false)) {
1214 htot = REG_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B);
1215 hsync = REG_READ((pipe == 0) ? HSYNC_A : HSYNC_B);
1216 vtot = REG_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B);
1217 vsync = REG_READ((pipe == 0) ? VSYNC_A : VSYNC_B);
1218 gma_power_end(dev);
1219 } else {
1220 htot = (pipe == 0) ?
1221 dev_priv->saveHTOTAL_A : dev_priv->saveHTOTAL_B;
1222 hsync = (pipe == 0) ?
1223 dev_priv->saveHSYNC_A : dev_priv->saveHSYNC_B;
1224 vtot = (pipe == 0) ?
1225 dev_priv->saveVTOTAL_A : dev_priv->saveVTOTAL_B;
1226 vsync = (pipe == 0) ?
1227 dev_priv->saveVSYNC_A : dev_priv->saveVSYNC_B;
1228 }
1229
1230 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
1231 if (!mode)
1232 return NULL;
1233
1234 mode->clock = psb_intel_crtc_clock_get(dev, crtc);
1235 mode->hdisplay = (htot & 0xffff) + 1;
1236 mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
1237 mode->hsync_start = (hsync & 0xffff) + 1;
1238 mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
1239 mode->vdisplay = (vtot & 0xffff) + 1;
1240 mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
1241 mode->vsync_start = (vsync & 0xffff) + 1;
1242 mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
1243
1244 drm_mode_set_name(mode);
1245 drm_mode_set_crtcinfo(mode, 0);
1246
1247 return mode;
1248}
1249
1250void psb_intel_crtc_destroy(struct drm_crtc *crtc)
1251{
1252 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
1253 struct gtt_range *gt;
1254
1255 /* Unpin the old GEM object */
1256 if (psb_intel_crtc->cursor_obj) {
1257 gt = container_of(psb_intel_crtc->cursor_obj,
1258 struct gtt_range, gem);
1259 psb_gtt_unpin(gt);
1260 drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
1261 psb_intel_crtc->cursor_obj = NULL;
1262 }
1263 kfree(psb_intel_crtc->crtc_state);
1264 drm_crtc_cleanup(crtc);
1265 kfree(psb_intel_crtc);
1266}
1267
1268const struct drm_crtc_helper_funcs psb_intel_helper_funcs = {
1269 .dpms = psb_intel_crtc_dpms,
1270 .mode_fixup = psb_intel_crtc_mode_fixup,
1271 .mode_set = psb_intel_crtc_mode_set,
1272 .mode_set_base = psb_intel_pipe_set_base,
1273 .prepare = psb_intel_crtc_prepare,
1274 .commit = psb_intel_crtc_commit,
1275};
1276
1277const struct drm_crtc_funcs psb_intel_crtc_funcs = {
1278 .save = psb_intel_crtc_save,
1279 .restore = psb_intel_crtc_restore,
1280 .cursor_set = psb_intel_crtc_cursor_set,
1281 .cursor_move = psb_intel_crtc_cursor_move,
1282 .gamma_set = psb_intel_crtc_gamma_set,
1283 .set_config = psb_crtc_set_config,
1284 .destroy = psb_intel_crtc_destroy,
1285};
1286
1287/*
1288 * Set the default value of cursor control and base register
1289 * to zero. This is a workaround for a h/w defect on Oaktrail.
1290 */
1291static void psb_intel_cursor_init(struct drm_device *dev, int pipe)
1292{
1293 u32 control[3] = { CURACNTR, CURBCNTR, CURCCNTR };
1294 u32 base[3] = { CURABASE, CURBBASE, CURCBASE };
1295
1296 REG_WRITE(control[pipe], 0);
1297 REG_WRITE(base[pipe], 0);
1298}
1299
1300void psb_intel_crtc_init(struct drm_device *dev, int pipe,
1301 struct psb_intel_mode_device *mode_dev)
1302{
1303 struct drm_psb_private *dev_priv = dev->dev_private;
1304 struct psb_intel_crtc *psb_intel_crtc;
1305 int i;
1306 uint16_t *r_base, *g_base, *b_base;
1307
1308 /* We allocate an extra array of drm_connector pointers
1309 * for fbdev after the crtc */
1310 psb_intel_crtc =
1311 kzalloc(sizeof(struct psb_intel_crtc) +
1312 (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)),
1313 GFP_KERNEL);
1314 if (psb_intel_crtc == NULL)
1315 return;
1316
1317 psb_intel_crtc->crtc_state =
1318 kzalloc(sizeof(struct psb_intel_crtc_state), GFP_KERNEL);
1319 if (!psb_intel_crtc->crtc_state) {
1320 dev_err(dev->dev, "Crtc state error: No memory\n");
1321 kfree(psb_intel_crtc);
1322 return;
1323 }
1324
1325 /* Set the CRTC operations from the chip-specific data */
1326 drm_crtc_init(dev, &psb_intel_crtc->base, dev_priv->ops->crtc_funcs);
1327
1328 drm_mode_crtc_set_gamma_size(&psb_intel_crtc->base, 256);
1329 psb_intel_crtc->pipe = pipe;
1330 psb_intel_crtc->plane = pipe;
1331
1332 r_base = psb_intel_crtc->base.gamma_store;
1333 g_base = r_base + 256;
1334 b_base = g_base + 256;
1335 for (i = 0; i < 256; i++) {
1336 psb_intel_crtc->lut_r[i] = i;
1337 psb_intel_crtc->lut_g[i] = i;
1338 psb_intel_crtc->lut_b[i] = i;
1339 r_base[i] = i << 8;
1340 g_base[i] = i << 8;
1341 b_base[i] = i << 8;
1342
1343 psb_intel_crtc->lut_adj[i] = 0;
1344 }
1345
1346 psb_intel_crtc->mode_dev = mode_dev;
1347 psb_intel_crtc->cursor_addr = 0;
1348
1349 drm_crtc_helper_add(&psb_intel_crtc->base,
1350 dev_priv->ops->crtc_helper);
1351
1352 /* Set up the array of drm_connector pointers */
1353 psb_intel_crtc->mode_set.crtc = &psb_intel_crtc->base;
1354 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
1355 dev_priv->plane_to_crtc_mapping[psb_intel_crtc->plane] != NULL);
1356 dev_priv->plane_to_crtc_mapping[psb_intel_crtc->plane] =
1357 &psb_intel_crtc->base;
1358 dev_priv->pipe_to_crtc_mapping[psb_intel_crtc->pipe] =
1359 &psb_intel_crtc->base;
1360 psb_intel_crtc->mode_set.connectors =
1361 (struct drm_connector **) (psb_intel_crtc + 1);
1362 psb_intel_crtc->mode_set.num_connectors = 0;
1363 psb_intel_cursor_init(dev, pipe);
1364}
1365
1366int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
1367 struct drm_file *file_priv)
1368{
1369 struct drm_psb_private *dev_priv = dev->dev_private;
1370 struct drm_psb_get_pipe_from_crtc_id_arg *pipe_from_crtc_id = data;
1371 struct drm_mode_object *drmmode_obj;
1372 struct psb_intel_crtc *crtc;
1373
1374 if (!dev_priv) {
1375 dev_err(dev->dev, "called with no initialization\n");
1376 return -EINVAL;
1377 }
1378
1379 drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
1380 DRM_MODE_OBJECT_CRTC);
1381
1382 if (!drmmode_obj) {
1383 dev_err(dev->dev, "no such CRTC id\n");
1384 return -EINVAL;
1385 }
1386
1387 crtc = to_psb_intel_crtc(obj_to_crtc(drmmode_obj));
1388 pipe_from_crtc_id->pipe = crtc->pipe;
1389
1390 return 0;
1391}
1392
1393struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
1394{
1395 struct drm_crtc *crtc;
1396
1397 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1398 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
1399 if (psb_intel_crtc->pipe == pipe)
1400 return crtc;
1401 }
1402 return NULL; /* No CRTC drives this pipe */
1403}
1404
1405int psb_intel_connector_clones(struct drm_device *dev, int type_mask)
1406{
1407 int index_mask = 0;
1408 struct drm_connector *connector;
1409 int entry = 0;
1410
1411 list_for_each_entry(connector, &dev->mode_config.connector_list,
1412 head) {
1413 struct psb_intel_encoder *psb_intel_encoder =
1414 psb_intel_attached_encoder(connector);
1415 if (type_mask & (1 << psb_intel_encoder->type))
1416 index_mask |= (1 << entry);
1417 entry++;
1418 }
1419 return index_mask;
1420}
1421
1422
1423void psb_intel_modeset_cleanup(struct drm_device *dev)
1424{
1425 drm_mode_config_cleanup(dev);
1426}
1427
1428
1429/* The current Intel driver doesn't take advantage of encoders;
1430 always give back the encoder for the connector.
1431*/
1432struct drm_encoder *psb_intel_best_encoder(struct drm_connector *connector)
1433{
1434 struct psb_intel_encoder *psb_intel_encoder =
1435 psb_intel_attached_encoder(connector);
1436
1437 return &psb_intel_encoder->base;
1438}
1439
1440void psb_intel_connector_attach_encoder(struct psb_intel_connector *connector,
1441 struct psb_intel_encoder *encoder)
1442{
1443 connector->encoder = encoder;
1444 drm_mode_connector_attach_encoder(&connector->base,
1445 &encoder->base);
1446}
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.h b/drivers/gpu/drm/gma500/psb_intel_display.h
new file mode 100644
index 00000000000..535b49a5e40
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_intel_display.h
@@ -0,0 +1,28 @@
1/* Copyright (c) 2008, Intel Corporation
2 *
3 * This program is free software; you can redistribute it and/or modify it
4 * under the terms and conditions of the GNU General Public License,
5 * version 2, as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope it will be useful, but WITHOUT
8 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
9 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
10 * more details.
11 *
12 * You should have received a copy of the GNU General Public License along with
13 * this program; if not, write to the Free Software Foundation, Inc.,
14 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
15 *
16 * Authors:
17 * Eric Anholt <eric@anholt.net>
18 */
19
20#ifndef _INTEL_DISPLAY_H_
21#define _INTEL_DISPLAY_H_
22
23bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type);
24void psb_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
25 u16 *green, u16 *blue, uint32_t type, uint32_t size);
26void psb_intel_crtc_destroy(struct drm_crtc *crtc);
27
28#endif
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
new file mode 100644
index 00000000000..f40535e5668
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -0,0 +1,289 @@
1/*
2 * Copyright (c) 2009-2011, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 */
18
19#ifndef __INTEL_DRV_H__
20#define __INTEL_DRV_H__
21
22#include <linux/i2c.h>
23#include <linux/i2c-algo-bit.h>
24#include <drm/drm_crtc.h>
25#include <drm/drm_crtc_helper.h>
26#include <linux/gpio.h>
27
28/*
29 * Display related stuff
30 */
31
32/* store information about an Ixxx DVO */
33/* The i830->i865 use multiple DVOs with multiple i2cs */
34/* the i915, i945 have a single sDVO i2c bus - which is different */
35#define MAX_OUTPUTS 6
36/* maximum connectors per crtc in the mode set */
37#define INTELFB_CONN_LIMIT 4
38
39#define INTEL_I2C_BUS_DVO 1
40#define INTEL_I2C_BUS_SDVO 2
41
42/* Intel Pipe Clone Bit */
43#define INTEL_HDMIB_CLONE_BIT 1
44#define INTEL_HDMIC_CLONE_BIT 2
45#define INTEL_HDMID_CLONE_BIT 3
46#define INTEL_HDMIE_CLONE_BIT 4
47#define INTEL_HDMIF_CLONE_BIT 5
48#define INTEL_SDVO_NON_TV_CLONE_BIT 6
49#define INTEL_SDVO_TV_CLONE_BIT 7
50#define INTEL_SDVO_LVDS_CLONE_BIT 8
51#define INTEL_ANALOG_CLONE_BIT 9
52#define INTEL_TV_CLONE_BIT 10
53#define INTEL_DP_B_CLONE_BIT 11
54#define INTEL_DP_C_CLONE_BIT 12
55#define INTEL_DP_D_CLONE_BIT 13
56#define INTEL_LVDS_CLONE_BIT 14
57#define INTEL_DVO_TMDS_CLONE_BIT 15
58#define INTEL_DVO_LVDS_CLONE_BIT 16
59#define INTEL_EDP_CLONE_BIT 17
60
61/* These are outputs from the chip - integrated only;
62 * external chips are via DVO or SDVO output */
63#define INTEL_OUTPUT_UNUSED 0
64#define INTEL_OUTPUT_ANALOG 1
65#define INTEL_OUTPUT_DVO 2
66#define INTEL_OUTPUT_SDVO 3
67#define INTEL_OUTPUT_LVDS 4
68#define INTEL_OUTPUT_TVOUT 5
69#define INTEL_OUTPUT_HDMI 6
70#define INTEL_OUTPUT_MIPI 7
71#define INTEL_OUTPUT_MIPI2 8
72
73#define INTEL_DVO_CHIP_NONE 0
74#define INTEL_DVO_CHIP_LVDS 1
75#define INTEL_DVO_CHIP_TMDS 2
76#define INTEL_DVO_CHIP_TVOUT 4
77
78#define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0)
79#define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT)
80
81static inline void
82psb_intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
83 int multiplier)
84{
85 mode->clock *= multiplier;
86 mode->private_flags |= multiplier;
87}
88
89static inline int
90psb_intel_mode_get_pixel_multiplier(const struct drm_display_mode *mode)
91{
92 return (mode->private_flags & INTEL_MODE_PIXEL_MULTIPLIER_MASK)
93 >> INTEL_MODE_PIXEL_MULTIPLIER_SHIFT;
94}
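
/*
 * Illustrative sketch (editor-added, hypothetical helper): recovering the
 * original dot clock from a mode that went through
 * psb_intel_mode_set_pixel_multiplier() above, which scales mode->clock and
 * records the factor in private_flags.
 */
static inline int
psb_intel_mode_get_base_clock(const struct drm_display_mode *mode)
{
	int mult = psb_intel_mode_get_pixel_multiplier(mode);

	/* A multiplier of 0 means no factor was ever recorded */
	return mult ? mode->clock / mult : mode->clock;
}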
95
96
97/*
98 * Hold information usually put in the device driver's private data here,
99 * since it needs to be shared across multiple drivers' private data.
100 */
101struct psb_intel_mode_device {
102
103 /*
104 * Abstracted memory manager operations
105 */
106 size_t(*bo_offset) (struct drm_device *dev, void *bo);
107
108 /*
109 * Cursor (Can go ?)
110 */
111 int cursor_needs_physical;
112
113 /*
114 * LVDS info
115 */
116 int backlight_duty_cycle; /* restore backlight to this value */
117 bool panel_wants_dither;
118 struct drm_display_mode *panel_fixed_mode;
119 struct drm_display_mode *panel_fixed_mode2;
120 struct drm_display_mode *vbt_mode; /* if any */
121
122 uint32_t saveBLC_PWM_CTL;
123};
124
125struct psb_intel_i2c_chan {
126 /* for getting at dev. private (mmio etc.) */
127 struct drm_device *drm_dev;
128 u32 reg; /* GPIO reg */
129 struct i2c_adapter adapter;
130 struct i2c_algo_bit_data algo;
131 u8 slave_addr;
132};
133
134struct psb_intel_encoder {
135 struct drm_encoder base;
136 int type;
137 bool needs_tv_clock;
138 void (*hot_plug)(struct psb_intel_encoder *);
139 int crtc_mask;
140 int clone_mask;
141 void *dev_priv; /* For sdvo_priv, lvds_priv, etc... */
142
143 /* FIXME: Either make SDVO and LVDS store its i2c here or give CDV its
144 own set of output privates */
145 struct psb_intel_i2c_chan *i2c_bus;
146 struct psb_intel_i2c_chan *ddc_bus;
147};
148
149struct psb_intel_connector {
150 struct drm_connector base;
151 struct psb_intel_encoder *encoder;
152};
153
154struct psb_intel_crtc_state {
155 uint32_t saveDSPCNTR;
156 uint32_t savePIPECONF;
157 uint32_t savePIPESRC;
158 uint32_t saveDPLL;
159 uint32_t saveFP0;
160 uint32_t saveFP1;
161 uint32_t saveHTOTAL;
162 uint32_t saveHBLANK;
163 uint32_t saveHSYNC;
164 uint32_t saveVTOTAL;
165 uint32_t saveVBLANK;
166 uint32_t saveVSYNC;
167 uint32_t saveDSPSTRIDE;
168 uint32_t saveDSPSIZE;
169 uint32_t saveDSPPOS;
170 uint32_t saveDSPBASE;
171 uint32_t savePalette[256];
172};
173
174struct psb_intel_crtc {
175 struct drm_crtc base;
176 int pipe;
177 int plane;
178 uint32_t cursor_addr;
179 u8 lut_r[256], lut_g[256], lut_b[256];
180 u8 lut_adj[256];
181 struct psb_intel_framebuffer *fbdev_fb;
182 /* a mode_set for fbdev users on this crtc */
183 struct drm_mode_set mode_set;
184
185 /* GEM object that holds our cursor */
186 struct drm_gem_object *cursor_obj;
187
188 struct drm_display_mode saved_mode;
189 struct drm_display_mode saved_adjusted_mode;
190
191 struct psb_intel_mode_device *mode_dev;
192
193 /*crtc mode setting flags*/
194 u32 mode_flags;
195
196 /* Saved Crtc HW states */
197 struct psb_intel_crtc_state *crtc_state;
198};
199
200#define to_psb_intel_crtc(x) \
201 container_of(x, struct psb_intel_crtc, base)
202#define to_psb_intel_connector(x) \
203 container_of(x, struct psb_intel_connector, base)
204#define to_psb_intel_encoder(x) \
205 container_of(x, struct psb_intel_encoder, base)
206#define to_psb_intel_framebuffer(x) \
207 container_of(x, struct psb_intel_framebuffer, base)
208
209struct psb_intel_i2c_chan *psb_intel_i2c_create(struct drm_device *dev,
210 const u32 reg, const char *name);
211void psb_intel_i2c_destroy(struct psb_intel_i2c_chan *chan);
212int psb_intel_ddc_get_modes(struct drm_connector *connector,
213 struct i2c_adapter *adapter);
214extern bool psb_intel_ddc_probe(struct i2c_adapter *adapter);
215
216extern void psb_intel_crtc_init(struct drm_device *dev, int pipe,
217 struct psb_intel_mode_device *mode_dev);
218extern void psb_intel_crt_init(struct drm_device *dev);
219extern bool psb_intel_sdvo_init(struct drm_device *dev, int output_device);
220extern void psb_intel_dvo_init(struct drm_device *dev);
221extern void psb_intel_tv_init(struct drm_device *dev);
222extern void psb_intel_lvds_init(struct drm_device *dev,
223 struct psb_intel_mode_device *mode_dev);
224extern void psb_intel_lvds_set_brightness(struct drm_device *dev, int level);
225extern void oaktrail_lvds_init(struct drm_device *dev,
226 struct psb_intel_mode_device *mode_dev);
227extern void oaktrail_wait_for_INTR_PKT_SENT(struct drm_device *dev);
228extern void oaktrail_dsi_init(struct drm_device *dev,
229 struct psb_intel_mode_device *mode_dev);
230extern void mid_dsi_init(struct drm_device *dev,
231 struct psb_intel_mode_device *mode_dev, int dsi_num);
232
233extern void psb_intel_crtc_load_lut(struct drm_crtc *crtc);
234extern void psb_intel_encoder_prepare(struct drm_encoder *encoder);
235extern void psb_intel_encoder_commit(struct drm_encoder *encoder);
236extern void psb_intel_encoder_destroy(struct drm_encoder *encoder);
237
238static inline struct psb_intel_encoder *psb_intel_attached_encoder(
239 struct drm_connector *connector)
240{
241 return to_psb_intel_connector(connector)->encoder;
242}
243
244extern void psb_intel_connector_attach_encoder(
245 struct psb_intel_connector *connector,
246 struct psb_intel_encoder *encoder);
247
248extern struct drm_encoder *psb_intel_best_encoder(struct drm_connector
249 *connector);
250
251extern struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
252 struct drm_crtc *crtc);
253extern void psb_intel_wait_for_vblank(struct drm_device *dev);
254extern int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
255 struct drm_file *file_priv);
256extern struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev,
257 int pipe);
258extern struct drm_connector *psb_intel_sdvo_find(struct drm_device *dev,
259 int sdvoB);
260extern int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector);
261extern void psb_intel_sdvo_set_hotplug(struct drm_connector *connector,
262 int enable);
263extern int intelfb_probe(struct drm_device *dev);
264extern int intelfb_remove(struct drm_device *dev,
265 struct drm_framebuffer *fb);
266extern struct drm_framebuffer *psb_intel_framebuffer_create(
267 struct drm_device *dev,
268 struct drm_mode_fb_cmd *mode_cmd,
269 void *mm_private);
270
271extern bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
272 struct drm_display_mode *mode,
273 struct drm_display_mode *adjusted_mode);
274extern int psb_intel_lvds_mode_valid(struct drm_connector *connector,
275 struct drm_display_mode *mode);
276extern int psb_intel_lvds_set_property(struct drm_connector *connector,
277 struct drm_property *property,
278 uint64_t value);
279extern void psb_intel_lvds_destroy(struct drm_connector *connector);
280extern const struct drm_encoder_funcs psb_intel_lvds_enc_funcs;
281
282/* intel_gmbus.c */
283extern void gma_intel_i2c_reset(struct drm_device *dev);
284extern int gma_intel_setup_gmbus(struct drm_device *dev);
285extern void gma_intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
286extern void gma_intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
287extern void gma_intel_teardown_gmbus(struct drm_device *dev);
288
289#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
new file mode 100644
index 00000000000..a25e4ca5e91
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -0,0 +1,868 @@
1/*
2 * Copyright © 2006-2007 Intel Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Authors:
18 * Eric Anholt <eric@anholt.net>
19 * Dave Airlie <airlied@linux.ie>
20 * Jesse Barnes <jesse.barnes@intel.com>
21 */
22
23#include <linux/i2c.h>
24#include <drm/drmP.h>
25
26#include "intel_bios.h"
27#include "psb_drv.h"
28#include "psb_intel_drv.h"
29#include "psb_intel_reg.h"
30#include "power.h"
31#include <linux/pm_runtime.h>
32
33/*
34 * LVDS I2C backlight control macros
35 */
36#define BRIGHTNESS_MAX_LEVEL 100
37#define BRIGHTNESS_MASK 0xFF
38#define BLC_I2C_TYPE 0x01
39#define BLC_PWM_TYPT 0x02
40
41#define BLC_POLARITY_NORMAL 0
42#define BLC_POLARITY_INVERSE 1
43
44#define PSB_BLC_MAX_PWM_REG_FREQ (0xFFFE)
45#define PSB_BLC_MIN_PWM_REG_FREQ (0x2)
46#define PSB_BLC_PWM_PRECISION_FACTOR (10)
47#define PSB_BACKLIGHT_PWM_CTL_SHIFT (16)
48#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
49
50struct psb_intel_lvds_priv {
51 /*
52 * Saved LVDS output states
53 */
54 uint32_t savePP_ON;
55 uint32_t savePP_OFF;
56 uint32_t saveLVDS;
57 uint32_t savePP_CONTROL;
58 uint32_t savePP_CYCLE;
59 uint32_t savePFIT_CONTROL;
60 uint32_t savePFIT_PGM_RATIOS;
61 uint32_t saveBLC_PWM_CTL;
62
63 struct psb_intel_i2c_chan *i2c_bus;
64 struct psb_intel_i2c_chan *ddc_bus;
65};
66
67
68/*
69 * Returns the maximum level of the backlight duty cycle field.
70 */
71static u32 psb_intel_lvds_get_max_backlight(struct drm_device *dev)
72{
73 struct drm_psb_private *dev_priv = dev->dev_private;
74 u32 ret;
75
76 if (gma_power_begin(dev, false)) {
77 ret = REG_READ(BLC_PWM_CTL);
78 gma_power_end(dev);
79 } else /* Powered off, use the saved value */
80 ret = dev_priv->saveBLC_PWM_CTL;
81
82 /* The top 15 bits hold the modulation frequency */
83 ret = (ret & BACKLIGHT_MODULATION_FREQ_MASK) >>
84 BACKLIGHT_MODULATION_FREQ_SHIFT;
85
86 ret *= 2; /* Return a 16-bit range as needed for setting */
87 if (ret == 0)
88 dev_err(dev->dev, "BL bug: Reg %08x save %08X\n",
89 REG_READ(BLC_PWM_CTL), dev_priv->saveBLC_PWM_CTL);
90 return ret;
91}
92
93/*
94 * Set LVDS backlight level by I2C command
95 *
96 * FIXME: at some point we need to both track this for PM and also
97 * disable runtime pm on MRST if the brightness is nil (ie blanked)
98 */
99static int psb_lvds_i2c_set_brightness(struct drm_device *dev,
100 unsigned int level)
101{
102 struct drm_psb_private *dev_priv =
103 (struct drm_psb_private *)dev->dev_private;
104
105 struct psb_intel_i2c_chan *lvds_i2c_bus = dev_priv->lvds_i2c_bus;
106 u8 out_buf[2];
107 unsigned int blc_i2c_brightness;
108
109 struct i2c_msg msgs[] = {
110 {
111 .addr = lvds_i2c_bus->slave_addr,
112 .flags = 0,
113 .len = 2,
114 .buf = out_buf,
115 }
116 };
117
118 blc_i2c_brightness = BRIGHTNESS_MASK & ((unsigned int)level *
119 BRIGHTNESS_MASK /
120 BRIGHTNESS_MAX_LEVEL);
121
122 if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
123 blc_i2c_brightness = BRIGHTNESS_MASK - blc_i2c_brightness;
124
125 out_buf[0] = dev_priv->lvds_bl->brightnesscmd;
126 out_buf[1] = (u8)blc_i2c_brightness;
127
128 if (i2c_transfer(&lvds_i2c_bus->adapter, msgs, 1) == 1) {
129 dev_dbg(dev->dev, "I2C set brightness.(command, value) (%d, %d)\n",
130 dev_priv->lvds_bl->brightnesscmd,
131 blc_i2c_brightness);
132 return 0;
133 }
134
135 dev_err(dev->dev, "I2C transfer error\n");
136 return -1;
137}
138
139
140static int psb_lvds_pwm_set_brightness(struct drm_device *dev, int level)
141{
142 struct drm_psb_private *dev_priv =
143 (struct drm_psb_private *)dev->dev_private;
144
145 u32 max_pwm_blc;
146 u32 blc_pwm_duty_cycle;
147
148 max_pwm_blc = psb_intel_lvds_get_max_backlight(dev);
149
150 /* BLC_PWM_CTL should be initialized during backlight device init */
151 BUG_ON(max_pwm_blc == 0);
152
153 blc_pwm_duty_cycle = level * max_pwm_blc / BRIGHTNESS_MAX_LEVEL;
154
155 if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
156 blc_pwm_duty_cycle = max_pwm_blc - blc_pwm_duty_cycle;
157
158 blc_pwm_duty_cycle &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
159 REG_WRITE(BLC_PWM_CTL,
160 (max_pwm_blc << PSB_BACKLIGHT_PWM_CTL_SHIFT) |
161 (blc_pwm_duty_cycle));
162
163 dev_info(dev->dev, "Backlight lvds set brightness %08x\n",
164 (max_pwm_blc << PSB_BACKLIGHT_PWM_CTL_SHIFT) |
165 (blc_pwm_duty_cycle));
166
167 return 0;
168}
169
170/*
171 * Set LVDS backlight level either by I2C or PWM
172 */
173void psb_intel_lvds_set_brightness(struct drm_device *dev, int level)
174{
175 struct drm_psb_private *dev_priv = dev->dev_private;
176
177 dev_dbg(dev->dev, "backlight level is %d\n", level);
178
179 if (!dev_priv->lvds_bl) {
180 dev_err(dev->dev, "NO LVDS backlight info\n");
181 return;
182 }
183
184 if (dev_priv->lvds_bl->type == BLC_I2C_TYPE)
185 psb_lvds_i2c_set_brightness(dev, level);
186 else
187 psb_lvds_pwm_set_brightness(dev, level);
188}
189
190/*
191 * Sets the backlight level.
192 *
193 * level: backlight level, from 0 to psb_intel_lvds_get_max_backlight().
194 */
195static void psb_intel_lvds_set_backlight(struct drm_device *dev, int level)
196{
197 struct drm_psb_private *dev_priv = dev->dev_private;
198 u32 blc_pwm_ctl;
199
200 if (gma_power_begin(dev, false)) {
201 blc_pwm_ctl = REG_READ(BLC_PWM_CTL);
202 blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK;
203 REG_WRITE(BLC_PWM_CTL,
204 (blc_pwm_ctl |
205 (level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
206 dev_priv->saveBLC_PWM_CTL = (blc_pwm_ctl |
207 (level << BACKLIGHT_DUTY_CYCLE_SHIFT));
208 gma_power_end(dev);
209 } else {
210 blc_pwm_ctl = dev_priv->saveBLC_PWM_CTL &
211 ~BACKLIGHT_DUTY_CYCLE_MASK;
212 dev_priv->saveBLC_PWM_CTL = (blc_pwm_ctl |
213 (level << BACKLIGHT_DUTY_CYCLE_SHIFT));
214 }
215}
216
217/*
218 * Sets the power state for the panel.
219 */
220static void psb_intel_lvds_set_power(struct drm_device *dev, bool on)
221{
222 struct drm_psb_private *dev_priv = dev->dev_private;
223 struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
224 u32 pp_status;
225
226 if (!gma_power_begin(dev, true)) {
227 dev_err(dev->dev, "set power, chip off!\n");
228 return;
229 }
230
231 if (on) {
232 REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
233 POWER_TARGET_ON);
234 do {
235 pp_status = REG_READ(PP_STATUS);
236 } while ((pp_status & PP_ON) == 0);
237
238 psb_intel_lvds_set_backlight(dev,
239 mode_dev->backlight_duty_cycle);
240 } else {
241 psb_intel_lvds_set_backlight(dev, 0);
242
243 REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
244 ~POWER_TARGET_ON);
245 do {
246 pp_status = REG_READ(PP_STATUS);
247 } while (pp_status & PP_ON);
248 }
249
250 gma_power_end(dev);
251}
252
253static void psb_intel_lvds_encoder_dpms(struct drm_encoder *encoder, int mode)
254{
255 struct drm_device *dev = encoder->dev;
256
257 if (mode == DRM_MODE_DPMS_ON)
258 psb_intel_lvds_set_power(dev, true);
259 else
260 psb_intel_lvds_set_power(dev, false);
261
262 /* XXX: We never power down the LVDS pairs. */
263}
264
265static void psb_intel_lvds_save(struct drm_connector *connector)
266{
267 struct drm_device *dev = connector->dev;
268 struct drm_psb_private *dev_priv =
269 (struct drm_psb_private *)dev->dev_private;
270 struct psb_intel_encoder *psb_intel_encoder =
271 psb_intel_attached_encoder(connector);
272 struct psb_intel_lvds_priv *lvds_priv =
273 (struct psb_intel_lvds_priv *)psb_intel_encoder->dev_priv;
274
275 lvds_priv->savePP_ON = REG_READ(LVDSPP_ON);
276 lvds_priv->savePP_OFF = REG_READ(LVDSPP_OFF);
277 lvds_priv->saveLVDS = REG_READ(LVDS);
278 lvds_priv->savePP_CONTROL = REG_READ(PP_CONTROL);
279 lvds_priv->savePP_CYCLE = REG_READ(PP_CYCLE);
280 /*lvds_priv->savePP_DIVISOR = REG_READ(PP_DIVISOR);*/
281 lvds_priv->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
282 lvds_priv->savePFIT_CONTROL = REG_READ(PFIT_CONTROL);
283 lvds_priv->savePFIT_PGM_RATIOS = REG_READ(PFIT_PGM_RATIOS);
284
285 /*TODO: move backlight_duty_cycle to psb_intel_lvds_priv*/
286 dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
287 BACKLIGHT_DUTY_CYCLE_MASK);
288
289 /*
290 * If the light is off at server startup,
291 * just make it full brightness
292 */
293 if (dev_priv->backlight_duty_cycle == 0)
294 dev_priv->backlight_duty_cycle =
295 psb_intel_lvds_get_max_backlight(dev);
296
297 dev_dbg(dev->dev, "(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n",
298 lvds_priv->savePP_ON,
299 lvds_priv->savePP_OFF,
300 lvds_priv->saveLVDS,
301 lvds_priv->savePP_CONTROL,
302 lvds_priv->savePP_CYCLE,
303 lvds_priv->saveBLC_PWM_CTL);
304}
305
306static void psb_intel_lvds_restore(struct drm_connector *connector)
307{
308 struct drm_device *dev = connector->dev;
309 u32 pp_status;
310 struct psb_intel_encoder *psb_intel_encoder =
311 psb_intel_attached_encoder(connector);
312 struct psb_intel_lvds_priv *lvds_priv =
313 (struct psb_intel_lvds_priv *)psb_intel_encoder->dev_priv;
314
315 dev_dbg(dev->dev, "(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n",
316 lvds_priv->savePP_ON,
317 lvds_priv->savePP_OFF,
318 lvds_priv->saveLVDS,
319 lvds_priv->savePP_CONTROL,
320 lvds_priv->savePP_CYCLE,
321 lvds_priv->saveBLC_PWM_CTL);
322
323 REG_WRITE(BLC_PWM_CTL, lvds_priv->saveBLC_PWM_CTL);
324 REG_WRITE(PFIT_CONTROL, lvds_priv->savePFIT_CONTROL);
325 REG_WRITE(PFIT_PGM_RATIOS, lvds_priv->savePFIT_PGM_RATIOS);
326 REG_WRITE(LVDSPP_ON, lvds_priv->savePP_ON);
327 REG_WRITE(LVDSPP_OFF, lvds_priv->savePP_OFF);
328 /*REG_WRITE(PP_DIVISOR, lvds_priv->savePP_DIVISOR);*/
329 REG_WRITE(PP_CYCLE, lvds_priv->savePP_CYCLE);
330 REG_WRITE(PP_CONTROL, lvds_priv->savePP_CONTROL);
331 REG_WRITE(LVDS, lvds_priv->saveLVDS);
332
333 if (lvds_priv->savePP_CONTROL & POWER_TARGET_ON) {
334 REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
335 POWER_TARGET_ON);
336 do {
337 pp_status = REG_READ(PP_STATUS);
338 } while ((pp_status & PP_ON) == 0);
339 } else {
340 REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
341 ~POWER_TARGET_ON);
342 do {
343 pp_status = REG_READ(PP_STATUS);
344 } while (pp_status & PP_ON);
345 }
346}
347
348int psb_intel_lvds_mode_valid(struct drm_connector *connector,
349 struct drm_display_mode *mode)
350{
351 struct drm_psb_private *dev_priv = connector->dev->dev_private;
352 struct psb_intel_encoder *psb_intel_encoder =
353 psb_intel_attached_encoder(connector);
354 struct drm_display_mode *fixed_mode =
355 dev_priv->mode_dev.panel_fixed_mode;
356
357 if (psb_intel_encoder->type == INTEL_OUTPUT_MIPI2)
358 fixed_mode = dev_priv->mode_dev.panel_fixed_mode2;
359
360 /* just in case */
361 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
362 return MODE_NO_DBLESCAN;
363
364 /* just in case */
365 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
366 return MODE_NO_INTERLACE;
367
368 if (fixed_mode) {
369 if (mode->hdisplay > fixed_mode->hdisplay)
370 return MODE_PANEL;
371 if (mode->vdisplay > fixed_mode->vdisplay)
372 return MODE_PANEL;
373 }
374 return MODE_OK;
375}
376
377bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
378 struct drm_display_mode *mode,
379 struct drm_display_mode *adjusted_mode)
380{
381 struct drm_device *dev = encoder->dev;
382 struct drm_psb_private *dev_priv = dev->dev_private;
383 struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
384 struct psb_intel_crtc *psb_intel_crtc =
385 to_psb_intel_crtc(encoder->crtc);
386 struct drm_encoder *tmp_encoder;
387 struct drm_display_mode *panel_fixed_mode = mode_dev->panel_fixed_mode;
388 struct psb_intel_encoder *psb_intel_encoder =
389 to_psb_intel_encoder(encoder);
390
391 if (psb_intel_encoder->type == INTEL_OUTPUT_MIPI2)
392 panel_fixed_mode = mode_dev->panel_fixed_mode2;
393
394 /* PSB requires LVDS to be on pipe B; MRST has only one pipe anyway */
395 if (!IS_MRST(dev) && psb_intel_crtc->pipe == 0) {
396 printk(KERN_ERR "Can't support LVDS on pipe A\n");
397 return false;
398 }
399 if (IS_MRST(dev) && psb_intel_crtc->pipe != 0) {
400 printk(KERN_ERR "Must use PIPE A\n");
401 return false;
402 }
403 /* Should never happen!! */
404 list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list,
405 head) {
406 if (tmp_encoder != encoder
407 && tmp_encoder->crtc == encoder->crtc) {
408 printk(KERN_ERR "Can't enable LVDS and another "
409 "encoder on the same pipe\n");
410 return false;
411 }
412 }
413
414 /*
415 * If we have timings from the BIOS for the panel, put them in
416 * to the adjusted mode. The CRTC will be set up for this mode,
417 * with the panel scaling set up to source from the H/VDisplay
418 * of the original mode.
419 */
420 if (panel_fixed_mode != NULL) {
421 adjusted_mode->hdisplay = panel_fixed_mode->hdisplay;
422 adjusted_mode->hsync_start = panel_fixed_mode->hsync_start;
423 adjusted_mode->hsync_end = panel_fixed_mode->hsync_end;
424 adjusted_mode->htotal = panel_fixed_mode->htotal;
425 adjusted_mode->vdisplay = panel_fixed_mode->vdisplay;
426 adjusted_mode->vsync_start = panel_fixed_mode->vsync_start;
427 adjusted_mode->vsync_end = panel_fixed_mode->vsync_end;
428 adjusted_mode->vtotal = panel_fixed_mode->vtotal;
429 adjusted_mode->clock = panel_fixed_mode->clock;
430 drm_mode_set_crtcinfo(adjusted_mode,
431 CRTC_INTERLACE_HALVE_V);
432 }
433
434 /*
435 * XXX: It would be nice to support lower refresh rates on the
436 * panels to reduce power consumption, and perhaps match the
437 * user's requested refresh rate.
438 */
439
440 return true;
441}
442
443static void psb_intel_lvds_prepare(struct drm_encoder *encoder)
444{
445 struct drm_device *dev = encoder->dev;
446 struct drm_psb_private *dev_priv = dev->dev_private;
447 struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
448
449 if (!gma_power_begin(dev, true))
450 return;
451
452 mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
453 mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
454 BACKLIGHT_DUTY_CYCLE_MASK);
455
456 psb_intel_lvds_set_power(dev, false);
457
458 gma_power_end(dev);
459}
460
461static void psb_intel_lvds_commit(struct drm_encoder *encoder)
462{
463 struct drm_device *dev = encoder->dev;
464 struct drm_psb_private *dev_priv = dev->dev_private;
465 struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
466
467 if (mode_dev->backlight_duty_cycle == 0)
468 mode_dev->backlight_duty_cycle =
469 psb_intel_lvds_get_max_backlight(dev);
470
471 psb_intel_lvds_set_power(dev, true);
472}
473
474static void psb_intel_lvds_mode_set(struct drm_encoder *encoder,
475 struct drm_display_mode *mode,
476 struct drm_display_mode *adjusted_mode)
477{
478 struct drm_device *dev = encoder->dev;
479 struct drm_psb_private *dev_priv = dev->dev_private;
480 u32 pfit_control;
481
482 /*
483 * The LVDS pin pair will already have been turned on in the
484 * psb_intel_crtc_mode_set since it has a large impact on the DPLL
485 * settings.
486 */
487
488 /*
489 * Enable automatic panel scaling so that non-native modes fill the
490 * screen. Should be enabled before the pipe is enabled, according to
491 * register description and PRM.
492 */
493 if (mode->hdisplay != adjusted_mode->hdisplay ||
494 mode->vdisplay != adjusted_mode->vdisplay)
495 pfit_control = (PFIT_ENABLE | VERT_AUTO_SCALE |
496 HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR |
497 HORIZ_INTERP_BILINEAR);
498 else
499 pfit_control = 0;
500
501 if (dev_priv->lvds_dither)
502 pfit_control |= PANEL_8TO6_DITHER_ENABLE;
503
504 REG_WRITE(PFIT_CONTROL, pfit_control);
505}
506
507/*
508 * Detect the LVDS connection.
509 *
510 * This always returns CONNECTOR_STATUS_CONNECTED.
511 * This connector should only have
512 * been set up if the LVDS was actually connected anyway.
513 */
514static enum drm_connector_status psb_intel_lvds_detect(struct drm_connector
515 *connector, bool force)
516{
517 return connector_status_connected;
518}
519
520/*
521 * Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
522 */
523static int psb_intel_lvds_get_modes(struct drm_connector *connector)
524{
525 struct drm_device *dev = connector->dev;
526 struct drm_psb_private *dev_priv = dev->dev_private;
527 struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
528 struct psb_intel_encoder *psb_intel_encoder =
529 psb_intel_attached_encoder(connector);
530 struct psb_intel_lvds_priv *lvds_priv = psb_intel_encoder->dev_priv;
531 int ret = 0;
532
533 if (!IS_MRST(dev))
534 ret = psb_intel_ddc_get_modes(connector, &lvds_priv->i2c_bus->adapter);
535
536 if (ret)
537 return ret;
538
539 /* Didn't get an EDID, so
540 * set wide sync ranges so we get all modes
541 * handed to mode_valid for checking.
542 */
543 connector->display_info.min_vfreq = 0;
544 connector->display_info.max_vfreq = 200;
545 connector->display_info.min_hfreq = 0;
546 connector->display_info.max_hfreq = 200;
547
548 if (mode_dev->panel_fixed_mode != NULL) {
549 struct drm_display_mode *mode =
550 drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
551 drm_mode_probed_add(connector, mode);
552 return 1;
553 }
554
555 return 0;
556}
557
558/**
559 * psb_intel_lvds_destroy - unregister and free LVDS structures
560 * @connector: connector to free
561 *
562 * Unregister the DDC bus for this connector then free the driver private
563 * structure.
564 */
565void psb_intel_lvds_destroy(struct drm_connector *connector)
566{
567 struct psb_intel_encoder *psb_intel_encoder =
568 psb_intel_attached_encoder(connector);
569 struct psb_intel_lvds_priv *lvds_priv = psb_intel_encoder->dev_priv;
570
571 if (lvds_priv->ddc_bus)
572 psb_intel_i2c_destroy(lvds_priv->ddc_bus);
573 drm_sysfs_connector_remove(connector);
574 drm_connector_cleanup(connector);
575 kfree(connector);
576}
577
578int psb_intel_lvds_set_property(struct drm_connector *connector,
579 struct drm_property *property,
580 uint64_t value)
581{
582 struct drm_encoder *encoder = connector->encoder;
583
584 if (!encoder)
585 return -1;
586
587 if (!strcmp(property->name, "scaling mode")) {
588 struct psb_intel_crtc *crtc =
589 to_psb_intel_crtc(encoder->crtc);
590 uint64_t curval;
591
592 if (!crtc)
593 goto set_prop_error;
594
595 switch (value) {
596 case DRM_MODE_SCALE_FULLSCREEN:
597 break;
598 case DRM_MODE_SCALE_NO_SCALE:
599 break;
600 case DRM_MODE_SCALE_ASPECT:
601 break;
602 default:
603 goto set_prop_error;
604 }
605
606 if (drm_connector_property_get_value(connector,
607 property,
608 &curval))
609 goto set_prop_error;
610
611 if (curval == value)
612 goto set_prop_done;
613
614 if (drm_connector_property_set_value(connector,
615 property,
616 value))
617 goto set_prop_error;
618
619 if (crtc->saved_mode.hdisplay != 0 &&
620 crtc->saved_mode.vdisplay != 0) {
621 if (!drm_crtc_helper_set_mode(encoder->crtc,
622 &crtc->saved_mode,
623 encoder->crtc->x,
624 encoder->crtc->y,
625 encoder->crtc->fb))
626 goto set_prop_error;
627 }
628 } else if (!strcmp(property->name, "backlight")) {
629 if (drm_connector_property_set_value(connector,
630 property,
631 value))
632 goto set_prop_error;
633 else {
634#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
635 struct drm_psb_private *devp =
636 encoder->dev->dev_private;
637 struct backlight_device *bd = devp->backlight_device;
638 if (bd) {
639 bd->props.brightness = value;
640 backlight_update_status(bd);
641 }
642#endif
643 }
644 } else if (!strcmp(property->name, "DPMS")) {
645 struct drm_encoder_helper_funcs *hfuncs
646 = encoder->helper_private;
647 hfuncs->dpms(encoder, value);
648 }
649
650set_prop_done:
651 return 0;
652set_prop_error:
653 return -1;
654}
655
656static const struct drm_encoder_helper_funcs psb_intel_lvds_helper_funcs = {
657 .dpms = psb_intel_lvds_encoder_dpms,
658 .mode_fixup = psb_intel_lvds_mode_fixup,
659 .prepare = psb_intel_lvds_prepare,
660 .mode_set = psb_intel_lvds_mode_set,
661 .commit = psb_intel_lvds_commit,
662};
663
664const struct drm_connector_helper_funcs
665 psb_intel_lvds_connector_helper_funcs = {
666 .get_modes = psb_intel_lvds_get_modes,
667 .mode_valid = psb_intel_lvds_mode_valid,
668 .best_encoder = psb_intel_best_encoder,
669};
670
671const struct drm_connector_funcs psb_intel_lvds_connector_funcs = {
672 .dpms = drm_helper_connector_dpms,
673 .save = psb_intel_lvds_save,
674 .restore = psb_intel_lvds_restore,
675 .detect = psb_intel_lvds_detect,
676 .fill_modes = drm_helper_probe_single_connector_modes,
677 .set_property = psb_intel_lvds_set_property,
678 .destroy = psb_intel_lvds_destroy,
679};
680
681
682static void psb_intel_lvds_enc_destroy(struct drm_encoder *encoder)
683{
684 drm_encoder_cleanup(encoder);
685}
686
687const struct drm_encoder_funcs psb_intel_lvds_enc_funcs = {
688 .destroy = psb_intel_lvds_enc_destroy,
689};
690
691
692
693/**
694 * psb_intel_lvds_init - setup LVDS connectors on this device
695 * @dev: drm device
696 *
697 * Create the connector, register the LVDS DDC bus, and try to figure out what
698 * modes we can display on the LVDS panel (if present).
699 */
700void psb_intel_lvds_init(struct drm_device *dev,
701 struct psb_intel_mode_device *mode_dev)
702{
703 struct psb_intel_encoder *psb_intel_encoder;
704 struct psb_intel_connector *psb_intel_connector;
705 struct psb_intel_lvds_priv *lvds_priv;
706 struct drm_connector *connector;
707 struct drm_encoder *encoder;
708 struct drm_display_mode *scan; /* *modes, *bios_mode; */
709 struct drm_crtc *crtc;
710 struct drm_psb_private *dev_priv = dev->dev_private;
711 u32 lvds;
712 int pipe;
713
714 psb_intel_encoder =
715 kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL);
716
717 if (!psb_intel_encoder) {
718 dev_err(dev->dev, "psb_intel_encoder allocation error\n");
719 return;
720 }
721
722 psb_intel_connector =
723 kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL);
724
725 if (!psb_intel_connector) {
726 kfree(psb_intel_encoder);
727 dev_err(dev->dev, "psb_intel_connector allocation error\n");
 return; /* connector is dereferenced below; bail out on failure */
728 }
729
730 lvds_priv = kzalloc(sizeof(struct psb_intel_lvds_priv), GFP_KERNEL);
731 if (!lvds_priv) {
732 dev_err(dev->dev, "LVDS private allocation error\n");
733 goto failed_connector;
734 }
735
736 psb_intel_encoder->dev_priv = lvds_priv;
737
738 connector = &psb_intel_connector->base;
739 encoder = &psb_intel_encoder->base;
740 drm_connector_init(dev, connector,
741 &psb_intel_lvds_connector_funcs,
742 DRM_MODE_CONNECTOR_LVDS);
743
744 drm_encoder_init(dev, encoder,
745 &psb_intel_lvds_enc_funcs,
746 DRM_MODE_ENCODER_LVDS);
747
748 psb_intel_connector_attach_encoder(psb_intel_connector,
749 psb_intel_encoder);
750 psb_intel_encoder->type = INTEL_OUTPUT_LVDS;
751
752 drm_encoder_helper_add(encoder, &psb_intel_lvds_helper_funcs);
753 drm_connector_helper_add(connector,
754 &psb_intel_lvds_connector_helper_funcs);
755 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
756 connector->interlace_allowed = false;
757 connector->doublescan_allowed = false;
758
759 /*Attach connector properties*/
760 drm_connector_attach_property(connector,
761 dev->mode_config.scaling_mode_property,
762 DRM_MODE_SCALE_FULLSCREEN);
763 drm_connector_attach_property(connector,
764 dev_priv->backlight_property,
765 BRIGHTNESS_MAX_LEVEL);
766
767 /*
768 * Set up I2C bus
769 * FIXME: destroy i2c_bus on exit
770 */
771 lvds_priv->i2c_bus = psb_intel_i2c_create(dev, GPIOB, "LVDSBLC_B");
772 if (!lvds_priv->i2c_bus) {
773 dev_printk(KERN_ERR,
774 &dev->pdev->dev, "I2C bus registration failed.\n");
775 goto failed_blc_i2c;
776 }
777 lvds_priv->i2c_bus->slave_addr = 0x2C;
778 dev_priv->lvds_i2c_bus = lvds_priv->i2c_bus;
779
780 /*
781 * LVDS discovery:
782 * 1) check for EDID on DDC
783 * 2) check for VBT data
784 * 3) check to see if LVDS is already on
785 * if none of the above, no panel
786 * 4) make sure lid is open
787 * if closed, act like it's not there for now
788 */
789
790 /* Set up the DDC bus. */
791 lvds_priv->ddc_bus = psb_intel_i2c_create(dev, GPIOC, "LVDSDDC_C");
792 if (!lvds_priv->ddc_bus) {
793 dev_printk(KERN_ERR, &dev->pdev->dev,
794 "DDC bus registration " "failed.\n");
795 goto failed_ddc;
796 }
797
798 /*
799 * Attempt to get the fixed panel mode from DDC. Assume that the
800 * preferred mode is the right one.
801 */
802 psb_intel_ddc_get_modes(connector, &lvds_priv->ddc_bus->adapter);
803 list_for_each_entry(scan, &connector->probed_modes, head) {
804 if (scan->type & DRM_MODE_TYPE_PREFERRED) {
805 mode_dev->panel_fixed_mode =
806 drm_mode_duplicate(dev, scan);
807 goto out; /* FIXME: check for quirks */
808 }
809 }
810
811 /* Failed to get EDID, what about VBT? do we need this? */
812 if (mode_dev->vbt_mode)
813 mode_dev->panel_fixed_mode =
814 drm_mode_duplicate(dev, mode_dev->vbt_mode);
815
816 if (!mode_dev->panel_fixed_mode)
817 if (dev_priv->lfp_lvds_vbt_mode)
818 mode_dev->panel_fixed_mode =
819 drm_mode_duplicate(dev,
820 dev_priv->lfp_lvds_vbt_mode);
821
822 /*
823 * If we didn't get EDID, try checking if the panel is already turned
824 * on. If so, assume that whatever is currently programmed is the
825 * correct mode.
826 */
827 lvds = REG_READ(LVDS);
828 pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
829 crtc = psb_intel_get_crtc_from_pipe(dev, pipe);
830
831 if (crtc && (lvds & LVDS_PORT_EN)) {
832 mode_dev->panel_fixed_mode =
833 psb_intel_crtc_mode_get(dev, crtc);
834 if (mode_dev->panel_fixed_mode) {
835 mode_dev->panel_fixed_mode->type |=
836 DRM_MODE_TYPE_PREFERRED;
837 goto out; /* FIXME: check for quirks */
838 }
839 }
840
841 /* If we still don't have a mode after all that, give up. */
842 if (!mode_dev->panel_fixed_mode) {
843 dev_err(dev->dev, "Found no modes on the lvds, ignoring the LVDS\n");
844 goto failed_find;
845 }
846
847 /*
848 * Blacklist machines with BIOSes that list an LVDS panel without
849 * actually having one.
850 */
851out:
852 drm_sysfs_connector_add(connector);
853 return;
854
855failed_find:
856 if (lvds_priv->ddc_bus)
857 psb_intel_i2c_destroy(lvds_priv->ddc_bus);
858failed_ddc:
859 if (lvds_priv->i2c_bus)
860 psb_intel_i2c_destroy(lvds_priv->i2c_bus);
861failed_blc_i2c:
862 drm_encoder_cleanup(encoder);
863 drm_connector_cleanup(connector);
864failed_connector:
865 /* kfree(NULL) is a no-op; no check needed */
866 kfree(psb_intel_connector);
867}
868
diff --git a/drivers/gpu/drm/gma500/psb_intel_modes.c b/drivers/gpu/drm/gma500/psb_intel_modes.c
new file mode 100644
index 00000000000..4fca0d6feeb
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_intel_modes.c
@@ -0,0 +1,75 @@
1/*
2 * Copyright (c) 2007 Intel Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Authors: Jesse Barnes <jesse.barnes@intel.com>
18 */
19
20#include <linux/i2c.h>
21#include <linux/fb.h>
22#include <drm/drmP.h>
23#include "psb_intel_drv.h"
24
25/**
26 * psb_intel_ddc_probe - check for a DDC/EDID device at address 0x50
27 * @adapter: I2C adapter to probe
28 */
29bool psb_intel_ddc_probe(struct i2c_adapter *adapter)
30{
31 u8 out_buf[] = { 0x0, 0x0 };
32 u8 buf[2];
33 int ret;
34 struct i2c_msg msgs[] = {
35 {
36 .addr = 0x50,
37 .flags = 0,
38 .len = 1,
39 .buf = out_buf,
40 },
41 {
42 .addr = 0x50,
43 .flags = I2C_M_RD,
44 .len = 1,
45 .buf = buf,
46 }
47 };
48
49 ret = i2c_transfer(adapter, msgs, 2);
50 if (ret == 2)
51 return true;
52
53 return false;
54}
55
56/**
57 * psb_intel_ddc_get_modes - get modelist from monitor
58 * @connector: DRM connector device to use
59 *
60 * Fetch the EDID information from @connector using the DDC bus.
61 */
62int psb_intel_ddc_get_modes(struct drm_connector *connector,
63 struct i2c_adapter *adapter)
64{
65 struct edid *edid;
66 int ret = 0;
67
68 edid = drm_get_edid(connector, adapter);
69 if (edid) {
70 drm_mode_connector_update_edid_property(connector, edid);
71 ret = drm_add_edid_modes(connector, edid);
72 kfree(edid);
73 }
74 return ret;
75}
diff --git a/drivers/gpu/drm/gma500/psb_intel_reg.h b/drivers/gpu/drm/gma500/psb_intel_reg.h
new file mode 100644
index 00000000000..fcc0af03d68
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_intel_reg.h
@@ -0,0 +1,1309 @@
1/*
2 * Copyright (c) 2009, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 */
17#ifndef __PSB_INTEL_REG_H__
18#define __PSB_INTEL_REG_H__
19
20/*
21 * GPIO regs
22 */
23#define GPIOA 0x5010
24#define GPIOB 0x5014
25#define GPIOC 0x5018
26#define GPIOD 0x501c
27#define GPIOE 0x5020
28#define GPIOF 0x5024
29#define GPIOG 0x5028
30#define GPIOH 0x502c
31# define GPIO_CLOCK_DIR_MASK (1 << 0)
32# define GPIO_CLOCK_DIR_IN (0 << 1)
33# define GPIO_CLOCK_DIR_OUT (1 << 1)
34# define GPIO_CLOCK_VAL_MASK (1 << 2)
35# define GPIO_CLOCK_VAL_OUT (1 << 3)
36# define GPIO_CLOCK_VAL_IN (1 << 4)
37# define GPIO_CLOCK_PULLUP_DISABLE (1 << 5)
38# define GPIO_DATA_DIR_MASK (1 << 8)
39# define GPIO_DATA_DIR_IN (0 << 9)
40# define GPIO_DATA_DIR_OUT (1 << 9)
41# define GPIO_DATA_VAL_MASK (1 << 10)
42# define GPIO_DATA_VAL_OUT (1 << 11)
43# define GPIO_DATA_VAL_IN (1 << 12)
44# define GPIO_DATA_PULLUP_DISABLE (1 << 13)
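
/*
 * Illustrative sketch (editor-added, hypothetical helper): the pattern a
 * bit-banging I2C implementation would write to drive SCL low using the
 * pairs above; direction forced to output, value write-enabled but left 0.
 */
static inline u32 gpio_scl_drive_low_bits(void)
{
	return GPIO_CLOCK_DIR_MASK | GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_VAL_MASK;
}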
45
46#define GMBUS0 0x5100 /* clock/port select */
47#define GMBUS_RATE_100KHZ (0<<8)
48#define GMBUS_RATE_50KHZ (1<<8)
49#define GMBUS_RATE_400KHZ (2<<8) /* reserved on Pineview */
50#define GMBUS_RATE_1MHZ (3<<8) /* reserved on Pineview */
51#define GMBUS_HOLD_EXT (1<<7) /* 300ns hold time, rsvd on Pineview */
52#define GMBUS_PORT_DISABLED 0
53#define GMBUS_PORT_SSC 1
54#define GMBUS_PORT_VGADDC 2
55#define GMBUS_PORT_PANEL 3
56#define GMBUS_PORT_DPC 4 /* HDMIC */
57#define GMBUS_PORT_DPB 5 /* SDVO, HDMIB */
58 /* 6 reserved */
59#define GMBUS_PORT_DPD 7 /* HDMID */
60#define GMBUS_NUM_PORTS 8
61#define GMBUS1 0x5104 /* command/status */
62#define GMBUS_SW_CLR_INT (1<<31)
63#define GMBUS_SW_RDY (1<<30)
64#define GMBUS_ENT (1<<29) /* enable timeout */
65#define GMBUS_CYCLE_NONE (0<<25)
66#define GMBUS_CYCLE_WAIT (1<<25)
67#define GMBUS_CYCLE_INDEX (2<<25)
68#define GMBUS_CYCLE_STOP (4<<25)
69#define GMBUS_BYTE_COUNT_SHIFT 16
70#define GMBUS_SLAVE_INDEX_SHIFT 8
71#define GMBUS_SLAVE_ADDR_SHIFT 1
72#define GMBUS_SLAVE_READ (1<<0)
73#define GMBUS_SLAVE_WRITE (0<<0)
74#define GMBUS2 0x5108 /* status */
75#define GMBUS_INUSE (1<<15)
76#define GMBUS_HW_WAIT_PHASE (1<<14)
77#define GMBUS_STALL_TIMEOUT (1<<13)
78#define GMBUS_INT (1<<12)
79#define GMBUS_HW_RDY (1<<11)
80#define GMBUS_SATOER (1<<10)
81#define GMBUS_ACTIVE (1<<9)
82#define GMBUS3 0x510c /* data buffer bytes 3-0 */
83#define GMBUS4 0x5110 /* interrupt mask (Pineview+) */
84#define GMBUS_SLAVE_TIMEOUT_EN (1<<4)
85#define GMBUS_NAK_EN (1<<3)
86#define GMBUS_IDLE_EN (1<<2)
87#define GMBUS_HW_WAIT_EN (1<<1)
88#define GMBUS_HW_RDY_EN (1<<0)
89#define GMBUS5 0x5120 /* byte index */
90#define GMBUS_2BYTE_INDEX_EN (1<<31)
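
/*
 * Illustrative sketch (editor-added, hypothetical helper): composing a
 * GMBUS1 command word for a 2-byte read from the DDC/EDID address 0x50,
 * per the bit definitions above.
 */
static inline u32 gmbus1_edid_read_cmd(void)
{
	return GMBUS_SW_RDY | GMBUS_CYCLE_WAIT | GMBUS_CYCLE_STOP |
	       (2 << GMBUS_BYTE_COUNT_SHIFT) |
	       (0x50 << GMBUS_SLAVE_ADDR_SHIFT) |
	       GMBUS_SLAVE_READ;
}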
91
92#define BLC_PWM_CTL 0x61254
93#define BLC_PWM_CTL2 0x61250
94#define BLC_PWM_CTL_C 0x62254
95#define BLC_PWM_CTL2_C 0x62250
96#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
97/*
98 * This is the most significant 15 bits of the number of backlight cycles in a
99 * complete cycle of the modulated backlight control.
100 *
101 * The actual value is this field multiplied by two.
102 */
103#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
104#define BLM_LEGACY_MODE (1 << 16)
105/*
106 * This is the number of cycles out of the backlight modulation cycle for which
107 * the backlight is on.
108 *
109 * This field must be no greater than the number of cycles in the complete
110 * backlight modulation cycle.
111 */
112#define BACKLIGHT_DUTY_CYCLE_SHIFT (0)
113#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff)
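
/*
 * Illustrative sketch (editor-added, hypothetical helper): recovering the
 * maximum duty cycle from BLC_PWM_CTL per the field descriptions above.
 * The frequency field stores half the cycle count, hence the doubling
 * (cf. psb_intel_lvds_get_max_backlight()).
 */
static inline u32 blc_max_duty_cycle(u32 blc_pwm_ctl)
{
	return ((blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >>
		BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
}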
114
115#define I915_GCFGC 0xf0
116#define I915_LOW_FREQUENCY_ENABLE (1 << 7)
117#define I915_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
118#define I915_DISPLAY_CLOCK_333_MHZ (4 << 4)
119#define I915_DISPLAY_CLOCK_MASK (7 << 4)
120
121#define I855_HPLLCC 0xc0
122#define I855_CLOCK_CONTROL_MASK (3 << 0)
123#define I855_CLOCK_133_200 (0 << 0)
124#define I855_CLOCK_100_200 (1 << 0)
125#define I855_CLOCK_100_133 (2 << 0)
126#define I855_CLOCK_166_250 (3 << 0)
127
128/* I830 CRTC registers */
129#define HTOTAL_A 0x60000
130#define HBLANK_A 0x60004
131#define HSYNC_A 0x60008
132#define VTOTAL_A 0x6000c
133#define VBLANK_A 0x60010
134#define VSYNC_A 0x60014
135#define PIPEASRC 0x6001c
136#define BCLRPAT_A 0x60020
137#define VSYNCSHIFT_A 0x60028
138
139#define HTOTAL_B 0x61000
140#define HBLANK_B 0x61004
141#define HSYNC_B 0x61008
142#define VTOTAL_B 0x6100c
143#define VBLANK_B 0x61010
144#define VSYNC_B 0x61014
145#define PIPEBSRC 0x6101c
146#define BCLRPAT_B 0x61020
147#define VSYNCSHIFT_B 0x61028
148
149#define HTOTAL_C 0x62000
150#define HBLANK_C 0x62004
151#define HSYNC_C 0x62008
152#define VTOTAL_C 0x6200c
153#define VBLANK_C 0x62010
154#define VSYNC_C 0x62014
155#define PIPECSRC 0x6201c
156#define BCLRPAT_C 0x62020
157#define VSYNCSHIFT_C 0x62028
158
159#define PP_STATUS 0x61200
160# define PP_ON (1 << 31)
161/*
162 * Indicates that all dependencies of the panel are on:
163 *
164 * - PLL enabled
165 * - pipe enabled
166 * - LVDS/DVOB/DVOC on
167 */
168#define PP_READY (1 << 30)
169#define PP_SEQUENCE_NONE (0 << 28)
170#define PP_SEQUENCE_ON (1 << 28)
171#define PP_SEQUENCE_OFF (2 << 28)
172#define PP_SEQUENCE_MASK 0x30000000
173#define PP_CONTROL 0x61204
174#define POWER_TARGET_ON (1 << 0)
175
176#define LVDSPP_ON 0x61208
177#define LVDSPP_OFF 0x6120c
178#define PP_CYCLE 0x61210
179
180#define PFIT_CONTROL 0x61230
181#define PFIT_ENABLE (1 << 31)
182#define PFIT_PIPE_MASK (3 << 29)
183#define PFIT_PIPE_SHIFT 29
184#define PFIT_SCALING_MODE_PILLARBOX (1 << 27)
185#define PFIT_SCALING_MODE_LETTERBOX (3 << 26)
186#define VERT_INTERP_DISABLE (0 << 10)
187#define VERT_INTERP_BILINEAR (1 << 10)
188#define VERT_INTERP_MASK (3 << 10)
189#define VERT_AUTO_SCALE (1 << 9)
190#define HORIZ_INTERP_DISABLE (0 << 6)
191#define HORIZ_INTERP_BILINEAR (1 << 6)
192#define HORIZ_INTERP_MASK (3 << 6)
193#define HORIZ_AUTO_SCALE (1 << 5)
194#define PANEL_8TO6_DITHER_ENABLE (1 << 3)
195
196#define PFIT_PGM_RATIOS 0x61234
197#define PFIT_VERT_SCALE_MASK 0xfff00000
198#define PFIT_HORIZ_SCALE_MASK 0x0000fff0
199
200#define PFIT_AUTO_RATIOS 0x61238
201
202#define DPLL_A 0x06014
203#define DPLL_B 0x06018
204#define DPLL_VCO_ENABLE (1 << 31)
205#define DPLL_DVO_HIGH_SPEED (1 << 30)
206#define DPLL_SYNCLOCK_ENABLE (1 << 29)
207#define DPLL_VGA_MODE_DIS (1 << 28)
208#define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */
209#define DPLLB_MODE_LVDS (2 << 26) /* i915 */
210#define DPLL_MODE_MASK (3 << 26)
211#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */
212#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */
213#define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */
214#define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
215#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
216#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
217#define DPLL_LOCK (1 << 15) /* CDV */
218
219/*
220 * The i830 generation, in DAC/serial mode, defines p1 as two plus this
221 * bitfield, or just 2 if PLL_P1_DIVIDE_BY_TWO is set.
222 */
223# define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
224/*
225 * The i830 generation, in LVDS mode, defines P1 as the bit number set within
226 * this field (only one bit may be set).
227 */
228#define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000
229#define DPLL_FPA01_P1_POST_DIV_SHIFT 16
230#define PLL_P2_DIVIDE_BY_4 (1 << 23) /* i830, required
231 * in DVO non-gang */
232# define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */
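
/*
 * Illustrative sketch (editor-added, hypothetical helper): decoding P1
 * from a DPLL register value for the two i830 encodings described above,
 * mirroring the logic in psb_intel_crtc_clock_get().
 */
static inline int i830_dpll_decode_p1(u32 dpll, bool is_lvds)
{
	if (is_lvds)	/* P1 is the position of the set bit in the field */
		return ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
			   DPLL_FPA01_P1_POST_DIV_SHIFT);
	if (dpll & PLL_P1_DIVIDE_BY_TWO)
		return 2;
	return ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
		DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
}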
233#define PLL_REF_INPUT_DREFCLK (0 << 13)
234#define PLL_REF_INPUT_TVCLKINA (1 << 13) /* i830 */
235#define PLL_REF_INPUT_TVCLKINBC (2 << 13) /* SDVO
236 * TVCLKIN */
237#define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
238#define PLL_REF_INPUT_MASK (3 << 13)
239#define PLL_LOAD_PULSE_PHASE_SHIFT 9
240/*
241 * Parallel to Serial Load Pulse phase selection.
242 * Selects the phase for the 10X DPLL clock for the PCIe
243 * digital display port. The range is 4 to 13; 10 or more
244 * is just a flip delay. The default is 6
245 */
246#define PLL_LOAD_PULSE_PHASE_MASK (0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
247#define DISPLAY_RATE_SELECT_FPA1 (1 << 8)
248
249/*
250 * SDVO multiplier for 945G/GM. Not used on 965.
251 *
252 * DPLL_MD_UDI_MULTIPLIER_MASK
253 */
254#define SDVO_MULTIPLIER_MASK 0x000000ff
255#define SDVO_MULTIPLIER_SHIFT_HIRES 4
256#define SDVO_MULTIPLIER_SHIFT_VGA 0
257
258/*
259 * PLL_MD
260 */
261/* Pipe A SDVO/UDI clock multiplier/divider register for G965. */
262#define DPLL_A_MD 0x0601c
263/* Pipe B SDVO/UDI clock multiplier/divider register for G965. */
264#define DPLL_B_MD 0x06020
265/*
266 * UDI pixel divider, controlling how many pixels are stuffed into a packet.
267 *
268 * Value is pixels minus 1. Must be set to 1 pixel for SDVO.
269 */
270#define DPLL_MD_UDI_DIVIDER_MASK 0x3f000000
271#define DPLL_MD_UDI_DIVIDER_SHIFT 24
272/* UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */
273#define DPLL_MD_VGA_UDI_DIVIDER_MASK 0x003f0000
274#define DPLL_MD_VGA_UDI_DIVIDER_SHIFT 16
275/*
276 * SDVO/UDI pixel multiplier.
277 *
278 * SDVO requires that the bus clock rate be between 1 and 2 GHz, and the bus
279 * clock rate is 10 times the DPLL clock. At low resolution/refresh-rate
280 * modes, the bus rate would fall below that limit, so SDVO allows stuffing
281 * dummy bytes into the datastream at an increased clock rate, with both
282 * sides of the link knowing how many bytes are filler.
283 *
284 * So, for a mode with a dotclock of 65 MHz, we would want to double the clock
285 * rate to 130 MHz to get a bus rate of 1.30 GHz. The DPLL clock rate would be
286 * set to 130 MHz, and the SDVO multiplier set to 2x in this register and
287 * through an SDVO command.
288 *
289 * This register field holds the multiplication factor minus 1, with
290 * a maximum multiplier of 5x for SDVO.
291 */
292#define DPLL_MD_UDI_MULTIPLIER_MASK 0x00003f00
293#define DPLL_MD_UDI_MULTIPLIER_SHIFT 8
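
/*
 * [Editorial sketch, not part of this patch] Choosing and encoding the SDVO
 * pixel multiplier for a given dotclock (in kHz), mirroring the thresholds
 * used by psb_intel_sdvo_get_pixel_multiplier() later in this patch; the
 * helper name is hypothetical.
 */
static inline u32 sdvo_udi_multiplier_bits(int clock_khz)
{
	int mult;

	if (clock_khz >= 100000)	/* bus rate already >= 1 GHz */
		mult = 1;
	else if (clock_khz >= 50000)
		mult = 2;
	else
		mult = 4;

	/* The field holds the multiplication factor minus 1. */
	return ((u32)(mult - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT) &
		DPLL_MD_UDI_MULTIPLIER_MASK;
}
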
294/*
295 * SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
296 * This is best left at the default value (3) or the CRT won't work; what
297 * the field does beyond that is not entirely understood.
298 */
299#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
300#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
301
302#define DPLL_TEST 0x606c
303#define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
304#define DPLLB_TEST_SDVO_DIV_2 (1 << 22)
305#define DPLLB_TEST_SDVO_DIV_4 (2 << 22)
306#define DPLLB_TEST_SDVO_DIV_MASK (3 << 22)
307#define DPLLB_TEST_N_BYPASS (1 << 19)
308#define DPLLB_TEST_M_BYPASS (1 << 18)
309#define DPLLB_INPUT_BUFFER_ENABLE (1 << 16)
310#define DPLLA_TEST_N_BYPASS (1 << 3)
311#define DPLLA_TEST_M_BYPASS (1 << 2)
312#define DPLLA_INPUT_BUFFER_ENABLE (1 << 0)
313
314#define ADPA 0x61100
315#define ADPA_DAC_ENABLE (1 << 31)
316#define ADPA_DAC_DISABLE 0
317#define ADPA_PIPE_SELECT_MASK (1 << 30)
318#define ADPA_PIPE_A_SELECT 0
319#define ADPA_PIPE_B_SELECT (1 << 30)
320#define ADPA_USE_VGA_HVPOLARITY (1 << 15)
321#define ADPA_SETS_HVPOLARITY 0
322#define ADPA_VSYNC_CNTL_DISABLE (1 << 11)
323#define ADPA_VSYNC_CNTL_ENABLE 0
324#define ADPA_HSYNC_CNTL_DISABLE (1 << 10)
325#define ADPA_HSYNC_CNTL_ENABLE 0
326#define ADPA_VSYNC_ACTIVE_HIGH (1 << 4)
327#define ADPA_VSYNC_ACTIVE_LOW 0
328#define ADPA_HSYNC_ACTIVE_HIGH (1 << 3)
329#define ADPA_HSYNC_ACTIVE_LOW 0
330
331#define FPA0 0x06040
332#define FPA1 0x06044
333#define FPB0 0x06048
334#define FPB1 0x0604c
335#define FP_N_DIV_MASK 0x003f0000
336#define FP_N_DIV_SHIFT 16
337#define FP_M1_DIV_MASK 0x00003f00
338#define FP_M1_DIV_SHIFT 8
339#define FP_M2_DIV_MASK 0x0000003f
340#define FP_M2_DIV_SHIFT 0
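
/*
 * [Editorial sketch, not part of this patch] Recovering the PLL output
 * frequency from an FP register value, using the classic i9xx relations
 * (m = 5 * (m1 + 2) + (m2 + 2), vco = refclk * m / (n + 2)); these formulas
 * come from the i915 driver and are assumed to apply here as well.
 */
static inline u32 fp_to_vco_khz(u32 fp, u32 refclk_khz)
{
	u32 n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
	u32 m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	u32 m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	u32 m = 5 * (m1 + 2) + (m2 + 2);

	return refclk_khz * m / (n + 2);
}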
341
342#define PORT_HOTPLUG_EN 0x61110
343#define SDVOB_HOTPLUG_INT_EN (1 << 26)
344#define SDVOC_HOTPLUG_INT_EN (1 << 25)
345#define TV_HOTPLUG_INT_EN (1 << 18)
346#define CRT_HOTPLUG_INT_EN (1 << 9)
347#define CRT_HOTPLUG_FORCE_DETECT (1 << 3)
348/* CDV */
349#define CRT_HOTPLUG_ACTIVATION_PERIOD_64 (1 << 8)
350#define CRT_HOTPLUG_DAC_ON_TIME_2M (0 << 7)
351#define CRT_HOTPLUG_DAC_ON_TIME_4M (1 << 7)
352#define CRT_HOTPLUG_VOLTAGE_COMPARE_40 (0 << 5)
353#define CRT_HOTPLUG_VOLTAGE_COMPARE_50 (1 << 5)
354#define CRT_HOTPLUG_VOLTAGE_COMPARE_60 (2 << 5)
355#define CRT_HOTPLUG_VOLTAGE_COMPARE_70 (3 << 5)
356#define CRT_HOTPLUG_VOLTAGE_COMPARE_MASK (3 << 5)
357#define CRT_HOTPLUG_DETECT_DELAY_1G (0 << 4)
358#define CRT_HOTPLUG_DETECT_DELAY_2G (1 << 4)
359#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2)
360#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
361#define CRT_HOTPLUG_DETECT_MASK 0x000000F8
362
363#define PORT_HOTPLUG_STAT 0x61114
364#define CRT_HOTPLUG_INT_STATUS (1 << 11)
365#define TV_HOTPLUG_INT_STATUS (1 << 10)
366#define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
367#define CRT_HOTPLUG_MONITOR_COLOR (3 << 8)
368#define CRT_HOTPLUG_MONITOR_MONO (2 << 8)
369#define CRT_HOTPLUG_MONITOR_NONE (0 << 8)
370#define SDVOC_HOTPLUG_INT_STATUS (1 << 7)
371#define SDVOB_HOTPLUG_INT_STATUS (1 << 6)
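
/*
 * [Editorial sketch, not part of this patch] The usual CRT force-detect
 * sequence built from the bits above: trigger a detect cycle, wait for the
 * hardware to clear the trigger bit, then sample the monitor field. Assumes
 * the REG_READ/REG_WRITE accessors used elsewhere in this driver; a real
 * implementation would bound the busy-wait.
 */
static inline bool crt_hotplug_force_detect(struct drm_device *dev)
{
	REG_WRITE(PORT_HOTPLUG_EN,
		  REG_READ(PORT_HOTPLUG_EN) | CRT_HOTPLUG_FORCE_DETECT);

	while (REG_READ(PORT_HOTPLUG_EN) & CRT_HOTPLUG_FORCE_DETECT)
		cpu_relax();

	return (REG_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) !=
		CRT_HOTPLUG_MONITOR_NONE;
}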
372
373#define SDVOB 0x61140
374#define SDVOC 0x61160
375#define SDVO_ENABLE (1 << 31)
376#define SDVO_PIPE_B_SELECT (1 << 30)
377#define SDVO_STALL_SELECT (1 << 29)
378#define SDVO_INTERRUPT_ENABLE (1 << 26)
379#define SDVO_COLOR_RANGE_16_235 (1 << 8)
380#define SDVO_AUDIO_ENABLE (1 << 6)
381
382/**
383 * 915G/GM SDVO pixel multiplier.
384 *
385 * Programmed value is multiplier - 1, up to 5x.
386 *
387 * DPLL_MD_UDI_MULTIPLIER_MASK
388 */
389#define SDVO_PORT_MULTIPLY_MASK (7 << 23)
390#define SDVO_PORT_MULTIPLY_SHIFT 23
391#define SDVO_PHASE_SELECT_MASK (15 << 19)
392#define SDVO_PHASE_SELECT_DEFAULT (6 << 19)
393#define SDVO_CLOCK_OUTPUT_INVERT (1 << 18)
394#define SDVOC_GANG_MODE (1 << 16)
395#define SDVO_BORDER_ENABLE (1 << 7)
396#define SDVOB_PCIE_CONCURRENCY (1 << 3)
397#define SDVO_DETECTED (1 << 2)
398/* Bits to be preserved when writing */
399#define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14))
400#define SDVOC_PRESERVE_MASK (1 << 17)
401
402/*
403 * This register controls the LVDS output enable, pipe selection, and data
404 * format selection.
405 *
406 * All of the clock/data pairs are force powered down by power sequencing.
407 */
408#define LVDS 0x61180
409/*
410 * Enables the LVDS port. This bit must be set before DPLLs are enabled, as
411 * the DPLL semantics change when the LVDS is assigned to that pipe.
412 */
413#define LVDS_PORT_EN (1 << 31)
414/* Selects pipe B for LVDS data. Must be set on pre-965. */
415#define LVDS_PIPEB_SELECT (1 << 30)
416
417/* Turns on border drawing to allow centered display. */
418#define LVDS_BORDER_EN (1 << 15)
419
420/*
421 * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
422 * pixel.
423 */
424#define LVDS_A0A2_CLKA_POWER_MASK (3 << 8)
425#define LVDS_A0A2_CLKA_POWER_DOWN (0 << 8)
426#define LVDS_A0A2_CLKA_POWER_UP (3 << 8)
427/*
428 * Controls the A3 data pair, which contains the additional LSBs for 24 bit
429 * mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be
430 * on.
431 */
432#define LVDS_A3_POWER_MASK (3 << 6)
433#define LVDS_A3_POWER_DOWN (0 << 6)
434#define LVDS_A3_POWER_UP (3 << 6)
435/*
436 * Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP
437 * is set.
438 */
439#define LVDS_CLKB_POWER_MASK (3 << 4)
440#define LVDS_CLKB_POWER_DOWN (0 << 4)
441#define LVDS_CLKB_POWER_UP (3 << 4)
442/*
443 * Controls the B0-B3 data pairs. This must be set to match the DPLL p2
444 * setting for whether we are in dual-channel mode. The B3 pair will
445 * additionally only be powered up when LVDS_A3_POWER_UP is set.
446 */
447#define LVDS_B0B3_POWER_MASK (3 << 2)
448#define LVDS_B0B3_POWER_DOWN (0 << 2)
449#define LVDS_B0B3_POWER_UP (3 << 2)
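
/*
 * [Editorial sketch, not part of this patch] Building the LVDS channel
 * power bits for single- vs. dual-channel panels; dual channel must match
 * the DPLL p2 setting as noted above. The helper name is hypothetical.
 */
static inline u32 lvds_channel_power_bits(bool dual_channel)
{
	u32 val = LVDS_A0A2_CLKA_POWER_UP;	/* first channel always up */

	if (dual_channel)
		val |= LVDS_CLKB_POWER_UP | LVDS_B0B3_POWER_UP;
	return val;
}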
450
451#define PIPEACONF 0x70008
452#define PIPEACONF_ENABLE (1 << 31)
453#define PIPEACONF_DISABLE 0
454#define PIPEACONF_DOUBLE_WIDE (1 << 30)
455#define PIPECONF_ACTIVE (1 << 30)
456#define I965_PIPECONF_ACTIVE (1 << 30)
457#define PIPECONF_DSIPLL_LOCK (1 << 29)
458#define PIPEACONF_SINGLE_WIDE 0
459#define PIPEACONF_PIPE_UNLOCKED 0
460#define PIPEACONF_DSR (1 << 26)
461#define PIPEACONF_PIPE_LOCKED (1 << 25)
462#define PIPEACONF_PALETTE 0
463#define PIPECONF_FORCE_BORDER (1 << 25)
464#define PIPEACONF_GAMMA (1 << 24)
465#define PIPECONF_PROGRESSIVE (0 << 21)
466#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21)
467#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21)
468#define PIPECONF_PLANE_OFF (1 << 19)
469#define PIPECONF_CURSOR_OFF (1 << 18)
470
471#define PIPEBCONF 0x71008
472#define PIPEBCONF_ENABLE (1 << 31)
473#define PIPEBCONF_DISABLE 0
474#define PIPEBCONF_DOUBLE_WIDE (1 << 30)
476#define PIPEBCONF_GAMMA (1 << 24)
477#define PIPEBCONF_PALETTE 0
478
479#define PIPECCONF 0x72008
480
481#define PIPEBGCMAXRED 0x71010
482#define PIPEBGCMAXGREEN 0x71014
483#define PIPEBGCMAXBLUE 0x71018
484
485#define PIPEASTAT 0x70024
486#define PIPEBSTAT 0x71024
487#define PIPECSTAT 0x72024
488#define PIPE_VBLANK_INTERRUPT_STATUS (1UL << 1)
489#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL << 2)
490#define PIPE_VBLANK_CLEAR (1 << 1)
491#define PIPE_VBLANK_STATUS (1 << 1)
492#define PIPE_TE_STATUS (1UL << 6)
493#define PIPE_DPST_EVENT_STATUS (1UL << 7)
494#define PIPE_VSYNC_CLEAR (1UL << 9)
495#define PIPE_VSYNC_STATUS (1UL << 9)
496#define PIPE_HDMI_AUDIO_UNDERRUN_STATUS (1UL << 10)
497#define PIPE_HDMI_AUDIO_BUFFER_DONE_STATUS (1UL << 11)
498#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL << 17)
499#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL << 18)
500#define PIPE_TE_ENABLE (1UL << 22)
501#define PIPE_DPST_EVENT_ENABLE (1UL << 23)
502#define PIPE_VSYNC_ENABL (1UL << 25)
503#define PIPE_HDMI_AUDIO_UNDERRUN (1UL << 26)
504#define PIPE_HDMI_AUDIO_BUFFER_DONE (1UL << 27)
505#define PIPE_HDMI_AUDIO_INT_MASK (PIPE_HDMI_AUDIO_UNDERRUN | \
506 PIPE_HDMI_AUDIO_BUFFER_DONE)
507#define PIPE_EVENT_MASK ((1 << 29)|(1 << 28)|(1 << 27)|(1 << 26)|(1 << 24)|(1 << 23)|(1 << 22)|(1 << 21)|(1 << 20)|(1 << 16))
508#define PIPE_VBLANK_MASK ((1 << 25)|(1 << 24)|(1 << 18)|(1 << 17))
509#define HISTOGRAM_INT_CONTROL 0x61268
510#define HISTOGRAM_BIN_DATA 0x61264
511#define HISTOGRAM_LOGIC_CONTROL 0x61260
512#define PWM_CONTROL_LOGIC 0x61250
513#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL << 10)
514#define HISTOGRAM_INTERRUPT_ENABLE (1UL << 31)
515#define HISTOGRAM_LOGIC_ENABLE (1UL << 31)
516#define PWM_LOGIC_ENABLE (1UL << 31)
517#define PWM_PHASEIN_ENABLE (1UL << 25)
518#define PWM_PHASEIN_INT_ENABLE (1UL << 24)
519#define PWM_PHASEIN_VB_COUNT 0x00001f00
520#define PWM_PHASEIN_INC 0x0000001f
521#define HISTOGRAM_INT_CTRL_CLEAR (1UL << 30)
522#define DPST_YUV_LUMA_MODE 0
523
524struct dpst_ie_histogram_control {
525 union {
526 uint32_t data;
527 struct {
528 uint32_t bin_reg_index:7;
529 uint32_t reserved:4;
530 uint32_t bin_reg_func_select:1;
531 uint32_t sync_to_phase_in:1;
532 uint32_t alt_enhancement_mode:2;
533 uint32_t reserved1:1;
534 uint32_t sync_to_phase_in_count:8;
535 uint32_t histogram_mode_select:1;
536 uint32_t reserved2:4;
537 uint32_t ie_pipe_assignment:1;
538 uint32_t ie_mode_table_enabled:1;
539 uint32_t ie_histogram_enable:1;
540 };
541 };
542};
543
544struct dpst_guardband {
545 union {
546 uint32_t data;
547 struct {
548 uint32_t guardband:22;
549 uint32_t guardband_interrupt_delay:8;
550 uint32_t interrupt_status:1;
551 uint32_t interrupt_enable:1;
552 };
553 };
554};
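
/*
 * [Editorial sketch, not part of this patch] The unions above let the DPST
 * registers be programmed field by field and then written out as a single
 * 32-bit value; the helper below is hypothetical.
 */
static inline void dpst_histogram_enable(struct drm_device *dev)
{
	struct dpst_ie_histogram_control ctl = { .data = 0 };

	ctl.histogram_mode_select = DPST_YUV_LUMA_MODE;
	ctl.ie_histogram_enable = 1;
	REG_WRITE(HISTOGRAM_LOGIC_CONTROL, ctl.data);
}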
555
556#define PIPEAFRAMEHIGH 0x70040
557#define PIPEAFRAMEPIXEL 0x70044
558#define PIPEBFRAMEHIGH 0x71040
559#define PIPEBFRAMEPIXEL 0x71044
560#define PIPECFRAMEHIGH 0x72040
561#define PIPECFRAMEPIXEL 0x72044
562#define PIPE_FRAME_HIGH_MASK 0x0000ffff
563#define PIPE_FRAME_HIGH_SHIFT 0
564#define PIPE_FRAME_LOW_MASK 0xff000000
565#define PIPE_FRAME_LOW_SHIFT 24
566#define PIPE_PIXEL_MASK 0x00ffffff
567#define PIPE_PIXEL_SHIFT 0
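
/*
 * [Editorial sketch, not part of this patch] Assembling the frame counter
 * for pipe A from the split fields above: the high 16 bits live in
 * PIPEAFRAMEHIGH and the low 8 bits in the top byte of PIPEAFRAMEPIXEL. A
 * real implementation rereads until stable to avoid tearing between the
 * two register reads.
 */
static inline u32 pipe_a_frame_count(struct drm_device *dev)
{
	u32 high = REG_READ(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK;
	u32 low = (REG_READ(PIPEAFRAMEPIXEL) & PIPE_FRAME_LOW_MASK) >>
		PIPE_FRAME_LOW_SHIFT;

	return (high << 8) | low;
}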
568
569#define DSPARB 0x70030
570#define DSPFW1 0x70034
571#define DSPFW2 0x70038
572#define DSPFW3 0x7003c
573#define DSPFW4 0x70050
574#define DSPFW5 0x70054
575#define DSPFW6 0x70058
576#define DSPCHICKENBIT 0x70400
577#define DSPACNTR 0x70180
578#define DSPBCNTR 0x71180
579#define DSPCCNTR 0x72180
580#define DISPLAY_PLANE_ENABLE (1 << 31)
581#define DISPLAY_PLANE_DISABLE 0
582#define DISPPLANE_GAMMA_ENABLE (1 << 30)
583#define DISPPLANE_GAMMA_DISABLE 0
584#define DISPPLANE_PIXFORMAT_MASK (0xf << 26)
585#define DISPPLANE_8BPP (0x2 << 26)
586#define DISPPLANE_15_16BPP (0x4 << 26)
587#define DISPPLANE_16BPP (0x5 << 26)
588#define DISPPLANE_32BPP_NO_ALPHA (0x6 << 26)
589#define DISPPLANE_32BPP (0x7 << 26)
590#define DISPPLANE_STEREO_ENABLE (1 << 25)
591#define DISPPLANE_STEREO_DISABLE 0
592#define DISPPLANE_SEL_PIPE_MASK (1 << 24)
593#define DISPPLANE_SEL_PIPE_POS 24
594#define DISPPLANE_SEL_PIPE_A 0
595#define DISPPLANE_SEL_PIPE_B (1 << 24)
596#define DISPPLANE_SRC_KEY_ENABLE (1 << 22)
597#define DISPPLANE_SRC_KEY_DISABLE 0
598#define DISPPLANE_LINE_DOUBLE (1 << 20)
599#define DISPPLANE_NO_LINE_DOUBLE 0
600#define DISPPLANE_STEREO_POLARITY_FIRST 0
601#define DISPPLANE_STEREO_POLARITY_SECOND (1 << 18)
602/* plane B only */
603#define DISPPLANE_ALPHA_TRANS_ENABLE (1 << 15)
604#define DISPPLANE_ALPHA_TRANS_DISABLE 0
605#define DISPPLANE_SPRITE_ABOVE_DISPLAYA 0
606#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1)
607#define DISPPLANE_BOTTOM (4)
608
609#define DSPABASE 0x70184
610#define DSPALINOFF 0x70184
611#define DSPASTRIDE 0x70188
612
613#define DSPBBASE 0x71184
614#define DSPBLINOFF 0x71184
615#define DSPBADDR DSPBBASE
616#define DSPBSTRIDE 0x71188
617
618#define DSPCBASE 0x72184
619#define DSPCLINOFF 0x72184
620#define DSPCSTRIDE 0x72188
621
622#define DSPAKEYVAL 0x70194
623#define DSPAKEYMASK 0x70198
624
625#define DSPAPOS 0x7018C /* reserved */
626#define DSPASIZE 0x70190
627#define DSPBPOS 0x7118C
628#define DSPBSIZE 0x71190
629#define DSPCPOS 0x7218C
630#define DSPCSIZE 0x72190
631
632#define DSPASURF 0x7019C
633#define DSPATILEOFF 0x701A4
634
635#define DSPBSURF 0x7119C
636#define DSPBTILEOFF 0x711A4
637
638#define DSPCSURF 0x7219C
639#define DSPCTILEOFF 0x721A4
640#define DSPCKEYMAXVAL 0x721A0
641#define DSPCKEYMINVAL 0x72194
642#define DSPCKEYMSK 0x72198
643
644#define VGACNTRL 0x71400
645#define VGA_DISP_DISABLE (1 << 31)
646#define VGA_2X_MODE (1 << 30)
647#define VGA_PIPE_B_SELECT (1 << 29)
648
649/*
650 * Overlay registers
651 */
652#define OV_C_OFFSET 0x08000
653#define OV_OVADD 0x30000
654#define OV_DOVASTA 0x30008
655# define OV_PIPE_SELECT ((1 << 6)|(1 << 7))
656# define OV_PIPE_SELECT_POS 6
657# define OV_PIPE_A 0
658# define OV_PIPE_C 1
659#define OV_OGAMC5 0x30010
660#define OV_OGAMC4 0x30014
661#define OV_OGAMC3 0x30018
662#define OV_OGAMC2 0x3001C
663#define OV_OGAMC1 0x30020
664#define OV_OGAMC0 0x30024
665#define OVC_OVADD 0x38000
666#define OVC_DOVCSTA 0x38008
667#define OVC_OGAMC5 0x38010
668#define OVC_OGAMC4 0x38014
669#define OVC_OGAMC3 0x38018
670#define OVC_OGAMC2 0x3801C
671#define OVC_OGAMC1 0x38020
672#define OVC_OGAMC0 0x38024
673
674/*
675 * Some BIOS scratch area registers. The 845 (and 830?) store the amount
676 * of video memory available to the BIOS in SWF1.
677 */
678#define SWF0 0x71410
679#define SWF1 0x71414
680#define SWF2 0x71418
681#define SWF3 0x7141c
682#define SWF4 0x71420
683#define SWF5 0x71424
684#define SWF6 0x71428
685
686/*
687 * 855 scratch registers.
688 */
689#define SWF00 0x70410
690#define SWF01 0x70414
691#define SWF02 0x70418
692#define SWF03 0x7041c
693#define SWF04 0x70420
694#define SWF05 0x70424
695#define SWF06 0x70428
696
697#define SWF10 SWF0
698#define SWF11 SWF1
699#define SWF12 SWF2
700#define SWF13 SWF3
701#define SWF14 SWF4
702#define SWF15 SWF5
703#define SWF16 SWF6
704
705#define SWF30 0x72414
706#define SWF31 0x72418
707#define SWF32 0x7241c
708
709
710/*
711 * Palette registers
712 */
713#define PALETTE_A 0x0a000
714#define PALETTE_B 0x0a800
715#define PALETTE_C 0x0ac00
716
717/* Cursor A & B regs */
718#define CURACNTR 0x70080
719#define CURSOR_MODE_DISABLE 0x00
720#define CURSOR_MODE_64_32B_AX 0x07
721#define CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX)
722#define MCURSOR_GAMMA_ENABLE (1 << 26)
723#define CURABASE 0x70084
724#define CURAPOS 0x70088
725#define CURSOR_POS_MASK 0x007FF
726#define CURSOR_POS_SIGN 0x8000
727#define CURSOR_X_SHIFT 0
728#define CURSOR_Y_SHIFT 16
729#define CURBCNTR 0x700c0
730#define CURBBASE 0x700c4
731#define CURBPOS 0x700c8
732#define CURCCNTR 0x700e0
733#define CURCBASE 0x700e4
734#define CURCPOS 0x700e8
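
/*
 * [Editorial sketch, not part of this patch] Packing a signed cursor
 * position into CURAPOS/CURBPOS: each 16-bit half carries a magnitude
 * (CURSOR_POS_MASK) plus a sign bit (CURSOR_POS_SIGN).
 */
static inline u32 cursor_pos_bits(int x, int y)
{
	u32 pos = 0;

	if (x < 0) {
		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
		x = -x;
	}
	if (y < 0) {
		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
		y = -y;
	}
	pos |= ((u32)x & CURSOR_POS_MASK) << CURSOR_X_SHIFT;
	pos |= ((u32)y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT;
	return pos;
}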
735
736/*
737 * Interrupt Registers
738 */
739#define IER 0x020a0
740#define IIR 0x020a4
741#define IMR 0x020a8
742#define ISR 0x020ac
743
744/*
745 * MOORESTOWN delta registers
746 */
747#define MRST_DPLL_A 0x0f014
748#define MDFLD_DPLL_B 0x0f018
749#define MDFLD_INPUT_REF_SEL (1 << 14)
750#define MDFLD_VCO_SEL (1 << 16)
751#define DPLLA_MODE_LVDS (2 << 26) /* mrst */
752#define MDFLD_PLL_LATCHEN (1 << 28)
753#define MDFLD_PWR_GATE_EN (1 << 30)
754#define MDFLD_P1_MASK (0x1FF << 17)
755#define MRST_FPA0 0x0f040
756#define MRST_FPA1 0x0f044
757#define MDFLD_DPLL_DIV0 0x0f048
758#define MDFLD_DPLL_DIV1 0x0f04c
759#define MRST_PERF_MODE 0x020f4
760
761/*
762 * MEDFIELD HDMI registers
763 */
764#define HDMIPHYMISCCTL 0x61134
765#define HDMI_PHY_POWER_DOWN 0x7f
766#define HDMIB_CONTROL 0x61140
767#define HDMIB_PORT_EN (1 << 31)
768#define HDMIB_PIPE_B_SELECT (1 << 30)
769#define HDMIB_NULL_PACKET (1 << 9)
770#define HDMIB_HDCP_PORT (1 << 5)
771
772/* #define LVDS 0x61180 */
773#define MRST_PANEL_8TO6_DITHER_ENABLE (1 << 25)
774#define MRST_PANEL_24_DOT_1_FORMAT (1 << 24)
775#define LVDS_A3_POWER_UP_0_OUTPUT (1 << 6)
776
777#define MIPI 0x61190
778#define MIPI_C 0x62190
779#define MIPI_PORT_EN (1 << 31)
780#define SEL_FLOPPED_HSTX (1 << 23)
781#define PASS_FROM_SPHY_TO_AFE (1 << 16)
782/* Turns on border drawing to allow centered display. */
783#define MIPI_BORDER_EN (1 << 15)
784#define MIPIA_3LANE_MIPIC_1LANE 0x1
785#define MIPIA_2LANE_MIPIC_2LANE 0x2
786#define TE_TRIGGER_DSI_PROTOCOL (1 << 2)
787#define TE_TRIGGER_GPIO_PIN (1 << 3)
788#define MIPI_TE_COUNT 0x61194
789
790/* #define PP_CONTROL 0x61204 */
791#define POWER_DOWN_ON_RESET (1 << 1)
792
793/* #define PFIT_CONTROL 0x61230 */
794#define PFIT_PIPE_SELECT (3 << 29)
795#define PFIT_PIPE_SELECT_SHIFT (29)
796
797/* #define BLC_PWM_CTL 0x61254 */
798#define MRST_BACKLIGHT_MODULATION_FREQ_SHIFT (16)
799#define MRST_BACKLIGHT_MODULATION_FREQ_MASK (0xffff << 16)
800
801/* #define PIPEACONF 0x70008 */
802#define PIPEACONF_PIPE_STATE (1 << 30)
803/* #define DSPACNTR 0x70180 */
804
805#define MRST_DSPABASE 0x7019c
806#define MRST_DSPBBASE 0x7119c
807#define MDFLD_DSPCBASE 0x7219c
808
809/*
810 * Moorestown registers.
811 */
812
813/*
814 * MIPI IP registers
815 */
816#define MIPIC_REG_OFFSET 0x800
817
818#define DEVICE_READY_REG 0xb000
819#define LP_OUTPUT_HOLD (1 << 16)
820#define EXIT_ULPS_DEV_READY 0x3
821#define LP_OUTPUT_HOLD_RELEASE 0x810000
822# define ENTERING_ULPS (2 << 1)
823# define EXITING_ULPS (1 << 1)
824# define ULPS_MASK (3 << 1)
825# define BUS_POSSESSION (1 << 3)
826#define INTR_STAT_REG 0xb004
827#define RX_SOT_ERROR (1 << 0)
828#define RX_SOT_SYNC_ERROR (1 << 1)
829#define RX_ESCAPE_MODE_ENTRY_ERROR (1 << 3)
830#define RX_LP_TX_SYNC_ERROR (1 << 4)
831#define RX_HS_RECEIVE_TIMEOUT_ERROR (1 << 5)
832#define RX_FALSE_CONTROL_ERROR (1 << 6)
833#define RX_ECC_SINGLE_BIT_ERROR (1 << 7)
834#define RX_ECC_MULTI_BIT_ERROR (1 << 8)
835#define RX_CHECKSUM_ERROR (1 << 9)
836#define RX_DSI_DATA_TYPE_NOT_RECOGNIZED (1 << 10)
837#define RX_DSI_VC_ID_INVALID (1 << 11)
838#define TX_FALSE_CONTROL_ERROR (1 << 12)
839#define TX_ECC_SINGLE_BIT_ERROR (1 << 13)
840#define TX_ECC_MULTI_BIT_ERROR (1 << 14)
841#define TX_CHECKSUM_ERROR (1 << 15)
842#define TX_DSI_DATA_TYPE_NOT_RECOGNIZED (1 << 16)
843#define TX_DSI_VC_ID_INVALID (1 << 17)
844#define HIGH_CONTENTION (1 << 18)
845#define LOW_CONTENTION (1 << 19)
846#define DPI_FIFO_UNDER_RUN (1 << 20)
847#define HS_TX_TIMEOUT (1 << 21)
848#define LP_RX_TIMEOUT (1 << 22)
849#define TURN_AROUND_ACK_TIMEOUT (1 << 23)
850#define ACK_WITH_NO_ERROR (1 << 24)
851#define HS_GENERIC_WR_FIFO_FULL (1 << 27)
852#define LP_GENERIC_WR_FIFO_FULL (1 << 28)
853#define SPL_PKT_SENT (1 << 30)
854#define INTR_EN_REG 0xb008
855#define DSI_FUNC_PRG_REG 0xb00c
856#define DPI_CHANNEL_NUMBER_POS 0x03
857#define DBI_CHANNEL_NUMBER_POS 0x05
858#define FMT_DPI_POS 0x07
859#define FMT_DBI_POS 0x0A
860#define DBI_DATA_WIDTH_POS 0x0D
861
862/* DPI PIXEL FORMATS */
863#define RGB_565_FMT 0x01 /* RGB 565 FORMAT */
864#define RGB_666_FMT 0x02 /* RGB 666 FORMAT */
865#define LRGB_666_FMT 0x03 /* RGB LOOSELY PACKED
866 * 666 FORMAT
867 */
868#define RGB_888_FMT 0x04 /* RGB 888 FORMAT */
869#define VIRTUAL_CHANNEL_NUMBER_0 0x00 /* Virtual channel 0 */
870#define VIRTUAL_CHANNEL_NUMBER_1 0x01 /* Virtual channel 1 */
871#define VIRTUAL_CHANNEL_NUMBER_2 0x02 /* Virtual channel 2 */
872#define VIRTUAL_CHANNEL_NUMBER_3 0x03 /* Virtual channel 3 */
873
874#define DBI_NOT_SUPPORTED 0x00 /* command mode
875 * is not supported
876 */
877#define DBI_DATA_WIDTH_16BIT 0x01 /* 16 bit data */
878#define DBI_DATA_WIDTH_9BIT 0x02 /* 9 bit data */
879#define DBI_DATA_WIDTH_8BIT 0x03 /* 8 bit data */
880#define DBI_DATA_WIDTH_OPT1 0x04 /* option 1 */
881#define DBI_DATA_WIDTH_OPT2 0x05 /* option 2 */
882
883#define HS_TX_TIMEOUT_REG 0xb010
884#define LP_RX_TIMEOUT_REG 0xb014
885#define TURN_AROUND_TIMEOUT_REG 0xb018
886#define DEVICE_RESET_REG 0xb01C
887#define DPI_RESOLUTION_REG 0xb020
888#define RES_V_POS 0x10
889#define DBI_RESOLUTION_REG 0xb024 /* Reserved for MDFLD */
890#define HORIZ_SYNC_PAD_COUNT_REG 0xb028
891#define HORIZ_BACK_PORCH_COUNT_REG 0xb02C
892#define HORIZ_FRONT_PORCH_COUNT_REG 0xb030
893#define HORIZ_ACTIVE_AREA_COUNT_REG 0xb034
894#define VERT_SYNC_PAD_COUNT_REG 0xb038
895#define VERT_BACK_PORCH_COUNT_REG 0xb03c
896#define VERT_FRONT_PORCH_COUNT_REG 0xb040
897#define HIGH_LOW_SWITCH_COUNT_REG 0xb044
898#define DPI_CONTROL_REG 0xb048
899#define DPI_SHUT_DOWN (1 << 0)
900#define DPI_TURN_ON (1 << 1)
901#define DPI_COLOR_MODE_ON (1 << 2)
902#define DPI_COLOR_MODE_OFF (1 << 3)
903#define DPI_BACK_LIGHT_ON (1 << 4)
904#define DPI_BACK_LIGHT_OFF (1 << 5)
905#define DPI_LP (1 << 6)
906#define DPI_DATA_REG 0xb04c
907#define DPI_BACK_LIGHT_ON_DATA 0x07
908#define DPI_BACK_LIGHT_OFF_DATA 0x17
909#define INIT_COUNT_REG 0xb050
910#define MAX_RET_PAK_REG 0xb054
911#define VIDEO_FMT_REG 0xb058
912#define COMPLETE_LAST_PCKT (1 << 2)
913#define EOT_DISABLE_REG 0xb05c
914#define ENABLE_CLOCK_STOPPING (1 << 1)
915#define LP_BYTECLK_REG 0xb060
916#define LP_GEN_DATA_REG 0xb064
917#define HS_GEN_DATA_REG 0xb068
918#define LP_GEN_CTRL_REG 0xb06C
919#define HS_GEN_CTRL_REG 0xb070
920#define DCS_CHANNEL_NUMBER_POS 0x6
921#define MCS_COMMANDS_POS 0x8
922#define WORD_COUNTS_POS 0x8
923#define MCS_PARAMETER_POS 0x10
924#define GEN_FIFO_STAT_REG 0xb074
925#define HS_DATA_FIFO_FULL (1 << 0)
926#define HS_DATA_FIFO_HALF_EMPTY (1 << 1)
927#define HS_DATA_FIFO_EMPTY (1 << 2)
928#define LP_DATA_FIFO_FULL (1 << 8)
929#define LP_DATA_FIFO_HALF_EMPTY (1 << 9)
930#define LP_DATA_FIFO_EMPTY (1 << 10)
931#define HS_CTRL_FIFO_FULL (1 << 16)
932#define HS_CTRL_FIFO_HALF_EMPTY (1 << 17)
933#define HS_CTRL_FIFO_EMPTY (1 << 18)
934#define LP_CTRL_FIFO_FULL (1 << 24)
935#define LP_CTRL_FIFO_HALF_EMPTY (1 << 25)
936#define LP_CTRL_FIFO_EMPTY (1 << 26)
937#define DBI_FIFO_EMPTY (1 << 27)
938#define DPI_FIFO_EMPTY (1 << 28)
939#define HS_LS_DBI_ENABLE_REG 0xb078
940#define TXCLKESC_REG 0xb07c
941#define DPHY_PARAM_REG 0xb080
942#define DBI_BW_CTRL_REG 0xb084
943#define CLK_LANE_SWT_REG 0xb088
944
945/*
946 * MIPI Adapter registers
947 */
948#define MIPI_CONTROL_REG 0xb104
949#define MIPI_2X_CLOCK_BITS ((1 << 0) | (1 << 1))
950#define MIPI_DATA_ADDRESS_REG 0xb108
951#define MIPI_DATA_LENGTH_REG 0xb10C
952#define MIPI_COMMAND_ADDRESS_REG 0xb110
953#define MIPI_COMMAND_LENGTH_REG 0xb114
954#define MIPI_READ_DATA_RETURN_REG0 0xb118
955#define MIPI_READ_DATA_RETURN_REG1 0xb11C
956#define MIPI_READ_DATA_RETURN_REG2 0xb120
957#define MIPI_READ_DATA_RETURN_REG3 0xb124
958#define MIPI_READ_DATA_RETURN_REG4 0xb128
959#define MIPI_READ_DATA_RETURN_REG5 0xb12C
960#define MIPI_READ_DATA_RETURN_REG6 0xb130
961#define MIPI_READ_DATA_RETURN_REG7 0xb134
962#define MIPI_READ_DATA_VALID_REG 0xb138
963
964/* DBI COMMANDS */
965#define soft_reset 0x01
966/*
967 * The display module performs a software reset.
968 * Registers are written with their SW Reset default values.
969 */
970#define get_power_mode 0x0a
971/*
972 * The display module returns the current power mode.
973 */
974#define get_address_mode 0x0b
975/*
976 * The display module returns the current address mode.
977 */
978#define get_pixel_format 0x0c
979/*
980 * This command gets the pixel format for the RGB image data
981 * used by the interface.
982 */
983#define get_display_mode 0x0d
984/*
985 * The display module returns the Display Image Mode status.
986 */
987#define get_signal_mode 0x0e
988/*
989 * The display module returns the Display Signal Mode.
990 */
991#define get_diagnostic_result 0x0f
992/*
993 * The display module returns the self-diagnostic results following
994 * a Sleep Out command.
995 */
996#define enter_sleep_mode 0x10
997/*
998 * This command causes the display module to enter the Sleep mode.
999 * In this mode, all unnecessary blocks inside the display module are
1000 * disabled except interface communication. This is the lowest power
1001 * mode the display module supports.
1002 */
1003#define exit_sleep_mode 0x11
1004/*
1005 * This command causes the display module to exit Sleep mode.
1006 * All blocks inside the display module are enabled.
1007 */
1008#define enter_partial_mode 0x12
1009/*
1010 * This command causes the display module to enter the Partial Display
1011 * Mode. The Partial Display Mode window is described by the
1012 * set_partial_area command.
1013 */
1014#define enter_normal_mode 0x13
1015/*
1016 * This command causes the display module to enter the Normal mode.
1017 * Normal mode means both Partial Display mode and Scroll mode are off.
1018 */
1019#define exit_invert_mode 0x20
1020/*
1021 * This command causes the display module to stop inverting the image
1022 * data on the display device. The frame memory contents remain unchanged.
1023 * No status bits are changed.
1024 */
1025#define enter_invert_mode 0x21
1026/*
1027 * This command causes the display module to invert the image data only on
1028 * the display device. The frame memory contents remain unchanged.
1029 * No status bits are changed.
1030 */
1031#define set_gamma_curve 0x26
1032/*
1033 * This command selects the desired gamma curve for the display device.
1034 * Four fixed gamma curves are defined in the DCS specification.
1035 */
1036#define set_display_off 0x28
1037/*
1038 * This command causes the display module to stop displaying the image data
1039 * on the display device. The frame memory contents remain unchanged.
1040 * No status bits are changed.
1041 */
1042#define set_display_on 0x29
1043/*
1044 * This command causes the display module to start displaying the image data
1045 * on the display device. The frame memory contents remain unchanged.
1046 * No status bits are changed.
1047 */
1048#define set_column_address 0x2a
1049/*
1050 * This command defines the column extent of the frame memory accessed by
1051 * the host processor with the read_memory_continue and
1052 * write_memory_continue commands.
1053 * No status bits are changed.
1054 */
1055#define set_page_addr 0x2b
1056/*
1057 * This command defines the page extent of the frame memory accessed by
1058 * the host processor with the write_memory_continue and
1059 * read_memory_continue commands.
1060 * No status bits are changed.
1061 */
1062#define write_mem_start 0x2c
1063/*
1064 * This command transfers image data from the host processor to the
1064 * display module's frame memory starting at the pixel location specified
1066 * by preceding set_column_address and set_page_address commands.
1067 */
1068#define set_partial_area 0x30
1069/*
1070 * This command defines the Partial Display mode's display area.
1071 * There are two parameters associated with this command, the first
1072 * defines the Start Row (SR) and the second the End Row (ER). SR and ER
1073 * refer to the Frame Memory Line Pointer.
1074 */
1075#define set_scroll_area 0x33
1076/*
1077 * This command defines the display module's Vertical Scrolling Area.
1078 */
1079#define set_tear_off 0x34
1080/*
1081 * This command turns off the display module's Tearing Effect output
1082 * signal on the TE signal line.
1083 */
1084#define set_tear_on 0x35
1085/*
1086 * This command turns on the display module's Tearing Effect output signal
1087 * on the TE signal line.
1088 */
1089#define set_address_mode 0x36
1090/*
1091 * This command sets the data order for transfers from the host processor
1092 * to the display module's frame memory, bits B[7:5] and B3, and from the
1093 * display module's frame memory to the display device, bits B[2:0] and B4.
1094 */
1095#define set_scroll_start 0x37
1096/*
1097 * This command sets the start of the vertical scrolling area in the frame
1098 * memory. The vertical scrolling area is fully defined when this command
1099 * is used with the set_scroll_area command. The set_scroll_start command
1100 * has one parameter, the Vertical Scroll Pointer. The VSP defines the
1101 * line in the frame memory that is written to the display device as the
1102 * first line of the vertical scroll area.
1103 */
1104#define exit_idle_mode 0x38
1105/*
1106 * This command causes the display module to exit Idle mode.
1107 */
1108#define enter_idle_mode 0x39
1109/*
1110 * This command causes the display module to enter Idle Mode.
1111 * In Idle Mode, color expression is reduced. Colors are shown on the
1112 * display device using the MSB of each of the R, G and B color
1113 * components in the frame memory
1114 */
1115#define set_pixel_format 0x3a
1116/*
1117 * This command sets the pixel format for the RGB image data used by the
1118 * interface.
1119 * Bits D[6:4] DPI Pixel Format Definition
1120 * Bits D[2:0] DBI Pixel Format Definition
1121 * Bits D7 and D3 are not used.
1122 */
1123#define DCS_PIXEL_FORMAT_3bpp 0x1
1124#define DCS_PIXEL_FORMAT_8bpp 0x2
1125#define DCS_PIXEL_FORMAT_12bpp 0x3
1126#define DCS_PIXEL_FORMAT_16bpp 0x5
1127#define DCS_PIXEL_FORMAT_18bpp 0x6
1128#define DCS_PIXEL_FORMAT_24bpp 0x7
1129
1130#define write_mem_cont 0x3c
1131
1132/*
1133 * This command transfers image data from the host processor to the
1134 * display module's frame memory continuing from the pixel location
1135 * following the previous write_memory_continue or write_memory_start
1136 * command.
1137 */
1138#define set_tear_scanline 0x44
1139/*
1140 * This command turns on the display module's Tearing Effect output signal
1141 * on the TE signal line when the display module reaches line N.
1142 */
1143#define get_scanline 0x45
1144/*
1145 * The display module returns the current scanline, N, used to update the
1146 * display device. The total number of scanlines on a display device is
1147 * defined as VSYNC + VBP + VACT + VFP. The first scanline is defined as
1148 * the first line of V Sync and is denoted as Line 0.
1149 * When in Sleep Mode, the value returned by get_scanline is undefined.
1150 */
1151
1152/* MCS or Generic COMMANDS */
1153/* MCS/generic data type */
1154#define GEN_SHORT_WRITE_0 0x03 /* generic short write, no parameters */
1155#define GEN_SHORT_WRITE_1 0x13 /* generic short write, 1 parameter */
1156#define GEN_SHORT_WRITE_2 0x23 /* generic short write, 2 parameters */
1157#define GEN_READ_0 0x04 /* generic read, no parameters */
1158#define GEN_READ_1 0x14 /* generic read, 1 parameter */
1159#define GEN_READ_2 0x24 /* generic read, 2 parameters */
1160#define GEN_LONG_WRITE 0x29 /* generic long write */
1161#define MCS_SHORT_WRITE_0 0x05 /* MCS short write, no parameters */
1162#define MCS_SHORT_WRITE_1 0x15 /* MCS short write, 1 parameter */
1163#define MCS_READ 0x06 /* MCS read, no parameters */
1164#define MCS_LONG_WRITE 0x39 /* MCS long write */
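
/*
 * [Editorial sketch, not part of this patch] Packing a DCS set_pixel_format
 * command into HS_GEN_CTRL_REG from the *_POS values defined earlier. The
 * data type sitting in the low bits is an assumption inferred from those
 * field positions, not something documented in this file.
 */
static inline u32 dcs_set_pixel_format_word(u8 dpi_fmt, u8 dbi_fmt)
{
	u8 param = (dpi_fmt << 4) | dbi_fmt;	/* D[6:4] DPI, D[2:0] DBI */

	return MCS_SHORT_WRITE_1 |
		(0 << DCS_CHANNEL_NUMBER_POS) |	/* virtual channel 0 */
		(set_pixel_format << MCS_COMMANDS_POS) |
		((u32)param << MCS_PARAMETER_POS);
}
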
1165/* MCS/generic commands */
1166/* TPO MCS */
1167#define write_display_profile 0x50
1168#define write_display_brightness 0x51
1169#define write_ctrl_display 0x53
1170#define write_ctrl_cabc 0x55
1171 #define UI_IMAGE 0x01
1172 #define STILL_IMAGE 0x02
1173 #define MOVING_IMAGE 0x03
1174#define write_hysteresis 0x57
1175#define write_gamma_setting 0x58
1176#define write_cabc_min_bright 0x5e
1177#define write_kbbc_profile 0x60
1178/* TMD MCS */
1179#define tmd_write_display_brightness 0x8c
1180
1181/*
1182 * This command is used to control ambient light, panel backlight
1183 * brightness and gamma settings.
1184 */
1185#define BRIGHT_CNTL_BLOCK_ON (1 << 5)
1186#define AMBIENT_LIGHT_SENSE_ON (1 << 4)
1187#define DISPLAY_DIMMING_ON (1 << 3)
1188#define BACKLIGHT_ON (1 << 2)
1189#define DISPLAY_BRIGHTNESS_AUTO (1 << 1)
1190#define GAMMA_AUTO (1 << 0)
1191
1192/* DCS Interface Pixel Formats */
1193#define DCS_PIXEL_FORMAT_3BPP 0x1
1194#define DCS_PIXEL_FORMAT_8BPP 0x2
1195#define DCS_PIXEL_FORMAT_12BPP 0x3
1196#define DCS_PIXEL_FORMAT_16BPP 0x5
1197#define DCS_PIXEL_FORMAT_18BPP 0x6
1198#define DCS_PIXEL_FORMAT_24BPP 0x7
1199/* ONE PARAMETER READ DATA */
1200#define addr_mode_data 0xfc
1201#define diag_res_data 0x00
1202#define disp_mode_data 0x23
1203#define pxl_fmt_data 0x77
1204#define pwr_mode_data 0x74
1205#define sig_mode_data 0x00
1206/* TWO PARAMETERS READ DATA */
1207#define scanline_data1 0xff
1208#define scanline_data2 0xff
1209#define NON_BURST_MODE_SYNC_PULSE 0x01 /* Non Burst Mode
1210 * with Sync Pulse
1211 */
1212#define NON_BURST_MODE_SYNC_EVENTS 0x02 /* Non Burst Mode
1213 * with Sync events
1214 */
1215#define BURST_MODE 0x03 /* Burst Mode */
1216#define DBI_COMMAND_BUFFER_SIZE 0x240
1217 /* Allocate at least
1218 * 0x100 bytes with 32
1219 * byte alignment
1220 */
1221#define DBI_DATA_BUFFER_SIZE 0x120 /* Allocate at least
1222 * 0x100 bytes with 32
1223 * byte alignment
1224 */
1225#define DBI_CB_TIME_OUT 0xFFFF
1226
1227#define GEN_FB_TIME_OUT 2000
1228
1229#define SKU_83 0x01
1230#define SKU_100 0x02
1231#define SKU_100L 0x04
1232#define SKU_BYPASS 0x08
1233
1234/* Some handy macros for playing with bitfields. */
1235#define PSB_MASK(high, low) (((1<<((high)-(low)+1))-1)<<(low))
1236#define SET_FIELD(value, field) (((value) << field ## _SHIFT) & field ## _MASK)
1237#define GET_FIELD(word, field) (((word) & field ## _MASK) >> field ## _SHIFT)
1238
1239#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
1240
1241/* PCI config space */
1242
1243#define SB_PCKT 0x02100 /* cedarview */
1244# define SB_OPCODE_MASK PSB_MASK(31, 16)
1245# define SB_OPCODE_SHIFT 16
1246# define SB_OPCODE_READ 0
1247# define SB_OPCODE_WRITE 1
1248# define SB_DEST_MASK PSB_MASK(15, 8)
1249# define SB_DEST_SHIFT 8
1250# define SB_DEST_DPLL 0x88
1251# define SB_BYTE_ENABLE_MASK PSB_MASK(7, 4)
1252# define SB_BYTE_ENABLE_SHIFT 4
1253# define SB_BUSY (1 << 0)
1254
1255
1256/* 32-bit value read/written from the DPIO reg. */
1257#define SB_DATA 0x02104 /* cedarview */
1258/* 32-bit address of the DPIO reg to be read/written. */
1259#define SB_ADDR 0x02108 /* cedarview */
1260#define DPIO_CFG 0x02110 /* cedarview */
1261# define DPIO_MODE_SELECT_1 (1 << 3)
1262# define DPIO_MODE_SELECT_0 (1 << 2)
1263# define DPIO_SFR_BYPASS (1 << 1)
1264/* reset is active low */
1265# define DPIO_CMN_RESET_N (1 << 0)
1266
1267/* Cedarview sideband registers */
1268#define _SB_M_A 0x8008
1269#define _SB_M_B 0x8028
1270#define SB_M(pipe) _PIPE(pipe, _SB_M_A, _SB_M_B)
1271# define SB_M_DIVIDER_MASK (0xFF << 24)
1272# define SB_M_DIVIDER_SHIFT 24
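
/*
 * [Editorial sketch, not part of this patch] How the bitfield helpers
 * compose: PSB_MASK(31, 16) is 0xffff0000, SET_FIELD/GET_FIELD pair a value
 * with FOO_MASK/FOO_SHIFT defines, and _PIPE picks the per-pipe register,
 * e.g. SB_M(1) == _SB_M_B. The helper below is hypothetical.
 */
static inline u32 sb_m_with_divider(u32 word, u32 divider)
{
	word &= ~SB_M_DIVIDER_MASK;
	word |= SET_FIELD(divider, SB_M_DIVIDER);
	return word;
}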
1273
1274#define _SB_N_VCO_A 0x8014
1275#define _SB_N_VCO_B 0x8034
1276#define SB_N_VCO(pipe) _PIPE(pipe, _SB_N_VCO_A, _SB_N_VCO_B)
1277#define SB_N_VCO_SEL_MASK PSB_MASK(31, 30)
1278#define SB_N_VCO_SEL_SHIFT 30
1279#define SB_N_DIVIDER_MASK PSB_MASK(29, 26)
1280#define SB_N_DIVIDER_SHIFT 26
1281#define SB_N_CB_TUNE_MASK PSB_MASK(25, 24)
1282#define SB_N_CB_TUNE_SHIFT 24
1283
1284#define _SB_REF_A 0x8018
1285#define _SB_REF_B 0x8038
1286#define SB_REF_SFR(pipe) _PIPE(pipe, _SB_REF_A, _SB_REF_B)
1287
1288#define _SB_P_A 0x801c
1289#define _SB_P_B 0x803c
1290#define SB_P(pipe) _PIPE(pipe, _SB_P_A, _SB_P_B)
1291#define SB_P2_DIVIDER_MASK PSB_MASK(31, 30)
1292#define SB_P2_DIVIDER_SHIFT 30
1293#define SB_P2_10 0 /* HDMI, DP, DAC */
1294#define SB_P2_5 1 /* DAC */
1295#define SB_P2_14 2 /* LVDS single */
1296#define SB_P2_7 3 /* LVDS double */
1297#define SB_P1_DIVIDER_MASK PSB_MASK(15, 12)
1298#define SB_P1_DIVIDER_SHIFT 12
1299
1300#define PSB_LANE0 0x120
1301#define PSB_LANE1 0x220
1302#define PSB_LANE2 0x2320
1303#define PSB_LANE3 0x2420
1304
1305#define LANE_PLL_MASK (0x7 << 20)
1306#define LANE_PLL_ENABLE (0x3 << 20)
1307
1308
1309#endif
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
new file mode 100644
index 00000000000..4882b29119e
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -0,0 +1,2617 @@
1/*
2 * Copyright 2006 Dave Airlie <airlied@linux.ie>
3 * Copyright © 2006-2007 Intel Corporation
4 * Jesse Barnes <jesse.barnes@intel.com>
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 * DEALINGS IN THE SOFTWARE.
24 *
25 * Authors:
26 * Eric Anholt <eric@anholt.net>
27 */
28#include <linux/module.h>
29#include <linux/i2c.h>
30#include <linux/slab.h>
31#include <linux/delay.h>
32#include "drmP.h"
33#include "drm.h"
34#include "drm_crtc.h"
35#include "drm_edid.h"
36#include "psb_intel_drv.h"
37#include "gma_drm.h"
38#include "psb_drv.h"
39#include "psb_intel_sdvo_regs.h"
40#include "psb_intel_reg.h"
41
42#define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)
43#define SDVO_RGB_MASK (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)
44#define SDVO_LVDS_MASK (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1)
45#define SDVO_TV_MASK (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0)
46
47#define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\
48 SDVO_TV_MASK)
49
50#define IS_TV(c) (c->output_flag & SDVO_TV_MASK)
51#define IS_TMDS(c) (c->output_flag & SDVO_TMDS_MASK)
52#define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK)
53#define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK))
54
55
56static const char *tv_format_names[] = {
57 "NTSC_M" , "NTSC_J" , "NTSC_443",
58 "PAL_B" , "PAL_D" , "PAL_G" ,
59 "PAL_H" , "PAL_I" , "PAL_M" ,
60 "PAL_N" , "PAL_NC" , "PAL_60" ,
61 "SECAM_B" , "SECAM_D" , "SECAM_G" ,
62 "SECAM_K" , "SECAM_K1", "SECAM_L" ,
63 "SECAM_60"
64};
65
66#define TV_FORMAT_NUM ARRAY_SIZE(tv_format_names)
67
68struct psb_intel_sdvo {
69 struct psb_intel_encoder base;
70
71 struct i2c_adapter *i2c;
72 u8 slave_addr;
73
74 struct i2c_adapter ddc;
75
76 /* Register for the SDVO device: SDVOB or SDVOC */
77 int sdvo_reg;
78
79 /* Active outputs controlled by this SDVO output */
80 uint16_t controlled_output;
81
82 /*
83 * Capabilities of the SDVO device returned by
84 * i830_sdvo_get_capabilities()
85 */
86 struct psb_intel_sdvo_caps caps;
87
88 /* Pixel clock limitations reported by the SDVO device, in kHz */
89 int pixel_clock_min, pixel_clock_max;
90
91 /*
92 * For a multifunction SDVO device,
93 * this records the currently attached outputs.
94 */
95 uint16_t attached_output;
96
97 /**
99 * This is used to select the color range of RGB outputs in HDMI mode.
100 * It is only valid when using TMDS encoding in 8-bit-per-color mode.
100 */
101 uint32_t color_range;
102
103 /**
104 * This is set if we're going to treat the device as TV-out.
105 *
106 * While we have these nice friendly flags for output types that ought
107 * to decide this for us, the S-Video output on our HDMI+S-Video card
108 * shows up as RGB1 (VGA).
109 */
110 bool is_tv;
111
112 /* Index of the current TV format name */
113 int tv_format_index;
114
115 /**
116 * This is set if we treat the device as HDMI, instead of DVI.
117 */
118 bool is_hdmi;
119 bool has_hdmi_monitor;
120 bool has_hdmi_audio;
121
122 /**
123 * This is set if we detect the output of the SDVO device as LVDS and
124 * have a valid fixed mode to use with the panel.
125 */
126 bool is_lvds;
127
128 /**
129 * This is the fixed panel mode pointer for SDVO LVDS
130 */
131 struct drm_display_mode *sdvo_lvds_fixed_mode;
132
133 /* DDC bus used by this SDVO encoder */
134 uint8_t ddc_bus;
135
136 /* Input timings for adjusted_mode */
137 struct psb_intel_sdvo_dtd input_dtd;
138};
139
140struct psb_intel_sdvo_connector {
141 struct psb_intel_connector base;
142
143 /* Mark the type of connector */
144 uint16_t output_flag;
145
146 int force_audio;
147
148 /* All currently supported TV formats */
149 u8 tv_format_supported[TV_FORMAT_NUM];
150 int format_supported_num;
151 struct drm_property *tv_format;
152
153 /* Properties for SDVO-TV */
154 struct drm_property *left;
155 struct drm_property *right;
156 struct drm_property *top;
157 struct drm_property *bottom;
158 struct drm_property *hpos;
159 struct drm_property *vpos;
160 struct drm_property *contrast;
161 struct drm_property *saturation;
162 struct drm_property *hue;
163 struct drm_property *sharpness;
164 struct drm_property *flicker_filter;
165 struct drm_property *flicker_filter_adaptive;
166 struct drm_property *flicker_filter_2d;
167 struct drm_property *tv_chroma_filter;
168 struct drm_property *tv_luma_filter;
169 struct drm_property *dot_crawl;
170
171 /* Property shared by SDVO-TV and LVDS */
172 struct drm_property *brightness;
173
174 /* Current settings for the above properties */
175 u32 left_margin, right_margin, top_margin, bottom_margin;
176
177 /* Ranges of the margin and enhancement properties */
178 u32 max_hscan, max_vscan;
179 u32 max_hpos, cur_hpos;
180 u32 max_vpos, cur_vpos;
181 u32 cur_brightness, max_brightness;
182 u32 cur_contrast, max_contrast;
183 u32 cur_saturation, max_saturation;
184 u32 cur_hue, max_hue;
185 u32 cur_sharpness, max_sharpness;
186 u32 cur_flicker_filter, max_flicker_filter;
187 u32 cur_flicker_filter_adaptive, max_flicker_filter_adaptive;
188 u32 cur_flicker_filter_2d, max_flicker_filter_2d;
189 u32 cur_tv_chroma_filter, max_tv_chroma_filter;
190 u32 cur_tv_luma_filter, max_tv_luma_filter;
191 u32 cur_dot_crawl, max_dot_crawl;
192};
193
194static struct psb_intel_sdvo *to_psb_intel_sdvo(struct drm_encoder *encoder)
195{
196 return container_of(encoder, struct psb_intel_sdvo, base.base);
197}
198
199static struct psb_intel_sdvo *intel_attached_sdvo(struct drm_connector *connector)
200{
201 return container_of(psb_intel_attached_encoder(connector),
202 struct psb_intel_sdvo, base);
203}
204
205static struct psb_intel_sdvo_connector *to_psb_intel_sdvo_connector(struct drm_connector *connector)
206{
207 return container_of(to_psb_intel_connector(connector), struct psb_intel_sdvo_connector, base);
208}
209
210static bool
211psb_intel_sdvo_output_setup(struct psb_intel_sdvo *psb_intel_sdvo, uint16_t flags);
212static bool
213psb_intel_sdvo_tv_create_property(struct psb_intel_sdvo *psb_intel_sdvo,
214 struct psb_intel_sdvo_connector *psb_intel_sdvo_connector,
215 int type);
216static bool
217psb_intel_sdvo_create_enhance_property(struct psb_intel_sdvo *psb_intel_sdvo,
218 struct psb_intel_sdvo_connector *psb_intel_sdvo_connector);
219
220/**
221 * Writes the SDVOB or SDVOC with the given value, but always writes both
222 * SDVOB and SDVOC to work around apparent hardware issues (according to
223 * comments in the BIOS).
224 */
225static void psb_intel_sdvo_write_sdvox(struct psb_intel_sdvo *psb_intel_sdvo, u32 val)
226{
227 struct drm_device *dev = psb_intel_sdvo->base.base.dev;
228 u32 bval = val, cval = val;
229 int i;
230
231 if (psb_intel_sdvo->sdvo_reg == SDVOB) {
232 cval = REG_READ(SDVOC);
233 } else {
234 bval = REG_READ(SDVOB);
235 }
236 /*
237 * Write the registers twice for luck. Sometimes,
238 * writing them only once doesn't appear to 'stick'.
239 * The BIOS does this too. Yay, magic.
240 */
241 for (i = 0; i < 2; i++) {
243 REG_WRITE(SDVOB, bval);
244 REG_READ(SDVOB);
245 REG_WRITE(SDVOC, cval);
246 REG_READ(SDVOC);
247 }
248}
249
250static bool psb_intel_sdvo_read_byte(struct psb_intel_sdvo *psb_intel_sdvo, u8 addr, u8 *ch)
251{
252 struct i2c_msg msgs[] = {
253 {
254 .addr = psb_intel_sdvo->slave_addr,
255 .flags = 0,
256 .len = 1,
257 .buf = &addr,
258 },
259 {
260 .addr = psb_intel_sdvo->slave_addr,
261 .flags = I2C_M_RD,
262 .len = 1,
263 .buf = ch,
264 }
265 };
266 int ret;
267
268 if ((ret = i2c_transfer(psb_intel_sdvo->i2c, msgs, 2)) == 2)
269 return true;
270
271 DRM_DEBUG_KMS("i2c transfer returned %d\n", ret);
272 return false;
273}
274
275#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
276/** Mapping of command numbers to names, for debug output */
277static const struct _sdvo_cmd_name {
278 u8 cmd;
279 const char *name;
280} sdvo_cmd_names[] = {
281 SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
282 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
283 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
284 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS),
285 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS),
286 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS),
287 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP),
288 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP),
289 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS),
290 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT),
291 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG),
292 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG),
293 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE),
294 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT),
295 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT),
296 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1),
297 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2),
298 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
299 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2),
301 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1),
302 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2),
303 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1),
304 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2),
305 SDVO_CMD_NAME_ENTRY(SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING),
306 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1),
307 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2),
308 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE),
309 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE),
310 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS),
311 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT),
312 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT),
313 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS),
314 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT),
315 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT),
316 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_POWER_STATES),
317 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POWER_STATE),
318 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODER_POWER_STATE),
319 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DISPLAY_POWER_STATE),
320 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),
321 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT),
322 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT),
323 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS),
324
325 /* Add the op code for SDVO enhancements */
326 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HPOS),
327 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HPOS),
328 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HPOS),
329 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_VPOS),
330 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_VPOS),
331 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_VPOS),
332 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION),
333 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION),
334 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION),
335 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HUE),
336 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HUE),
337 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HUE),
338 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_CONTRAST),
339 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CONTRAST),
340 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTRAST),
341 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_BRIGHTNESS),
342 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_BRIGHTNESS),
343 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_BRIGHTNESS),
344 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_H),
345 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_H),
346 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_H),
347 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V),
348 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V),
349 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V),
350 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER),
351 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER),
352 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER),
353 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE),
354 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE),
355 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE),
356 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_2D),
357 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_2D),
358 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_2D),
359 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SHARPNESS),
360 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SHARPNESS),
361 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SHARPNESS),
362 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DOT_CRAWL),
363 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DOT_CRAWL),
364 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_CHROMA_FILTER),
365 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_CHROMA_FILTER),
366 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_CHROMA_FILTER),
367 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_LUMA_FILTER),
368 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_LUMA_FILTER),
369 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_LUMA_FILTER),
370
371 /* HDMI op code */
372 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE),
373 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE),
374 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODE),
375 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_PIXEL_REPLI),
376 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PIXEL_REPLI),
377 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY_CAP),
378 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_COLORIMETRY),
379 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY),
380 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER),
381 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_AUDIO_STAT),
382 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_STAT),
383 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INDEX),
384 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_INDEX),
385 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INFO),
386 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_AV_SPLIT),
387 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_AV_SPLIT),
388 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_TXRATE),
389 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_TXRATE),
390 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_DATA),
391 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
392};
393
394#define IS_SDVOB(reg) ((reg) == SDVOB)
395#define SDVO_NAME(sdvo) (IS_SDVOB((sdvo)->sdvo_reg) ? "SDVOB" : "SDVOC")
396
397static void psb_intel_sdvo_debug_write(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd,
398 const void *args, int args_len)
399{
400 int i;
401
402 DRM_DEBUG_KMS("%s: W: %02X ",
403 SDVO_NAME(psb_intel_sdvo), cmd);
404 for (i = 0; i < args_len; i++)
405 DRM_LOG_KMS("%02X ", ((u8 *)args)[i]);
406 for (; i < 8; i++)
407 DRM_LOG_KMS(" ");
408 for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) {
409 if (cmd == sdvo_cmd_names[i].cmd) {
410 DRM_LOG_KMS("(%s)", sdvo_cmd_names[i].name);
411 break;
412 }
413 }
414 if (i == ARRAY_SIZE(sdvo_cmd_names))
415 DRM_LOG_KMS("(%02X)", cmd);
416 DRM_LOG_KMS("\n");
417}
418
419static const char *cmd_status_names[] = {
420 "Power on",
421 "Success",
422 "Not supported",
423 "Invalid arg",
424 "Pending",
425 "Target not specified",
426 "Scaling not supported"
427};
428
429static bool psb_intel_sdvo_write_cmd(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd,
430 const void *args, int args_len)
431{
432 u8 buf[args_len*2 + 2], status;
433 struct i2c_msg msgs[args_len + 3];
434 int i, ret;
435
436 psb_intel_sdvo_debug_write(psb_intel_sdvo, cmd, args, args_len);
437
438 for (i = 0; i < args_len; i++) {
439 msgs[i].addr = psb_intel_sdvo->slave_addr;
440 msgs[i].flags = 0;
441 msgs[i].len = 2;
442 msgs[i].buf = buf + 2*i;
443 buf[2*i + 0] = SDVO_I2C_ARG_0 - i;
444 buf[2*i + 1] = ((u8*)args)[i];
445 }
446 msgs[i].addr = psb_intel_sdvo->slave_addr;
447 msgs[i].flags = 0;
448 msgs[i].len = 2;
449 msgs[i].buf = buf + 2*i;
450 buf[2*i + 0] = SDVO_I2C_OPCODE;
451 buf[2*i + 1] = cmd;
452
453 /* the following two are to read the response */
454 status = SDVO_I2C_CMD_STATUS;
455 msgs[i+1].addr = psb_intel_sdvo->slave_addr;
456 msgs[i+1].flags = 0;
457 msgs[i+1].len = 1;
458 msgs[i+1].buf = &status;
459
460 msgs[i+2].addr = psb_intel_sdvo->slave_addr;
461 msgs[i+2].flags = I2C_M_RD;
462 msgs[i+2].len = 1;
463 msgs[i+2].buf = &status;
464
465 ret = i2c_transfer(psb_intel_sdvo->i2c, msgs, i+3);
466 if (ret < 0) {
467 DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
468 return false;
469 }
470 if (ret != i+3) {
471 /* failure in I2C transfer */
472 DRM_DEBUG_KMS("I2c transfer returned %d/%d\n", ret, i+3);
473 return false;
474 }
475
476 return true;
477}
478
479static bool psb_intel_sdvo_read_response(struct psb_intel_sdvo *psb_intel_sdvo,
480 void *response, int response_len)
481{
482 u8 retry = 5;
483 u8 status;
484 int i;
485
486 DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(psb_intel_sdvo));
487
488 /*
489 * The documentation states that all commands will be
490 * processed within 15µs, and that we need only poll
491 * the status byte a maximum of 3 times in order for the
492 * command to be complete.
493 *
494 * Check 5 times in case the hardware failed to read the docs.
495 */
496 if (!psb_intel_sdvo_read_byte(psb_intel_sdvo,
497 SDVO_I2C_CMD_STATUS,
498 &status))
499 goto log_fail;
500
501 while (status == SDVO_CMD_STATUS_PENDING && retry--) {
502 udelay(15);
503 if (!psb_intel_sdvo_read_byte(psb_intel_sdvo,
504 SDVO_I2C_CMD_STATUS,
505 &status))
506 goto log_fail;
507 }
508
509 if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
510 DRM_LOG_KMS("(%s)", cmd_status_names[status]);
511 else
512 DRM_LOG_KMS("(??? %d)", status);
513
514 if (status != SDVO_CMD_STATUS_SUCCESS)
515 goto log_fail;
516
517 /* Read the command response */
518 for (i = 0; i < response_len; i++) {
519 if (!psb_intel_sdvo_read_byte(psb_intel_sdvo,
520 SDVO_I2C_RETURN_0 + i,
521 &((u8 *)response)[i]))
522 goto log_fail;
523 DRM_LOG_KMS(" %02X", ((u8 *)response)[i]);
524 }
525 DRM_LOG_KMS("\n");
526 return true;
527
528log_fail:
529 DRM_LOG_KMS("... failed\n");
530 return false;
531}
532
533static int psb_intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
534{
535 if (mode->clock >= 100000)
536 return 1;
537 else if (mode->clock >= 50000)
538 return 2;
539 else
540 return 4;
541}
542
543static bool psb_intel_sdvo_set_control_bus_switch(struct psb_intel_sdvo *psb_intel_sdvo,
544 u8 ddc_bus)
545{
546 /* This must be the immediately preceding write before the i2c xfer */
547 return psb_intel_sdvo_write_cmd(psb_intel_sdvo,
548 SDVO_CMD_SET_CONTROL_BUS_SWITCH,
549 &ddc_bus, 1);
550}
551
552static bool psb_intel_sdvo_set_value(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd, const void *data, int len)
553{
554 if (!psb_intel_sdvo_write_cmd(psb_intel_sdvo, cmd, data, len))
555 return false;
556
557 return psb_intel_sdvo_read_response(psb_intel_sdvo, NULL, 0);
558}
559
560static bool
561psb_intel_sdvo_get_value(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd, void *value, int len)
562{
563 if (!psb_intel_sdvo_write_cmd(psb_intel_sdvo, cmd, NULL, 0))
564 return false;
565
566 return psb_intel_sdvo_read_response(psb_intel_sdvo, value, len);
567}
568
569static bool psb_intel_sdvo_set_target_input(struct psb_intel_sdvo *psb_intel_sdvo)
570{
571 struct psb_intel_sdvo_set_target_input_args targets = {0};
572 return psb_intel_sdvo_set_value(psb_intel_sdvo,
573 SDVO_CMD_SET_TARGET_INPUT,
574 &targets, sizeof(targets));
575}
576
577/**
578 * Return whether each input is trained.
579 *
580 * This function is making an assumption about the layout of the response,
581 * which should be checked against the docs.
582 */
583static bool psb_intel_sdvo_get_trained_inputs(struct psb_intel_sdvo *psb_intel_sdvo, bool *input_1, bool *input_2)
584{
585 struct psb_intel_sdvo_get_trained_inputs_response response;
586
587 BUILD_BUG_ON(sizeof(response) != 1);
588 if (!psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_TRAINED_INPUTS,
589 &response, sizeof(response)))
590 return false;
591
592 *input_1 = response.input0_trained;
593 *input_2 = response.input1_trained;
594 return true;
595}
596
597static bool psb_intel_sdvo_set_active_outputs(struct psb_intel_sdvo *psb_intel_sdvo,
598 u16 outputs)
599{
600 return psb_intel_sdvo_set_value(psb_intel_sdvo,
601 SDVO_CMD_SET_ACTIVE_OUTPUTS,
602 &outputs, sizeof(outputs));
603}
604
605static bool psb_intel_sdvo_set_encoder_power_state(struct psb_intel_sdvo *psb_intel_sdvo,
606 int mode)
607{
608 u8 state = SDVO_ENCODER_STATE_ON;
609
610 switch (mode) {
611 case DRM_MODE_DPMS_ON:
612 state = SDVO_ENCODER_STATE_ON;
613 break;
614 case DRM_MODE_DPMS_STANDBY:
615 state = SDVO_ENCODER_STATE_STANDBY;
616 break;
617 case DRM_MODE_DPMS_SUSPEND:
618 state = SDVO_ENCODER_STATE_SUSPEND;
619 break;
620 case DRM_MODE_DPMS_OFF:
621 state = SDVO_ENCODER_STATE_OFF;
622 break;
623 }
624
625 return psb_intel_sdvo_set_value(psb_intel_sdvo,
626 SDVO_CMD_SET_ENCODER_POWER_STATE, &state, sizeof(state));
627}
628
629static bool psb_intel_sdvo_get_input_pixel_clock_range(struct psb_intel_sdvo *psb_intel_sdvo,
630 int *clock_min,
631 int *clock_max)
632{
633 struct psb_intel_sdvo_pixel_clock_range clocks;
634
635 BUILD_BUG_ON(sizeof(clocks) != 4);
636 if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
637 SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
638 &clocks, sizeof(clocks)))
639 return false;
640
641 /* Convert the values from units of 10 kHz to kHz. */
642 *clock_min = clocks.min * 10;
643 *clock_max = clocks.max * 10;
644 return true;
645}
646
647static bool psb_intel_sdvo_set_target_output(struct psb_intel_sdvo *psb_intel_sdvo,
648 u16 outputs)
649{
650 return psb_intel_sdvo_set_value(psb_intel_sdvo,
651 SDVO_CMD_SET_TARGET_OUTPUT,
652 &outputs, sizeof(outputs));
653}
654
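/* Each PART2 timing opcode immediately follows its PART1 counterpart
 * in the SDVO command space, so cmd + 1 selects the second half. */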
655static bool psb_intel_sdvo_set_timing(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd,
656 struct psb_intel_sdvo_dtd *dtd)
657{
658 return psb_intel_sdvo_set_value(psb_intel_sdvo, cmd, &dtd->part1, sizeof(dtd->part1)) &&
659 psb_intel_sdvo_set_value(psb_intel_sdvo, cmd + 1, &dtd->part2, sizeof(dtd->part2));
660}
661
662static bool psb_intel_sdvo_set_input_timing(struct psb_intel_sdvo *psb_intel_sdvo,
663 struct psb_intel_sdvo_dtd *dtd)
664{
665 return psb_intel_sdvo_set_timing(psb_intel_sdvo,
666 SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd);
667}
668
669static bool psb_intel_sdvo_set_output_timing(struct psb_intel_sdvo *psb_intel_sdvo,
670 struct psb_intel_sdvo_dtd *dtd)
671{
672 return psb_intel_sdvo_set_timing(psb_intel_sdvo,
673 SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd);
674}
675
676static bool
677psb_intel_sdvo_create_preferred_input_timing(struct psb_intel_sdvo *psb_intel_sdvo,
678 uint16_t clock,
679 uint16_t width,
680 uint16_t height)
681{
682 struct psb_intel_sdvo_preferred_input_timing_args args;
683
684 memset(&args, 0, sizeof(args));
685 args.clock = clock;
686 args.width = width;
687 args.height = height;
688 args.interlace = 0;
689
690 if (psb_intel_sdvo->is_lvds &&
691 (psb_intel_sdvo->sdvo_lvds_fixed_mode->hdisplay != width ||
692 psb_intel_sdvo->sdvo_lvds_fixed_mode->vdisplay != height))
693 args.scaled = 1;
694
695 return psb_intel_sdvo_set_value(psb_intel_sdvo,
696 SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING,
697 &args, sizeof(args));
698}
699
700static bool psb_intel_sdvo_get_preferred_input_timing(struct psb_intel_sdvo *psb_intel_sdvo,
701 struct psb_intel_sdvo_dtd *dtd)
702{
703 BUILD_BUG_ON(sizeof(dtd->part1) != 8);
704 BUILD_BUG_ON(sizeof(dtd->part2) != 8);
705 return psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
706 &dtd->part1, sizeof(dtd->part1)) &&
707 psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
708 &dtd->part2, sizeof(dtd->part2));
709}
710
711static bool psb_intel_sdvo_set_clock_rate_mult(struct psb_intel_sdvo *psb_intel_sdvo, u8 val)
712{
713 return psb_intel_sdvo_set_value(psb_intel_sdvo, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1);
714}
715
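/*
 * Pack a drm_display_mode into an SDVO DTD, which mirrors the EDID
 * detailed timing descriptor layout: the low 8 bits of each field go
 * in their own byte and the high bits are packed into the shared
 * *_high nibbles. The 0x18 in dtd_flags selects digital separate
 * sync, with bits 1/2 carrying the hsync/vsync polarities.
 */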
716static void psb_intel_sdvo_get_dtd_from_mode(struct psb_intel_sdvo_dtd *dtd,
717 const struct drm_display_mode *mode)
718{
719 uint16_t width, height;
720 uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len;
721 uint16_t h_sync_offset, v_sync_offset;
722
723 width = mode->crtc_hdisplay;
724 height = mode->crtc_vdisplay;
725
726 /* do some mode translations */
727 h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start;
728 h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
729
730 v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start;
731 v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
732
733 h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
734 v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
735
736 dtd->part1.clock = mode->clock / 10;
737 dtd->part1.h_active = width & 0xff;
738 dtd->part1.h_blank = h_blank_len & 0xff;
739 dtd->part1.h_high = (((width >> 8) & 0xf) << 4) |
740 ((h_blank_len >> 8) & 0xf);
741 dtd->part1.v_active = height & 0xff;
742 dtd->part1.v_blank = v_blank_len & 0xff;
743 dtd->part1.v_high = (((height >> 8) & 0xf) << 4) |
744 ((v_blank_len >> 8) & 0xf);
745
746 dtd->part2.h_sync_off = h_sync_offset & 0xff;
747 dtd->part2.h_sync_width = h_sync_len & 0xff;
748 dtd->part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 |
749 (v_sync_len & 0xf);
750 dtd->part2.sync_off_width_high = ((h_sync_offset & 0x300) >> 2) |
751 ((h_sync_len & 0x300) >> 4) | ((v_sync_offset & 0x30) >> 2) |
752 ((v_sync_len & 0x30) >> 4);
753
754 dtd->part2.dtd_flags = 0x18;
755 if (mode->flags & DRM_MODE_FLAG_PHSYNC)
756 dtd->part2.dtd_flags |= 0x2;
757 if (mode->flags & DRM_MODE_FLAG_PVSYNC)
758 dtd->part2.dtd_flags |= 0x4;
759
760 dtd->part2.sdvo_flags = 0;
761 dtd->part2.v_sync_off_high = v_sync_offset & 0xc0;
762 dtd->part2.reserved = 0;
763}
764
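/* Inverse of psb_intel_sdvo_get_dtd_from_mode(): reassemble the split
 * high nibbles into full display, blank and sync values. */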
765static void psb_intel_sdvo_get_mode_from_dtd(struct drm_display_mode *mode,
766 const struct psb_intel_sdvo_dtd *dtd)
767{
768 mode->hdisplay = dtd->part1.h_active;
769 mode->hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8;
770 mode->hsync_start = mode->hdisplay + dtd->part2.h_sync_off;
771 mode->hsync_start += (dtd->part2.sync_off_width_high & 0xc0) << 2;
772 mode->hsync_end = mode->hsync_start + dtd->part2.h_sync_width;
773 mode->hsync_end += (dtd->part2.sync_off_width_high & 0x30) << 4;
774 mode->htotal = mode->hdisplay + dtd->part1.h_blank;
775 mode->htotal += (dtd->part1.h_high & 0xf) << 8;
776
777 mode->vdisplay = dtd->part1.v_active;
778 mode->vdisplay += ((dtd->part1.v_high >> 4) & 0x0f) << 8;
779 mode->vsync_start = mode->vdisplay;
780 mode->vsync_start += (dtd->part2.v_sync_off_width >> 4) & 0xf;
781 mode->vsync_start += (dtd->part2.sync_off_width_high & 0x0c) << 2;
782 mode->vsync_start += dtd->part2.v_sync_off_high & 0xc0;
783 mode->vsync_end = mode->vsync_start +
784 (dtd->part2.v_sync_off_width & 0xf);
785 mode->vsync_end += (dtd->part2.sync_off_width_high & 0x3) << 4;
786 mode->vtotal = mode->vdisplay + dtd->part1.v_blank;
787 mode->vtotal += (dtd->part1.v_high & 0xf) << 8;
788
789 mode->clock = dtd->part1.clock * 10;
790
791 mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
792 if (dtd->part2.dtd_flags & 0x2)
793 mode->flags |= DRM_MODE_FLAG_PHSYNC;
794 if (dtd->part2.dtd_flags & 0x4)
795 mode->flags |= DRM_MODE_FLAG_PVSYNC;
796}
797
798static bool psb_intel_sdvo_check_supp_encode(struct psb_intel_sdvo *psb_intel_sdvo)
799{
800 struct psb_intel_sdvo_encode encode;
801
802 BUILD_BUG_ON(sizeof(encode) != 2);
803 return psb_intel_sdvo_get_value(psb_intel_sdvo,
804 SDVO_CMD_GET_SUPP_ENCODE,
805 &encode, sizeof(encode));
806}
807
808static bool psb_intel_sdvo_set_encode(struct psb_intel_sdvo *psb_intel_sdvo,
809 uint8_t mode)
810{
811 return psb_intel_sdvo_set_value(psb_intel_sdvo, SDVO_CMD_SET_ENCODE, &mode, 1);
812}
813
814static bool psb_intel_sdvo_set_colorimetry(struct psb_intel_sdvo *psb_intel_sdvo,
815 uint8_t mode)
816{
817 return psb_intel_sdvo_set_value(psb_intel_sdvo, SDVO_CMD_SET_COLORIMETRY, &mode, 1);
818}
819
820#if 0
821static void psb_intel_sdvo_dump_hdmi_buf(struct psb_intel_sdvo *psb_intel_sdvo)
822{
823 int i, j;
824 uint8_t set_buf_index[2];
825 uint8_t av_split;
826 uint8_t buf_size;
827 uint8_t buf[48];
828 uint8_t *pos;
829
830	psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_HBUF_AV_SPLIT, &av_split, 1);
831
832	for (i = 0; i <= av_split; i++) {
833		set_buf_index[0] = i; set_buf_index[1] = 0;
834		psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_SET_HBUF_INDEX,
835					 set_buf_index, 2);
836		psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_GET_HBUF_INFO, NULL, 0);
837		psb_intel_sdvo_read_response(psb_intel_sdvo, &buf_size, 1);
838
839		pos = buf;
840		for (j = 0; j <= buf_size; j += 8) {
841			psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_GET_HBUF_DATA,
842						 NULL, 0);
843			psb_intel_sdvo_read_response(psb_intel_sdvo, pos, 8);
844 pos += 8;
845 }
846 }
847}
848#endif
849
850static bool psb_intel_sdvo_set_avi_infoframe(struct psb_intel_sdvo *psb_intel_sdvo)
851{
852	DRM_INFO("HDMI is not supported yet\n");
853
854 return false;
855#if 0
856 struct dip_infoframe avi_if = {
857 .type = DIP_TYPE_AVI,
858 .ver = DIP_VERSION_AVI,
859 .len = DIP_LEN_AVI,
860 };
861 uint8_t tx_rate = SDVO_HBUF_TX_VSYNC;
862 uint8_t set_buf_index[2] = { 1, 0 };
863 uint64_t *data = (uint64_t *)&avi_if;
864 unsigned i;
865
866 intel_dip_infoframe_csum(&avi_if);
867
868 if (!psb_intel_sdvo_set_value(psb_intel_sdvo,
869 SDVO_CMD_SET_HBUF_INDEX,
870 set_buf_index, 2))
871 return false;
872
873 for (i = 0; i < sizeof(avi_if); i += 8) {
874 if (!psb_intel_sdvo_set_value(psb_intel_sdvo,
875 SDVO_CMD_SET_HBUF_DATA,
876 data, 8))
877 return false;
878 data++;
879 }
880
881 return psb_intel_sdvo_set_value(psb_intel_sdvo,
882 SDVO_CMD_SET_HBUF_TXRATE,
883 &tx_rate, 1);
884#endif
885}
886
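/* The TV format is selected with a one-hot bit in a 6-byte mask;
 * tv_format_index is the bit position of the active format. */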
887static bool psb_intel_sdvo_set_tv_format(struct psb_intel_sdvo *psb_intel_sdvo)
888{
889 struct psb_intel_sdvo_tv_format format;
890 uint32_t format_map;
891
892 format_map = 1 << psb_intel_sdvo->tv_format_index;
893 memset(&format, 0, sizeof(format));
894 memcpy(&format, &format_map, min(sizeof(format), sizeof(format_map)));
895
896 BUILD_BUG_ON(sizeof(format) != 6);
897 return psb_intel_sdvo_set_value(psb_intel_sdvo,
898 SDVO_CMD_SET_TV_FORMAT,
899 &format, sizeof(format));
900}
901
902static bool
903psb_intel_sdvo_set_output_timings_from_mode(struct psb_intel_sdvo *psb_intel_sdvo,
904 struct drm_display_mode *mode)
905{
906 struct psb_intel_sdvo_dtd output_dtd;
907
908 if (!psb_intel_sdvo_set_target_output(psb_intel_sdvo,
909 psb_intel_sdvo->attached_output))
910 return false;
911
912 psb_intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
913 if (!psb_intel_sdvo_set_output_timing(psb_intel_sdvo, &output_dtd))
914 return false;
915
916 return true;
917}
918
919static bool
920psb_intel_sdvo_set_input_timings_for_mode(struct psb_intel_sdvo *psb_intel_sdvo,
921 struct drm_display_mode *mode,
922 struct drm_display_mode *adjusted_mode)
923{
924 /* Reset the input timing to the screen. Assume always input 0. */
925 if (!psb_intel_sdvo_set_target_input(psb_intel_sdvo))
926 return false;
927
928 if (!psb_intel_sdvo_create_preferred_input_timing(psb_intel_sdvo,
929 mode->clock / 10,
930 mode->hdisplay,
931 mode->vdisplay))
932 return false;
933
934 if (!psb_intel_sdvo_get_preferred_input_timing(psb_intel_sdvo,
935 &psb_intel_sdvo->input_dtd))
936 return false;
937
938 psb_intel_sdvo_get_mode_from_dtd(adjusted_mode, &psb_intel_sdvo->input_dtd);
939
940 drm_mode_set_crtcinfo(adjusted_mode, 0);
941 return true;
942}
943
944static bool psb_intel_sdvo_mode_fixup(struct drm_encoder *encoder,
945 struct drm_display_mode *mode,
946 struct drm_display_mode *adjusted_mode)
947{
948 struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
949 int multiplier;
950
951 /* We need to construct preferred input timings based on our
952 * output timings. To do that, we have to set the output
953 * timings, even though this isn't really the right place in
954 * the sequence to do it. Oh well.
955 */
956 if (psb_intel_sdvo->is_tv) {
957 if (!psb_intel_sdvo_set_output_timings_from_mode(psb_intel_sdvo, mode))
958 return false;
959
960 (void) psb_intel_sdvo_set_input_timings_for_mode(psb_intel_sdvo,
961 mode,
962 adjusted_mode);
963 } else if (psb_intel_sdvo->is_lvds) {
964 if (!psb_intel_sdvo_set_output_timings_from_mode(psb_intel_sdvo,
965 psb_intel_sdvo->sdvo_lvds_fixed_mode))
966 return false;
967
968 (void) psb_intel_sdvo_set_input_timings_for_mode(psb_intel_sdvo,
969 mode,
970 adjusted_mode);
971 }
972
973 /* Make the CRTC code factor in the SDVO pixel multiplier. The
974 * SDVO device will factor out the multiplier during mode_set.
975 */
976 multiplier = psb_intel_sdvo_get_pixel_multiplier(adjusted_mode);
977 psb_intel_mode_set_pixel_multiplier(adjusted_mode, multiplier);
978
979 return true;
980}
981
982static void psb_intel_sdvo_mode_set(struct drm_encoder *encoder,
983 struct drm_display_mode *mode,
984 struct drm_display_mode *adjusted_mode)
985{
986 struct drm_device *dev = encoder->dev;
987 struct drm_crtc *crtc = encoder->crtc;
988 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
989 struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
990 u32 sdvox;
991 struct psb_intel_sdvo_in_out_map in_out;
992 struct psb_intel_sdvo_dtd input_dtd;
993 int pixel_multiplier = psb_intel_mode_get_pixel_multiplier(adjusted_mode);
994 int rate;
995
996 if (!mode)
997 return;
998
999 /* First, set the input mapping for the first input to our controlled
1000 * output. This is only correct if we're a single-input device, in
1001 * which case the first input is the output from the appropriate SDVO
1002 * channel on the motherboard. In a two-input device, the first input
1003 * will be SDVOB and the second SDVOC.
1004 */
1005 in_out.in0 = psb_intel_sdvo->attached_output;
1006 in_out.in1 = 0;
1007
1008 psb_intel_sdvo_set_value(psb_intel_sdvo,
1009 SDVO_CMD_SET_IN_OUT_MAP,
1010 &in_out, sizeof(in_out));
1011
1012 /* Set the output timings to the screen */
1013 if (!psb_intel_sdvo_set_target_output(psb_intel_sdvo,
1014 psb_intel_sdvo->attached_output))
1015 return;
1016
1017	/* The input timing was already set up in mode_fixup and filled
1018	 * into adjusted_mode.
1019 */
1020 if (psb_intel_sdvo->is_tv || psb_intel_sdvo->is_lvds) {
1021 input_dtd = psb_intel_sdvo->input_dtd;
1022 } else {
1023 /* Set the output timing to the screen */
1024 if (!psb_intel_sdvo_set_target_output(psb_intel_sdvo,
1025 psb_intel_sdvo->attached_output))
1026 return;
1027
1028 psb_intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
1029 (void) psb_intel_sdvo_set_output_timing(psb_intel_sdvo, &input_dtd);
1030 }
1031
1032 /* Set the input timing to the screen. Assume always input 0. */
1033 if (!psb_intel_sdvo_set_target_input(psb_intel_sdvo))
1034 return;
1035
1036 if (psb_intel_sdvo->has_hdmi_monitor) {
1037 psb_intel_sdvo_set_encode(psb_intel_sdvo, SDVO_ENCODE_HDMI);
1038 psb_intel_sdvo_set_colorimetry(psb_intel_sdvo,
1039 SDVO_COLORIMETRY_RGB256);
1040 psb_intel_sdvo_set_avi_infoframe(psb_intel_sdvo);
1041 } else
1042 psb_intel_sdvo_set_encode(psb_intel_sdvo, SDVO_ENCODE_DVI);
1043
1044 if (psb_intel_sdvo->is_tv &&
1045 !psb_intel_sdvo_set_tv_format(psb_intel_sdvo))
1046 return;
1047
1048 (void) psb_intel_sdvo_set_input_timing(psb_intel_sdvo, &input_dtd);
1049
1050 switch (pixel_multiplier) {
1051 default:
1052 case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
1053 case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break;
1054 case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break;
1055 }
1056 if (!psb_intel_sdvo_set_clock_rate_mult(psb_intel_sdvo, rate))
1057 return;
1058
1059 /* Set the SDVO control regs. */
1060 sdvox = REG_READ(psb_intel_sdvo->sdvo_reg);
1061 switch (psb_intel_sdvo->sdvo_reg) {
1062 case SDVOB:
1063 sdvox &= SDVOB_PRESERVE_MASK;
1064 break;
1065 case SDVOC:
1066 sdvox &= SDVOC_PRESERVE_MASK;
1067 break;
1068 }
1069 sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
1070
1071 if (psb_intel_crtc->pipe == 1)
1072 sdvox |= SDVO_PIPE_B_SELECT;
1073 if (psb_intel_sdvo->has_hdmi_audio)
1074 sdvox |= SDVO_AUDIO_ENABLE;
1075
1076 /* FIXME: Check if this is needed for PSB
1077 sdvox |= (pixel_multiplier - 1) << SDVO_PORT_MULTIPLY_SHIFT;
1078 */
1079
1080 if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL)
1081 sdvox |= SDVO_STALL_SELECT;
1082 psb_intel_sdvo_write_sdvox(psb_intel_sdvo, sdvox);
1083}
1084
1085static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
1086{
1087 struct drm_device *dev = encoder->dev;
1088 struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
1089 u32 temp;
1090
1091 switch (mode) {
1092 case DRM_MODE_DPMS_ON:
1093 DRM_DEBUG("DPMS_ON");
1094 break;
1095 case DRM_MODE_DPMS_OFF:
1096 DRM_DEBUG("DPMS_OFF");
1097 break;
1098 default:
1099 DRM_DEBUG("DPMS: %d", mode);
1100 }
1101
1102 if (mode != DRM_MODE_DPMS_ON) {
1103 psb_intel_sdvo_set_active_outputs(psb_intel_sdvo, 0);
1104 if (0)
1105 psb_intel_sdvo_set_encoder_power_state(psb_intel_sdvo, mode);
1106
1107 if (mode == DRM_MODE_DPMS_OFF) {
1108 temp = REG_READ(psb_intel_sdvo->sdvo_reg);
1109 if ((temp & SDVO_ENABLE) != 0) {
1110 psb_intel_sdvo_write_sdvox(psb_intel_sdvo, temp & ~SDVO_ENABLE);
1111 }
1112 }
1113 } else {
1114 bool input1, input2;
1115 int i;
1116		bool success;	/* get_trained_inputs() returns a bool, not a status byte */
1117
1118 temp = REG_READ(psb_intel_sdvo->sdvo_reg);
1119 if ((temp & SDVO_ENABLE) == 0)
1120 psb_intel_sdvo_write_sdvox(psb_intel_sdvo, temp | SDVO_ENABLE);
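		/* Give the encoder a couple of frames to train its inputs
		 * before polling the trained-input status below. */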
1121 for (i = 0; i < 2; i++)
1122 psb_intel_wait_for_vblank(dev);
1123
1124		success = psb_intel_sdvo_get_trained_inputs(psb_intel_sdvo, &input1, &input2);
1125		/* Warn if the device reported failure to sync.
1126		 * A lot of SDVO devices fail to notify of sync, but it's
1127		 * a given that if the status is a success, we succeeded.
1128		 */
1129		if (success && !input1) {
1130 DRM_DEBUG_KMS("First %s output reported failure to "
1131 "sync\n", SDVO_NAME(psb_intel_sdvo));
1132 }
1133
1134 if (0)
1135 psb_intel_sdvo_set_encoder_power_state(psb_intel_sdvo, mode);
1136 psb_intel_sdvo_set_active_outputs(psb_intel_sdvo, psb_intel_sdvo->attached_output);
1137 }
1139}
1140
1141static int psb_intel_sdvo_mode_valid(struct drm_connector *connector,
1142 struct drm_display_mode *mode)
1143{
1144 struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
1145
1146 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
1147 return MODE_NO_DBLESCAN;
1148
1149 if (psb_intel_sdvo->pixel_clock_min > mode->clock)
1150 return MODE_CLOCK_LOW;
1151
1152 if (psb_intel_sdvo->pixel_clock_max < mode->clock)
1153 return MODE_CLOCK_HIGH;
1154
1155 if (psb_intel_sdvo->is_lvds) {
1156 if (mode->hdisplay > psb_intel_sdvo->sdvo_lvds_fixed_mode->hdisplay)
1157 return MODE_PANEL;
1158
1159 if (mode->vdisplay > psb_intel_sdvo->sdvo_lvds_fixed_mode->vdisplay)
1160 return MODE_PANEL;
1161 }
1162
1163 return MODE_OK;
1164}
1165
1166static bool psb_intel_sdvo_get_capabilities(struct psb_intel_sdvo *psb_intel_sdvo, struct psb_intel_sdvo_caps *caps)
1167{
1168 BUILD_BUG_ON(sizeof(*caps) != 8);
1169 if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
1170 SDVO_CMD_GET_DEVICE_CAPS,
1171 caps, sizeof(*caps)))
1172 return false;
1173
1174 DRM_DEBUG_KMS("SDVO capabilities:\n"
1175 " vendor_id: %d\n"
1176 " device_id: %d\n"
1177 " device_rev_id: %d\n"
1178 " sdvo_version_major: %d\n"
1179 " sdvo_version_minor: %d\n"
1180 " sdvo_inputs_mask: %d\n"
1181 " smooth_scaling: %d\n"
1182 " sharp_scaling: %d\n"
1183 " up_scaling: %d\n"
1184 " down_scaling: %d\n"
1185 " stall_support: %d\n"
1186 " output_flags: %d\n",
1187 caps->vendor_id,
1188 caps->device_id,
1189 caps->device_rev_id,
1190 caps->sdvo_version_major,
1191 caps->sdvo_version_minor,
1192 caps->sdvo_inputs_mask,
1193 caps->smooth_scaling,
1194 caps->sharp_scaling,
1195 caps->up_scaling,
1196 caps->down_scaling,
1197 caps->stall_support,
1198 caps->output_flags);
1199
1200 return true;
1201}
1202
1203/* No use! */
1204#if 0
1205struct drm_connector* psb_intel_sdvo_find(struct drm_device *dev, int sdvoB)
1206{
1207 struct drm_connector *connector = NULL;
1208 struct psb_intel_sdvo *iout = NULL;
1209 struct psb_intel_sdvo *sdvo;
1210
1211 /* find the sdvo connector */
1212 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1213 iout = to_psb_intel_sdvo(connector);
1214
1215 if (iout->type != INTEL_OUTPUT_SDVO)
1216 continue;
1217
1218 sdvo = iout->dev_priv;
1219
1220 if (sdvo->sdvo_reg == SDVOB && sdvoB)
1221 return connector;
1222
1223 if (sdvo->sdvo_reg == SDVOC && !sdvoB)
1224 return connector;
1225
1226 }
1227
1228 return NULL;
1229}
1230
1231int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector)
1232{
1233 u8 response[2];
1234 u8 status;
1235 struct psb_intel_sdvo *psb_intel_sdvo;
1236 DRM_DEBUG_KMS("\n");
1237
1238 if (!connector)
1239 return 0;
1240
1241 psb_intel_sdvo = to_psb_intel_sdvo(connector);
1242
1243 return psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
1244 &response, 2) && response[0];
1245}
1246
1247void psb_intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
1248{
1249 u8 response[2];
1250 u8 status;
1251 struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(connector);
1252
1253 psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
1254 psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2);
1255
1256 if (on) {
1257 psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
1258 status = psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2);
1259
1260 psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
1261 } else {
1262 response[0] = 0;
1263 response[1] = 0;
1264 psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
1265 }
1266
1267 psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
1268 psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2);
1269}
1270#endif
1271
1272static bool
1273psb_intel_sdvo_multifunc_encoder(struct psb_intel_sdvo *psb_intel_sdvo)
1274{
1275	/* Is there more than one type of output? caps & (caps - 1) is non-zero iff more than one bit is set. */
1276	int caps = psb_intel_sdvo->caps.output_flags & 0xf;
1277	return caps & (caps - 1);
1278}
1279
1280static struct edid *
1281psb_intel_sdvo_get_edid(struct drm_connector *connector)
1282{
1283 struct psb_intel_sdvo *sdvo = intel_attached_sdvo(connector);
1284 return drm_get_edid(connector, &sdvo->ddc);
1285}
1286
1287/* Mac mini hack -- use the same DDC as the analog connector */
1288static struct edid *
1289psb_intel_sdvo_get_analog_edid(struct drm_connector *connector)
1290{
1291 struct drm_psb_private *dev_priv = connector->dev->dev_private;
1292
1293	return drm_get_edid(connector,
1294			    &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
1296}
1297
1298enum drm_connector_status
1299psb_intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
1300{
1301 struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
1302 enum drm_connector_status status;
1303 struct edid *edid;
1304
1305 edid = psb_intel_sdvo_get_edid(connector);
1306
1307 if (edid == NULL && psb_intel_sdvo_multifunc_encoder(psb_intel_sdvo)) {
1308 u8 ddc, saved_ddc = psb_intel_sdvo->ddc_bus;
1309
1310 /*
1311 * Don't use the 1 as the argument of DDC bus switch to get
1312 * the EDID. It is used for SDVO SPD ROM.
1313 */
1314 for (ddc = psb_intel_sdvo->ddc_bus >> 1; ddc > 1; ddc >>= 1) {
1315 psb_intel_sdvo->ddc_bus = ddc;
1316 edid = psb_intel_sdvo_get_edid(connector);
1317 if (edid)
1318 break;
1319 }
1320 /*
1321 * If we found the EDID on the other bus,
1322 * assume that is the correct DDC bus.
1323 */
1324 if (edid == NULL)
1325 psb_intel_sdvo->ddc_bus = saved_ddc;
1326 }
1327
1328 /*
1329	 * When there is no EDID and no monitor is connected to the VGA
1330	 * port, try the CRT DDC to read the EDID for the DVI connector.
1331 */
1332 if (edid == NULL)
1333 edid = psb_intel_sdvo_get_analog_edid(connector);
1334
1335 status = connector_status_unknown;
1336 if (edid != NULL) {
1337 /* DDC bus is shared, match EDID to connector type */
1338 if (edid->input & DRM_EDID_INPUT_DIGITAL) {
1339 status = connector_status_connected;
1340 if (psb_intel_sdvo->is_hdmi) {
1341 psb_intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
1342 psb_intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
1343 }
1344 } else
1345 status = connector_status_disconnected;
1346 connector->display_info.raw_edid = NULL;
1347 kfree(edid);
1348 }
1349
1350 if (status == connector_status_connected) {
1351 struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
1352 if (psb_intel_sdvo_connector->force_audio)
1353 psb_intel_sdvo->has_hdmi_audio = psb_intel_sdvo_connector->force_audio > 0;
1354 }
1355
1356 return status;
1357}
1358
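/* Full detect pass: ask the device which outputs have an attached
 * display, then cross-check the reply against this connector's output
 * flag and, for TMDS connectors, against the EDID. */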
1359static enum drm_connector_status
1360psb_intel_sdvo_detect(struct drm_connector *connector, bool force)
1361{
1362 uint16_t response;
1363 struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
1364 struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
1365 enum drm_connector_status ret;
1366
1367 if (!psb_intel_sdvo_write_cmd(psb_intel_sdvo,
1368 SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0))
1369 return connector_status_unknown;
1370
1371 /* add 30ms delay when the output type might be TV */
1372 if (psb_intel_sdvo->caps.output_flags &
1373 (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0))
1374 mdelay(30);
1375
1376 if (!psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2))
1377 return connector_status_unknown;
1378
1379 DRM_DEBUG_KMS("SDVO response %d %d [%x]\n",
1380 response & 0xff, response >> 8,
1381 psb_intel_sdvo_connector->output_flag);
1382
1383 if (response == 0)
1384 return connector_status_disconnected;
1385
1386 psb_intel_sdvo->attached_output = response;
1387
1388 psb_intel_sdvo->has_hdmi_monitor = false;
1389 psb_intel_sdvo->has_hdmi_audio = false;
1390
1391 if ((psb_intel_sdvo_connector->output_flag & response) == 0)
1392 ret = connector_status_disconnected;
1393 else if (IS_TMDS(psb_intel_sdvo_connector))
1394 ret = psb_intel_sdvo_hdmi_sink_detect(connector);
1395 else {
1396 struct edid *edid;
1397
1398 /* if we have an edid check it matches the connection */
1399 edid = psb_intel_sdvo_get_edid(connector);
1400 if (edid == NULL)
1401 edid = psb_intel_sdvo_get_analog_edid(connector);
1402 if (edid != NULL) {
1403 if (edid->input & DRM_EDID_INPUT_DIGITAL)
1404 ret = connector_status_disconnected;
1405 else
1406 ret = connector_status_connected;
1407 connector->display_info.raw_edid = NULL;
1408 kfree(edid);
1409 } else
1410 ret = connector_status_connected;
1411 }
1412
1413	/* May need to update encoder flags, e.g. the clock setting for SDVO TV. */
1414 if (ret == connector_status_connected) {
1415 psb_intel_sdvo->is_tv = false;
1416 psb_intel_sdvo->is_lvds = false;
1417 psb_intel_sdvo->base.needs_tv_clock = false;
1418
1419 if (response & SDVO_TV_MASK) {
1420 psb_intel_sdvo->is_tv = true;
1421 psb_intel_sdvo->base.needs_tv_clock = true;
1422 }
1423 if (response & SDVO_LVDS_MASK)
1424 psb_intel_sdvo->is_lvds = psb_intel_sdvo->sdvo_lvds_fixed_mode != NULL;
1425 }
1426
1427 return ret;
1428}
1429
1430static void psb_intel_sdvo_get_ddc_modes(struct drm_connector *connector)
1431{
1432 struct edid *edid;
1433
1434 /* set the bus switch and get the modes */
1435 edid = psb_intel_sdvo_get_edid(connector);
1436
1437 /*
1438 * Mac mini hack. On this device, the DVI-I connector shares one DDC
1439 * link between analog and digital outputs. So, if the regular SDVO
1440 * DDC fails, check to see if the analog output is disconnected, in
1441 * which case we'll look there for the digital DDC data.
1442 */
1443 if (edid == NULL)
1444 edid = psb_intel_sdvo_get_analog_edid(connector);
1445
1446 if (edid != NULL) {
1447 struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
1448 bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
1449 bool connector_is_digital = !!IS_TMDS(psb_intel_sdvo_connector);
1450
1451 if (connector_is_digital == monitor_is_digital) {
1452 drm_mode_connector_update_edid_property(connector, edid);
1453 drm_add_edid_modes(connector, edid);
1454 }
1455
1456 connector->display_info.raw_edid = NULL;
1457 kfree(edid);
1458 }
1459}
1460
1461/*
1462 * Set of SDVO TV modes.
1463 * Note! This is in reply order (see loop in get_tv_modes).
1464 * XXX: all 60Hz refresh?
1465 */
1466static const struct drm_display_mode sdvo_tv_modes[] = {
1467 { DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 5815, 320, 321, 384,
1468 416, 0, 200, 201, 232, 233, 0,
1469 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1470 { DRM_MODE("320x240", DRM_MODE_TYPE_DRIVER, 6814, 320, 321, 384,
1471 416, 0, 240, 241, 272, 273, 0,
1472 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1473 { DRM_MODE("400x300", DRM_MODE_TYPE_DRIVER, 9910, 400, 401, 464,
1474 496, 0, 300, 301, 332, 333, 0,
1475 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1476 { DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 16913, 640, 641, 704,
1477 736, 0, 350, 351, 382, 383, 0,
1478 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1479 { DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 19121, 640, 641, 704,
1480 736, 0, 400, 401, 432, 433, 0,
1481 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1482 { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 22654, 640, 641, 704,
1483 736, 0, 480, 481, 512, 513, 0,
1484 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1485 { DRM_MODE("704x480", DRM_MODE_TYPE_DRIVER, 24624, 704, 705, 768,
1486 800, 0, 480, 481, 512, 513, 0,
1487 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1488 { DRM_MODE("704x576", DRM_MODE_TYPE_DRIVER, 29232, 704, 705, 768,
1489 800, 0, 576, 577, 608, 609, 0,
1490 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1491 { DRM_MODE("720x350", DRM_MODE_TYPE_DRIVER, 18751, 720, 721, 784,
1492 816, 0, 350, 351, 382, 383, 0,
1493 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1494 { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 21199, 720, 721, 784,
1495 816, 0, 400, 401, 432, 433, 0,
1496 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1497 { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 25116, 720, 721, 784,
1498 816, 0, 480, 481, 512, 513, 0,
1499 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1500 { DRM_MODE("720x540", DRM_MODE_TYPE_DRIVER, 28054, 720, 721, 784,
1501 816, 0, 540, 541, 572, 573, 0,
1502 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1503 { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 29816, 720, 721, 784,
1504 816, 0, 576, 577, 608, 609, 0,
1505 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1506 { DRM_MODE("768x576", DRM_MODE_TYPE_DRIVER, 31570, 768, 769, 832,
1507 864, 0, 576, 577, 608, 609, 0,
1508 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1509 { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 34030, 800, 801, 864,
1510 896, 0, 600, 601, 632, 633, 0,
1511 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1512 { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 36581, 832, 833, 896,
1513 928, 0, 624, 625, 656, 657, 0,
1514 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1515 { DRM_MODE("920x766", DRM_MODE_TYPE_DRIVER, 48707, 920, 921, 984,
1516 1016, 0, 766, 767, 798, 799, 0,
1517 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1518 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 53827, 1024, 1025, 1088,
1519 1120, 0, 768, 769, 800, 801, 0,
1520 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1521 { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 87265, 1280, 1281, 1344,
1522 1376, 0, 1024, 1025, 1056, 1057, 0,
1523 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1524};
1525
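/* The device answers GET_SDTV_RESOLUTION_SUPPORT with a 3-byte
 * bitmask; bit i set means sdvo_tv_modes[i] above is usable with the
 * current TV format. */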
1526static void psb_intel_sdvo_get_tv_modes(struct drm_connector *connector)
1527{
1528 struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
1529 struct psb_intel_sdvo_sdtv_resolution_request tv_res;
1530 uint32_t reply = 0, format_map = 0;
1531 int i;
1532
1533 /* Read the list of supported input resolutions for the selected TV
1534 * format.
1535 */
1536 format_map = 1 << psb_intel_sdvo->tv_format_index;
1537 memcpy(&tv_res, &format_map,
1538 min(sizeof(format_map), sizeof(struct psb_intel_sdvo_sdtv_resolution_request)));
1539
1540 if (!psb_intel_sdvo_set_target_output(psb_intel_sdvo, psb_intel_sdvo->attached_output))
1541 return;
1542
1543 BUILD_BUG_ON(sizeof(tv_res) != 3);
1544 if (!psb_intel_sdvo_write_cmd(psb_intel_sdvo,
1545 SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
1546 &tv_res, sizeof(tv_res)))
1547 return;
1548 if (!psb_intel_sdvo_read_response(psb_intel_sdvo, &reply, 3))
1549 return;
1550
1551 for (i = 0; i < ARRAY_SIZE(sdvo_tv_modes); i++)
1552 if (reply & (1 << i)) {
1553 struct drm_display_mode *nmode;
1554 nmode = drm_mode_duplicate(connector->dev,
1555 &sdvo_tv_modes[i]);
1556 if (nmode)
1557 drm_mode_probed_add(connector, nmode);
1558 }
1559}
1560
1561static void psb_intel_sdvo_get_lvds_modes(struct drm_connector *connector)
1562{
1563 struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
1564 struct drm_psb_private *dev_priv = connector->dev->dev_private;
1565 struct drm_display_mode *newmode;
1566
1567 /*
1568 * Attempt to get the mode list from DDC.
1569 * Assume that the preferred modes are
1570 * arranged in priority order.
1571 */
1572 psb_intel_ddc_get_modes(connector, psb_intel_sdvo->i2c);
1573	if (!list_empty(&connector->probed_modes))
1574 goto end;
1575
1576 /* Fetch modes from VBT */
1577 if (dev_priv->sdvo_lvds_vbt_mode != NULL) {
1578 newmode = drm_mode_duplicate(connector->dev,
1579 dev_priv->sdvo_lvds_vbt_mode);
1580 if (newmode != NULL) {
1581 /* Guarantee the mode is preferred */
1582 newmode->type = (DRM_MODE_TYPE_PREFERRED |
1583 DRM_MODE_TYPE_DRIVER);
1584 drm_mode_probed_add(connector, newmode);
1585 }
1586 }
1587
1588end:
1589 list_for_each_entry(newmode, &connector->probed_modes, head) {
1590 if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
1591 psb_intel_sdvo->sdvo_lvds_fixed_mode =
1592 drm_mode_duplicate(connector->dev, newmode);
1593
1594 drm_mode_set_crtcinfo(psb_intel_sdvo->sdvo_lvds_fixed_mode,
1595 0);
1596
1597 psb_intel_sdvo->is_lvds = true;
1598 break;
1599 }
1600 }
1602}
1603
1604static int psb_intel_sdvo_get_modes(struct drm_connector *connector)
1605{
1606 struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
1607
1608 if (IS_TV(psb_intel_sdvo_connector))
1609 psb_intel_sdvo_get_tv_modes(connector);
1610 else if (IS_LVDS(psb_intel_sdvo_connector))
1611 psb_intel_sdvo_get_lvds_modes(connector);
1612 else
1613 psb_intel_sdvo_get_ddc_modes(connector);
1614
1615 return !list_empty(&connector->probed_modes);
1616}
1617
1618static void
1619psb_intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
1620{
1621 struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
1622 struct drm_device *dev = connector->dev;
1623
1624 if (psb_intel_sdvo_connector->left)
1625 drm_property_destroy(dev, psb_intel_sdvo_connector->left);
1626 if (psb_intel_sdvo_connector->right)
1627 drm_property_destroy(dev, psb_intel_sdvo_connector->right);
1628 if (psb_intel_sdvo_connector->top)
1629 drm_property_destroy(dev, psb_intel_sdvo_connector->top);
1630 if (psb_intel_sdvo_connector->bottom)
1631 drm_property_destroy(dev, psb_intel_sdvo_connector->bottom);
1632 if (psb_intel_sdvo_connector->hpos)
1633 drm_property_destroy(dev, psb_intel_sdvo_connector->hpos);
1634 if (psb_intel_sdvo_connector->vpos)
1635 drm_property_destroy(dev, psb_intel_sdvo_connector->vpos);
1636 if (psb_intel_sdvo_connector->saturation)
1637 drm_property_destroy(dev, psb_intel_sdvo_connector->saturation);
1638 if (psb_intel_sdvo_connector->contrast)
1639 drm_property_destroy(dev, psb_intel_sdvo_connector->contrast);
1640 if (psb_intel_sdvo_connector->hue)
1641 drm_property_destroy(dev, psb_intel_sdvo_connector->hue);
1642 if (psb_intel_sdvo_connector->sharpness)
1643 drm_property_destroy(dev, psb_intel_sdvo_connector->sharpness);
1644 if (psb_intel_sdvo_connector->flicker_filter)
1645 drm_property_destroy(dev, psb_intel_sdvo_connector->flicker_filter);
1646 if (psb_intel_sdvo_connector->flicker_filter_2d)
1647 drm_property_destroy(dev, psb_intel_sdvo_connector->flicker_filter_2d);
1648 if (psb_intel_sdvo_connector->flicker_filter_adaptive)
1649 drm_property_destroy(dev, psb_intel_sdvo_connector->flicker_filter_adaptive);
1650 if (psb_intel_sdvo_connector->tv_luma_filter)
1651 drm_property_destroy(dev, psb_intel_sdvo_connector->tv_luma_filter);
1652 if (psb_intel_sdvo_connector->tv_chroma_filter)
1653 drm_property_destroy(dev, psb_intel_sdvo_connector->tv_chroma_filter);
1654 if (psb_intel_sdvo_connector->dot_crawl)
1655 drm_property_destroy(dev, psb_intel_sdvo_connector->dot_crawl);
1656 if (psb_intel_sdvo_connector->brightness)
1657 drm_property_destroy(dev, psb_intel_sdvo_connector->brightness);
1658}
1659
1660static void psb_intel_sdvo_destroy(struct drm_connector *connector)
1661{
1662 struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
1663
1664 if (psb_intel_sdvo_connector->tv_format)
1665 drm_property_destroy(connector->dev,
1666 psb_intel_sdvo_connector->tv_format);
1667
1668 psb_intel_sdvo_destroy_enhance_property(connector);
1669 drm_sysfs_connector_remove(connector);
1670 drm_connector_cleanup(connector);
1671 kfree(connector);
1672}
1673
1674static bool psb_intel_sdvo_detect_hdmi_audio(struct drm_connector *connector)
1675{
1676 struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
1677 struct edid *edid;
1678 bool has_audio = false;
1679
1680 if (!psb_intel_sdvo->is_hdmi)
1681 return false;
1682
1683 edid = psb_intel_sdvo_get_edid(connector);
1684 if (edid != NULL && edid->input & DRM_EDID_INPUT_DIGITAL)
1685 has_audio = drm_detect_monitor_audio(edid);
1686
1687 return has_audio;
1688}
1689
1690static int
1691psb_intel_sdvo_set_property(struct drm_connector *connector,
1692 struct drm_property *property,
1693 uint64_t val)
1694{
1695 struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
1696 struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
1697 struct drm_psb_private *dev_priv = connector->dev->dev_private;
1698 uint16_t temp_value;
1699 uint8_t cmd;
1700 int ret;
1701
1702 ret = drm_connector_property_set_value(connector, property, val);
1703 if (ret)
1704 return ret;
1705
1706 if (property == dev_priv->force_audio_property) {
1707 int i = val;
1708 bool has_audio;
1709
1710 if (i == psb_intel_sdvo_connector->force_audio)
1711 return 0;
1712
1713 psb_intel_sdvo_connector->force_audio = i;
1714
1715 if (i == 0)
1716 has_audio = psb_intel_sdvo_detect_hdmi_audio(connector);
1717 else
1718 has_audio = i > 0;
1719
1720 if (has_audio == psb_intel_sdvo->has_hdmi_audio)
1721 return 0;
1722
1723 psb_intel_sdvo->has_hdmi_audio = has_audio;
1724 goto done;
1725 }
1726
1727 if (property == dev_priv->broadcast_rgb_property) {
1728 if (val == !!psb_intel_sdvo->color_range)
1729 return 0;
1730
1731 psb_intel_sdvo->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0;
1732 goto done;
1733 }
1734
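/* For each simple enhancement property: ignore writes that match the
 * cached value, range-check against the device-reported maximum, then
 * jump to set_value with the matching SDVO SET command. */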
1735#define CHECK_PROPERTY(name, NAME) \
1736 if (psb_intel_sdvo_connector->name == property) { \
1737 if (psb_intel_sdvo_connector->cur_##name == temp_value) return 0; \
1738 if (psb_intel_sdvo_connector->max_##name < temp_value) return -EINVAL; \
1739 cmd = SDVO_CMD_SET_##NAME; \
1740 psb_intel_sdvo_connector->cur_##name = temp_value; \
1741 goto set_value; \
1742 }
1743
1744 if (property == psb_intel_sdvo_connector->tv_format) {
1745 if (val >= TV_FORMAT_NUM)
1746 return -EINVAL;
1747
1748 if (psb_intel_sdvo->tv_format_index ==
1749 psb_intel_sdvo_connector->tv_format_supported[val])
1750 return 0;
1751
1752 psb_intel_sdvo->tv_format_index = psb_intel_sdvo_connector->tv_format_supported[val];
1753 goto done;
1754 } else if (IS_TV_OR_LVDS(psb_intel_sdvo_connector)) {
1755 temp_value = val;
1756 if (psb_intel_sdvo_connector->left == property) {
1757 drm_connector_property_set_value(connector,
1758 psb_intel_sdvo_connector->right, val);
1759 if (psb_intel_sdvo_connector->left_margin == temp_value)
1760 return 0;
1761
1762 psb_intel_sdvo_connector->left_margin = temp_value;
1763 psb_intel_sdvo_connector->right_margin = temp_value;
1764 temp_value = psb_intel_sdvo_connector->max_hscan -
1765 psb_intel_sdvo_connector->left_margin;
1766 cmd = SDVO_CMD_SET_OVERSCAN_H;
1767 goto set_value;
1768 } else if (psb_intel_sdvo_connector->right == property) {
1769 drm_connector_property_set_value(connector,
1770 psb_intel_sdvo_connector->left, val);
1771 if (psb_intel_sdvo_connector->right_margin == temp_value)
1772 return 0;
1773
1774 psb_intel_sdvo_connector->left_margin = temp_value;
1775 psb_intel_sdvo_connector->right_margin = temp_value;
1776 temp_value = psb_intel_sdvo_connector->max_hscan -
1777 psb_intel_sdvo_connector->left_margin;
1778 cmd = SDVO_CMD_SET_OVERSCAN_H;
1779 goto set_value;
1780 } else if (psb_intel_sdvo_connector->top == property) {
1781 drm_connector_property_set_value(connector,
1782 psb_intel_sdvo_connector->bottom, val);
1783 if (psb_intel_sdvo_connector->top_margin == temp_value)
1784 return 0;
1785
1786 psb_intel_sdvo_connector->top_margin = temp_value;
1787 psb_intel_sdvo_connector->bottom_margin = temp_value;
1788 temp_value = psb_intel_sdvo_connector->max_vscan -
1789 psb_intel_sdvo_connector->top_margin;
1790 cmd = SDVO_CMD_SET_OVERSCAN_V;
1791 goto set_value;
1792 } else if (psb_intel_sdvo_connector->bottom == property) {
1793 drm_connector_property_set_value(connector,
1794 psb_intel_sdvo_connector->top, val);
1795 if (psb_intel_sdvo_connector->bottom_margin == temp_value)
1796 return 0;
1797
1798 psb_intel_sdvo_connector->top_margin = temp_value;
1799 psb_intel_sdvo_connector->bottom_margin = temp_value;
1800 temp_value = psb_intel_sdvo_connector->max_vscan -
1801 psb_intel_sdvo_connector->top_margin;
1802 cmd = SDVO_CMD_SET_OVERSCAN_V;
1803 goto set_value;
1804 }
1805 CHECK_PROPERTY(hpos, HPOS)
1806 CHECK_PROPERTY(vpos, VPOS)
1807 CHECK_PROPERTY(saturation, SATURATION)
1808 CHECK_PROPERTY(contrast, CONTRAST)
1809 CHECK_PROPERTY(hue, HUE)
1810 CHECK_PROPERTY(brightness, BRIGHTNESS)
1811 CHECK_PROPERTY(sharpness, SHARPNESS)
1812 CHECK_PROPERTY(flicker_filter, FLICKER_FILTER)
1813 CHECK_PROPERTY(flicker_filter_2d, FLICKER_FILTER_2D)
1814 CHECK_PROPERTY(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE)
1815 CHECK_PROPERTY(tv_chroma_filter, TV_CHROMA_FILTER)
1816 CHECK_PROPERTY(tv_luma_filter, TV_LUMA_FILTER)
1817 CHECK_PROPERTY(dot_crawl, DOT_CRAWL)
1818 }
1819
1820 return -EINVAL; /* unknown property */
1821
1822set_value:
1823 if (!psb_intel_sdvo_set_value(psb_intel_sdvo, cmd, &temp_value, 2))
1824 return -EIO;
1825
1827done:
1828 if (psb_intel_sdvo->base.base.crtc) {
1829 struct drm_crtc *crtc = psb_intel_sdvo->base.base.crtc;
1830 drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
1831 crtc->y, crtc->fb);
1832 }
1833
1834 return 0;
1835#undef CHECK_PROPERTY
1836}
1837
1838static const struct drm_encoder_helper_funcs psb_intel_sdvo_helper_funcs = {
1839 .dpms = psb_intel_sdvo_dpms,
1840 .mode_fixup = psb_intel_sdvo_mode_fixup,
1841 .prepare = psb_intel_encoder_prepare,
1842 .mode_set = psb_intel_sdvo_mode_set,
1843 .commit = psb_intel_encoder_commit,
1844};
1845
1846static const struct drm_connector_funcs psb_intel_sdvo_connector_funcs = {
1847 .dpms = drm_helper_connector_dpms,
1848 .detect = psb_intel_sdvo_detect,
1849 .fill_modes = drm_helper_probe_single_connector_modes,
1850 .set_property = psb_intel_sdvo_set_property,
1851 .destroy = psb_intel_sdvo_destroy,
1852};
1853
1854static const struct drm_connector_helper_funcs psb_intel_sdvo_connector_helper_funcs = {
1855 .get_modes = psb_intel_sdvo_get_modes,
1856 .mode_valid = psb_intel_sdvo_mode_valid,
1857 .best_encoder = psb_intel_best_encoder,
1858};
1859
1860static void psb_intel_sdvo_enc_destroy(struct drm_encoder *encoder)
1861{
1862 struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
1863
1864 if (psb_intel_sdvo->sdvo_lvds_fixed_mode != NULL)
1865 drm_mode_destroy(encoder->dev,
1866 psb_intel_sdvo->sdvo_lvds_fixed_mode);
1867
1868 i2c_del_adapter(&psb_intel_sdvo->ddc);
1869 psb_intel_encoder_destroy(encoder);
1870}
1871
1872static const struct drm_encoder_funcs psb_intel_sdvo_enc_funcs = {
1873 .destroy = psb_intel_sdvo_enc_destroy,
1874};
1875
1876static void
1877psb_intel_sdvo_guess_ddc_bus(struct psb_intel_sdvo *sdvo)
1878{
1879	/* FIXME: At the moment, ddc_bus = 2 is the only thing that works.
1880	 * We need to figure out if this is true for all available Poulsbo
1881	 * hardware, or if we need to fiddle with the guessing code below,
1882	 * currently #if 0'd out. Parsing SDVO mappings from the BIOS might make this moot. */
1883 sdvo->ddc_bus = 2;
1884
1885#if 0
1886 uint16_t mask = 0;
1887 unsigned int num_bits;
1888
1889 /* Make a mask of outputs less than or equal to our own priority in the
1890 * list.
1891 */
1892 switch (sdvo->controlled_output) {
1893 case SDVO_OUTPUT_LVDS1:
1894 mask |= SDVO_OUTPUT_LVDS1;
1895 case SDVO_OUTPUT_LVDS0:
1896 mask |= SDVO_OUTPUT_LVDS0;
1897 case SDVO_OUTPUT_TMDS1:
1898 mask |= SDVO_OUTPUT_TMDS1;
1899 case SDVO_OUTPUT_TMDS0:
1900 mask |= SDVO_OUTPUT_TMDS0;
1901 case SDVO_OUTPUT_RGB1:
1902 mask |= SDVO_OUTPUT_RGB1;
1903 case SDVO_OUTPUT_RGB0:
1904 mask |= SDVO_OUTPUT_RGB0;
1905 break;
1906 }
1907
1908 /* Count bits to find what number we are in the priority list. */
1909 mask &= sdvo->caps.output_flags;
1910 num_bits = hweight16(mask);
1911 /* If more than 3 outputs, default to DDC bus 3 for now. */
1912 if (num_bits > 3)
1913 num_bits = 3;
1914
1915 /* Corresponds to SDVO_CONTROL_BUS_DDCx */
1916 sdvo->ddc_bus = 1 << num_bits;
1917#endif
1918}
1919
1920/**
1921 * Choose the appropriate DDC bus for the control bus switch command for this
1922 * SDVO output based on the controlled output.
1923 *
1924 * DDC bus number assignment is in a priority order of RGB outputs, then TMDS
1925 * outputs, then LVDS outputs.
1926 */
1927static void
1928psb_intel_sdvo_select_ddc_bus(struct drm_psb_private *dev_priv,
1929 struct psb_intel_sdvo *sdvo, u32 reg)
1930{
1931 struct sdvo_device_mapping *mapping;
1932
1933 if (IS_SDVOB(reg))
1934 mapping = &(dev_priv->sdvo_mappings[0]);
1935 else
1936 mapping = &(dev_priv->sdvo_mappings[1]);
1937
1938 if (mapping->initialized)
1939 sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4);
1940 else
1941 psb_intel_sdvo_guess_ddc_bus(sdvo);
1942}
1943
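/* Pick the GMBUS pin and speed for talking to the SDVO encoder
 * itself; the DDC bus chosen above is what the encoder bridges
 * through to the monitor. */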
1944static void
1945psb_intel_sdvo_select_i2c_bus(struct drm_psb_private *dev_priv,
1946 struct psb_intel_sdvo *sdvo, u32 reg)
1947{
1948 struct sdvo_device_mapping *mapping;
1949 u8 pin, speed;
1950
1951 if (IS_SDVOB(reg))
1952 mapping = &dev_priv->sdvo_mappings[0];
1953 else
1954 mapping = &dev_priv->sdvo_mappings[1];
1955
1956 pin = GMBUS_PORT_DPB;
1957 speed = GMBUS_RATE_1MHZ >> 8;
1958 if (mapping->initialized) {
1959 pin = mapping->i2c_pin;
1960 speed = mapping->i2c_speed;
1961 }
1962
1963 if (pin < GMBUS_NUM_PORTS) {
1964 sdvo->i2c = &dev_priv->gmbus[pin].adapter;
1965 gma_intel_gmbus_set_speed(sdvo->i2c, speed);
1966 gma_intel_gmbus_force_bit(sdvo->i2c, true);
1967 } else
1968 sdvo->i2c = &dev_priv->gmbus[GMBUS_PORT_DPB].adapter;
1969}
1970
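/* HDMI capability is inferred solely from SDVO_CMD_GET_SUPP_ENCODE
 * succeeding; the reply contents and the device index are ignored. */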
1971static bool
1972psb_intel_sdvo_is_hdmi_connector(struct psb_intel_sdvo *psb_intel_sdvo, int device)
1973{
1974 return psb_intel_sdvo_check_supp_encode(psb_intel_sdvo);
1975}
1976
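/* SDVO devices answer at one of two fixed I2C addresses (0x70 or
 * 0x72); the BIOS VBT mapping normally says which one each port uses. */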
1977static u8
1978psb_intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
1979{
1980 struct drm_psb_private *dev_priv = dev->dev_private;
1981 struct sdvo_device_mapping *my_mapping, *other_mapping;
1982
1983 if (IS_SDVOB(sdvo_reg)) {
1984 my_mapping = &dev_priv->sdvo_mappings[0];
1985 other_mapping = &dev_priv->sdvo_mappings[1];
1986 } else {
1987 my_mapping = &dev_priv->sdvo_mappings[1];
1988 other_mapping = &dev_priv->sdvo_mappings[0];
1989 }
1990
1991 /* If the BIOS described our SDVO device, take advantage of it. */
1992 if (my_mapping->slave_addr)
1993 return my_mapping->slave_addr;
1994
1995 /* If the BIOS only described a different SDVO device, use the
1996 * address that it isn't using.
1997 */
1998 if (other_mapping->slave_addr) {
1999 if (other_mapping->slave_addr == 0x70)
2000 return 0x72;
2001 else
2002 return 0x70;
2003 }
2004
2005	/* No SDVO device info was found for either DVO port, so fall
2006	 * back to the mapping assumptions used before BIOS parsing.
2007	 */
2008 if (IS_SDVOB(sdvo_reg))
2009 return 0x70;
2010 else
2011 return 0x72;
2012}
2013
2014static void
2015psb_intel_sdvo_connector_init(struct psb_intel_sdvo_connector *connector,
2016 struct psb_intel_sdvo *encoder)
2017{
2018 drm_connector_init(encoder->base.base.dev,
2019 &connector->base.base,
2020 &psb_intel_sdvo_connector_funcs,
2021 connector->base.base.connector_type);
2022
2023 drm_connector_helper_add(&connector->base.base,
2024 &psb_intel_sdvo_connector_helper_funcs);
2025
2026 connector->base.base.interlace_allowed = 0;
2027 connector->base.base.doublescan_allowed = 0;
2028 connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
2029
2030 psb_intel_connector_attach_encoder(&connector->base, &encoder->base);
2031 drm_sysfs_connector_add(&connector->base.base);
2032}
2033
2034static void
2035psb_intel_sdvo_add_hdmi_properties(struct psb_intel_sdvo_connector *connector)
2036{
2037 /* FIXME: We don't support HDMI at the moment
2038 struct drm_device *dev = connector->base.base.dev;
2039
2040 intel_attach_force_audio_property(&connector->base.base);
2041 if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev))
2042 intel_attach_broadcast_rgb_property(&connector->base.base);
2043 */
2044}
2045
2046static bool
2047psb_intel_sdvo_dvi_init(struct psb_intel_sdvo *psb_intel_sdvo, int device)
2048{
2049 struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
2050 struct drm_connector *connector;
2051 struct psb_intel_connector *intel_connector;
2052 struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
2053
2054 psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
2055 if (!psb_intel_sdvo_connector)
2056 return false;
2057
2058 if (device == 0) {
2059 psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS0;
2060 psb_intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0;
2061 } else if (device == 1) {
2062 psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS1;
2063 psb_intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1;
2064 }
2065
2066 intel_connector = &psb_intel_sdvo_connector->base;
2067 connector = &intel_connector->base;
2068 // connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
2069 encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
2070 connector->connector_type = DRM_MODE_CONNECTOR_DVID;
2071
2072 if (psb_intel_sdvo_is_hdmi_connector(psb_intel_sdvo, device)) {
2073 connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
2074 psb_intel_sdvo->is_hdmi = true;
2075 }
2076 psb_intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
2077 (1 << INTEL_ANALOG_CLONE_BIT));
2078
2079 psb_intel_sdvo_connector_init(psb_intel_sdvo_connector, psb_intel_sdvo);
2080 if (psb_intel_sdvo->is_hdmi)
2081 psb_intel_sdvo_add_hdmi_properties(psb_intel_sdvo_connector);
2082
2083 return true;
2084}
2085
2086static bool
2087psb_intel_sdvo_tv_init(struct psb_intel_sdvo *psb_intel_sdvo, int type)
2088{
2089 struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
2090 struct drm_connector *connector;
2091 struct psb_intel_connector *intel_connector;
2092 struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
2093
2094 psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
2095 if (!psb_intel_sdvo_connector)
2096 return false;
2097
2098 intel_connector = &psb_intel_sdvo_connector->base;
2099 connector = &intel_connector->base;
2100 encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
2101 connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
2102
2103 psb_intel_sdvo->controlled_output |= type;
2104 psb_intel_sdvo_connector->output_flag = type;
2105
2106 psb_intel_sdvo->is_tv = true;
2107 psb_intel_sdvo->base.needs_tv_clock = true;
2108 psb_intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
2109
2110 psb_intel_sdvo_connector_init(psb_intel_sdvo_connector, psb_intel_sdvo);
2111
2112 if (!psb_intel_sdvo_tv_create_property(psb_intel_sdvo, psb_intel_sdvo_connector, type))
2113 goto err;
2114
2115 if (!psb_intel_sdvo_create_enhance_property(psb_intel_sdvo, psb_intel_sdvo_connector))
2116 goto err;
2117
2118 return true;
2119
2120err:
2121 psb_intel_sdvo_destroy(connector);
2122 return false;
2123}
2124
2125static bool
2126psb_intel_sdvo_analog_init(struct psb_intel_sdvo *psb_intel_sdvo, int device)
2127{
2128 struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
2129 struct drm_connector *connector;
2130 struct psb_intel_connector *intel_connector;
2131 struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
2132
2133 psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
2134 if (!psb_intel_sdvo_connector)
2135 return false;
2136
2137 intel_connector = &psb_intel_sdvo_connector->base;
2138 connector = &intel_connector->base;
2139 connector->polled = DRM_CONNECTOR_POLL_CONNECT;
2140 encoder->encoder_type = DRM_MODE_ENCODER_DAC;
2141 connector->connector_type = DRM_MODE_CONNECTOR_VGA;
2142
2143 if (device == 0) {
2144 psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0;
2145 psb_intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
2146 } else if (device == 1) {
2147 psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1;
2148 psb_intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
2149 }
2150
2151 psb_intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
2152 (1 << INTEL_ANALOG_CLONE_BIT));
2153
2154 psb_intel_sdvo_connector_init(psb_intel_sdvo_connector,
2155 psb_intel_sdvo);
2156 return true;
2157}
2158
2159static bool
2160psb_intel_sdvo_lvds_init(struct psb_intel_sdvo *psb_intel_sdvo, int device)
2161{
2162 struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
2163 struct drm_connector *connector;
2164 struct psb_intel_connector *intel_connector;
2165 struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
2166
2167 psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
2168 if (!psb_intel_sdvo_connector)
2169 return false;
2170
2171 intel_connector = &psb_intel_sdvo_connector->base;
2172 connector = &intel_connector->base;
2173 encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
2174 connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
2175
2176 if (device == 0) {
2177 psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0;
2178 psb_intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
2179 } else if (device == 1) {
2180 psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1;
2181 psb_intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
2182 }
2183
2184 psb_intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) |
2185 (1 << INTEL_SDVO_LVDS_CLONE_BIT));
2186
2187 psb_intel_sdvo_connector_init(psb_intel_sdvo_connector, psb_intel_sdvo);
2188 if (!psb_intel_sdvo_create_enhance_property(psb_intel_sdvo, psb_intel_sdvo_connector))
2189 goto err;
2190
2191 return true;
2192
2193err:
2194 psb_intel_sdvo_destroy(connector);
2195 return false;
2196}
2197
2198static bool
2199psb_intel_sdvo_output_setup(struct psb_intel_sdvo *psb_intel_sdvo, uint16_t flags)
2200{
2201 psb_intel_sdvo->is_tv = false;
2202 psb_intel_sdvo->base.needs_tv_clock = false;
2203 psb_intel_sdvo->is_lvds = false;
2204
2205	/* An XXX1 function block exists only if the device also has XXX0, hence the full-mask checks below. */
2206
2207 if (flags & SDVO_OUTPUT_TMDS0)
2208 if (!psb_intel_sdvo_dvi_init(psb_intel_sdvo, 0))
2209 return false;
2210
2211 if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK)
2212 if (!psb_intel_sdvo_dvi_init(psb_intel_sdvo, 1))
2213 return false;
2214
2215 /* TV has no XXX1 function block */
2216 if (flags & SDVO_OUTPUT_SVID0)
2217 if (!psb_intel_sdvo_tv_init(psb_intel_sdvo, SDVO_OUTPUT_SVID0))
2218 return false;
2219
2220 if (flags & SDVO_OUTPUT_CVBS0)
2221 if (!psb_intel_sdvo_tv_init(psb_intel_sdvo, SDVO_OUTPUT_CVBS0))
2222 return false;
2223
2224 if (flags & SDVO_OUTPUT_RGB0)
2225 if (!psb_intel_sdvo_analog_init(psb_intel_sdvo, 0))
2226 return false;
2227
2228 if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK)
2229 if (!psb_intel_sdvo_analog_init(psb_intel_sdvo, 1))
2230 return false;
2231
2232 if (flags & SDVO_OUTPUT_LVDS0)
2233 if (!psb_intel_sdvo_lvds_init(psb_intel_sdvo, 0))
2234 return false;
2235
2236 if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK)
2237 if (!psb_intel_sdvo_lvds_init(psb_intel_sdvo, 1))
2238 return false;
2239
2240 if ((flags & SDVO_OUTPUT_MASK) == 0) {
2241 unsigned char bytes[2];
2242
2243 psb_intel_sdvo->controlled_output = 0;
2244 memcpy(bytes, &psb_intel_sdvo->caps.output_flags, 2);
2245 DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n",
2246 SDVO_NAME(psb_intel_sdvo),
2247 bytes[0], bytes[1]);
2248 return false;
2249 }
2250 psb_intel_sdvo->base.crtc_mask = (1 << 0) | (1 << 1);
2251
2252 return true;
2253}
2254
2255static bool psb_intel_sdvo_tv_create_property(struct psb_intel_sdvo *psb_intel_sdvo,
2256 struct psb_intel_sdvo_connector *psb_intel_sdvo_connector,
2257 int type)
2258{
2259 struct drm_device *dev = psb_intel_sdvo->base.base.dev;
2260 struct psb_intel_sdvo_tv_format format;
2261 uint32_t format_map, i;
2262
2263 if (!psb_intel_sdvo_set_target_output(psb_intel_sdvo, type))
2264 return false;
2265
2266 BUILD_BUG_ON(sizeof(format) != 6);
2267 if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
2268 SDVO_CMD_GET_SUPPORTED_TV_FORMATS,
2269 &format, sizeof(format)))
2270 return false;
2271
2272 memcpy(&format_map, &format, min(sizeof(format_map), sizeof(format)));
2273
2274 if (format_map == 0)
2275 return false;
2276
2277 psb_intel_sdvo_connector->format_supported_num = 0;
	for (i = 0; i < TV_FORMAT_NUM; i++)
2279 if (format_map & (1 << i))
2280 psb_intel_sdvo_connector->tv_format_supported[psb_intel_sdvo_connector->format_supported_num++] = i;
2283 psb_intel_sdvo_connector->tv_format =
2284 drm_property_create(dev, DRM_MODE_PROP_ENUM,
2285 "mode", psb_intel_sdvo_connector->format_supported_num);
2286 if (!psb_intel_sdvo_connector->tv_format)
2287 return false;
2288
2289 for (i = 0; i < psb_intel_sdvo_connector->format_supported_num; i++)
2290 drm_property_add_enum(
2291 psb_intel_sdvo_connector->tv_format, i,
2292 i, tv_format_names[psb_intel_sdvo_connector->tv_format_supported[i]]);
2293
2294 psb_intel_sdvo->tv_format_index = psb_intel_sdvo_connector->tv_format_supported[0];
2295 drm_connector_attach_property(&psb_intel_sdvo_connector->base.base,
2296 psb_intel_sdvo_connector->tv_format, 0);
2297 return true;
}
2300
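/*
 * Helper for the TV/LVDS enhancement controls below: for each control the
 * encoder reports as supported, query its maximum and current values,
 * create a 0..max DRM range property named after the control and attach
 * it to the connector with the current value as the initial setting.
 * Expands to nothing when the corresponding feature bit is clear.
 */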
2301#define ENHANCEMENT(name, NAME) do { \
2302 if (enhancements.name) { \
2303 if (!psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_MAX_##NAME, &data_value, 4) || \
2304 !psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_##NAME, &response, 2)) \
2305 return false; \
2306 psb_intel_sdvo_connector->max_##name = data_value[0]; \
2307 psb_intel_sdvo_connector->cur_##name = response; \
2308 psb_intel_sdvo_connector->name = \
2309 drm_property_create(dev, DRM_MODE_PROP_RANGE, #name, 2); \
2310 if (!psb_intel_sdvo_connector->name) return false; \
2311 psb_intel_sdvo_connector->name->values[0] = 0; \
2312 psb_intel_sdvo_connector->name->values[1] = data_value[0]; \
2313 drm_connector_attach_property(connector, \
2314 psb_intel_sdvo_connector->name, \
2315 psb_intel_sdvo_connector->cur_##name); \
2316 DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
2317 data_value[0], data_value[1], response); \
2318 } \
} while (0)
2320
2321static bool
2322psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
2323 struct psb_intel_sdvo_connector *psb_intel_sdvo_connector,
2324 struct psb_intel_sdvo_enhancements_reply enhancements)
2325{
2326 struct drm_device *dev = psb_intel_sdvo->base.base.dev;
2327 struct drm_connector *connector = &psb_intel_sdvo_connector->base.base;
2328 uint16_t response, data_value[2];
2329
	/* When horizontal overscan is supported, add the left/right margin properties */
2331 if (enhancements.overscan_h) {
2332 if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
2333 SDVO_CMD_GET_MAX_OVERSCAN_H,
2334 &data_value, 4))
2335 return false;
2336
2337 if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
2338 SDVO_CMD_GET_OVERSCAN_H,
2339 &response, 2))
2340 return false;
2341
2342 psb_intel_sdvo_connector->max_hscan = data_value[0];
2343 psb_intel_sdvo_connector->left_margin = data_value[0] - response;
2344 psb_intel_sdvo_connector->right_margin = psb_intel_sdvo_connector->left_margin;
2345 psb_intel_sdvo_connector->left =
2346 drm_property_create(dev, DRM_MODE_PROP_RANGE,
2347 "left_margin", 2);
2348 if (!psb_intel_sdvo_connector->left)
2349 return false;
2350
2351 psb_intel_sdvo_connector->left->values[0] = 0;
2352 psb_intel_sdvo_connector->left->values[1] = data_value[0];
2353 drm_connector_attach_property(connector,
2354 psb_intel_sdvo_connector->left,
2355 psb_intel_sdvo_connector->left_margin);
2356
2357 psb_intel_sdvo_connector->right =
2358 drm_property_create(dev, DRM_MODE_PROP_RANGE,
2359 "right_margin", 2);
2360 if (!psb_intel_sdvo_connector->right)
2361 return false;
2362
2363 psb_intel_sdvo_connector->right->values[0] = 0;
2364 psb_intel_sdvo_connector->right->values[1] = data_value[0];
2365 drm_connector_attach_property(connector,
2366 psb_intel_sdvo_connector->right,
2367 psb_intel_sdvo_connector->right_margin);
2368 DRM_DEBUG_KMS("h_overscan: max %d, "
2369 "default %d, current %d\n",
2370 data_value[0], data_value[1], response);
2371 }
2372
2373 if (enhancements.overscan_v) {
2374 if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
2375 SDVO_CMD_GET_MAX_OVERSCAN_V,
2376 &data_value, 4))
2377 return false;
2378
2379 if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
2380 SDVO_CMD_GET_OVERSCAN_V,
2381 &response, 2))
2382 return false;
2383
2384 psb_intel_sdvo_connector->max_vscan = data_value[0];
2385 psb_intel_sdvo_connector->top_margin = data_value[0] - response;
2386 psb_intel_sdvo_connector->bottom_margin = psb_intel_sdvo_connector->top_margin;
2387 psb_intel_sdvo_connector->top =
2388 drm_property_create(dev, DRM_MODE_PROP_RANGE,
2389 "top_margin", 2);
2390 if (!psb_intel_sdvo_connector->top)
2391 return false;
2392
2393 psb_intel_sdvo_connector->top->values[0] = 0;
2394 psb_intel_sdvo_connector->top->values[1] = data_value[0];
2395 drm_connector_attach_property(connector,
2396 psb_intel_sdvo_connector->top,
2397 psb_intel_sdvo_connector->top_margin);
2398
2399 psb_intel_sdvo_connector->bottom =
2400 drm_property_create(dev, DRM_MODE_PROP_RANGE,
2401 "bottom_margin", 2);
2402 if (!psb_intel_sdvo_connector->bottom)
2403 return false;
2404
2405 psb_intel_sdvo_connector->bottom->values[0] = 0;
2406 psb_intel_sdvo_connector->bottom->values[1] = data_value[0];
2407 drm_connector_attach_property(connector,
2408 psb_intel_sdvo_connector->bottom,
2409 psb_intel_sdvo_connector->bottom_margin);
2410 DRM_DEBUG_KMS("v_overscan: max %d, "
2411 "default %d, current %d\n",
2412 data_value[0], data_value[1], response);
2413 }
2414
2415 ENHANCEMENT(hpos, HPOS);
2416 ENHANCEMENT(vpos, VPOS);
2417 ENHANCEMENT(saturation, SATURATION);
2418 ENHANCEMENT(contrast, CONTRAST);
2419 ENHANCEMENT(hue, HUE);
2420 ENHANCEMENT(sharpness, SHARPNESS);
2421 ENHANCEMENT(brightness, BRIGHTNESS);
2422 ENHANCEMENT(flicker_filter, FLICKER_FILTER);
2423 ENHANCEMENT(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE);
2424 ENHANCEMENT(flicker_filter_2d, FLICKER_FILTER_2D);
2425 ENHANCEMENT(tv_chroma_filter, TV_CHROMA_FILTER);
2426 ENHANCEMENT(tv_luma_filter, TV_LUMA_FILTER);
2427
2428 if (enhancements.dot_crawl) {
2429 if (!psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_DOT_CRAWL, &response, 2))
2430 return false;
2431
2432 psb_intel_sdvo_connector->max_dot_crawl = 1;
2433 psb_intel_sdvo_connector->cur_dot_crawl = response & 0x1;
2434 psb_intel_sdvo_connector->dot_crawl =
2435 drm_property_create(dev, DRM_MODE_PROP_RANGE, "dot_crawl", 2);
2436 if (!psb_intel_sdvo_connector->dot_crawl)
2437 return false;
2438
2439 psb_intel_sdvo_connector->dot_crawl->values[0] = 0;
2440 psb_intel_sdvo_connector->dot_crawl->values[1] = 1;
2441 drm_connector_attach_property(connector,
2442 psb_intel_sdvo_connector->dot_crawl,
2443 psb_intel_sdvo_connector->cur_dot_crawl);
2444 DRM_DEBUG_KMS("dot crawl: current %d\n", response);
2445 }
2446
2447 return true;
2448}
2449
2450static bool
2451psb_intel_sdvo_create_enhance_property_lvds(struct psb_intel_sdvo *psb_intel_sdvo,
2452 struct psb_intel_sdvo_connector *psb_intel_sdvo_connector,
2453 struct psb_intel_sdvo_enhancements_reply enhancements)
2454{
2455 struct drm_device *dev = psb_intel_sdvo->base.base.dev;
2456 struct drm_connector *connector = &psb_intel_sdvo_connector->base.base;
2457 uint16_t response, data_value[2];
2458
2459 ENHANCEMENT(brightness, BRIGHTNESS);
2460
2461 return true;
2462}
2463#undef ENHANCEMENT
2464
2465static bool psb_intel_sdvo_create_enhance_property(struct psb_intel_sdvo *psb_intel_sdvo,
2466 struct psb_intel_sdvo_connector *psb_intel_sdvo_connector)
2467{
2468 union {
2469 struct psb_intel_sdvo_enhancements_reply reply;
2470 uint16_t response;
2471 } enhancements;
2472
2473 BUILD_BUG_ON(sizeof(enhancements) != 2);
2474
2475 enhancements.response = 0;
2476 psb_intel_sdvo_get_value(psb_intel_sdvo,
2477 SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
2478 &enhancements, sizeof(enhancements));
2479 if (enhancements.response == 0) {
2480 DRM_DEBUG_KMS("No enhancement is supported\n");
2481 return true;
2482 }
2483
2484 if (IS_TV(psb_intel_sdvo_connector))
2485 return psb_intel_sdvo_create_enhance_property_tv(psb_intel_sdvo, psb_intel_sdvo_connector, enhancements.reply);
	else if (IS_LVDS(psb_intel_sdvo_connector))
2487 return psb_intel_sdvo_create_enhance_property_lvds(psb_intel_sdvo, psb_intel_sdvo_connector, enhancements.reply);
2488 else
2489 return true;
2490}
2491
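/*
 * DDC proxy: EDID reads must go through the SDVO device itself. Each
 * transfer first points the SDVO control bus at the DDC channel and then
 * forwards the raw I2C messages to the underlying adapter.
 */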
2492static int psb_intel_sdvo_ddc_proxy_xfer(struct i2c_adapter *adapter,
2493 struct i2c_msg *msgs,
2494 int num)
2495{
2496 struct psb_intel_sdvo *sdvo = adapter->algo_data;
2497
2498 if (!psb_intel_sdvo_set_control_bus_switch(sdvo, sdvo->ddc_bus))
2499 return -EIO;
2500
2501 return sdvo->i2c->algo->master_xfer(sdvo->i2c, msgs, num);
2502}
2503
2504static u32 psb_intel_sdvo_ddc_proxy_func(struct i2c_adapter *adapter)
2505{
2506 struct psb_intel_sdvo *sdvo = adapter->algo_data;
2507 return sdvo->i2c->algo->functionality(sdvo->i2c);
2508}
2509
2510static const struct i2c_algorithm psb_intel_sdvo_ddc_proxy = {
2511 .master_xfer = psb_intel_sdvo_ddc_proxy_xfer,
2512 .functionality = psb_intel_sdvo_ddc_proxy_func
2513};
2514
2515static bool
2516psb_intel_sdvo_init_ddc_proxy(struct psb_intel_sdvo *sdvo,
2517 struct drm_device *dev)
2518{
2519 sdvo->ddc.owner = THIS_MODULE;
2520 sdvo->ddc.class = I2C_CLASS_DDC;
2521 snprintf(sdvo->ddc.name, I2C_NAME_SIZE, "SDVO DDC proxy");
2522 sdvo->ddc.dev.parent = &dev->pdev->dev;
2523 sdvo->ddc.algo_data = sdvo;
2524 sdvo->ddc.algo = &psb_intel_sdvo_ddc_proxy;
2525
2526 return i2c_add_adapter(&sdvo->ddc) == 0;
2527}
2528
2529bool psb_intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
2530{
2531 struct drm_psb_private *dev_priv = dev->dev_private;
2532 struct psb_intel_encoder *psb_intel_encoder;
2533 struct psb_intel_sdvo *psb_intel_sdvo;
2534 int i;
2535
2536 psb_intel_sdvo = kzalloc(sizeof(struct psb_intel_sdvo), GFP_KERNEL);
2537 if (!psb_intel_sdvo)
2538 return false;
2539
2540 psb_intel_sdvo->sdvo_reg = sdvo_reg;
2541 psb_intel_sdvo->slave_addr = psb_intel_sdvo_get_slave_addr(dev, sdvo_reg) >> 1;
2542 psb_intel_sdvo_select_i2c_bus(dev_priv, psb_intel_sdvo, sdvo_reg);
2543 if (!psb_intel_sdvo_init_ddc_proxy(psb_intel_sdvo, dev)) {
2544 kfree(psb_intel_sdvo);
2545 return false;
2546 }
2547
2548 /* encoder type will be decided later */
2549 psb_intel_encoder = &psb_intel_sdvo->base;
2550 psb_intel_encoder->type = INTEL_OUTPUT_SDVO;
2551 drm_encoder_init(dev, &psb_intel_encoder->base, &psb_intel_sdvo_enc_funcs, 0);
2552
2553 /* Read the regs to test if we can talk to the device */
2554 for (i = 0; i < 0x40; i++) {
2555 u8 byte;
2556
2557 if (!psb_intel_sdvo_read_byte(psb_intel_sdvo, i, &byte)) {
2558 DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
2559 IS_SDVOB(sdvo_reg) ? 'B' : 'C');
2560 goto err;
2561 }
2562 }
2563
2564 if (IS_SDVOB(sdvo_reg))
2565 dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
2566 else
2567 dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
2568
2569 drm_encoder_helper_add(&psb_intel_encoder->base, &psb_intel_sdvo_helper_funcs);
2570
	/* By default SDVO LVDS is false; the output setup below may change that */
2572 if (!psb_intel_sdvo_get_capabilities(psb_intel_sdvo, &psb_intel_sdvo->caps))
2573 goto err;
2574
	if (!psb_intel_sdvo_output_setup(psb_intel_sdvo,
					 psb_intel_sdvo->caps.output_flags)) {
2577 DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
2578 IS_SDVOB(sdvo_reg) ? 'B' : 'C');
2579 goto err;
2580 }
2581
2582 psb_intel_sdvo_select_ddc_bus(dev_priv, psb_intel_sdvo, sdvo_reg);
2583
2584 /* Set the input timing to the screen. Assume always input 0. */
2585 if (!psb_intel_sdvo_set_target_input(psb_intel_sdvo))
2586 goto err;
2587
2588 if (!psb_intel_sdvo_get_input_pixel_clock_range(psb_intel_sdvo,
2589 &psb_intel_sdvo->pixel_clock_min,
2590 &psb_intel_sdvo->pixel_clock_max))
2591 goto err;
2592
2593 DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, "
2594 "clock range %dMHz - %dMHz, "
2595 "input 1: %c, input 2: %c, "
2596 "output 1: %c, output 2: %c\n",
2597 SDVO_NAME(psb_intel_sdvo),
2598 psb_intel_sdvo->caps.vendor_id, psb_intel_sdvo->caps.device_id,
2599 psb_intel_sdvo->caps.device_rev_id,
2600 psb_intel_sdvo->pixel_clock_min / 1000,
2601 psb_intel_sdvo->pixel_clock_max / 1000,
2602 (psb_intel_sdvo->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N',
2603 (psb_intel_sdvo->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N',
2604 /* check currently supported outputs */
2605 psb_intel_sdvo->caps.output_flags &
2606 (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N',
2607 psb_intel_sdvo->caps.output_flags &
2608 (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
2609 return true;
2610
2611err:
2612 drm_encoder_cleanup(&psb_intel_encoder->base);
2613 i2c_del_adapter(&psb_intel_sdvo->ddc);
2614 kfree(psb_intel_sdvo);
2615
2616 return false;
2617}
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo_regs.h b/drivers/gpu/drm/gma500/psb_intel_sdvo_regs.h
new file mode 100644
index 00000000000..600e79744d6
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo_regs.h
@@ -0,0 +1,723 @@
1/*
 * Copyright © 2006-2007 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 */
26
27/**
28 * @file SDVO command definitions and structures.
29 */
30
31#define SDVO_OUTPUT_FIRST (0)
32#define SDVO_OUTPUT_TMDS0 (1 << 0)
33#define SDVO_OUTPUT_RGB0 (1 << 1)
34#define SDVO_OUTPUT_CVBS0 (1 << 2)
35#define SDVO_OUTPUT_SVID0 (1 << 3)
36#define SDVO_OUTPUT_YPRPB0 (1 << 4)
37#define SDVO_OUTPUT_SCART0 (1 << 5)
38#define SDVO_OUTPUT_LVDS0 (1 << 6)
39#define SDVO_OUTPUT_TMDS1 (1 << 8)
40#define SDVO_OUTPUT_RGB1 (1 << 9)
41#define SDVO_OUTPUT_CVBS1 (1 << 10)
42#define SDVO_OUTPUT_SVID1 (1 << 11)
43#define SDVO_OUTPUT_YPRPB1 (1 << 12)
44#define SDVO_OUTPUT_SCART1 (1 << 13)
45#define SDVO_OUTPUT_LVDS1 (1 << 14)
46#define SDVO_OUTPUT_LAST (14)
47
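/*
 * The output flags above form a 16-bit bitmask (see output_flags below):
 * outputs of the second function block occupy bits 8-14 and mirror the
 * XXX0 outputs in bits 0-6, with bits 7 and 15 unused.
 */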
48struct psb_intel_sdvo_caps {
49 u8 vendor_id;
50 u8 device_id;
51 u8 device_rev_id;
52 u8 sdvo_version_major;
53 u8 sdvo_version_minor;
54 unsigned int sdvo_inputs_mask:2;
55 unsigned int smooth_scaling:1;
56 unsigned int sharp_scaling:1;
57 unsigned int up_scaling:1;
58 unsigned int down_scaling:1;
59 unsigned int stall_support:1;
60 unsigned int pad:1;
61 u16 output_flags;
62} __attribute__((packed));
63
64/** This matches the EDID DTD structure, more or less */
65struct psb_intel_sdvo_dtd {
66 struct {
67 u16 clock; /**< pixel clock, in 10kHz units */
68 u8 h_active; /**< lower 8 bits (pixels) */
69 u8 h_blank; /**< lower 8 bits (pixels) */
70 u8 h_high; /**< upper 4 bits each h_active, h_blank */
71 u8 v_active; /**< lower 8 bits (lines) */
72 u8 v_blank; /**< lower 8 bits (lines) */
73 u8 v_high; /**< upper 4 bits each v_active, v_blank */
74 } part1;
75
76 struct {
77 u8 h_sync_off; /**< lower 8 bits, from hblank start */
78 u8 h_sync_width; /**< lower 8 bits (pixels) */
79 /** lower 4 bits each vsync offset, vsync width */
80 u8 v_sync_off_width;
81 /**
82 * 2 high bits of hsync offset, 2 high bits of hsync width,
83 * bits 4-5 of vsync offset, and 2 high bits of vsync width.
84 */
85 u8 sync_off_width_high;
86 u8 dtd_flags;
87 u8 sdvo_flags;
		/** bits 6-7 of vsync offset, stored at bits 6-7 */
89 u8 v_sync_off_high;
90 u8 reserved;
91 } part2;
92} __attribute__((packed));
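
/*
 * Informative example: each 12-bit dimension is split into a low byte and
 * a 4-bit high nibble. For a 1080-line mode, v_active holds 1080 & 0xff
 * (0x38) and the remaining bits, (1080 >> 8) & 0xf (0x4), live in v_high;
 * which nibble of v_high belongs to v_active vs. v_blank is fixed by the
 * SDVO spec.
 */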
93
94struct psb_intel_sdvo_pixel_clock_range {
95 u16 min; /**< pixel clock, in 10kHz units */
96 u16 max; /**< pixel clock, in 10kHz units */
97} __attribute__((packed));
98
99struct psb_intel_sdvo_preferred_input_timing_args {
100 u16 clock;
101 u16 width;
102 u16 height;
103 u8 interlace:1;
104 u8 scaled:1;
105 u8 pad:6;
106} __attribute__((packed));
107
108/* I2C registers for SDVO */
109#define SDVO_I2C_ARG_0 0x07
110#define SDVO_I2C_ARG_1 0x06
111#define SDVO_I2C_ARG_2 0x05
112#define SDVO_I2C_ARG_3 0x04
113#define SDVO_I2C_ARG_4 0x03
114#define SDVO_I2C_ARG_5 0x02
115#define SDVO_I2C_ARG_6 0x01
116#define SDVO_I2C_ARG_7 0x00
117#define SDVO_I2C_OPCODE 0x08
118#define SDVO_I2C_CMD_STATUS 0x09
119#define SDVO_I2C_RETURN_0 0x0a
120#define SDVO_I2C_RETURN_1 0x0b
121#define SDVO_I2C_RETURN_2 0x0c
122#define SDVO_I2C_RETURN_3 0x0d
123#define SDVO_I2C_RETURN_4 0x0e
124#define SDVO_I2C_RETURN_5 0x0f
125#define SDVO_I2C_RETURN_6 0x10
126#define SDVO_I2C_RETURN_7 0x11
127#define SDVO_I2C_VENDOR_BEGIN 0x20
128
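/*
 * Command protocol sketch: the host writes up to eight argument bytes to
 * SDVO_I2C_ARG_7..SDVO_I2C_ARG_0, then the opcode to SDVO_I2C_OPCODE.
 * It polls SDVO_I2C_CMD_STATUS for one of the status codes below and, on
 * success, reads any reply from SDVO_I2C_RETURN_0 onwards.
 */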
129/* Status results */
130#define SDVO_CMD_STATUS_POWER_ON 0x0
131#define SDVO_CMD_STATUS_SUCCESS 0x1
132#define SDVO_CMD_STATUS_NOTSUPP 0x2
133#define SDVO_CMD_STATUS_INVALID_ARG 0x3
134#define SDVO_CMD_STATUS_PENDING 0x4
135#define SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED 0x5
136#define SDVO_CMD_STATUS_SCALING_NOT_SUPP 0x6
137
138/* SDVO commands, argument/result registers */
139
140#define SDVO_CMD_RESET 0x01
141
142/** Returns a struct intel_sdvo_caps */
143#define SDVO_CMD_GET_DEVICE_CAPS 0x02
144
145#define SDVO_CMD_GET_FIRMWARE_REV 0x86
146# define SDVO_DEVICE_FIRMWARE_MINOR SDVO_I2C_RETURN_0
147# define SDVO_DEVICE_FIRMWARE_MAJOR SDVO_I2C_RETURN_1
148# define SDVO_DEVICE_FIRMWARE_PATCH SDVO_I2C_RETURN_2
149
150/**
151 * Reports which inputs are trained (managed to sync).
152 *
153 * Devices must have trained within 2 vsyncs of a mode change.
154 */
155#define SDVO_CMD_GET_TRAINED_INPUTS 0x03
156struct psb_intel_sdvo_get_trained_inputs_response {
157 unsigned int input0_trained:1;
158 unsigned int input1_trained:1;
159 unsigned int pad:6;
160} __attribute__((packed));
161
162/** Returns a struct intel_sdvo_output_flags of active outputs. */
163#define SDVO_CMD_GET_ACTIVE_OUTPUTS 0x04
164
165/**
166 * Sets the current set of active outputs.
167 *
168 * Takes a struct intel_sdvo_output_flags. Must be preceded by a SET_IN_OUT_MAP
169 * on multi-output devices.
170 */
171#define SDVO_CMD_SET_ACTIVE_OUTPUTS 0x05
172
173/**
174 * Returns the current mapping of SDVO inputs to outputs on the device.
175 *
176 * Returns two struct intel_sdvo_output_flags structures.
177 */
178#define SDVO_CMD_GET_IN_OUT_MAP 0x06
179struct psb_intel_sdvo_in_out_map {
180 u16 in0, in1;
181};
182
183/**
184 * Sets the current mapping of SDVO inputs to outputs on the device.
185 *
 * Takes two struct intel_sdvo_output_flags structures.
187 */
188#define SDVO_CMD_SET_IN_OUT_MAP 0x07
189
190/**
191 * Returns a struct intel_sdvo_output_flags of attached displays.
192 */
193#define SDVO_CMD_GET_ATTACHED_DISPLAYS 0x0b
194
195/**
 * Returns a struct intel_sdvo_output_flags of displays supporting hot plugging.
197 */
198#define SDVO_CMD_GET_HOT_PLUG_SUPPORT 0x0c
199
200/**
201 * Takes a struct intel_sdvo_output_flags.
202 */
203#define SDVO_CMD_SET_ACTIVE_HOT_PLUG 0x0d
204
205/**
206 * Returns a struct intel_sdvo_output_flags of displays with hot plug
207 * interrupts enabled.
208 */
209#define SDVO_CMD_GET_ACTIVE_HOT_PLUG 0x0e
210
211#define SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE 0x0f
212struct intel_sdvo_get_interrupt_event_source_response {
213 u16 interrupt_status;
214 unsigned int ambient_light_interrupt:1;
215 unsigned int hdmi_audio_encrypt_change:1;
216 unsigned int pad:6;
217} __attribute__((packed));
218
219/**
220 * Selects which input is affected by future input commands.
221 *
222 * Commands affected include SET_INPUT_TIMINGS_PART[12],
223 * GET_INPUT_TIMINGS_PART[12], GET_PREFERRED_INPUT_TIMINGS_PART[12],
224 * GET_INPUT_PIXEL_CLOCK_RANGE, and CREATE_PREFERRED_INPUT_TIMINGS.
225 */
226#define SDVO_CMD_SET_TARGET_INPUT 0x10
227struct psb_intel_sdvo_set_target_input_args {
228 unsigned int target_1:1;
229 unsigned int pad:7;
230} __attribute__((packed));
231
232/**
233 * Takes a struct intel_sdvo_output_flags of which outputs are targeted by
234 * future output commands.
235 *
 * Affected commands include SET_OUTPUT_TIMINGS_PART[12],
237 * GET_OUTPUT_TIMINGS_PART[12], and GET_OUTPUT_PIXEL_CLOCK_RANGE.
238 */
239#define SDVO_CMD_SET_TARGET_OUTPUT 0x11
240
241#define SDVO_CMD_GET_INPUT_TIMINGS_PART1 0x12
242#define SDVO_CMD_GET_INPUT_TIMINGS_PART2 0x13
243#define SDVO_CMD_SET_INPUT_TIMINGS_PART1 0x14
244#define SDVO_CMD_SET_INPUT_TIMINGS_PART2 0x15
245#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART1 0x16
246#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART2 0x17
247#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART1 0x18
248#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART2 0x19
249/* Part 1 */
250# define SDVO_DTD_CLOCK_LOW SDVO_I2C_ARG_0
251# define SDVO_DTD_CLOCK_HIGH SDVO_I2C_ARG_1
252# define SDVO_DTD_H_ACTIVE SDVO_I2C_ARG_2
253# define SDVO_DTD_H_BLANK SDVO_I2C_ARG_3
254# define SDVO_DTD_H_HIGH SDVO_I2C_ARG_4
255# define SDVO_DTD_V_ACTIVE SDVO_I2C_ARG_5
256# define SDVO_DTD_V_BLANK SDVO_I2C_ARG_6
257# define SDVO_DTD_V_HIGH SDVO_I2C_ARG_7
258/* Part 2 */
259# define SDVO_DTD_HSYNC_OFF SDVO_I2C_ARG_0
260# define SDVO_DTD_HSYNC_WIDTH SDVO_I2C_ARG_1
261# define SDVO_DTD_VSYNC_OFF_WIDTH SDVO_I2C_ARG_2
262# define SDVO_DTD_SYNC_OFF_WIDTH_HIGH SDVO_I2C_ARG_3
263# define SDVO_DTD_DTD_FLAGS SDVO_I2C_ARG_4
264# define SDVO_DTD_DTD_FLAG_INTERLACED (1 << 7)
265# define SDVO_DTD_DTD_FLAG_STEREO_MASK (3 << 5)
266# define SDVO_DTD_DTD_FLAG_INPUT_MASK (3 << 3)
267# define SDVO_DTD_DTD_FLAG_SYNC_MASK (3 << 1)
# define SDVO_DTD_SDVO_FLAGS				SDVO_I2C_ARG_5
269# define SDVO_DTD_SDVO_FLAG_STALL (1 << 7)
270# define SDVO_DTD_SDVO_FLAG_CENTERED (0 << 6)
271# define SDVO_DTD_SDVO_FLAG_UPPER_LEFT (1 << 6)
272# define SDVO_DTD_SDVO_FLAG_SCALING_MASK (3 << 4)
273# define SDVO_DTD_SDVO_FLAG_SCALING_NONE (0 << 4)
274# define SDVO_DTD_SDVO_FLAG_SCALING_SHARP (1 << 4)
275# define SDVO_DTD_SDVO_FLAG_SCALING_SMOOTH (2 << 4)
276# define SDVO_DTD_VSYNC_OFF_HIGH SDVO_I2C_ARG_6
277
278/**
279 * Generates a DTD based on the given width, height, and flags.
280 *
281 * This will be supported by any device supporting scaling or interlaced
282 * modes.
283 */
284#define SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING 0x1a
285# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_LOW SDVO_I2C_ARG_0
286# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_HIGH SDVO_I2C_ARG_1
287# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_LOW SDVO_I2C_ARG_2
288# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_HIGH SDVO_I2C_ARG_3
289# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_LOW SDVO_I2C_ARG_4
290# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_HIGH SDVO_I2C_ARG_5
291# define SDVO_PREFERRED_INPUT_TIMING_FLAGS SDVO_I2C_ARG_6
292# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_INTERLACED (1 << 0)
293# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_SCALED (1 << 1)
294
295#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1 0x1b
296#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2 0x1c
297
298/** Returns a struct intel_sdvo_pixel_clock_range */
299#define SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE 0x1d
300/** Returns a struct intel_sdvo_pixel_clock_range */
301#define SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE 0x1e
302
303/** Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */
304#define SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS 0x1f
305
306/** Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
307#define SDVO_CMD_GET_CLOCK_RATE_MULT 0x20
308/** Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
309#define SDVO_CMD_SET_CLOCK_RATE_MULT 0x21
310# define SDVO_CLOCK_RATE_MULT_1X (1 << 0)
311# define SDVO_CLOCK_RATE_MULT_2X (1 << 1)
312# define SDVO_CLOCK_RATE_MULT_4X (1 << 3)
313
314#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27
315/** 6 bytes of bit flags for TV formats shared by all TV format functions */
316struct psb_intel_sdvo_tv_format {
317 unsigned int ntsc_m:1;
318 unsigned int ntsc_j:1;
319 unsigned int ntsc_443:1;
320 unsigned int pal_b:1;
321 unsigned int pal_d:1;
322 unsigned int pal_g:1;
323 unsigned int pal_h:1;
324 unsigned int pal_i:1;
325
326 unsigned int pal_m:1;
327 unsigned int pal_n:1;
328 unsigned int pal_nc:1;
329 unsigned int pal_60:1;
330 unsigned int secam_b:1;
331 unsigned int secam_d:1;
332 unsigned int secam_g:1;
333 unsigned int secam_k:1;
334
335 unsigned int secam_k1:1;
336 unsigned int secam_l:1;
337 unsigned int secam_60:1;
338 unsigned int hdtv_std_smpte_240m_1080i_59:1;
339 unsigned int hdtv_std_smpte_240m_1080i_60:1;
340 unsigned int hdtv_std_smpte_260m_1080i_59:1;
341 unsigned int hdtv_std_smpte_260m_1080i_60:1;
342 unsigned int hdtv_std_smpte_274m_1080i_50:1;
343
344 unsigned int hdtv_std_smpte_274m_1080i_59:1;
345 unsigned int hdtv_std_smpte_274m_1080i_60:1;
346 unsigned int hdtv_std_smpte_274m_1080p_23:1;
347 unsigned int hdtv_std_smpte_274m_1080p_24:1;
348 unsigned int hdtv_std_smpte_274m_1080p_25:1;
349 unsigned int hdtv_std_smpte_274m_1080p_29:1;
350 unsigned int hdtv_std_smpte_274m_1080p_30:1;
351 unsigned int hdtv_std_smpte_274m_1080p_50:1;
352
353 unsigned int hdtv_std_smpte_274m_1080p_59:1;
354 unsigned int hdtv_std_smpte_274m_1080p_60:1;
355 unsigned int hdtv_std_smpte_295m_1080i_50:1;
356 unsigned int hdtv_std_smpte_295m_1080p_50:1;
357 unsigned int hdtv_std_smpte_296m_720p_59:1;
358 unsigned int hdtv_std_smpte_296m_720p_60:1;
359 unsigned int hdtv_std_smpte_296m_720p_50:1;
360 unsigned int hdtv_std_smpte_293m_480p_59:1;
361
362 unsigned int hdtv_std_smpte_170m_480i_59:1;
363 unsigned int hdtv_std_iturbt601_576i_50:1;
364 unsigned int hdtv_std_iturbt601_576p_50:1;
365 unsigned int hdtv_std_eia_7702a_480i_60:1;
366 unsigned int hdtv_std_eia_7702a_480p_60:1;
367 unsigned int pad:3;
368} __attribute__((packed));
369
370#define SDVO_CMD_GET_TV_FORMAT 0x28
371
372#define SDVO_CMD_SET_TV_FORMAT 0x29
373
/** Returns the resolutions that can be used with the given TV format */
375#define SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT 0x83
376struct psb_intel_sdvo_sdtv_resolution_request {
377 unsigned int ntsc_m:1;
378 unsigned int ntsc_j:1;
379 unsigned int ntsc_443:1;
380 unsigned int pal_b:1;
381 unsigned int pal_d:1;
382 unsigned int pal_g:1;
383 unsigned int pal_h:1;
384 unsigned int pal_i:1;
385
386 unsigned int pal_m:1;
387 unsigned int pal_n:1;
388 unsigned int pal_nc:1;
389 unsigned int pal_60:1;
390 unsigned int secam_b:1;
391 unsigned int secam_d:1;
392 unsigned int secam_g:1;
393 unsigned int secam_k:1;
394
395 unsigned int secam_k1:1;
396 unsigned int secam_l:1;
397 unsigned int secam_60:1;
398 unsigned int pad:5;
399} __attribute__((packed));
400
401struct psb_intel_sdvo_sdtv_resolution_reply {
402 unsigned int res_320x200:1;
403 unsigned int res_320x240:1;
404 unsigned int res_400x300:1;
405 unsigned int res_640x350:1;
406 unsigned int res_640x400:1;
407 unsigned int res_640x480:1;
408 unsigned int res_704x480:1;
409 unsigned int res_704x576:1;
410
411 unsigned int res_720x350:1;
412 unsigned int res_720x400:1;
413 unsigned int res_720x480:1;
414 unsigned int res_720x540:1;
415 unsigned int res_720x576:1;
416 unsigned int res_768x576:1;
417 unsigned int res_800x600:1;
418 unsigned int res_832x624:1;
419
420 unsigned int res_920x766:1;
421 unsigned int res_1024x768:1;
422 unsigned int res_1280x1024:1;
423 unsigned int pad:5;
424} __attribute__((packed));
425
/* Get the supported resolutions with square pixel aspect ratio that can be
   scaled to the requested HDTV format */
428#define SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT 0x85
429
430struct psb_intel_sdvo_hdtv_resolution_request {
431 unsigned int hdtv_std_smpte_240m_1080i_59:1;
432 unsigned int hdtv_std_smpte_240m_1080i_60:1;
433 unsigned int hdtv_std_smpte_260m_1080i_59:1;
434 unsigned int hdtv_std_smpte_260m_1080i_60:1;
435 unsigned int hdtv_std_smpte_274m_1080i_50:1;
436 unsigned int hdtv_std_smpte_274m_1080i_59:1;
437 unsigned int hdtv_std_smpte_274m_1080i_60:1;
438 unsigned int hdtv_std_smpte_274m_1080p_23:1;
439
440 unsigned int hdtv_std_smpte_274m_1080p_24:1;
441 unsigned int hdtv_std_smpte_274m_1080p_25:1;
442 unsigned int hdtv_std_smpte_274m_1080p_29:1;
443 unsigned int hdtv_std_smpte_274m_1080p_30:1;
444 unsigned int hdtv_std_smpte_274m_1080p_50:1;
445 unsigned int hdtv_std_smpte_274m_1080p_59:1;
446 unsigned int hdtv_std_smpte_274m_1080p_60:1;
447 unsigned int hdtv_std_smpte_295m_1080i_50:1;
448
449 unsigned int hdtv_std_smpte_295m_1080p_50:1;
450 unsigned int hdtv_std_smpte_296m_720p_59:1;
451 unsigned int hdtv_std_smpte_296m_720p_60:1;
452 unsigned int hdtv_std_smpte_296m_720p_50:1;
453 unsigned int hdtv_std_smpte_293m_480p_59:1;
454 unsigned int hdtv_std_smpte_170m_480i_59:1;
455 unsigned int hdtv_std_iturbt601_576i_50:1;
456 unsigned int hdtv_std_iturbt601_576p_50:1;
457
458 unsigned int hdtv_std_eia_7702a_480i_60:1;
459 unsigned int hdtv_std_eia_7702a_480p_60:1;
460 unsigned int pad:6;
461} __attribute__((packed));
462
463struct psb_intel_sdvo_hdtv_resolution_reply {
464 unsigned int res_640x480:1;
465 unsigned int res_800x600:1;
466 unsigned int res_1024x768:1;
467 unsigned int res_1280x960:1;
468 unsigned int res_1400x1050:1;
469 unsigned int res_1600x1200:1;
470 unsigned int res_1920x1440:1;
471 unsigned int res_2048x1536:1;
472
473 unsigned int res_2560x1920:1;
474 unsigned int res_3200x2400:1;
475 unsigned int res_3840x2880:1;
476 unsigned int pad1:5;
477
478 unsigned int res_848x480:1;
479 unsigned int res_1064x600:1;
480 unsigned int res_1280x720:1;
481 unsigned int res_1360x768:1;
482 unsigned int res_1704x960:1;
483 unsigned int res_1864x1050:1;
484 unsigned int res_1920x1080:1;
485 unsigned int res_2128x1200:1;
486
487 unsigned int res_2560x1400:1;
488 unsigned int res_2728x1536:1;
489 unsigned int res_3408x1920:1;
490 unsigned int res_4264x2400:1;
491 unsigned int res_5120x2880:1;
492 unsigned int pad2:3;
493
494 unsigned int res_768x480:1;
495 unsigned int res_960x600:1;
496 unsigned int res_1152x720:1;
497 unsigned int res_1124x768:1;
498 unsigned int res_1536x960:1;
499 unsigned int res_1680x1050:1;
500 unsigned int res_1728x1080:1;
501 unsigned int res_1920x1200:1;
502
503 unsigned int res_2304x1440:1;
504 unsigned int res_2456x1536:1;
505 unsigned int res_3072x1920:1;
506 unsigned int res_3840x2400:1;
507 unsigned int res_4608x2880:1;
508 unsigned int pad3:3;
509
510 unsigned int res_1280x1024:1;
511 unsigned int pad4:7;
512
513 unsigned int res_1280x768:1;
514 unsigned int pad5:7;
515} __attribute__((packed));
516
/* Get supported power states; returns info for the encoder and monitor,
   relying on the last SetTargetInput and SetTargetOutput calls */
#define SDVO_CMD_GET_SUPPORTED_POWER_STATES 0x2a
/* Get power state; returns info for the encoder and monitor, relying on
   the last SetTargetInput and SetTargetOutput calls */
522#define SDVO_CMD_GET_POWER_STATE 0x2b
523#define SDVO_CMD_GET_ENCODER_POWER_STATE 0x2b
524#define SDVO_CMD_SET_ENCODER_POWER_STATE 0x2c
525# define SDVO_ENCODER_STATE_ON (1 << 0)
526# define SDVO_ENCODER_STATE_STANDBY (1 << 1)
527# define SDVO_ENCODER_STATE_SUSPEND (1 << 2)
528# define SDVO_ENCODER_STATE_OFF (1 << 3)
529# define SDVO_MONITOR_STATE_ON (1 << 4)
530# define SDVO_MONITOR_STATE_STANDBY (1 << 5)
531# define SDVO_MONITOR_STATE_SUSPEND (1 << 6)
532# define SDVO_MONITOR_STATE_OFF (1 << 7)
533
534#define SDVO_CMD_GET_MAX_PANEL_POWER_SEQUENCING 0x2d
535#define SDVO_CMD_GET_PANEL_POWER_SEQUENCING 0x2e
536#define SDVO_CMD_SET_PANEL_POWER_SEQUENCING 0x2f
537/**
538 * The panel power sequencing parameters are in units of milliseconds.
539 * The high fields are bits 8:9 of the 10-bit values.
540 */
541struct psb_sdvo_panel_power_sequencing {
542 u8 t0;
543 u8 t1;
544 u8 t2;
545 u8 t3;
546 u8 t4;
547
548 unsigned int t0_high:2;
549 unsigned int t1_high:2;
550 unsigned int t2_high:2;
551 unsigned int t3_high:2;
552
553 unsigned int t4_high:2;
554 unsigned int pad:6;
555} __attribute__((packed));
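
/*
 * Example: each timing value is 10 bits wide, so a consumer reconstructs
 * it as, e.g., t0_ms = t0 | (t0_high << 8);
 */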
556
557#define SDVO_CMD_GET_MAX_BACKLIGHT_LEVEL 0x30
558struct sdvo_max_backlight_reply {
559 u8 max_value;
560 u8 default_value;
561} __attribute__((packed));
562
563#define SDVO_CMD_GET_BACKLIGHT_LEVEL 0x31
564#define SDVO_CMD_SET_BACKLIGHT_LEVEL 0x32
565
566#define SDVO_CMD_GET_AMBIENT_LIGHT 0x33
567struct sdvo_get_ambient_light_reply {
568 u16 trip_low;
569 u16 trip_high;
570 u16 value;
571} __attribute__((packed));
572#define SDVO_CMD_SET_AMBIENT_LIGHT 0x34
573struct sdvo_set_ambient_light_reply {
574 u16 trip_low;
575 u16 trip_high;
576 unsigned int enable:1;
577 unsigned int pad:7;
578} __attribute__((packed));
579
580/* Set display power state */
581#define SDVO_CMD_SET_DISPLAY_POWER_STATE 0x7d
582# define SDVO_DISPLAY_STATE_ON (1 << 0)
583# define SDVO_DISPLAY_STATE_STANDBY (1 << 1)
584# define SDVO_DISPLAY_STATE_SUSPEND (1 << 2)
585# define SDVO_DISPLAY_STATE_OFF (1 << 3)
586
587#define SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS 0x84
588struct psb_intel_sdvo_enhancements_reply {
589 unsigned int flicker_filter:1;
590 unsigned int flicker_filter_adaptive:1;
591 unsigned int flicker_filter_2d:1;
592 unsigned int saturation:1;
593 unsigned int hue:1;
594 unsigned int brightness:1;
595 unsigned int contrast:1;
596 unsigned int overscan_h:1;
597
598 unsigned int overscan_v:1;
599 unsigned int hpos:1;
600 unsigned int vpos:1;
601 unsigned int sharpness:1;
602 unsigned int dot_crawl:1;
603 unsigned int dither:1;
604 unsigned int tv_chroma_filter:1;
605 unsigned int tv_luma_filter:1;
606} __attribute__((packed));
607
608/* Picture enhancement limits below are dependent on the current TV format,
609 * and thus need to be queried and set after it.
610 */
611#define SDVO_CMD_GET_MAX_FLICKER_FILTER 0x4d
612#define SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE 0x7b
613#define SDVO_CMD_GET_MAX_FLICKER_FILTER_2D 0x52
614#define SDVO_CMD_GET_MAX_SATURATION 0x55
615#define SDVO_CMD_GET_MAX_HUE 0x58
616#define SDVO_CMD_GET_MAX_BRIGHTNESS 0x5b
617#define SDVO_CMD_GET_MAX_CONTRAST 0x5e
618#define SDVO_CMD_GET_MAX_OVERSCAN_H 0x61
619#define SDVO_CMD_GET_MAX_OVERSCAN_V 0x64
620#define SDVO_CMD_GET_MAX_HPOS 0x67
621#define SDVO_CMD_GET_MAX_VPOS 0x6a
622#define SDVO_CMD_GET_MAX_SHARPNESS 0x6d
623#define SDVO_CMD_GET_MAX_TV_CHROMA_FILTER 0x74
624#define SDVO_CMD_GET_MAX_TV_LUMA_FILTER 0x77
625struct psb_intel_sdvo_enhancement_limits_reply {
626 u16 max_value;
627 u16 default_value;
628} __attribute__((packed));
629
630#define SDVO_CMD_GET_LVDS_PANEL_INFORMATION 0x7f
631#define SDVO_CMD_SET_LVDS_PANEL_INFORMATION 0x80
632# define SDVO_LVDS_COLOR_DEPTH_18 (0 << 0)
633# define SDVO_LVDS_COLOR_DEPTH_24 (1 << 0)
634# define SDVO_LVDS_CONNECTOR_SPWG (0 << 2)
635# define SDVO_LVDS_CONNECTOR_OPENLDI (1 << 2)
636# define SDVO_LVDS_SINGLE_CHANNEL (0 << 4)
637# define SDVO_LVDS_DUAL_CHANNEL (1 << 4)
638
639#define SDVO_CMD_GET_FLICKER_FILTER 0x4e
640#define SDVO_CMD_SET_FLICKER_FILTER 0x4f
641#define SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE 0x50
642#define SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE 0x51
643#define SDVO_CMD_GET_FLICKER_FILTER_2D 0x53
644#define SDVO_CMD_SET_FLICKER_FILTER_2D 0x54
645#define SDVO_CMD_GET_SATURATION 0x56
646#define SDVO_CMD_SET_SATURATION 0x57
647#define SDVO_CMD_GET_HUE 0x59
648#define SDVO_CMD_SET_HUE 0x5a
649#define SDVO_CMD_GET_BRIGHTNESS 0x5c
650#define SDVO_CMD_SET_BRIGHTNESS 0x5d
651#define SDVO_CMD_GET_CONTRAST 0x5f
652#define SDVO_CMD_SET_CONTRAST 0x60
653#define SDVO_CMD_GET_OVERSCAN_H 0x62
654#define SDVO_CMD_SET_OVERSCAN_H 0x63
655#define SDVO_CMD_GET_OVERSCAN_V 0x65
656#define SDVO_CMD_SET_OVERSCAN_V 0x66
657#define SDVO_CMD_GET_HPOS 0x68
658#define SDVO_CMD_SET_HPOS 0x69
659#define SDVO_CMD_GET_VPOS 0x6b
660#define SDVO_CMD_SET_VPOS 0x6c
661#define SDVO_CMD_GET_SHARPNESS 0x6e
662#define SDVO_CMD_SET_SHARPNESS 0x6f
663#define SDVO_CMD_GET_TV_CHROMA_FILTER 0x75
664#define SDVO_CMD_SET_TV_CHROMA_FILTER 0x76
665#define SDVO_CMD_GET_TV_LUMA_FILTER 0x78
666#define SDVO_CMD_SET_TV_LUMA_FILTER 0x79
667struct psb_intel_sdvo_enhancements_arg {
668 u16 value;
} __attribute__((packed));
670
671#define SDVO_CMD_GET_DOT_CRAWL 0x70
672#define SDVO_CMD_SET_DOT_CRAWL 0x71
673# define SDVO_DOT_CRAWL_ON (1 << 0)
674# define SDVO_DOT_CRAWL_DEFAULT_ON (1 << 1)
675
676#define SDVO_CMD_GET_DITHER 0x72
677#define SDVO_CMD_SET_DITHER 0x73
678# define SDVO_DITHER_ON (1 << 0)
679# define SDVO_DITHER_DEFAULT_ON (1 << 1)
680
681#define SDVO_CMD_SET_CONTROL_BUS_SWITCH 0x7a
682# define SDVO_CONTROL_BUS_PROM (1 << 0)
683# define SDVO_CONTROL_BUS_DDC1 (1 << 1)
684# define SDVO_CONTROL_BUS_DDC2 (1 << 2)
685# define SDVO_CONTROL_BUS_DDC3 (1 << 3)
686
687/* HDMI op codes */
688#define SDVO_CMD_GET_SUPP_ENCODE 0x9d
689#define SDVO_CMD_GET_ENCODE 0x9e
690#define SDVO_CMD_SET_ENCODE 0x9f
691 #define SDVO_ENCODE_DVI 0x0
692 #define SDVO_ENCODE_HDMI 0x1
693#define SDVO_CMD_SET_PIXEL_REPLI 0x8b
694#define SDVO_CMD_GET_PIXEL_REPLI 0x8c
695#define SDVO_CMD_GET_COLORIMETRY_CAP 0x8d
696#define SDVO_CMD_SET_COLORIMETRY 0x8e
697 #define SDVO_COLORIMETRY_RGB256 0x0
698 #define SDVO_COLORIMETRY_RGB220 0x1
699 #define SDVO_COLORIMETRY_YCrCb422 0x3
700 #define SDVO_COLORIMETRY_YCrCb444 0x4
701#define SDVO_CMD_GET_COLORIMETRY 0x8f
702#define SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER 0x90
703#define SDVO_CMD_SET_AUDIO_STAT 0x91
704#define SDVO_CMD_GET_AUDIO_STAT 0x92
705#define SDVO_CMD_SET_HBUF_INDEX 0x93
706#define SDVO_CMD_GET_HBUF_INDEX 0x94
707#define SDVO_CMD_GET_HBUF_INFO 0x95
708#define SDVO_CMD_SET_HBUF_AV_SPLIT 0x96
709#define SDVO_CMD_GET_HBUF_AV_SPLIT 0x97
710#define SDVO_CMD_SET_HBUF_DATA 0x98
711#define SDVO_CMD_GET_HBUF_DATA 0x99
712#define SDVO_CMD_SET_HBUF_TXRATE 0x9a
713#define SDVO_CMD_GET_HBUF_TXRATE 0x9b
714 #define SDVO_HBUF_TX_DISABLED (0 << 6)
715 #define SDVO_HBUF_TX_ONCE (2 << 6)
716 #define SDVO_HBUF_TX_VSYNC (3 << 6)
717#define SDVO_CMD_GET_AUDIO_TX_INFO 0x9c
718#define SDVO_NEED_TO_STALL (1 << 7)
719
720struct psb_intel_sdvo_encode {
721 u8 dvi_rev;
722 u8 hdmi_rev;
723} __attribute__ ((packed));
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
new file mode 100644
index 00000000000..7be802baceb
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -0,0 +1,564 @@
1/**************************************************************************
2 * Copyright (c) 2007, Intel Corporation.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
19 * develop this driver.
20 *
21 **************************************************************************/
24
25#include <drm/drmP.h>
26#include "psb_drv.h"
27#include "psb_reg.h"
28#include "psb_intel_reg.h"
29#include "power.h"
30
31/*
32 * inline functions
33 */
34
35static inline u32
36psb_pipestat(int pipe)
37{
38 if (pipe == 0)
39 return PIPEASTAT;
40 if (pipe == 1)
41 return PIPEBSTAT;
42 if (pipe == 2)
43 return PIPECSTAT;
44 BUG();
45}
46
47static inline u32
48mid_pipe_event(int pipe)
49{
50 if (pipe == 0)
51 return _PSB_PIPEA_EVENT_FLAG;
52 if (pipe == 1)
53 return _MDFLD_PIPEB_EVENT_FLAG;
54 if (pipe == 2)
55 return _MDFLD_PIPEC_EVENT_FLAG;
56 BUG();
57}
58
59static inline u32
60mid_pipe_vsync(int pipe)
61{
62 if (pipe == 0)
63 return _PSB_VSYNC_PIPEA_FLAG;
64 if (pipe == 1)
65 return _PSB_VSYNC_PIPEB_FLAG;
66 if (pipe == 2)
67 return _MDFLD_PIPEC_VBLANK_FLAG;
68 BUG();
69}
70
71static inline u32
72mid_pipeconf(int pipe)
73{
74 if (pipe == 0)
75 return PIPEACONF;
76 if (pipe == 1)
77 return PIPEBCONF;
78 if (pipe == 2)
79 return PIPECCONF;
80 BUG();
81}
82
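/*
 * The PIPExSTAT registers pair each interrupt enable bit (upper half)
 * with a status bit sixteen positions lower; writing a status bit back
 * clears it. Hence "mask | (mask >> 16)" below both enables the
 * interrupt and acks any event already pending.
 */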
83void
84psb_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
85{
86 if ((dev_priv->pipestat[pipe] & mask) != mask) {
87 u32 reg = psb_pipestat(pipe);
88 dev_priv->pipestat[pipe] |= mask;
89 /* Enable the interrupt, clear any pending status */
90 if (gma_power_begin(dev_priv->dev, false)) {
91 u32 writeVal = PSB_RVDC32(reg);
92 writeVal |= (mask | (mask >> 16));
93 PSB_WVDC32(writeVal, reg);
94 (void) PSB_RVDC32(reg);
95 gma_power_end(dev_priv->dev);
96 }
97 }
98}
99
100void
101psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
102{
103 if ((dev_priv->pipestat[pipe] & mask) != 0) {
104 u32 reg = psb_pipestat(pipe);
105 dev_priv->pipestat[pipe] &= ~mask;
106 if (gma_power_begin(dev_priv->dev, false)) {
107 u32 writeVal = PSB_RVDC32(reg);
108 writeVal &= ~mask;
109 PSB_WVDC32(writeVal, reg);
110 (void) PSB_RVDC32(reg);
111 gma_power_end(dev_priv->dev);
112 }
113 }
114}
115
116void mid_enable_pipe_event(struct drm_psb_private *dev_priv, int pipe)
117{
118 if (gma_power_begin(dev_priv->dev, false)) {
119 u32 pipe_event = mid_pipe_event(pipe);
120 dev_priv->vdc_irq_mask |= pipe_event;
121 PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
122 PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
123 gma_power_end(dev_priv->dev);
124 }
125}
126
127void mid_disable_pipe_event(struct drm_psb_private *dev_priv, int pipe)
128{
129 if (dev_priv->pipestat[pipe] == 0) {
130 if (gma_power_begin(dev_priv->dev, false)) {
131 u32 pipe_event = mid_pipe_event(pipe);
132 dev_priv->vdc_irq_mask &= ~pipe_event;
133 PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
134 PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
135 gma_power_end(dev_priv->dev);
136 }
137 }
138}
139
/*
 * Display controller interrupt handler for pipe events.
 */
144static void mid_pipe_event_handler(struct drm_device *dev, int pipe)
145{
146 struct drm_psb_private *dev_priv =
147 (struct drm_psb_private *) dev->dev_private;
148
149 uint32_t pipe_stat_val = 0;
150 uint32_t pipe_stat_reg = psb_pipestat(pipe);
151 uint32_t pipe_enable = dev_priv->pipestat[pipe];
152 uint32_t pipe_status = dev_priv->pipestat[pipe] >> 16;
153 uint32_t pipe_clear;
154 uint32_t i = 0;
155
156 spin_lock(&dev_priv->irqmask_lock);
157
158 pipe_stat_val = PSB_RVDC32(pipe_stat_reg);
159 pipe_stat_val &= pipe_enable | pipe_status;
160 pipe_stat_val &= pipe_stat_val >> 16;
161
162 spin_unlock(&dev_priv->irqmask_lock);
163
164 /* Clear the 2nd level interrupt status bits
165 * Sometimes the bits are very sticky so we repeat until they unstick */
166 for (i = 0; i < 0xffff; i++) {
167 PSB_WVDC32(PSB_RVDC32(pipe_stat_reg), pipe_stat_reg);
168 pipe_clear = PSB_RVDC32(pipe_stat_reg) & pipe_status;
169
170 if (pipe_clear == 0)
171 break;
172 }
173
174 if (pipe_clear)
175 dev_err(dev->dev,
176 "%s, can't clear status bits for pipe %d, its value = 0x%x.\n",
177 __func__, pipe, PSB_RVDC32(pipe_stat_reg));
178
179 if (pipe_stat_val & PIPE_VBLANK_STATUS)
180 drm_handle_vblank(dev, pipe);
181
182 if (pipe_stat_val & PIPE_TE_STATUS)
183 drm_handle_vblank(dev, pipe);
184}
185
186/*
187 * Display controller interrupt handler.
188 */
189static void psb_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat)
190{
191 if (vdc_stat & _PSB_VSYNC_PIPEA_FLAG)
192 mid_pipe_event_handler(dev, 0);
193
194 if (vdc_stat & _PSB_VSYNC_PIPEB_FLAG)
195 mid_pipe_event_handler(dev, 1);
196}
197
198irqreturn_t psb_irq_handler(DRM_IRQ_ARGS)
199{
200 struct drm_device *dev = (struct drm_device *) arg;
201 struct drm_psb_private *dev_priv =
202 (struct drm_psb_private *) dev->dev_private;
203
204 uint32_t vdc_stat, dsp_int = 0, sgx_int = 0;
205 int handled = 0;
206
207 spin_lock(&dev_priv->irqmask_lock);
208
209 vdc_stat = PSB_RVDC32(PSB_INT_IDENTITY_R);
210
211 if (vdc_stat & _PSB_PIPE_EVENT_FLAG)
212 dsp_int = 1;
213
214 /* FIXME: Handle Medfield
215 if (vdc_stat & _MDFLD_DISP_ALL_IRQ_FLAG)
216 dsp_int = 1;
217 */
218
219 if (vdc_stat & _PSB_IRQ_SGX_FLAG)
220 sgx_int = 1;
221
222 vdc_stat &= dev_priv->vdc_irq_mask;
223 spin_unlock(&dev_priv->irqmask_lock);
224
225 if (dsp_int && gma_power_is_on(dev)) {
226 psb_vdc_interrupt(dev, vdc_stat);
227 handled = 1;
228 }
229
230 if (sgx_int) {
231 /* Not expected - we have it masked, shut it up */
232 u32 s, s2;
233 s = PSB_RSGX32(PSB_CR_EVENT_STATUS);
234 s2 = PSB_RSGX32(PSB_CR_EVENT_STATUS2);
235 PSB_WSGX32(s, PSB_CR_EVENT_HOST_CLEAR);
236 PSB_WSGX32(s2, PSB_CR_EVENT_HOST_CLEAR2);
		/* If s & _PSB_CE_TWOD_COMPLETE then 2D rendering is done,
		   but we may as well poll even once we handle that. */
239 handled = 1;
240 }
241
242 PSB_WVDC32(vdc_stat, PSB_INT_IDENTITY_R);
243 (void) PSB_RVDC32(PSB_INT_IDENTITY_R);
244 DRM_READMEMORYBARRIER();
245
246 if (!handled)
247 return IRQ_NONE;
248
249 return IRQ_HANDLED;
250}
251
252void psb_irq_preinstall(struct drm_device *dev)
253{
254 struct drm_psb_private *dev_priv =
255 (struct drm_psb_private *) dev->dev_private;
256 unsigned long irqflags;
257
258 spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
259
260 if (gma_power_is_on(dev))
261 PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
262 if (dev->vblank_enabled[0])
263 dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
264 if (dev->vblank_enabled[1])
265 dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;
266
267 /* FIXME: Handle Medfield irq mask
268 if (dev->vblank_enabled[1])
269 dev_priv->vdc_irq_mask |= _MDFLD_PIPEB_EVENT_FLAG;
270 if (dev->vblank_enabled[2])
271 dev_priv->vdc_irq_mask |= _MDFLD_PIPEC_EVENT_FLAG;
272 */
273
274 /* This register is safe even if display island is off */
275 PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
276 spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
277}
278
279int psb_irq_postinstall(struct drm_device *dev)
280{
281 struct drm_psb_private *dev_priv =
282 (struct drm_psb_private *) dev->dev_private;
283 unsigned long irqflags;
284
285 spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
286
287 /* This register is safe even if display island is off */
288 PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
289 PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
290
291 if (dev->vblank_enabled[0])
292 psb_enable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
293 else
294 psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
295
296 if (dev->vblank_enabled[1])
297 psb_enable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
298 else
299 psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
300
301 if (dev->vblank_enabled[2])
302 psb_enable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
303 else
304 psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
305
306 spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
307 return 0;
308}
309
310void psb_irq_uninstall(struct drm_device *dev)
311{
312 struct drm_psb_private *dev_priv =
313 (struct drm_psb_private *) dev->dev_private;
314 unsigned long irqflags;
315
316 spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
317
318 PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
319
320 if (dev->vblank_enabled[0])
321 psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
322
323 if (dev->vblank_enabled[1])
324 psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
325
326 if (dev->vblank_enabled[2])
327 psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
328
329 dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG |
330 _PSB_IRQ_MSVDX_FLAG |
331 _LNC_IRQ_TOPAZ_FLAG;
332
333 /* These two registers are safe even if display island is off */
334 PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
335 PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
336
337 wmb();
338
339 /* This register is safe even if display island is off */
340 PSB_WVDC32(PSB_RVDC32(PSB_INT_IDENTITY_R), PSB_INT_IDENTITY_R);
341 spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
342}
343
344void psb_irq_turn_on_dpst(struct drm_device *dev)
345{
346 struct drm_psb_private *dev_priv =
347 (struct drm_psb_private *) dev->dev_private;
348 u32 hist_reg;
349 u32 pwm_reg;
350
351 if (gma_power_begin(dev, false)) {
352 PSB_WVDC32(1 << 31, HISTOGRAM_LOGIC_CONTROL);
353 hist_reg = PSB_RVDC32(HISTOGRAM_LOGIC_CONTROL);
354 PSB_WVDC32(1 << 31, HISTOGRAM_INT_CONTROL);
355 hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
356
357 PSB_WVDC32(0x80010100, PWM_CONTROL_LOGIC);
358 pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
359 PSB_WVDC32(pwm_reg | PWM_PHASEIN_ENABLE
360 | PWM_PHASEIN_INT_ENABLE,
361 PWM_CONTROL_LOGIC);
362 pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
363
364 psb_enable_pipestat(dev_priv, 0, PIPE_DPST_EVENT_ENABLE);
365
366 hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
367 PSB_WVDC32(hist_reg | HISTOGRAM_INT_CTRL_CLEAR,
368 HISTOGRAM_INT_CONTROL);
369 pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
370 PSB_WVDC32(pwm_reg | 0x80010100 | PWM_PHASEIN_ENABLE,
371 PWM_CONTROL_LOGIC);
372
373 gma_power_end(dev);
374 }
375}
376
377int psb_irq_enable_dpst(struct drm_device *dev)
378{
379 struct drm_psb_private *dev_priv =
380 (struct drm_psb_private *) dev->dev_private;
381 unsigned long irqflags;
382
383 spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
384
385 /* enable DPST */
386 mid_enable_pipe_event(dev_priv, 0);
387 psb_irq_turn_on_dpst(dev);
388
389 spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
390 return 0;
391}
392
393void psb_irq_turn_off_dpst(struct drm_device *dev)
394{
395 struct drm_psb_private *dev_priv =
396 (struct drm_psb_private *) dev->dev_private;
397 u32 hist_reg;
398 u32 pwm_reg;
399
400 if (gma_power_begin(dev, false)) {
401 PSB_WVDC32(0x00000000, HISTOGRAM_INT_CONTROL);
402 hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
403
404 psb_disable_pipestat(dev_priv, 0, PIPE_DPST_EVENT_ENABLE);
405
406 pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
		/* Bitwise-clear the phase-in interrupt enable ('!' was a bug) */
		PSB_WVDC32(pwm_reg & ~PWM_PHASEIN_INT_ENABLE,
			   PWM_CONTROL_LOGIC);
409 pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
410
411 gma_power_end(dev);
412 }
413}
414
415int psb_irq_disable_dpst(struct drm_device *dev)
416{
417 struct drm_psb_private *dev_priv =
418 (struct drm_psb_private *) dev->dev_private;
419 unsigned long irqflags;
420
421 spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
422
423 mid_disable_pipe_event(dev_priv, 0);
424 psb_irq_turn_off_dpst(dev);
425
426 spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
427
428 return 0;
429}
430
431#ifdef PSB_FIXME
432static int psb_vblank_do_wait(struct drm_device *dev,
433 unsigned int *sequence, atomic_t *counter)
434{
435 unsigned int cur_vblank;
436 int ret = 0;
437 DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
438 (((cur_vblank = atomic_read(counter))
439 - *sequence) <= (1 << 23)));
440 *sequence = cur_vblank;
441
442 return ret;
443}
444#endif
445
/*
 * Enable the vblank interrupt for the given pipe
 */
449int psb_enable_vblank(struct drm_device *dev, int pipe)
450{
451 struct drm_psb_private *dev_priv = dev->dev_private;
452 unsigned long irqflags;
453 uint32_t reg_val = 0;
454 uint32_t pipeconf_reg = mid_pipeconf(pipe);
455
456 if (gma_power_begin(dev, false)) {
457 reg_val = REG_READ(pipeconf_reg);
458 gma_power_end(dev);
459 }
460
461 if (!(reg_val & PIPEACONF_ENABLE))
462 return -EINVAL;
463
464 spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
465
466 if (pipe == 0)
467 dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
468 else if (pipe == 1)
469 dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;
470
471 PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
472 PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
473 psb_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE);
474
475 spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
476
477 return 0;
478}
479
/*
 * Disable the vblank interrupt for the given pipe
 */
483void psb_disable_vblank(struct drm_device *dev, int pipe)
484{
485 struct drm_psb_private *dev_priv = dev->dev_private;
486 unsigned long irqflags;
487
488 spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
489
490 if (pipe == 0)
491 dev_priv->vdc_irq_mask &= ~_PSB_VSYNC_PIPEA_FLAG;
492 else if (pipe == 1)
493 dev_priv->vdc_irq_mask &= ~_PSB_VSYNC_PIPEB_FLAG;
494
495 PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
496 PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
497 psb_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE);
498
499 spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
500}
501
502/* Called from drm generic code, passed a 'crtc', which
503 * we use as a pipe index
504 */
505u32 psb_get_vblank_counter(struct drm_device *dev, int pipe)
506{
507 uint32_t high_frame = PIPEAFRAMEHIGH;
508 uint32_t low_frame = PIPEAFRAMEPIXEL;
509 uint32_t pipeconf_reg = PIPEACONF;
510 uint32_t reg_val = 0;
511 uint32_t high1 = 0, high2 = 0, low = 0, count = 0;
512
513 switch (pipe) {
514 case 0:
515 break;
516 case 1:
517 high_frame = PIPEBFRAMEHIGH;
518 low_frame = PIPEBFRAMEPIXEL;
519 pipeconf_reg = PIPEBCONF;
520 break;
521 case 2:
522 high_frame = PIPECFRAMEHIGH;
523 low_frame = PIPECFRAMEPIXEL;
524 pipeconf_reg = PIPECCONF;
525 break;
526 default:
527 dev_err(dev->dev, "%s, invalid pipe.\n", __func__);
528 return 0;
529 }
530
531 if (!gma_power_begin(dev, false))
532 return 0;
533
534 reg_val = REG_READ(pipeconf_reg);
535
536 if (!(reg_val & PIPEACONF_ENABLE)) {
537 dev_err(dev->dev, "trying to get vblank count for disabled pipe %d\n",
538 pipe);
539 goto psb_get_vblank_counter_exit;
540 }
541
542 /*
543 * High & low register fields aren't synchronized, so make sure
544 * we get a low value that's stable across two reads of the high
545 * register.
546 */
547 do {
548 high1 = ((REG_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
549 PIPE_FRAME_HIGH_SHIFT);
550 low = ((REG_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
551 PIPE_FRAME_LOW_SHIFT);
552 high2 = ((REG_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
553 PIPE_FRAME_HIGH_SHIFT);
554 } while (high1 != high2);
555
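	/* Combine: the low register contributes the 8 bits below the
	 * high field, giving the full hardware frame count. */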
556 count = (high1 << 8) | low;
557
558psb_get_vblank_counter_exit:
559
560 gma_power_end(dev);
561
562 return count;
563}
564
diff --git a/drivers/gpu/drm/gma500/psb_irq.h b/drivers/gpu/drm/gma500/psb_irq.h
new file mode 100644
index 00000000000..216fda38b57
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_irq.h
@@ -0,0 +1,45 @@
1/**************************************************************************
2 * Copyright (c) 2009-2011, Intel Corporation.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 * Authors:
19 * Benjamin Defnet <benjamin.r.defnet@intel.com>
20 * Rajesh Poornachandran <rajesh.poornachandran@intel.com>
21 *
22 **************************************************************************/
23
24#ifndef _SYSIRQ_H_
25#define _SYSIRQ_H_
26
27#include <drm/drmP.h>
28
29bool sysirq_init(struct drm_device *dev);
30void sysirq_uninit(struct drm_device *dev);
31
32void psb_irq_preinstall(struct drm_device *dev);
33int psb_irq_postinstall(struct drm_device *dev);
34void psb_irq_uninstall(struct drm_device *dev);
35irqreturn_t psb_irq_handler(DRM_IRQ_ARGS);
36
37int psb_irq_enable_dpst(struct drm_device *dev);
38int psb_irq_disable_dpst(struct drm_device *dev);
39void psb_irq_turn_on_dpst(struct drm_device *dev);
40void psb_irq_turn_off_dpst(struct drm_device *dev);
41int psb_enable_vblank(struct drm_device *dev, int pipe);
42void psb_disable_vblank(struct drm_device *dev, int pipe);
43u32 psb_get_vblank_counter(struct drm_device *dev, int pipe);
44
45#endif /* _SYSIRQ_H_ */
diff --git a/drivers/gpu/drm/gma500/psb_lid.c b/drivers/gpu/drm/gma500/psb_lid.c
new file mode 100644
index 00000000000..b867aabe6bf
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_lid.c
@@ -0,0 +1,88 @@
1/**************************************************************************
2 * Copyright (c) 2007, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
18 **************************************************************************/
19
20#include <drm/drmP.h>
21#include "psb_drv.h"
22#include "psb_reg.h"
23#include "psb_intel_reg.h"
24#include <linux/spinlock.h>
25
26static void psb_lid_timer_func(unsigned long data)
27{
28 struct drm_psb_private *dev_priv = (struct drm_psb_private *)data;
29 struct drm_device *dev = (struct drm_device *)dev_priv->dev;
30 struct timer_list *lid_timer = &dev_priv->lid_timer;
31 unsigned long irq_flags;
32 u32 *lid_state = dev_priv->lid_state;
33 u32 pp_status;
34
35 if (readl(lid_state) == dev_priv->lid_last_state)
36 goto lid_timer_schedule;
37
38 if (readl(lid_state) & 0x01) {
39 /* lid state is open */
40 REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) | POWER_TARGET_ON);
41 do {
42 pp_status = REG_READ(PP_STATUS);
43 } while ((pp_status & PP_ON) == 0);
44
45 /* FIXME: restore the backlight level in use before the lid closed */
46 psb_intel_lvds_set_brightness(dev, 100);
47 } else {
48 psb_intel_lvds_set_brightness(dev, 0);
49 /* lid state is closed */
50 REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) & ~POWER_TARGET_ON);
51 do {
52 pp_status = REG_READ(PP_STATUS);
53 } while (pp_status & PP_ON);
54 }
55 dev_priv->lid_last_state = readl(lid_state);
56
57lid_timer_schedule:
58 spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);
59 if (!timer_pending(lid_timer)) {
60 lid_timer->expires = jiffies + PSB_LID_DELAY;
61 add_timer(lid_timer);
62 }
63 spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags);
64}
65
66void psb_lid_timer_init(struct drm_psb_private *dev_priv)
67{
68 struct timer_list *lid_timer = &dev_priv->lid_timer;
69 unsigned long irq_flags;
70
71 spin_lock_init(&dev_priv->lid_lock);
72 spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);
73
74 init_timer(lid_timer);
75
76 lid_timer->data = (unsigned long)dev_priv;
77 lid_timer->function = psb_lid_timer_func;
78 lid_timer->expires = jiffies + PSB_LID_DELAY;
79
80 add_timer(lid_timer);
81 spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags);
82}
83
84void psb_lid_timer_takedown(struct drm_psb_private *dev_priv)
85{
86 del_timer_sync(&dev_priv->lid_timer);
87}
88
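psb_lid_timer_func() is a self-rearming poll: each run does its work and
re-queues the timer, and psb_lid_timer_takedown() relies on del_timer_sync()
to both deactivate the timer and wait out a running handler. A reduced
sketch of the same pattern with the timer API of this era (names and the
poll interval are hypothetical):

#include <linux/timer.h>
#include <linux/jiffies.h>

#define POLL_DELAY (HZ / 10)	/* hypothetical poll interval */

static struct timer_list poll_timer;

static void poll_func(unsigned long data)
{
	/* ... periodic work ... */

	/* re-arm; mod_timer() is safe from within the handler */
	mod_timer(&poll_timer, jiffies + POLL_DELAY);
}

static void poll_start(void)
{
	init_timer(&poll_timer);
	poll_timer.function = poll_func;
	poll_timer.data = 0;
	poll_timer.expires = jiffies + POLL_DELAY;
	add_timer(&poll_timer);
}

static void poll_stop(void)
{
	/* del_timer_sync() copes with a self-rearming handler: it keeps
	 * deactivating until the timer is off and no handler is running */
	del_timer_sync(&poll_timer);
}
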
diff --git a/drivers/gpu/drm/gma500/psb_reg.h b/drivers/gpu/drm/gma500/psb_reg.h
new file mode 100644
index 00000000000..b81c7c1e9c2
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_reg.h
@@ -0,0 +1,582 @@
1/**************************************************************************
2 *
3 * Copyright (c) (2005-2007) Imagination Technologies Limited.
4 * Copyright (c) 2007, Intel Corporation.
5 * All Rights Reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 **************************************************************************/
21
22#ifndef _PSB_REG_H_
23#define _PSB_REG_H_
24
25#define PSB_CR_CLKGATECTL 0x0000
26#define _PSB_C_CLKGATECTL_AUTO_MAN_REG (1 << 24)
27#define _PSB_C_CLKGATECTL_USE_CLKG_SHIFT (20)
28#define _PSB_C_CLKGATECTL_USE_CLKG_MASK (0x3 << 20)
29#define _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT (16)
30#define _PSB_C_CLKGATECTL_DPM_CLKG_MASK (0x3 << 16)
31#define _PSB_C_CLKGATECTL_TA_CLKG_SHIFT (12)
32#define _PSB_C_CLKGATECTL_TA_CLKG_MASK (0x3 << 12)
33#define _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT (8)
34#define _PSB_C_CLKGATECTL_TSP_CLKG_MASK (0x3 << 8)
35#define _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT (4)
36#define _PSB_C_CLKGATECTL_ISP_CLKG_MASK (0x3 << 4)
37#define _PSB_C_CLKGATECTL_2D_CLKG_SHIFT (0)
38#define _PSB_C_CLKGATECTL_2D_CLKG_MASK (0x3 << 0)
39#define _PSB_C_CLKGATECTL_CLKG_ENABLED (0)
40#define _PSB_C_CLKGATECTL_CLKG_DISABLED (1)
41#define _PSB_C_CLKGATECTL_CLKG_AUTO (2)
42
43#define PSB_CR_CORE_ID 0x0010
44#define _PSB_CC_ID_ID_SHIFT (16)
45#define _PSB_CC_ID_ID_MASK (0xFFFF << 16)
46#define _PSB_CC_ID_CONFIG_SHIFT (0)
47#define _PSB_CC_ID_CONFIG_MASK (0xFFFF << 0)
48
49#define PSB_CR_CORE_REVISION 0x0014
50#define _PSB_CC_REVISION_DESIGNER_SHIFT (24)
51#define _PSB_CC_REVISION_DESIGNER_MASK (0xFF << 24)
52#define _PSB_CC_REVISION_MAJOR_SHIFT (16)
53#define _PSB_CC_REVISION_MAJOR_MASK (0xFF << 16)
54#define _PSB_CC_REVISION_MINOR_SHIFT (8)
55#define _PSB_CC_REVISION_MINOR_MASK (0xFF << 8)
56#define _PSB_CC_REVISION_MAINTENANCE_SHIFT (0)
57#define _PSB_CC_REVISION_MAINTENANCE_MASK (0xFF << 0)
58
59#define PSB_CR_DESIGNER_REV_FIELD1 0x0018
60
61#define PSB_CR_SOFT_RESET 0x0080
62#define _PSB_CS_RESET_TSP_RESET (1 << 6)
63#define _PSB_CS_RESET_ISP_RESET (1 << 5)
64#define _PSB_CS_RESET_USE_RESET (1 << 4)
65#define _PSB_CS_RESET_TA_RESET (1 << 3)
66#define _PSB_CS_RESET_DPM_RESET (1 << 2)
67#define _PSB_CS_RESET_TWOD_RESET (1 << 1)
68#define _PSB_CS_RESET_BIF_RESET (1 << 0)
69
70#define PSB_CR_DESIGNER_REV_FIELD2 0x001C
71
72#define PSB_CR_EVENT_HOST_ENABLE2 0x0110
73
74#define PSB_CR_EVENT_STATUS2 0x0118
75
76#define PSB_CR_EVENT_HOST_CLEAR2 0x0114
77#define _PSB_CE2_BIF_REQUESTER_FAULT (1 << 4)
78
79#define PSB_CR_EVENT_STATUS 0x012C
80
81#define PSB_CR_EVENT_HOST_ENABLE 0x0130
82
83#define PSB_CR_EVENT_HOST_CLEAR 0x0134
84#define _PSB_CE_MASTER_INTERRUPT (1 << 31)
85#define _PSB_CE_TA_DPM_FAULT (1 << 28)
86#define _PSB_CE_TWOD_COMPLETE (1 << 27)
87#define _PSB_CE_DPM_OUT_OF_MEMORY_ZLS (1 << 25)
88#define _PSB_CE_DPM_TA_MEM_FREE (1 << 24)
89#define _PSB_CE_PIXELBE_END_RENDER (1 << 18)
90#define _PSB_CE_SW_EVENT (1 << 14)
91#define _PSB_CE_TA_FINISHED (1 << 13)
92#define _PSB_CE_TA_TERMINATE (1 << 12)
93#define _PSB_CE_DPM_REACHED_MEM_THRESH (1 << 3)
94#define _PSB_CE_DPM_OUT_OF_MEMORY_GBL (1 << 2)
95#define _PSB_CE_DPM_OUT_OF_MEMORY_MT (1 << 1)
96#define _PSB_CE_DPM_3D_MEM_FREE (1 << 0)
97
98
99#define PSB_USE_OFFSET_MASK 0x0007FFFF
100#define PSB_USE_OFFSET_SIZE (PSB_USE_OFFSET_MASK + 1)
101#define PSB_CR_USE_CODE_BASE0 0x0A0C
102#define PSB_CR_USE_CODE_BASE1 0x0A10
103#define PSB_CR_USE_CODE_BASE2 0x0A14
104#define PSB_CR_USE_CODE_BASE3 0x0A18
105#define PSB_CR_USE_CODE_BASE4 0x0A1C
106#define PSB_CR_USE_CODE_BASE5 0x0A20
107#define PSB_CR_USE_CODE_BASE6 0x0A24
108#define PSB_CR_USE_CODE_BASE7 0x0A28
109#define PSB_CR_USE_CODE_BASE8 0x0A2C
110#define PSB_CR_USE_CODE_BASE9 0x0A30
111#define PSB_CR_USE_CODE_BASE10 0x0A34
112#define PSB_CR_USE_CODE_BASE11 0x0A38
113#define PSB_CR_USE_CODE_BASE12 0x0A3C
114#define PSB_CR_USE_CODE_BASE13 0x0A40
115#define PSB_CR_USE_CODE_BASE14 0x0A44
116#define PSB_CR_USE_CODE_BASE15 0x0A48
117#define PSB_CR_USE_CODE_BASE(_i) (0x0A0C + ((_i) << 2))
118#define _PSB_CUC_BASE_DM_SHIFT (25)
119#define _PSB_CUC_BASE_DM_MASK (0x3 << 25)
120#define _PSB_CUC_BASE_ADDR_SHIFT (0) /* 1024-bit aligned address? */
121#define _PSB_CUC_BASE_ADDR_ALIGNSHIFT (7)
122#define _PSB_CUC_BASE_ADDR_MASK (0x1FFFFFF << 0)
123#define _PSB_CUC_DM_VERTEX (0)
124#define _PSB_CUC_DM_PIXEL (1)
125#define _PSB_CUC_DM_RESERVED (2)
126#define _PSB_CUC_DM_EDM (3)
127
128#define PSB_CR_PDS_EXEC_BASE 0x0AB8
129#define _PSB_CR_PDS_EXEC_BASE_ADDR_SHIFT (20) /* 1MB aligned address */
130#define _PSB_CR_PDS_EXEC_BASE_ADDR_ALIGNSHIFT (20)
131
132#define PSB_CR_EVENT_KICKER 0x0AC4
133#define _PSB_CE_KICKER_ADDRESS_SHIFT (4) /* 128-bit aligned address */
134
135#define PSB_CR_EVENT_KICK 0x0AC8
136#define _PSB_CE_KICK_NOW (1 << 0)
137
138#define PSB_CR_BIF_DIR_LIST_BASE1 0x0C38
139
140#define PSB_CR_BIF_CTRL 0x0C00
141#define _PSB_CB_CTRL_CLEAR_FAULT (1 << 4)
142#define _PSB_CB_CTRL_INVALDC (1 << 3)
143#define _PSB_CB_CTRL_FLUSH (1 << 2)
144
145#define PSB_CR_BIF_INT_STAT 0x0C04
146
147#define PSB_CR_BIF_FAULT 0x0C08
148#define _PSB_CBI_STAT_PF_N_RW (1 << 14)
149#define _PSB_CBI_STAT_FAULT_SHIFT (0)
150#define _PSB_CBI_STAT_FAULT_MASK (0x3FFF << 0)
151#define _PSB_CBI_STAT_FAULT_CACHE (1 << 1)
152#define _PSB_CBI_STAT_FAULT_TA (1 << 2)
153#define _PSB_CBI_STAT_FAULT_VDM (1 << 3)
154#define _PSB_CBI_STAT_FAULT_2D (1 << 4)
155#define _PSB_CBI_STAT_FAULT_PBE (1 << 5)
156#define _PSB_CBI_STAT_FAULT_TSP (1 << 6)
157#define _PSB_CBI_STAT_FAULT_ISP (1 << 7)
158#define _PSB_CBI_STAT_FAULT_USSEPDS (1 << 8)
159#define _PSB_CBI_STAT_FAULT_HOST (1 << 9)
160
161#define PSB_CR_BIF_BANK0 0x0C78
162#define PSB_CR_BIF_BANK1 0x0C7C
163#define PSB_CR_BIF_DIR_LIST_BASE0 0x0C84
164#define PSB_CR_BIF_TWOD_REQ_BASE 0x0C88
165#define PSB_CR_BIF_3D_REQ_BASE 0x0CAC
166
167#define PSB_CR_2D_SOCIF 0x0E18
168#define _PSB_C2_SOCIF_FREESPACE_SHIFT (0)
169#define _PSB_C2_SOCIF_FREESPACE_MASK (0xFF << 0)
170#define _PSB_C2_SOCIF_EMPTY (0x80 << 0)
171
172#define PSB_CR_2D_BLIT_STATUS 0x0E04
173#define _PSB_C2B_STATUS_BUSY (1 << 24)
174#define _PSB_C2B_STATUS_COMPLETE_SHIFT (0)
175#define _PSB_C2B_STATUS_COMPLETE_MASK (0xFFFFFF << 0)
176
177/*
178 * 2D defs.
179 */
180
181/*
182 * 2D Slave Port Data : Block Header's Object Type
183 */
184
185#define PSB_2D_CLIP_BH (0x00000000)
186#define PSB_2D_PAT_BH (0x10000000)
187#define PSB_2D_CTRL_BH (0x20000000)
188#define PSB_2D_SRC_OFF_BH (0x30000000)
189#define PSB_2D_MASK_OFF_BH (0x40000000)
190#define PSB_2D_RESERVED1_BH (0x50000000)
191#define PSB_2D_RESERVED2_BH (0x60000000)
192#define PSB_2D_FENCE_BH (0x70000000)
193#define PSB_2D_BLIT_BH (0x80000000)
194#define PSB_2D_SRC_SURF_BH (0x90000000)
195#define PSB_2D_DST_SURF_BH (0xA0000000)
196#define PSB_2D_PAT_SURF_BH (0xB0000000)
197#define PSB_2D_SRC_PAL_BH (0xC0000000)
198#define PSB_2D_PAT_PAL_BH (0xD0000000)
199#define PSB_2D_MASK_SURF_BH (0xE0000000)
200#define PSB_2D_FLUSH_BH (0xF0000000)
201
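Each dword written to the 2D slave port opens a block whose type is carried
in the top nibble, which is why all of the *_BH values above are multiples
of 0x10000000. A decode sketch; the mask name and helper are hypothetical,
not part of this header:

#define PSB_2D_BH_TYPE_MASK 0xF0000000	/* assumption: top nibble = block type */

static inline u32 psb_2d_block_type(u32 cmd)
{
	return cmd & PSB_2D_BH_TYPE_MASK;	/* compare against PSB_2D_*_BH */
}

/* e.g.: if (psb_2d_block_type(cmd) == PSB_2D_FENCE_BH) ... */
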
202/*
203 * Clip Definition block (PSB_2D_CLIP_BH)
204 */
205#define PSB_2D_CLIPCOUNT_MAX (1)
206#define PSB_2D_CLIPCOUNT_MASK (0x00000000)
207#define PSB_2D_CLIPCOUNT_CLRMASK (0xFFFFFFFF)
208#define PSB_2D_CLIPCOUNT_SHIFT (0)
209/* clip rectangle min & max */
210#define PSB_2D_CLIP_XMAX_MASK (0x00FFF000)
211#define PSB_2D_CLIP_XMAX_CLRMASK (0xFF000FFF)
212#define PSB_2D_CLIP_XMAX_SHIFT (12)
213#define PSB_2D_CLIP_XMIN_MASK (0x00000FFF)
214#define PSB_2D_CLIP_XMIN_CLRMASK (0x00FFF000)
215#define PSB_2D_CLIP_XMIN_SHIFT (0)
216/* clip rectangle offset */
217#define PSB_2D_CLIP_YMAX_MASK (0x00FFF000)
218#define PSB_2D_CLIP_YMAX_CLRMASK (0xFF000FFF)
219#define PSB_2D_CLIP_YMAX_SHIFT (12)
220#define PSB_2D_CLIP_YMIN_MASK (0x00000FFF)
221#define PSB_2D_CLIP_YMIN_CLRMASK (0x00FFF000)
222#define PSB_2D_CLIP_YMIN_SHIFT (0)
223
224/*
225 * Pattern Control (PSB_2D_PAT_BH)
226 */
227#define PSB_2D_PAT_HEIGHT_MASK (0x0000001F)
228#define PSB_2D_PAT_HEIGHT_SHIFT (0)
229#define PSB_2D_PAT_WIDTH_MASK (0x000003E0)
230#define PSB_2D_PAT_WIDTH_SHIFT (5)
231#define PSB_2D_PAT_YSTART_MASK (0x00007C00)
232#define PSB_2D_PAT_YSTART_SHIFT (10)
233#define PSB_2D_PAT_XSTART_MASK (0x000F8000)
234#define PSB_2D_PAT_XSTART_SHIFT (15)
235
236/*
237 * 2D Control block (PSB_2D_CTRL_BH)
238 */
239/* Present Flags */
240#define PSB_2D_SRCCK_CTRL (0x00000001)
241#define PSB_2D_DSTCK_CTRL (0x00000002)
242#define PSB_2D_ALPHA_CTRL (0x00000004)
243/* Colour Key Colour (SRC/DST)*/
244#define PSB_2D_CK_COL_MASK (0xFFFFFFFF)
245#define PSB_2D_CK_COL_CLRMASK (0x00000000)
246#define PSB_2D_CK_COL_SHIFT (0)
247/* Colour Key Mask (SRC/DST)*/
248#define PSB_2D_CK_MASK_MASK (0xFFFFFFFF)
249#define PSB_2D_CK_MASK_CLRMASK (0x00000000)
250#define PSB_2D_CK_MASK_SHIFT (0)
251/* Alpha Control (Alpha/RGB)*/
252#define PSB_2D_GBLALPHA_MASK (0x000FF000)
253#define PSB_2D_GBLALPHA_CLRMASK (0xFFF00FFF)
254#define PSB_2D_GBLALPHA_SHIFT (12)
255#define PSB_2D_SRCALPHA_OP_MASK (0x00700000)
256#define PSB_2D_SRCALPHA_OP_CLRMASK (0xFF8FFFFF)
257#define PSB_2D_SRCALPHA_OP_SHIFT (20)
258#define PSB_2D_SRCALPHA_OP_ONE (0x00000000)
259#define PSB_2D_SRCALPHA_OP_SRC (0x00100000)
260#define PSB_2D_SRCALPHA_OP_DST (0x00200000)
261#define PSB_2D_SRCALPHA_OP_SG (0x00300000)
262#define PSB_2D_SRCALPHA_OP_DG (0x00400000)
263#define PSB_2D_SRCALPHA_OP_GBL (0x00500000)
264#define PSB_2D_SRCALPHA_OP_ZERO (0x00600000)
265#define PSB_2D_SRCALPHA_INVERT (0x00800000)
266#define PSB_2D_SRCALPHA_INVERT_CLR (0xFF7FFFFF)
267#define PSB_2D_DSTALPHA_OP_MASK (0x07000000)
268#define PSB_2D_DSTALPHA_OP_CLRMASK (0xF8FFFFFF)
269#define PSB_2D_DSTALPHA_OP_SHIFT (24)
270#define PSB_2D_DSTALPHA_OP_ONE (0x00000000)
271#define PSB_2D_DSTALPHA_OP_SRC (0x01000000)
272#define PSB_2D_DSTALPHA_OP_DST (0x02000000)
273#define PSB_2D_DSTALPHA_OP_SG (0x03000000)
274#define PSB_2D_DSTALPHA_OP_DG (0x04000000)
275#define PSB_2D_DSTALPHA_OP_GBL (0x05000000)
276#define PSB_2D_DSTALPHA_OP_ZERO (0x06000000)
277#define PSB_2D_DSTALPHA_INVERT (0x08000000)
278#define PSB_2D_DSTALPHA_INVERT_CLR (0xF7FFFFFF)
279
280#define PSB_2D_PRE_MULTIPLICATION_ENABLE (0x10000000)
281#define PSB_2D_PRE_MULTIPLICATION_CLRMASK (0xEFFFFFFF)
282#define PSB_2D_ZERO_SOURCE_ALPHA_ENABLE (0x20000000)
283#define PSB_2D_ZERO_SOURCE_ALPHA_CLRMASK (0xDFFFFFFF)
284
285/*
286 * Source Offset (PSB_2D_SRC_OFF_BH)
287 */
288#define PSB_2D_SRCOFF_XSTART_MASK ((0x00000FFF) << 12)
289#define PSB_2D_SRCOFF_XSTART_SHIFT (12)
290#define PSB_2D_SRCOFF_YSTART_MASK (0x00000FFF)
291#define PSB_2D_SRCOFF_YSTART_SHIFT (0)
292
293/*
294 * Mask Offset (PSB_2D_MASK_OFF_BH)
295 */
296#define PSB_2D_MASKOFF_XSTART_MASK ((0x00000FFF) << 12)
297#define PSB_2D_MASKOFF_XSTART_SHIFT (12)
298#define PSB_2D_MASKOFF_YSTART_MASK (0x00000FFF)
299#define PSB_2D_MASKOFF_YSTART_SHIFT (0)
300
301/*
302 * 2D Fence (see PSB_2D_FENCE_BH): bits 0:27 are ignored
303 */
304
305/*
306 * Blit Rectangle (PSB_2D_BLIT_BH)
307 */
308
309#define PSB_2D_ROT_MASK (3 << 25)
310#define PSB_2D_ROT_CLRMASK (~PSB_2D_ROT_MASK)
311#define PSB_2D_ROT_NONE (0 << 25)
312#define PSB_2D_ROT_90DEGS (1 << 25)
313#define PSB_2D_ROT_180DEGS (2 << 25)
314#define PSB_2D_ROT_270DEGS (3 << 25)
315
316#define PSB_2D_COPYORDER_MASK (3 << 23)
317#define PSB_2D_COPYORDER_CLRMASK (~PSB_2D_COPYORDER_MASK)
318#define PSB_2D_COPYORDER_TL2BR (0 << 23)
319#define PSB_2D_COPYORDER_BR2TL (1 << 23)
320#define PSB_2D_COPYORDER_TR2BL (2 << 23)
321#define PSB_2D_COPYORDER_BL2TR (3 << 23)
322
323#define PSB_2D_DSTCK_CLRMASK (0xFF9FFFFF)
324#define PSB_2D_DSTCK_DISABLE (0x00000000)
325#define PSB_2D_DSTCK_PASS (0x00200000)
326#define PSB_2D_DSTCK_REJECT (0x00400000)
327
328#define PSB_2D_SRCCK_CLRMASK (0xFFE7FFFF)
329#define PSB_2D_SRCCK_DISABLE (0x00000000)
330#define PSB_2D_SRCCK_PASS (0x00080000)
331#define PSB_2D_SRCCK_REJECT (0x00100000)
332
333#define PSB_2D_CLIP_ENABLE (0x00040000)
334
335#define PSB_2D_ALPHA_ENABLE (0x00020000)
336
337#define PSB_2D_PAT_CLRMASK (0xFFFEFFFF)
338#define PSB_2D_PAT_MASK (0x00010000)
339#define PSB_2D_USE_PAT (0x00010000)
340#define PSB_2D_USE_FILL (0x00000000)
341/*
342 * Tungsten Graphics note on rop codes: If rop A and rop B are
343 * identical, the mask surface will not be read and need not be
344 * set up.
345 */
346/* rop code B */
347#define PSB_2D_ROP3B_MASK (0x0000FF00)
348#define PSB_2D_ROP3B_CLRMASK (0xFFFF00FF)
349#define PSB_2D_ROP3B_SHIFT (8)
350/* rop code A */
351#define PSB_2D_ROP3A_MASK (0x000000FF)
352#define PSB_2D_ROP3A_CLRMASK (0xFFFFFF00)
353#define PSB_2D_ROP3A_SHIFT (0)
354
355#define PSB_2D_ROP4_MASK (0x0000FFFF)
356/*
357 * DWORD0: (Only pass if Pattern control == Use Fill Colour)
358 * Fill Colour RGBA8888
359 */
360#define PSB_2D_FILLCOLOUR_MASK (0xFFFFFFFF)
361#define PSB_2D_FILLCOLOUR_SHIFT (0)
362/*
363 * DWORD1: (Always Present)
364 * X Start (Dest)
365 * Y Start (Dest)
366 */
367#define PSB_2D_DST_XSTART_MASK (0x00FFF000)
368#define PSB_2D_DST_XSTART_CLRMASK (0xFF000FFF)
369#define PSB_2D_DST_XSTART_SHIFT (12)
370#define PSB_2D_DST_YSTART_MASK (0x00000FFF)
371#define PSB_2D_DST_YSTART_CLRMASK (0xFFFFF000)
372#define PSB_2D_DST_YSTART_SHIFT (0)
373/*
374 * DWORD2: (Always Present)
375 * X Size (Dest)
376 * Y Size (Dest)
377 */
378#define PSB_2D_DST_XSIZE_MASK (0x00FFF000)
379#define PSB_2D_DST_XSIZE_CLRMASK (0xFF000FFF)
380#define PSB_2D_DST_XSIZE_SHIFT (12)
381#define PSB_2D_DST_YSIZE_MASK (0x00000FFF)
382#define PSB_2D_DST_YSIZE_CLRMASK (0xFFFFF000)
383#define PSB_2D_DST_YSIZE_SHIFT (0)
384
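The DWORD1/DWORD2 layouts above are plain shift-and-mask fields, so
composing a blit's destination words is a matter of placing each value at
its *_SHIFT under its *_MASK. A sketch, with hypothetical helper names:

static inline u32 psb_2d_pack_dst_start(u32 x, u32 y)
{
	return ((x << PSB_2D_DST_XSTART_SHIFT) & PSB_2D_DST_XSTART_MASK) |
	       ((y << PSB_2D_DST_YSTART_SHIFT) & PSB_2D_DST_YSTART_MASK);
}

static inline u32 psb_2d_pack_dst_size(u32 w, u32 h)
{
	return ((w << PSB_2D_DST_XSIZE_SHIFT) & PSB_2D_DST_XSIZE_MASK) |
	       ((h << PSB_2D_DST_YSIZE_SHIFT) & PSB_2D_DST_YSIZE_MASK);
}
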
385/*
386 * Source Surface (PSB_2D_SRC_SURF_BH)
387 */
388/*
389 * WORD 0
390 */
391
392#define PSB_2D_SRC_FORMAT_MASK (0x00078000)
393#define PSB_2D_SRC_1_PAL (0x00000000)
394#define PSB_2D_SRC_2_PAL (0x00008000)
395#define PSB_2D_SRC_4_PAL (0x00010000)
396#define PSB_2D_SRC_8_PAL (0x00018000)
397#define PSB_2D_SRC_8_ALPHA (0x00020000)
398#define PSB_2D_SRC_4_ALPHA (0x00028000)
399#define PSB_2D_SRC_332RGB (0x00030000)
400#define PSB_2D_SRC_4444ARGB (0x00038000)
401#define PSB_2D_SRC_555RGB (0x00040000)
402#define PSB_2D_SRC_1555ARGB (0x00048000)
403#define PSB_2D_SRC_565RGB (0x00050000)
404#define PSB_2D_SRC_0888ARGB (0x00058000)
405#define PSB_2D_SRC_8888ARGB (0x00060000)
406#define PSB_2D_SRC_8888UYVY (0x00068000)
407#define PSB_2D_SRC_RESERVED (0x00070000)
408#define PSB_2D_SRC_1555ARGB_LOOKUP (0x00078000)
409
410
411#define PSB_2D_SRC_STRIDE_MASK (0x00007FFF)
412#define PSB_2D_SRC_STRIDE_CLRMASK (0xFFFF8000)
413#define PSB_2D_SRC_STRIDE_SHIFT (0)
414/*
415 * WORD 1 - Base Address
416 */
417#define PSB_2D_SRC_ADDR_MASK (0x0FFFFFFC)
418#define PSB_2D_SRC_ADDR_CLRMASK (0x00000003)
419#define PSB_2D_SRC_ADDR_SHIFT (2)
420#define PSB_2D_SRC_ADDR_ALIGNSHIFT (2)
421
422/*
423 * Pattern Surface (PSB_2D_PAT_SURF_BH)
424 */
425/*
426 * WORD 0
427 */
428
429#define PSB_2D_PAT_FORMAT_MASK (0x00078000)
430#define PSB_2D_PAT_1_PAL (0x00000000)
431#define PSB_2D_PAT_2_PAL (0x00008000)
432#define PSB_2D_PAT_4_PAL (0x00010000)
433#define PSB_2D_PAT_8_PAL (0x00018000)
434#define PSB_2D_PAT_8_ALPHA (0x00020000)
435#define PSB_2D_PAT_4_ALPHA (0x00028000)
436#define PSB_2D_PAT_332RGB (0x00030000)
437#define PSB_2D_PAT_4444ARGB (0x00038000)
438#define PSB_2D_PAT_555RGB (0x00040000)
439#define PSB_2D_PAT_1555ARGB (0x00048000)
440#define PSB_2D_PAT_565RGB (0x00050000)
441#define PSB_2D_PAT_0888ARGB (0x00058000)
442#define PSB_2D_PAT_8888ARGB (0x00060000)
443
444#define PSB_2D_PAT_STRIDE_MASK (0x00007FFF)
445#define PSB_2D_PAT_STRIDE_CLRMASK (0xFFFF8000)
446#define PSB_2D_PAT_STRIDE_SHIFT (0)
447/*
448 * WORD 1 - Base Address
449 */
450#define PSB_2D_PAT_ADDR_MASK (0x0FFFFFFC)
451#define PSB_2D_PAT_ADDR_CLRMASK (0x00000003)
452#define PSB_2D_PAT_ADDR_SHIFT (2)
453#define PSB_2D_PAT_ADDR_ALIGNSHIFT (2)
454
455/*
456 * Destination Surface (PSB_2D_DST_SURF_BH)
457 */
458/*
459 * WORD 0
460 */
461
462#define PSB_2D_DST_FORMAT_MASK (0x00078000)
463#define PSB_2D_DST_332RGB (0x00030000)
464#define PSB_2D_DST_4444ARGB (0x00038000)
465#define PSB_2D_DST_555RGB (0x00040000)
466#define PSB_2D_DST_1555ARGB (0x00048000)
467#define PSB_2D_DST_565RGB (0x00050000)
468#define PSB_2D_DST_0888ARGB (0x00058000)
469#define PSB_2D_DST_8888ARGB (0x00060000)
470#define PSB_2D_DST_8888AYUV (0x00070000)
471
472#define PSB_2D_DST_STRIDE_MASK (0x00007FFF)
473#define PSB_2D_DST_STRIDE_CLRMASK (0xFFFF8000)
474#define PSB_2D_DST_STRIDE_SHIFT (0)
475/*
476 * WORD 1 - Base Address
477 */
478#define PSB_2D_DST_ADDR_MASK (0x0FFFFFFC)
479#define PSB_2D_DST_ADDR_CLRMASK (0x00000003)
480#define PSB_2D_DST_ADDR_SHIFT (2)
481#define PSB_2D_DST_ADDR_ALIGNSHIFT (2)
482
483/*
484 * Mask Surface (PSB_2D_MASK_SURF_BH)
485 */
486/*
487 * WORD 0
488 */
489#define PSB_2D_MASK_STRIDE_MASK (0x00007FFF)
490#define PSB_2D_MASK_STRIDE_CLRMASK (0xFFFF8000)
491#define PSB_2D_MASK_STRIDE_SHIFT (0)
492/*
493 * WORD 1 - Base Address
494 */
495#define PSB_2D_MASK_ADDR_MASK (0x0FFFFFFC)
496#define PSB_2D_MASK_ADDR_CLRMASK (0x00000003)
497#define PSB_2D_MASK_ADDR_SHIFT (2)
498#define PSB_2D_MASK_ADDR_ALIGNSHIFT (2)
499
500/*
501 * Source Palette (PSB_2D_SRC_PAL_BH)
502 */
503
504#define PSB_2D_SRCPAL_ADDR_SHIFT (0)
505#define PSB_2D_SRCPAL_ADDR_CLRMASK (0xF0000007)
506#define PSB_2D_SRCPAL_ADDR_MASK (0x0FFFFFF8)
507#define PSB_2D_SRCPAL_BYTEALIGN (1024)
508
509/*
510 * Pattern Palette (PSB_2D_PAT_PAL_BH)
511 */
512
513#define PSB_2D_PATPAL_ADDR_SHIFT (0)
514#define PSB_2D_PATPAL_ADDR_CLRMASK (0xF0000007)
515#define PSB_2D_PATPAL_ADDR_MASK (0x0FFFFFF8)
516#define PSB_2D_PATPAL_BYTEALIGN (1024)
517
518/*
519 * Rop3 Codes (2 LS bytes)
520 */
521
522#define PSB_2D_ROP3_SRCCOPY (0xCCCC)
523#define PSB_2D_ROP3_PATCOPY (0xF0F0)
524#define PSB_2D_ROP3_WHITENESS (0xFFFF)
525#define PSB_2D_ROP3_BLACKNESS (0x0000)
526#define PSB_2D_ROP3_SRC (0xCC)
527#define PSB_2D_ROP3_PAT (0xF0)
528#define PSB_2D_ROP3_DST (0xAA)
529
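A ROP3 code is an 8-entry truth table: bit ((pat << 2) | (src << 1) | dst)
of the code is the result for that combination of pattern, source and
destination bits, which is why SRC is 0xCC, PAT is 0xF0 and DST is 0xAA
above. A per-bit evaluator (illustrative sketch, not driver code):

static inline unsigned int rop3_eval(unsigned int rop3, unsigned int pat,
				     unsigned int src, unsigned int dst)
{
	/* pat/src/dst are single bits (0 or 1) */
	return (rop3 >> ((pat << 2) | (src << 1) | dst)) & 1;
}

/* rop3_eval(PSB_2D_ROP3_SRC, p, s, d) == s for any p, d: source copy */
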
530/*
531 * Sizes.
532 */
533
534#define PSB_SCENE_HW_COOKIE_SIZE 16
535#define PSB_TA_MEM_HW_COOKIE_SIZE 16
536
537/*
538 * Scene stuff.
539 */
540
541#define PSB_NUM_HW_SCENES 2
542
543/*
544 * Scheduler completion actions.
545 */
546
547#define PSB_RASTER_BLOCK 0
548#define PSB_RASTER 1
549#define PSB_RETURN 2
550#define PSB_TA 3
551
552/* Power management */
553#define PSB_PUNIT_PORT 0x04
554#define PSB_OSPMBA 0x78
555#define PSB_APMBA 0x7a
556#define PSB_APM_CMD 0x0
557#define PSB_APM_STS 0x04
558#define PSB_PWRGT_VID_ENC_MASK 0x30
559#define PSB_PWRGT_VID_DEC_MASK 0xc
560#define PSB_PWRGT_GL3_MASK 0xc0
561
562#define PSB_PM_SSC 0x20
563#define PSB_PM_SSS 0x30
564#define PSB_PWRGT_DISPLAY_MASK 0xc /* on a different BA than video/gfx */
565#define MDFLD_PWRGT_DISPLAY_A_CNTR 0x0000000c
566#define MDFLD_PWRGT_DISPLAY_B_CNTR 0x0000c000
567#define MDFLD_PWRGT_DISPLAY_C_CNTR 0x00030000
568#define MDFLD_PWRGT_DISP_MIPI_CNTR 0x000c0000
569#define MDFLD_PWRGT_DISPLAY_CNTR (MDFLD_PWRGT_DISPLAY_A_CNTR | MDFLD_PWRGT_DISPLAY_B_CNTR | MDFLD_PWRGT_DISPLAY_C_CNTR | MDFLD_PWRGT_DISP_MIPI_CNTR) /* 0x000fc00c */
570/* Display SSS register bits are different in A0 vs. B0 */
571#define PSB_PWRGT_GFX_MASK 0x3
572#define MDFLD_PWRGT_DISPLAY_A_STS 0x000000c0
573#define MDFLD_PWRGT_DISPLAY_B_STS 0x00000300
574#define MDFLD_PWRGT_DISPLAY_C_STS 0x00000c00
575#define PSB_PWRGT_GFX_MASK_B0 0xc3
576#define MDFLD_PWRGT_DISPLAY_A_STS_B0 0x0000000c
577#define MDFLD_PWRGT_DISPLAY_B_STS_B0 0x0000c000
578#define MDFLD_PWRGT_DISPLAY_C_STS_B0 0x00030000
579#define MDFLD_PWRGT_DISP_MIPI_STS 0x000c0000
580#define MDFLD_PWRGT_DISPLAY_STS_A0 (MDFLD_PWRGT_DISPLAY_A_STS | MDFLD_PWRGT_DISPLAY_B_STS | MDFLD_PWRGT_DISPLAY_C_STS | MDFLD_PWRGT_DISP_MIPI_STS) /* 0x000c0fc0 */
581#define MDFLD_PWRGT_DISPLAY_STS_B0 (MDFLD_PWRGT_DISPLAY_A_STS_B0 | MDFLD_PWRGT_DISPLAY_B_STS_B0 | MDFLD_PWRGT_DISPLAY_C_STS_B0 | MDFLD_PWRGT_DISP_MIPI_STS) /* 0x000fc00c */
582#endif
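
Since the display power-gate status bits moved between the A0 and B0
steppings, code polling PSB_PM_SSS must pick the mask set by stepping. A
selection sketch; the is_b0 flag is a hypothetical input the caller would
derive from the device revision:

static u32 mdfld_display_sts_mask(bool is_b0)
{
	return is_b0 ? MDFLD_PWRGT_DISPLAY_STS_B0
		     : MDFLD_PWRGT_DISPLAY_STS_A0;
}
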
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index 8f371e8d630..f7c17b23983 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -222,8 +222,6 @@ static int i810_dma_cleanup(struct drm_device *dev)
222 pci_free_consistent(dev->pdev, PAGE_SIZE, 222 pci_free_consistent(dev->pdev, PAGE_SIZE,
223 dev_priv->hw_status_page, 223 dev_priv->hw_status_page,
224 dev_priv->dma_status_page); 224 dev_priv->dma_status_page);
225 /* Need to rewrite hardware status page */
226 I810_WRITE(0x02080, 0x1ffff000);
227 } 225 }
228 kfree(dev->dev_private); 226 kfree(dev->dev_private);
229 dev->dev_private = NULL; 227 dev->dev_private = NULL;
@@ -888,7 +886,7 @@ static int i810_flush_queue(struct drm_device *dev)
888} 886}
889 887
890/* Must be called with the lock held */ 888/* Must be called with the lock held */
891static void i810_reclaim_buffers(struct drm_device *dev, 889void i810_driver_reclaim_buffers(struct drm_device *dev,
892 struct drm_file *file_priv) 890 struct drm_file *file_priv)
893{ 891{
894 struct drm_device_dma *dma = dev->dma; 892 struct drm_device_dma *dma = dev->dma;
@@ -1225,12 +1223,17 @@ void i810_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
1225 if (dev_priv->page_flipping) 1223 if (dev_priv->page_flipping)
1226 i810_do_cleanup_pageflip(dev); 1224 i810_do_cleanup_pageflip(dev);
1227 } 1225 }
1228}
1229 1226
1230void i810_driver_reclaim_buffers_locked(struct drm_device *dev, 1227 if (file_priv->master && file_priv->master->lock.hw_lock) {
1231 struct drm_file *file_priv) 1228 drm_idlelock_take(&file_priv->master->lock);
1232{ 1229 i810_driver_reclaim_buffers(dev, file_priv);
1233 i810_reclaim_buffers(dev, file_priv); 1230 drm_idlelock_release(&file_priv->master->lock);
1231 } else {
1232 /* master disappeared, clean up stuff anyway and hope nothing
1233 * goes wrong */
1234 i810_driver_reclaim_buffers(dev, file_priv);
1235 }
1236
1234} 1237}
1235 1238
1236int i810_driver_dma_quiescent(struct drm_device *dev) 1239int i810_driver_dma_quiescent(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
index d4266bdf6fb..053f1ee5839 100644
--- a/drivers/gpu/drm/i810/i810_drv.c
+++ b/drivers/gpu/drm/i810/i810_drv.c
@@ -43,6 +43,17 @@ static struct pci_device_id pciidlist[] = {
43 i810_PCI_IDS 43 i810_PCI_IDS
44}; 44};
45 45
46static const struct file_operations i810_driver_fops = {
47 .owner = THIS_MODULE,
48 .open = drm_open,
49 .release = drm_release,
50 .unlocked_ioctl = drm_ioctl,
51 .mmap = drm_mmap,
52 .poll = drm_poll,
53 .fasync = drm_fasync,
54 .llseek = noop_llseek,
55};
56
46static struct drm_driver driver = { 57static struct drm_driver driver = {
47 .driver_features = 58 .driver_features =
48 DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR | 59 DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
@@ -52,20 +63,9 @@ static struct drm_driver driver = {
52 .lastclose = i810_driver_lastclose, 63 .lastclose = i810_driver_lastclose,
53 .preclose = i810_driver_preclose, 64 .preclose = i810_driver_preclose,
54 .device_is_agp = i810_driver_device_is_agp, 65 .device_is_agp = i810_driver_device_is_agp,
55 .reclaim_buffers_locked = i810_driver_reclaim_buffers_locked,
56 .dma_quiescent = i810_driver_dma_quiescent, 66 .dma_quiescent = i810_driver_dma_quiescent,
57 .ioctls = i810_ioctls, 67 .ioctls = i810_ioctls,
58 .fops = { 68 .fops = &i810_driver_fops,
59 .owner = THIS_MODULE,
60 .open = drm_open,
61 .release = drm_release,
62 .unlocked_ioctl = drm_ioctl,
63 .mmap = drm_mmap,
64 .poll = drm_poll,
65 .fasync = drm_fasync,
66 .llseek = noop_llseek,
67 },
68
69 .name = DRIVER_NAME, 69 .name = DRIVER_NAME,
70 .desc = DRIVER_DESC, 70 .desc = DRIVER_DESC,
71 .date = DRIVER_DATE, 71 .date = DRIVER_DATE,
diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
index c9339f48179..6e0acad9e0f 100644
--- a/drivers/gpu/drm/i810/i810_drv.h
+++ b/drivers/gpu/drm/i810/i810_drv.h
@@ -116,14 +116,12 @@ typedef struct drm_i810_private {
116 116
117 /* i810_dma.c */ 117 /* i810_dma.c */
118extern int i810_driver_dma_quiescent(struct drm_device *dev); 118extern int i810_driver_dma_quiescent(struct drm_device *dev);
119extern void i810_driver_reclaim_buffers_locked(struct drm_device *dev, 119void i810_driver_reclaim_buffers(struct drm_device *dev,
120 struct drm_file *file_priv); 120 struct drm_file *file_priv);
121extern int i810_driver_load(struct drm_device *, unsigned long flags); 121extern int i810_driver_load(struct drm_device *, unsigned long flags);
122extern void i810_driver_lastclose(struct drm_device *dev); 122extern void i810_driver_lastclose(struct drm_device *dev);
123extern void i810_driver_preclose(struct drm_device *dev, 123extern void i810_driver_preclose(struct drm_device *dev,
124 struct drm_file *file_priv); 124 struct drm_file *file_priv);
125extern void i810_driver_reclaim_buffers_locked(struct drm_device *dev,
126 struct drm_file *file_priv);
127extern int i810_driver_device_is_agp(struct drm_device *dev); 125extern int i810_driver_device_is_agp(struct drm_device *dev);
128 126
129extern long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg); 127extern long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 0ae6a7c5020..808b255d7fc 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -28,6 +28,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
28 intel_dvo.o \ 28 intel_dvo.o \
29 intel_ringbuffer.o \ 29 intel_ringbuffer.o \
30 intel_overlay.o \ 30 intel_overlay.o \
31 intel_sprite.o \
31 intel_opregion.o \ 32 intel_opregion.o \
32 dvo_ch7xxx.o \ 33 dvo_ch7xxx.o \
33 dvo_ch7017.o \ 34 dvo_ch7017.o \
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 004b048c519..11807989f91 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1001,7 +1001,7 @@ static int i915_inttoext_table(struct seq_file *m, void *unused)
1001 return 0; 1001 return 0;
1002} 1002}
1003 1003
1004static int i915_drpc_info(struct seq_file *m, void *unused) 1004static int ironlake_drpc_info(struct seq_file *m)
1005{ 1005{
1006 struct drm_info_node *node = (struct drm_info_node *) m->private; 1006 struct drm_info_node *node = (struct drm_info_node *) m->private;
1007 struct drm_device *dev = node->minor->dev; 1007 struct drm_device *dev = node->minor->dev;
@@ -1068,6 +1068,90 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
1068 return 0; 1068 return 0;
1069} 1069}
1070 1070
1071static int gen6_drpc_info(struct seq_file *m)
1072{
1073
1074 struct drm_info_node *node = (struct drm_info_node *) m->private;
1075 struct drm_device *dev = node->minor->dev;
1076 struct drm_i915_private *dev_priv = dev->dev_private;
1077 u32 rpmodectl1, gt_core_status, rcctl1;
1078 int count = 0, ret;
1079
1080
1081 ret = mutex_lock_interruptible(&dev->struct_mutex);
1082 if (ret)
1083 return ret;
1084
1085 if (atomic_read(&dev_priv->forcewake_count)) {
1086 seq_printf(m, "RC information inaccurate because userspace "
1087 "holds a reference \n");
1088 } else {
1089 /* NB: we cannot use forcewake, else we read the wrong values */
1090 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
1091 udelay(10);
1092 seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
1093 }
1094
1095 gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS);
1096 trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4);
1097
1098 rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
1099 rcctl1 = I915_READ(GEN6_RC_CONTROL);
1100 mutex_unlock(&dev->struct_mutex);
1101
1102 seq_printf(m, "Video Turbo Mode: %s\n",
1103 yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
1104 seq_printf(m, "HW control enabled: %s\n",
1105 yesno(rpmodectl1 & GEN6_RP_ENABLE));
1106 seq_printf(m, "SW control enabled: %s\n",
1107 yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
1108 GEN6_RP_MEDIA_SW_MODE));
1109 seq_printf(m, "RC6 Enabled: %s\n",
1110 yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
1111 seq_printf(m, "RC6 Enabled: %s\n",
1112 yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
1113 seq_printf(m, "Deep RC6 Enabled: %s\n",
1114 yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
1115 seq_printf(m, "Deepest RC6 Enabled: %s\n",
1116 yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
1117 seq_printf(m, "Current RC state: ");
1118 switch (gt_core_status & GEN6_RCn_MASK) {
1119 case GEN6_RC0:
1120 if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
1121 seq_printf(m, "Core Power Down\n");
1122 else
1123 seq_printf(m, "on\n");
1124 break;
1125 case GEN6_RC3:
1126 seq_printf(m, "RC3\n");
1127 break;
1128 case GEN6_RC6:
1129 seq_printf(m, "RC6\n");
1130 break;
1131 case GEN6_RC7:
1132 seq_printf(m, "RC7\n");
1133 break;
1134 default:
1135 seq_printf(m, "Unknown\n");
1136 break;
1137 }
1138
1139 seq_printf(m, "Core Power Down: %s\n",
1140 yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
1141 return 0;
1142}
1143
1144static int i915_drpc_info(struct seq_file *m, void *unused)
1145{
1146 struct drm_info_node *node = (struct drm_info_node *) m->private;
1147 struct drm_device *dev = node->minor->dev;
1148
1149 if (IS_GEN6(dev) || IS_GEN7(dev))
1150 return gen6_drpc_info(m);
1151 else
1152 return ironlake_drpc_info(m);
1153}
1154
1071static int i915_fbc_status(struct seq_file *m, void *unused) 1155static int i915_fbc_status(struct seq_file *m, void *unused)
1072{ 1156{
1073 struct drm_info_node *node = (struct drm_info_node *) m->private; 1157 struct drm_info_node *node = (struct drm_info_node *) m->private;
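
The FORCEWAKE_ACK wait added above is bounded polling: spin on a status bit
with a short delay, but cap the iterations so a wedged GPU cannot stall the
debugfs read forever. The general shape, as a sketch; STATUS_REG and
BUSY_BIT are hypothetical placeholders:

static bool wait_until_idle(struct drm_i915_private *dev_priv)
{
	int tries = 0;

	/* at most 50 * 10us, mirroring the FORCEWAKE_ACK loop above */
	while (tries++ < 50 && (I915_READ_NOTRACE(STATUS_REG) & BUSY_BIT))
		udelay(10);

	return tries < 51;	/* false: gave up, data may be stale */
}
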
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index a9ae374861e..5f4d5893e98 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -781,6 +781,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
781 case I915_PARAM_HAS_RELAXED_DELTA: 781 case I915_PARAM_HAS_RELAXED_DELTA:
782 value = 1; 782 value = 1;
783 break; 783 break;
784 case I915_PARAM_HAS_GEN7_SOL_RESET:
785 value = 1;
786 break;
784 default: 787 default:
785 DRM_DEBUG_DRIVER("Unknown parameter %d\n", 788 DRM_DEBUG_DRIVER("Unknown parameter %d\n",
786 param->param); 789 param->param);
@@ -2305,6 +2308,8 @@ struct drm_ioctl_desc i915_ioctls[] = {
2305 DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED), 2308 DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
2306 DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 2309 DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
2307 DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 2310 DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
2311 DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
2312 DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
2308}; 2313};
2309 2314
2310int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); 2315int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index a1103fc6597..8f7187915b0 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -810,6 +810,21 @@ static struct vm_operations_struct i915_gem_vm_ops = {
810 .close = drm_gem_vm_close, 810 .close = drm_gem_vm_close,
811}; 811};
812 812
813static const struct file_operations i915_driver_fops = {
814 .owner = THIS_MODULE,
815 .open = drm_open,
816 .release = drm_release,
817 .unlocked_ioctl = drm_ioctl,
818 .mmap = drm_gem_mmap,
819 .poll = drm_poll,
820 .fasync = drm_fasync,
821 .read = drm_read,
822#ifdef CONFIG_COMPAT
823 .compat_ioctl = i915_compat_ioctl,
824#endif
825 .llseek = noop_llseek,
826};
827
813static struct drm_driver driver = { 828static struct drm_driver driver = {
814 /* Don't use MTRRs here; the Xserver or userspace app should 829 /* Don't use MTRRs here; the Xserver or userspace app should
815 * deal with them for Intel hardware. 830 * deal with them for Intel hardware.
@@ -843,21 +858,7 @@ static struct drm_driver driver = {
843 .dumb_map_offset = i915_gem_mmap_gtt, 858 .dumb_map_offset = i915_gem_mmap_gtt,
844 .dumb_destroy = i915_gem_dumb_destroy, 859 .dumb_destroy = i915_gem_dumb_destroy,
845 .ioctls = i915_ioctls, 860 .ioctls = i915_ioctls,
846 .fops = { 861 .fops = &i915_driver_fops,
847 .owner = THIS_MODULE,
848 .open = drm_open,
849 .release = drm_release,
850 .unlocked_ioctl = drm_ioctl,
851 .mmap = drm_gem_mmap,
852 .poll = drm_poll,
853 .fasync = drm_fasync,
854 .read = drm_read,
855#ifdef CONFIG_COMPAT
856 .compat_ioctl = i915_compat_ioctl,
857#endif
858 .llseek = noop_llseek,
859 },
860
861 .name = DRIVER_NAME, 862 .name = DRIVER_NAME,
862 .desc = DRIVER_DESC, 863 .desc = DRIVER_DESC,
863 .date = DRIVER_DATE, 864 .date = DRIVER_DATE,
@@ -922,13 +923,6 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
922MODULE_DESCRIPTION(DRIVER_DESC); 923MODULE_DESCRIPTION(DRIVER_DESC);
923MODULE_LICENSE("GPL and additional rights"); 924MODULE_LICENSE("GPL and additional rights");
924 925
925/* We give fast paths for the really cool registers */
926#define NEEDS_FORCE_WAKE(dev_priv, reg) \
927 (((dev_priv)->info->gen >= 6) && \
928 ((reg) < 0x40000) && \
929 ((reg) != FORCEWAKE) && \
930 ((reg) != ECOBUS))
931
932#define __i915_read(x, y) \ 926#define __i915_read(x, y) \
933u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ 927u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
934 u##x val = 0; \ 928 u##x val = 0; \
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 554bef7a3b9..602bc80baab 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -207,6 +207,8 @@ struct drm_i915_display_funcs {
207 int (*get_display_clock_speed)(struct drm_device *dev); 207 int (*get_display_clock_speed)(struct drm_device *dev);
208 int (*get_fifo_size)(struct drm_device *dev, int plane); 208 int (*get_fifo_size)(struct drm_device *dev, int plane);
209 void (*update_wm)(struct drm_device *dev); 209 void (*update_wm)(struct drm_device *dev);
210 void (*update_sprite_wm)(struct drm_device *dev, int pipe,
211 uint32_t sprite_width, int pixel_size);
210 int (*crtc_mode_set)(struct drm_crtc *crtc, 212 int (*crtc_mode_set)(struct drm_crtc *crtc,
211 struct drm_display_mode *mode, 213 struct drm_display_mode *mode,
212 struct drm_display_mode *adjusted_mode, 214 struct drm_display_mode *adjusted_mode,
@@ -337,6 +339,8 @@ typedef struct drm_i915_private {
337 struct timer_list hangcheck_timer; 339 struct timer_list hangcheck_timer;
338 int hangcheck_count; 340 int hangcheck_count;
339 uint32_t last_acthd; 341 uint32_t last_acthd;
342 uint32_t last_acthd_bsd;
343 uint32_t last_acthd_blt;
340 uint32_t last_instdone; 344 uint32_t last_instdone;
341 uint32_t last_instdone1; 345 uint32_t last_instdone1;
342 346
@@ -350,6 +354,7 @@ typedef struct drm_i915_private {
350 354
351 /* overlay */ 355 /* overlay */
352 struct intel_overlay *overlay; 356 struct intel_overlay *overlay;
357 bool sprite_scaling_enabled;
353 358
354 /* LVDS info */ 359 /* LVDS info */
355 int backlight_level; /* restore backlight to this value */ 360 int backlight_level; /* restore backlight to this value */
@@ -1362,8 +1367,7 @@ void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
1362#define NEEDS_FORCE_WAKE(dev_priv, reg) \ 1367#define NEEDS_FORCE_WAKE(dev_priv, reg) \
1363 (((dev_priv)->info->gen >= 6) && \ 1368 (((dev_priv)->info->gen >= 6) && \
1364 ((reg) < 0x40000) && \ 1369 ((reg) < 0x40000) && \
1365 ((reg) != FORCEWAKE) && \ 1370 ((reg) != FORCEWAKE))
1366 ((reg) != ECOBUS))
1367 1371
1368#define __i915_read(x, y) \ 1372#define __i915_read(x, y) \
1369 u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg); 1373 u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
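
With ECOBUS dropped, NEEDS_FORCE_WAKE() now covers every gen6+ register
below 0x40000 other than FORCEWAKE itself, and the __i915_read accessors
use it to bracket the MMIO access with a forcewake get/put. Approximately
(a sketch of the expanded shape, not the exact macro body):

static u32 i915_read32_sketch(struct drm_i915_private *dev_priv, u32 reg)
{
	u32 val;

	if (NEEDS_FORCE_WAKE(dev_priv, reg)) {
		gen6_gt_force_wake_get(dev_priv);	/* keep the GT awake */
		val = readl(dev_priv->regs + reg);
		gen6_gt_force_wake_put(dev_priv);
	} else {
		val = readl(dev_priv->regs + reg);
	}
	return val;
}
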
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8359dc77704..e55badb2d86 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2006,9 +2006,9 @@ i915_wait_request(struct intel_ring_buffer *ring,
2006 || atomic_read(&dev_priv->mm.wedged)); 2006 || atomic_read(&dev_priv->mm.wedged));
2007 2007
2008 ring->irq_put(ring); 2008 ring->irq_put(ring);
2009 } else if (wait_for(i915_seqno_passed(ring->get_seqno(ring), 2009 } else if (wait_for_atomic(i915_seqno_passed(ring->get_seqno(ring),
2010 seqno) || 2010 seqno) ||
2011 atomic_read(&dev_priv->mm.wedged), 3000)) 2011 atomic_read(&dev_priv->mm.wedged), 3000))
2012 ret = -EBUSY; 2012 ret = -EBUSY;
2013 ring->waiting_seqno = 0; 2013 ring->waiting_seqno = 0;
2014 2014
@@ -3309,6 +3309,10 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3309 3309
3310 if (ret == 0 && atomic_read(&dev_priv->mm.wedged)) 3310 if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
3311 ret = -EIO; 3311 ret = -EIO;
3312 } else if (wait_for_atomic(i915_seqno_passed(ring->get_seqno(ring),
3313 seqno) ||
3314 atomic_read(&dev_priv->mm.wedged), 3000)) {
3315 ret = -EBUSY;
3312 } 3316 }
3313 } 3317 }
3314 3318
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index b9da8900ae4..65e1f0043f9 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -971,6 +971,31 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
971} 971}
972 972
973static int 973static int
974i915_reset_gen7_sol_offsets(struct drm_device *dev,
975 struct intel_ring_buffer *ring)
976{
977 drm_i915_private_t *dev_priv = dev->dev_private;
978 int ret, i;
979
980 if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS])
981 return 0;
982
983 ret = intel_ring_begin(ring, 4 * 3);
984 if (ret)
985 return ret;
986
987 for (i = 0; i < 4; i++) {
988 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
989 intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
990 intel_ring_emit(ring, 0);
991 }
992
993 intel_ring_advance(ring);
994
995 return 0;
996}
997
998static int
974i915_gem_do_execbuffer(struct drm_device *dev, void *data, 999i915_gem_do_execbuffer(struct drm_device *dev, void *data,
975 struct drm_file *file, 1000 struct drm_file *file,
976 struct drm_i915_gem_execbuffer2 *args, 1001 struct drm_i915_gem_execbuffer2 *args,
@@ -984,6 +1009,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
984 struct intel_ring_buffer *ring; 1009 struct intel_ring_buffer *ring;
985 u32 exec_start, exec_len; 1010 u32 exec_start, exec_len;
986 u32 seqno; 1011 u32 seqno;
1012 u32 mask;
987 int ret, mode, i; 1013 int ret, mode, i;
988 1014
989 if (!i915_gem_check_execbuffer(args)) { 1015 if (!i915_gem_check_execbuffer(args)) {
@@ -1021,6 +1047,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1021 } 1047 }
1022 1048
1023 mode = args->flags & I915_EXEC_CONSTANTS_MASK; 1049 mode = args->flags & I915_EXEC_CONSTANTS_MASK;
1050 mask = I915_EXEC_CONSTANTS_MASK;
1024 switch (mode) { 1051 switch (mode) {
1025 case I915_EXEC_CONSTANTS_REL_GENERAL: 1052 case I915_EXEC_CONSTANTS_REL_GENERAL:
1026 case I915_EXEC_CONSTANTS_ABSOLUTE: 1053 case I915_EXEC_CONSTANTS_ABSOLUTE:
@@ -1034,18 +1061,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1034 mode == I915_EXEC_CONSTANTS_REL_SURFACE) 1061 mode == I915_EXEC_CONSTANTS_REL_SURFACE)
1035 return -EINVAL; 1062 return -EINVAL;
1036 1063
1037 ret = intel_ring_begin(ring, 4); 1064 /* The HW changed the meaning on this bit on gen6 */
1038 if (ret) 1065 if (INTEL_INFO(dev)->gen >= 6)
1039 return ret; 1066 mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
1040
1041 intel_ring_emit(ring, MI_NOOP);
1042 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
1043 intel_ring_emit(ring, INSTPM);
1044 intel_ring_emit(ring,
1045 I915_EXEC_CONSTANTS_MASK << 16 | mode);
1046 intel_ring_advance(ring);
1047
1048 dev_priv->relative_constants_mode = mode;
1049 } 1067 }
1050 break; 1068 break;
1051 default: 1069 default:
@@ -1176,6 +1194,27 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1176 } 1194 }
1177 } 1195 }
1178 1196
1197 if (ring == &dev_priv->ring[RCS] &&
1198 mode != dev_priv->relative_constants_mode) {
1199 ret = intel_ring_begin(ring, 4);
1200 if (ret)
1201 goto err;
1202
1203 intel_ring_emit(ring, MI_NOOP);
1204 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
1205 intel_ring_emit(ring, INSTPM);
1206 intel_ring_emit(ring, mask << 16 | mode);
1207 intel_ring_advance(ring);
1208
1209 dev_priv->relative_constants_mode = mode;
1210 }
1211
1212 if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
1213 ret = i915_reset_gen7_sol_offsets(dev, ring);
1214 if (ret)
1215 goto err;
1216 }
1217
1179 trace_i915_gem_ring_dispatch(ring, seqno); 1218 trace_i915_gem_ring_dispatch(ring, seqno);
1180 1219
1181 exec_start = batch_obj->gtt_offset + args->batch_start_offset; 1220 exec_start = batch_obj->gtt_offset + args->batch_start_offset;
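
The INSTPM load above uses the self-masked register convention: bits 31:16
are a write-enable mask for bits 15:0, so "mask << 16 | mode" updates only
the bits named in mask and leaves the rest of the register alone. The
encoding, as a sketch:

static inline u32 masked_field(u32 mask, u32 value)
{
	/* high half enables, low half supplies the new bits */
	return (mask << 16) | (value & mask);
}

/* equivalent to the LRI payload above, since mode lies within mask */
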
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index b40004b5597..5d433fc11ac 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1205,7 +1205,7 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
1205 } else { 1205 } else {
1206 int dspaddr = DSPADDR(intel_crtc->plane); 1206 int dspaddr = DSPADDR(intel_crtc->plane);
1207 stall_detected = I915_READ(dspaddr) == (obj->gtt_offset + 1207 stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
1208 crtc->y * crtc->fb->pitch + 1208 crtc->y * crtc->fb->pitches[0] +
1209 crtc->x * crtc->fb->bits_per_pixel/8); 1209 crtc->x * crtc->fb->bits_per_pixel/8);
1210 } 1210 }
1211 1211
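
The expected scanout address here is the byte offset of (x, y) in a linear
framebuffer, stride times the row plus bytes-per-pixel times the column,
with the stride now read from the per-plane pitches[] array. As a sketch:

static inline u32 fb_linear_offset(u32 x, u32 y, u32 pitch, u32 bpp)
{
	return y * pitch + x * bpp / 8;	/* matches the expression above */
}
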
@@ -1649,13 +1649,6 @@ static bool kick_ring(struct intel_ring_buffer *ring)
1649 I915_WRITE_CTL(ring, tmp); 1649 I915_WRITE_CTL(ring, tmp);
1650 return true; 1650 return true;
1651 } 1651 }
1652 if (IS_GEN6(dev) &&
1653 (tmp & RING_WAIT_SEMAPHORE)) {
1654 DRM_ERROR("Kicking stuck semaphore on %s\n",
1655 ring->name);
1656 I915_WRITE_CTL(ring, tmp);
1657 return true;
1658 }
1659 return false; 1652 return false;
1660} 1653}
1661 1654
@@ -1669,7 +1662,7 @@ void i915_hangcheck_elapsed(unsigned long data)
1669{ 1662{
1670 struct drm_device *dev = (struct drm_device *)data; 1663 struct drm_device *dev = (struct drm_device *)data;
1671 drm_i915_private_t *dev_priv = dev->dev_private; 1664 drm_i915_private_t *dev_priv = dev->dev_private;
1672 uint32_t acthd, instdone, instdone1; 1665 uint32_t acthd, instdone, instdone1, acthd_bsd, acthd_blt;
1673 bool err = false; 1666 bool err = false;
1674 1667
1675 if (!i915_enable_hangcheck) 1668 if (!i915_enable_hangcheck)
@@ -1686,16 +1679,21 @@ void i915_hangcheck_elapsed(unsigned long data)
1686 } 1679 }
1687 1680
1688 if (INTEL_INFO(dev)->gen < 4) { 1681 if (INTEL_INFO(dev)->gen < 4) {
1689 acthd = I915_READ(ACTHD);
1690 instdone = I915_READ(INSTDONE); 1682 instdone = I915_READ(INSTDONE);
1691 instdone1 = 0; 1683 instdone1 = 0;
1692 } else { 1684 } else {
1693 acthd = I915_READ(ACTHD_I965);
1694 instdone = I915_READ(INSTDONE_I965); 1685 instdone = I915_READ(INSTDONE_I965);
1695 instdone1 = I915_READ(INSTDONE1); 1686 instdone1 = I915_READ(INSTDONE1);
1696 } 1687 }
1688 acthd = intel_ring_get_active_head(&dev_priv->ring[RCS]);
1689 acthd_bsd = HAS_BSD(dev) ?
1690 intel_ring_get_active_head(&dev_priv->ring[VCS]) : 0;
1691 acthd_blt = HAS_BLT(dev) ?
1692 intel_ring_get_active_head(&dev_priv->ring[BCS]) : 0;
1697 1693
1698 if (dev_priv->last_acthd == acthd && 1694 if (dev_priv->last_acthd == acthd &&
1695 dev_priv->last_acthd_bsd == acthd_bsd &&
1696 dev_priv->last_acthd_blt == acthd_blt &&
1699 dev_priv->last_instdone == instdone && 1697 dev_priv->last_instdone == instdone &&
1700 dev_priv->last_instdone1 == instdone1) { 1698 dev_priv->last_instdone1 == instdone1) {
1701 if (dev_priv->hangcheck_count++ > 1) { 1699 if (dev_priv->hangcheck_count++ > 1) {
@@ -1727,6 +1725,8 @@ void i915_hangcheck_elapsed(unsigned long data)
1727 dev_priv->hangcheck_count = 0; 1725 dev_priv->hangcheck_count = 0;
1728 1726
1729 dev_priv->last_acthd = acthd; 1727 dev_priv->last_acthd = acthd;
1728 dev_priv->last_acthd_bsd = acthd_bsd;
1729 dev_priv->last_acthd_blt = acthd_blt;
1730 dev_priv->last_instdone = instdone; 1730 dev_priv->last_instdone = instdone;
1731 dev_priv->last_instdone1 = instdone1; 1731 dev_priv->last_instdone1 = instdone1;
1732 } 1732 }
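
The hangcheck change extends the usual no-progress heuristic to every ring:
sample each ring's active head and only escalate the hangcheck count when
all sampled values match the previous tick. The comparison, reduced to a
sketch:

static bool rings_made_no_progress(u32 acthd, u32 acthd_bsd, u32 acthd_blt,
				   const u32 last[3])
{
	/* all rings must be stuck for this tick to count as no progress */
	return acthd == last[0] &&
	       acthd_bsd == last[1] &&
	       acthd_blt == last[2];
}
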
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index a26d5b0a369..c3afb783cb9 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -442,6 +442,7 @@
442#define INSTPM_AGPBUSY_DIS (1<<11) /* gen3: when disabled, pending interrupts 442#define INSTPM_AGPBUSY_DIS (1<<11) /* gen3: when disabled, pending interrupts
443 will not assert AGPBUSY# and will only 443 will not assert AGPBUSY# and will only
444 be delivered when out of C3. */ 444 be delivered when out of C3. */
445#define INSTPM_FORCE_ORDERING (1<<7) /* GEN6+ */
445#define ACTHD 0x020c8 446#define ACTHD 0x020c8
446#define FW_BLC 0x020d8 447#define FW_BLC 0x020d8
447#define FW_BLC2 0x020dc 448#define FW_BLC2 0x020dc
@@ -2321,6 +2322,7 @@
2321#define PIPECONF_PROGRESSIVE (0 << 21) 2322#define PIPECONF_PROGRESSIVE (0 << 21)
2322#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21) 2323#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21)
2323#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21) 2324#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21)
2325#define PIPECONF_INTERLACE_MASK (7 << 21)
2324#define PIPECONF_CXSR_DOWNCLOCK (1<<16) 2326#define PIPECONF_CXSR_DOWNCLOCK (1<<16)
2325#define PIPECONF_BPP_MASK (0x000000e0) 2327#define PIPECONF_BPP_MASK (0x000000e0)
2326#define PIPECONF_BPP_8 (0<<5) 2328#define PIPECONF_BPP_8 (0<<5)
@@ -2459,6 +2461,8 @@
2459#define WM3_LP_ILK 0x45110 2461#define WM3_LP_ILK 0x45110
2460#define WM3_LP_EN (1<<31) 2462#define WM3_LP_EN (1<<31)
2461#define WM1S_LP_ILK 0x45120 2463#define WM1S_LP_ILK 0x45120
2464#define WM2S_LP_IVB 0x45124
2465#define WM3S_LP_IVB 0x45128
2462#define WM1S_LP_EN (1<<31) 2466#define WM1S_LP_EN (1<<31)
2463 2467
2464/* Memory latency timer register */ 2468/* Memory latency timer register */
@@ -2675,6 +2679,140 @@
2675#define _DSPBSURF 0x7119C 2679#define _DSPBSURF 0x7119C
2676#define _DSPBTILEOFF 0x711A4 2680#define _DSPBTILEOFF 0x711A4
2677 2681
2682/* Sprite A control */
2683#define _DVSACNTR 0x72180
2684#define DVS_ENABLE (1<<31)
2685#define DVS_GAMMA_ENABLE (1<<30)
2686#define DVS_PIXFORMAT_MASK (3<<25)
2687#define DVS_FORMAT_YUV422 (0<<25)
2688#define DVS_FORMAT_RGBX101010 (1<<25)
2689#define DVS_FORMAT_RGBX888 (2<<25)
2690#define DVS_FORMAT_RGBX161616 (3<<25)
2691#define DVS_SOURCE_KEY (1<<22)
2692#define DVS_RGB_ORDER_RGBX (1<<20)
2693#define DVS_YUV_BYTE_ORDER_MASK (3<<16)
2694#define DVS_YUV_ORDER_YUYV (0<<16)
2695#define DVS_YUV_ORDER_UYVY (1<<16)
2696#define DVS_YUV_ORDER_YVYU (2<<16)
2697#define DVS_YUV_ORDER_VYUY (3<<16)
2698#define DVS_DEST_KEY (1<<2)
2699#define DVS_TRICKLE_FEED_DISABLE (1<<14)
2700#define DVS_TILED (1<<10)
2701#define _DVSALINOFF 0x72184
2702#define _DVSASTRIDE 0x72188
2703#define _DVSAPOS 0x7218c
2704#define _DVSASIZE 0x72190
2705#define _DVSAKEYVAL 0x72194
2706#define _DVSAKEYMSK 0x72198
2707#define _DVSASURF 0x7219c
2708#define _DVSAKEYMAXVAL 0x721a0
2709#define _DVSATILEOFF 0x721a4
2710#define _DVSASURFLIVE 0x721ac
2711#define _DVSASCALE 0x72204
2712#define DVS_SCALE_ENABLE (1<<31)
2713#define DVS_FILTER_MASK (3<<29)
2714#define DVS_FILTER_MEDIUM (0<<29)
2715#define DVS_FILTER_ENHANCING (1<<29)
2716#define DVS_FILTER_SOFTENING (2<<29)
2717#define DVS_VERTICAL_OFFSET_HALF (1<<28) /* must be enabled below */
2718#define DVS_VERTICAL_OFFSET_ENABLE (1<<27)
2719#define _DVSAGAMC 0x72300
2720
2721#define _DVSBCNTR 0x73180
2722#define _DVSBLINOFF 0x73184
2723#define _DVSBSTRIDE 0x73188
2724#define _DVSBPOS 0x7318c
2725#define _DVSBSIZE 0x73190
2726#define _DVSBKEYVAL 0x73194
2727#define _DVSBKEYMSK 0x73198
2728#define _DVSBSURF 0x7319c
2729#define _DVSBKEYMAXVAL 0x731a0
2730#define _DVSBTILEOFF 0x731a4
2731#define _DVSBSURFLIVE 0x731ac
2732#define _DVSBSCALE 0x73204
2733#define _DVSBGAMC 0x73300
2734
2735#define DVSCNTR(pipe) _PIPE(pipe, _DVSACNTR, _DVSBCNTR)
2736#define DVSLINOFF(pipe) _PIPE(pipe, _DVSALINOFF, _DVSBLINOFF)
2737#define DVSSTRIDE(pipe) _PIPE(pipe, _DVSASTRIDE, _DVSBSTRIDE)
2738#define DVSPOS(pipe) _PIPE(pipe, _DVSAPOS, _DVSBPOS)
2739#define DVSSURF(pipe) _PIPE(pipe, _DVSASURF, _DVSBSURF)
2740#define DVSKEYMAX(pipe) _PIPE(pipe, _DVSAKEYMAXVAL, _DVSBKEYMAXVAL)
2741#define DVSSIZE(pipe) _PIPE(pipe, _DVSASIZE, _DVSBSIZE)
2742#define DVSSCALE(pipe) _PIPE(pipe, _DVSASCALE, _DVSBSCALE)
2743#define DVSTILEOFF(pipe) _PIPE(pipe, _DVSATILEOFF, _DVSBTILEOFF)
2744#define DVSKEYVAL(pipe) _PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL)
2745#define DVSKEYMSK(pipe) _PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK)
2746
2747#define _SPRA_CTL 0x70280
2748#define SPRITE_ENABLE (1<<31)
2749#define SPRITE_GAMMA_ENABLE (1<<30)
2750#define SPRITE_PIXFORMAT_MASK (7<<25)
2751#define SPRITE_FORMAT_YUV422 (0<<25)
2752#define SPRITE_FORMAT_RGBX101010 (1<<25)
2753#define SPRITE_FORMAT_RGBX888 (2<<25)
2754#define SPRITE_FORMAT_RGBX161616 (3<<25)
2755#define SPRITE_FORMAT_YUV444 (4<<25)
2756#define SPRITE_FORMAT_XR_BGR101010 (5<<25) /* Extended range */
2757#define SPRITE_CSC_ENABLE (1<<24)
2758#define SPRITE_SOURCE_KEY (1<<22)
2759#define SPRITE_RGB_ORDER_RGBX (1<<20) /* only for 888 and 161616 */
2760#define SPRITE_YUV_TO_RGB_CSC_DISABLE (1<<19)
2761#define SPRITE_YUV_CSC_FORMAT_BT709 (1<<18) /* 0 is BT601 */
2762#define SPRITE_YUV_BYTE_ORDER_MASK (3<<16)
2763#define SPRITE_YUV_ORDER_YUYV (0<<16)
2764#define SPRITE_YUV_ORDER_UYVY (1<<16)
2765#define SPRITE_YUV_ORDER_YVYU (2<<16)
2766#define SPRITE_YUV_ORDER_VYUY (3<<16)
2767#define SPRITE_TRICKLE_FEED_DISABLE (1<<14)
2768#define SPRITE_INT_GAMMA_ENABLE (1<<13)
2769#define SPRITE_TILED (1<<10)
2770#define SPRITE_DEST_KEY (1<<2)
2771#define _SPRA_LINOFF 0x70284
2772#define _SPRA_STRIDE 0x70288
2773#define _SPRA_POS 0x7028c
2774#define _SPRA_SIZE 0x70290
2775#define _SPRA_KEYVAL 0x70294
2776#define _SPRA_KEYMSK 0x70298
2777#define _SPRA_SURF 0x7029c
2778#define _SPRA_KEYMAX 0x702a0
2779#define _SPRA_TILEOFF 0x702a4
2780#define _SPRA_SCALE 0x70304
2781#define SPRITE_SCALE_ENABLE (1<<31)
2782#define SPRITE_FILTER_MASK (3<<29)
2783#define SPRITE_FILTER_MEDIUM (0<<29)
2784#define SPRITE_FILTER_ENHANCING (1<<29)
2785#define SPRITE_FILTER_SOFTENING (2<<29)
2786#define SPRITE_VERTICAL_OFFSET_HALF (1<<28) /* must be enabled below */
2787#define SPRITE_VERTICAL_OFFSET_ENABLE (1<<27)
2788#define _SPRA_GAMC 0x70400
2789
2790#define _SPRB_CTL 0x71280
2791#define _SPRB_LINOFF 0x71284
2792#define _SPRB_STRIDE 0x71288
2793#define _SPRB_POS 0x7128c
2794#define _SPRB_SIZE 0x71290
2795#define _SPRB_KEYVAL 0x71294
2796#define _SPRB_KEYMSK 0x71298
2797#define _SPRB_SURF 0x7129c
2798#define _SPRB_KEYMAX 0x712a0
2799#define _SPRB_TILEOFF 0x712a4
2800#define _SPRB_SCALE 0x71304
2801#define _SPRB_GAMC 0x71400
2802
2803#define SPRCTL(pipe) _PIPE(pipe, _SPRA_CTL, _SPRB_CTL)
2804#define SPRLINOFF(pipe) _PIPE(pipe, _SPRA_LINOFF, _SPRB_LINOFF)
2805#define SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _SPRB_STRIDE)
2806#define SPRPOS(pipe) _PIPE(pipe, _SPRA_POS, _SPRB_POS)
2807#define SPRSIZE(pipe) _PIPE(pipe, _SPRA_SIZE, _SPRB_SIZE)
2808#define SPRKEYVAL(pipe) _PIPE(pipe, _SPRA_KEYVAL, _SPRB_KEYVAL)
2809#define SPRKEYMSK(pipe) _PIPE(pipe, _SPRA_KEYMSK, _SPRB_KEYMSK)
2810#define SPRSURF(pipe) _PIPE(pipe, _SPRA_SURF, _SPRB_SURF)
2811#define SPRKEYMAX(pipe) _PIPE(pipe, _SPRA_KEYMAX, _SPRB_KEYMAX)
2812#define SPRTILEOFF(pipe) _PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF)
2813#define SPRSCALE(pipe) _PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE)
2814#define SPRGAMC(pipe) _PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC)
2815
2678/* VBIOS regs */ 2816/* VBIOS regs */
2679#define VGACNTRL 0x71400 2817#define VGACNTRL 0x71400
2680# define VGA_DISP_DISABLE (1 << 31) 2818# define VGA_DISP_DISABLE (1 << 31)
@@ -2882,6 +3020,10 @@
2882#define ILK_DPFC_DIS1 (1<<8) 3020#define ILK_DPFC_DIS1 (1<<8)
2883#define ILK_DPFC_DIS2 (1<<9) 3021#define ILK_DPFC_DIS2 (1<<9)
2884 3022
3023#define IVB_CHICKEN3 0x4200c
3024# define CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE (1 << 5)
3025# define CHICKEN3_DGMG_DONE_FIX_DISABLE (1 << 2)
3026
2885#define DISP_ARB_CTL 0x45000 3027#define DISP_ARB_CTL 0x45000
2886#define DISP_TILE_SURFACE_SWIZZLING (1<<13) 3028#define DISP_TILE_SURFACE_SWIZZLING (1<<13)
2887#define DISP_FBC_WM_DIS (1<<15) 3029#define DISP_FBC_WM_DIS (1<<15)
@@ -3500,7 +3642,11 @@
3500#define GEN6_CAGF_MASK (0x7f << GEN6_CAGF_SHIFT) 3642#define GEN6_CAGF_MASK (0x7f << GEN6_CAGF_SHIFT)
3501#define GEN6_RP_CONTROL 0xA024 3643#define GEN6_RP_CONTROL 0xA024
3502#define GEN6_RP_MEDIA_TURBO (1<<11) 3644#define GEN6_RP_MEDIA_TURBO (1<<11)
3503#define GEN6_RP_USE_NORMAL_FREQ (1<<9) 3645#define GEN6_RP_MEDIA_MODE_MASK (3<<9)
3646#define GEN6_RP_MEDIA_HW_TURBO_MODE (3<<9)
3647#define GEN6_RP_MEDIA_HW_NORMAL_MODE (2<<9)
3648#define GEN6_RP_MEDIA_HW_MODE (1<<9)
3649#define GEN6_RP_MEDIA_SW_MODE (0<<9)
3504#define GEN6_RP_MEDIA_IS_GFX (1<<8) 3650#define GEN6_RP_MEDIA_IS_GFX (1<<8)
3505#define GEN6_RP_ENABLE (1<<7) 3651#define GEN6_RP_ENABLE (1<<7)
3506#define GEN6_RP_UP_IDLE_MIN (0x1<<3) 3652#define GEN6_RP_UP_IDLE_MIN (0x1<<3)
@@ -3557,6 +3703,14 @@
3557#define GEN6_PCODE_DATA 0x138128 3703#define GEN6_PCODE_DATA 0x138128
3558#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8 3704#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
3559 3705
3706#define GEN6_GT_CORE_STATUS 0x138060
3707#define GEN6_CORE_CPD_STATE_MASK (7<<4)
3708#define GEN6_RCn_MASK 7
3709#define GEN6_RC0 0
3710#define GEN6_RC3 2
3711#define GEN6_RC6 3
3712#define GEN6_RC7 4
3713
3560#define G4X_AUD_VID_DID 0x62020 3714#define G4X_AUD_VID_DID 0x62020
3561#define INTEL_AUDIO_DEVCL 0x808629FB 3715#define INTEL_AUDIO_DEVCL 0x808629FB
3562#define INTEL_AUDIO_DEVBLC 0x80862801 3716#define INTEL_AUDIO_DEVBLC 0x80862801
@@ -3569,17 +3723,23 @@
3569#define G4X_ELD_ACK (1 << 4) 3723#define G4X_ELD_ACK (1 << 4)
3570#define G4X_HDMIW_HDMIEDID 0x6210C 3724#define G4X_HDMIW_HDMIEDID 0x6210C
3571 3725
3572#define GEN5_HDMIW_HDMIEDID_A 0xE2050 3726#define IBX_HDMIW_HDMIEDID_A 0xE2050
3573#define GEN5_AUD_CNTL_ST_A 0xE20B4 3727#define IBX_AUD_CNTL_ST_A 0xE20B4
3574#define GEN5_ELD_BUFFER_SIZE (0x1f << 10) 3728#define IBX_ELD_BUFFER_SIZE (0x1f << 10)
3575#define GEN5_ELD_ADDRESS (0x1f << 5) 3729#define IBX_ELD_ADDRESS (0x1f << 5)
3576#define GEN5_ELD_ACK (1 << 4) 3730#define IBX_ELD_ACK (1 << 4)
3577#define GEN5_AUD_CNTL_ST2 0xE20C0 3731#define IBX_AUD_CNTL_ST2 0xE20C0
3578#define GEN5_ELD_VALIDB (1 << 0) 3732#define IBX_ELD_VALIDB (1 << 0)
3579#define GEN5_CP_READYB (1 << 1) 3733#define IBX_CP_READYB (1 << 1)
3580 3734
3581#define GEN7_HDMIW_HDMIEDID_A 0xE5050 3735#define CPT_HDMIW_HDMIEDID_A 0xE5050
3582#define GEN7_AUD_CNTRL_ST_A 0xE50B4 3736#define CPT_AUD_CNTL_ST_A 0xE50B4
3583#define GEN7_AUD_CNTRL_ST2 0xE50C0 3737#define CPT_AUD_CNTRL_ST2 0xE50C0
3738
3739/* These are the 4 32-bit write offset registers, one per stream
3740 * output buffer. Each determines the offset into its 3DSTATE_SO_BUFFER
3741 * at which the next streamed vertex output is written.
3742 */
3743#define GEN7_SO_WRITE_OFFSET(n) (0x5280 + (n) * 4)
3584 3744
3585#endif /* _I915_REG_H_ */ 3745#endif /* _I915_REG_H_ */
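As a quick cross-check of the new stream-output defines, here is a minimal standalone sketch (illustration only, not driver code) that evaluates the GEN7_SO_WRITE_OFFSET(n) macro from the hunk above and shows that the four per-stream write offset registers are consecutive 32-bit slots:

#include <stdio.h>

#define GEN7_SO_WRITE_OFFSET(n) (0x5280 + (n) * 4) /* as in the hunk above */

int main(void)
{
	for (int n = 0; n < 4; n++)
		printf("stream %d -> 0x%04x\n", n, GEN7_SO_WRITE_OFFSET(n));
	/* prints 0x5280, 0x5284, 0x5288, 0x528c */
	return 0;
}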
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index daa5743ccbd..2a3f707caab 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -915,8 +915,8 @@ static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
915 pipe_name(pipe)); 915 pipe_name(pipe));
916} 916}
917 917
918static void assert_pipe(struct drm_i915_private *dev_priv, 918void assert_pipe(struct drm_i915_private *dev_priv,
919 enum pipe pipe, bool state) 919 enum pipe pipe, bool state)
920{ 920{
921 int reg; 921 int reg;
922 u32 val; 922 u32 val;
@@ -929,8 +929,6 @@ static void assert_pipe(struct drm_i915_private *dev_priv,
929 "pipe %c assertion failure (expected %s, current %s)\n", 929 "pipe %c assertion failure (expected %s, current %s)\n",
930 pipe_name(pipe), state_string(state), state_string(cur_state)); 930 pipe_name(pipe), state_string(state), state_string(cur_state));
931} 931}
932#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
933#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
934 932
935static void assert_plane_enabled(struct drm_i915_private *dev_priv, 933static void assert_plane_enabled(struct drm_i915_private *dev_priv,
936 enum plane plane) 934 enum plane plane)
@@ -1206,7 +1204,8 @@ static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
1206 enum pipe pipe) 1204 enum pipe pipe)
1207{ 1205{
1208 int reg; 1206 int reg;
1209 u32 val; 1207 u32 val, pll_mask = TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL,
1208 pll_sel = TRANSC_DPLL_ENABLE;
1210 1209
1211 if (pipe > 1) 1210 if (pipe > 1)
1212 return; 1211 return;
@@ -1217,6 +1216,15 @@ static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
1217 /* Make sure transcoder isn't still depending on us */ 1216 /* Make sure transcoder isn't still depending on us */
1218 assert_transcoder_disabled(dev_priv, pipe); 1217 assert_transcoder_disabled(dev_priv, pipe);
1219 1218
1219 if (pipe == 0)
1220 pll_sel |= TRANSC_DPLLA_SEL;
1221 else if (pipe == 1)
1222 pll_sel |= TRANSC_DPLLB_SEL;
1223
1224
1225 if ((I915_READ(PCH_DPLL_SEL) & pll_mask) == pll_sel)
1226 return;
1227
1220 reg = PCH_DPLL(pipe); 1228 reg = PCH_DPLL(pipe);
1221 val = I915_READ(reg); 1229 val = I915_READ(reg);
1222 val &= ~DPLL_VCO_ENABLE; 1230 val &= ~DPLL_VCO_ENABLE;
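The early-out added above reads PCH_DPLL_SEL back, masks it, and compares against the select value for this pipe, so the PLL is left running while transcoder C still depends on it. A standalone sketch of that mask/select test follows; the bit positions are hypothetical placeholders, not the real register layout:

#include <stdbool.h>
#include <stdio.h>

#define TRANSC_DPLL_ENABLE (1 << 3) /* hypothetical bit positions */
#define TRANSC_DPLLA_SEL   (0 << 0)
#define TRANSC_DPLLB_SEL   (1 << 0)

static bool transc_still_uses_pll(unsigned int dpll_sel_reg, int pipe)
{
	unsigned int pll_mask = TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL;
	unsigned int pll_sel = TRANSC_DPLL_ENABLE |
			       (pipe == 0 ? TRANSC_DPLLA_SEL : TRANSC_DPLLB_SEL);

	return (dpll_sel_reg & pll_mask) == pll_sel;
}

int main(void)
{
	/* transcoder C enabled and routed to DPLL B */
	unsigned int reg = TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL;

	printf("pipe 0 still in use: %d\n", transc_still_uses_pll(reg, 0)); /* 0 */
	printf("pipe 1 still in use: %d\n", transc_still_uses_pll(reg, 1)); /* 1 */
	return 0;
}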
@@ -1511,8 +1519,8 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1511 u32 fbc_ctl, fbc_ctl2; 1519 u32 fbc_ctl, fbc_ctl2;
1512 1520
1513 cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE; 1521 cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
1514 if (fb->pitch < cfb_pitch) 1522 if (fb->pitches[0] < cfb_pitch)
1515 cfb_pitch = fb->pitch; 1523 cfb_pitch = fb->pitches[0];
1516 1524
1517 /* FBC_CTL wants 64B units */ 1525 /* FBC_CTL wants 64B units */
1518 cfb_pitch = (cfb_pitch / 64) - 1; 1526 cfb_pitch = (cfb_pitch / 64) - 1;
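The "FBC_CTL wants 64B units" step above encodes the compressed framebuffer pitch as (pitch / 64) - 1 after clamping to the framebuffer pitch; a quick worked example with hypothetical sizes:

#include <stdio.h>

int main(void)
{
	unsigned int cfb_pitch = 8192;    /* cfb_size / FBC_LL_SIZE, hypothetical */
	unsigned int fb_pitch = 1920 * 4; /* fb->pitches[0] of a 1920-wide XRGB fb */

	if (fb_pitch < cfb_pitch)
		cfb_pitch = fb_pitch;     /* never exceed the real fb pitch */

	printf("encoded pitch = %u\n", cfb_pitch / 64 - 1); /* 7680/64 - 1 = 119 */
	return 0;
}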
@@ -2073,11 +2081,11 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2073 I915_WRITE(reg, dspcntr); 2081 I915_WRITE(reg, dspcntr);
2074 2082
2075 Start = obj->gtt_offset; 2083 Start = obj->gtt_offset;
2076 Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8); 2084 Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
2077 2085
2078 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", 2086 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2079 Start, Offset, x, y, fb->pitch); 2087 Start, Offset, x, y, fb->pitches[0]);
2080 I915_WRITE(DSPSTRIDE(plane), fb->pitch); 2088 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2081 if (INTEL_INFO(dev)->gen >= 4) { 2089 if (INTEL_INFO(dev)->gen >= 4) {
2082 I915_WRITE(DSPSURF(plane), Start); 2090 I915_WRITE(DSPSURF(plane), Start);
2083 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); 2091 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
@@ -2154,11 +2162,11 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
2154 I915_WRITE(reg, dspcntr); 2162 I915_WRITE(reg, dspcntr);
2155 2163
2156 Start = obj->gtt_offset; 2164 Start = obj->gtt_offset;
2157 Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8); 2165 Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
2158 2166
2159 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", 2167 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2160 Start, Offset, x, y, fb->pitch); 2168 Start, Offset, x, y, fb->pitches[0]);
2161 I915_WRITE(DSPSTRIDE(plane), fb->pitch); 2169 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2162 I915_WRITE(DSPSURF(plane), Start); 2170 I915_WRITE(DSPSURF(plane), Start);
2163 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); 2171 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2164 I915_WRITE(DSPADDR(plane), Offset); 2172 I915_WRITE(DSPADDR(plane), Offset);
@@ -4509,7 +4517,7 @@ static void ironlake_update_wm(struct drm_device *dev)
4509 */ 4517 */
4510} 4518}
4511 4519
4512static void sandybridge_update_wm(struct drm_device *dev) 4520void sandybridge_update_wm(struct drm_device *dev)
4513{ 4521{
4514 struct drm_i915_private *dev_priv = dev->dev_private; 4522 struct drm_i915_private *dev_priv = dev->dev_private;
4515 int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */ 4523 int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
@@ -4569,7 +4577,8 @@ static void sandybridge_update_wm(struct drm_device *dev)
4569 I915_WRITE(WM2_LP_ILK, 0); 4577 I915_WRITE(WM2_LP_ILK, 0);
4570 I915_WRITE(WM1_LP_ILK, 0); 4578 I915_WRITE(WM1_LP_ILK, 0);
4571 4579
4572 if (!single_plane_enabled(enabled)) 4580 if (!single_plane_enabled(enabled) ||
4581 dev_priv->sprite_scaling_enabled)
4573 return; 4582 return;
4574 enabled = ffs(enabled) - 1; 4583 enabled = ffs(enabled) - 1;
4575 4584
@@ -4619,6 +4628,149 @@ static void sandybridge_update_wm(struct drm_device *dev)
4619 cursor_wm); 4628 cursor_wm);
4620} 4629}
4621 4630
4631static bool
4632sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
4633 uint32_t sprite_width, int pixel_size,
4634 const struct intel_watermark_params *display,
4635 int display_latency_ns, int *sprite_wm)
4636{
4637 struct drm_crtc *crtc;
4638 int clock;
4639 int entries, tlb_miss;
4640
4641 crtc = intel_get_crtc_for_plane(dev, plane);
4642 if (crtc->fb == NULL || !crtc->enabled) {
4643 *sprite_wm = display->guard_size;
4644 return false;
4645 }
4646
4647 clock = crtc->mode.clock;
4648
4649 /* Use the small buffer method to calculate the sprite watermark */
4650 entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
4651 tlb_miss = display->fifo_size*display->cacheline_size -
4652 sprite_width * 8;
4653 if (tlb_miss > 0)
4654 entries += tlb_miss;
4655 entries = DIV_ROUND_UP(entries, display->cacheline_size);
4656 *sprite_wm = entries + display->guard_size;
4657 if (*sprite_wm > (int)display->max_wm)
4658 *sprite_wm = display->max_wm;
4659
4660 return true;
4661}
4662
4663static bool
4664sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
4665 uint32_t sprite_width, int pixel_size,
4666 const struct intel_watermark_params *display,
4667 int latency_ns, int *sprite_wm)
4668{
4669 struct drm_crtc *crtc;
4670 unsigned long line_time_us;
4671 int clock;
4672 int line_count, line_size;
4673 int small, large;
4674 int entries;
4675
4676 if (!latency_ns) {
4677 *sprite_wm = 0;
4678 return false;
4679 }
4680
4681 crtc = intel_get_crtc_for_plane(dev, plane);
4682 clock = crtc->mode.clock;
4683
4684 line_time_us = (sprite_width * 1000) / clock;
4685 line_count = (latency_ns / line_time_us + 1000) / 1000;
4686 line_size = sprite_width * pixel_size;
4687
4688 /* Use the minimum of the small and large buffer method for primary */
4689 small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
4690 large = line_count * line_size;
4691
4692 entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
4693 *sprite_wm = entries + display->guard_size;
4694
4695 return *sprite_wm <= 0x3ff;
4696}
4697
4698static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
4699 uint32_t sprite_width, int pixel_size)
4700{
4701 struct drm_i915_private *dev_priv = dev->dev_private;
4702 int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
4703 int sprite_wm, reg;
4704 int ret;
4705
4706 switch (pipe) {
4707 case 0:
4708 reg = WM0_PIPEA_ILK;
4709 break;
4710 case 1:
4711 reg = WM0_PIPEB_ILK;
4712 break;
4713 case 2:
4714 reg = WM0_PIPEC_IVB;
4715 break;
4716 default:
4717 return; /* bad pipe */
4718 }
4719
4720 ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
4721 &sandybridge_display_wm_info,
4722 latency, &sprite_wm);
4723 if (!ret) {
4724 DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
4725 pipe);
4726 return;
4727 }
4728
4729 I915_WRITE(reg, I915_READ(reg) | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
4730 DRM_DEBUG_KMS("sprite watermarks for pipe %d - %d\n", pipe, sprite_wm);
4731
4732
4733 ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
4734 pixel_size,
4735 &sandybridge_display_srwm_info,
4736 SNB_READ_WM1_LATENCY() * 500,
4737 &sprite_wm);
4738 if (!ret) {
4739 DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
4740 pipe);
4741 return;
4742 }
4743 I915_WRITE(WM1S_LP_ILK, sprite_wm);
4744
4745 /* Only IVB has two more LP watermarks for sprite */
4746 if (!IS_IVYBRIDGE(dev))
4747 return;
4748
4749 ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
4750 pixel_size,
4751 &sandybridge_display_srwm_info,
4752 SNB_READ_WM2_LATENCY() * 500,
4753 &sprite_wm);
4754 if (!ret) {
4755 DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
4756 pipe);
4757 return;
4758 }
4759 I915_WRITE(WM2S_LP_IVB, sprite_wm);
4760
4761 ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
4762 pixel_size,
4763 &sandybridge_display_srwm_info,
4764 SNB_READ_WM3_LATENCY() * 500,
4765 &sprite_wm);
4766 if (!ret) {
4767 DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
4768 pipe);
4769 return;
4770 }
4771 I915_WRITE(WM3S_LP_IVB, sprite_wm);
4772}
4773
4622/** 4774/**
4623 * intel_update_watermarks - update FIFO watermark values based on current modes 4775 * intel_update_watermarks - update FIFO watermark values based on current modes
4624 * 4776 *
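To make the "small buffer method" arithmetic in sandybridge_compute_sprite_wm() above concrete, here is a standalone worked example. The parameters are made up for illustration, not the real sandybridge_display_wm_info values, and the driver additionally clamps the result to max_wm:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int clock = 148500;   /* pixel clock in kHz, roughly 1080p60 */
	int pixel_size = 4;   /* bytes per pixel */
	int latency_ns = 700; /* assumed WM0 latency */
	int cacheline = 64, fifo = 512, guard = 2;
	int sprite_width = 1920;

	int entries = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	int tlb_miss = fifo * cacheline - sprite_width * 8;

	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, cacheline);
	printf("sprite_wm = %d\n", entries + guard); /* 281 with these numbers */
	return 0;
}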
@@ -4659,6 +4811,16 @@ static void intel_update_watermarks(struct drm_device *dev)
4659 dev_priv->display.update_wm(dev); 4811 dev_priv->display.update_wm(dev);
4660} 4812}
4661 4813
4814void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
4815 uint32_t sprite_width, int pixel_size)
4816{
4817 struct drm_i915_private *dev_priv = dev->dev_private;
4818
4819 if (dev_priv->display.update_sprite_wm)
4820 dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
4821 pixel_size);
4822}
4823
4662static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) 4824static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
4663{ 4825{
4664 if (i915_panel_use_ssc >= 0) 4826 if (i915_panel_use_ssc >= 0)
@@ -5155,7 +5317,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
5155 adjusted_mode->crtc_vsync_end -= 1; 5317 adjusted_mode->crtc_vsync_end -= 1;
5156 adjusted_mode->crtc_vsync_start -= 1; 5318 adjusted_mode->crtc_vsync_start -= 1;
5157 } else 5319 } else
5158 pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */ 5320 pipeconf &= ~PIPECONF_INTERLACE_MASK; /* progressive */
5159 5321
5160 I915_WRITE(HTOTAL(pipe), 5322 I915_WRITE(HTOTAL(pipe),
5161 (adjusted_mode->crtc_hdisplay - 1) | 5323 (adjusted_mode->crtc_hdisplay - 1) |
@@ -5822,14 +5984,45 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
5822 5984
5823 ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode, 5985 ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
5824 x, y, old_fb); 5986 x, y, old_fb);
5825
5826 drm_vblank_post_modeset(dev, pipe); 5987 drm_vblank_post_modeset(dev, pipe);
5827 5988
5828 intel_crtc->dpms_mode = DRM_MODE_DPMS_ON; 5989 if (ret)
5990 intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
5991 else
5992 intel_crtc->dpms_mode = DRM_MODE_DPMS_ON;
5829 5993
5830 return ret; 5994 return ret;
5831} 5995}
5832 5996
5997static bool intel_eld_uptodate(struct drm_connector *connector,
5998 int reg_eldv, uint32_t bits_eldv,
5999 int reg_elda, uint32_t bits_elda,
6000 int reg_edid)
6001{
6002 struct drm_i915_private *dev_priv = connector->dev->dev_private;
6003 uint8_t *eld = connector->eld;
6004 uint32_t i;
6005
6006 i = I915_READ(reg_eldv);
6007 i &= bits_eldv;
6008
6009 if (!eld[0])
6010 return !i;
6011
6012 if (!i)
6013 return false;
6014
6015 i = I915_READ(reg_elda);
6016 i &= ~bits_elda;
6017 I915_WRITE(reg_elda, i);
6018
6019 for (i = 0; i < eld[2]; i++)
6020 if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
6021 return false;
6022
6023 return true;
6024}
6025
5833static void g4x_write_eld(struct drm_connector *connector, 6026static void g4x_write_eld(struct drm_connector *connector,
5834 struct drm_crtc *crtc) 6027 struct drm_crtc *crtc)
5835{ 6028{
@@ -5846,6 +6039,12 @@ static void g4x_write_eld(struct drm_connector *connector,
5846 else 6039 else
5847 eldv = G4X_ELDV_DEVCTG; 6040 eldv = G4X_ELDV_DEVCTG;
5848 6041
6042 if (intel_eld_uptodate(connector,
6043 G4X_AUD_CNTL_ST, eldv,
6044 G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
6045 G4X_HDMIW_HDMIEDID))
6046 return;
6047
5849 i = I915_READ(G4X_AUD_CNTL_ST); 6048 i = I915_READ(G4X_AUD_CNTL_ST);
5850 i &= ~(eldv | G4X_ELD_ADDR); 6049 i &= ~(eldv | G4X_ELD_ADDR);
5851 len = (i >> 9) & 0x1f; /* ELD buffer size */ 6050 len = (i >> 9) & 0x1f; /* ELD buffer size */
@@ -5876,14 +6075,14 @@ static void ironlake_write_eld(struct drm_connector *connector,
5876 int aud_cntl_st; 6075 int aud_cntl_st;
5877 int aud_cntrl_st2; 6076 int aud_cntrl_st2;
5878 6077
5879 if (IS_IVYBRIDGE(connector->dev)) { 6078 if (HAS_PCH_IBX(connector->dev)) {
5880 hdmiw_hdmiedid = GEN7_HDMIW_HDMIEDID_A; 6079 hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID_A;
5881 aud_cntl_st = GEN7_AUD_CNTRL_ST_A; 6080 aud_cntl_st = IBX_AUD_CNTL_ST_A;
5882 aud_cntrl_st2 = GEN7_AUD_CNTRL_ST2; 6081 aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
5883 } else { 6082 } else {
5884 hdmiw_hdmiedid = GEN5_HDMIW_HDMIEDID_A; 6083 hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID_A;
5885 aud_cntl_st = GEN5_AUD_CNTL_ST_A; 6084 aud_cntl_st = CPT_AUD_CNTL_ST_A;
5886 aud_cntrl_st2 = GEN5_AUD_CNTL_ST2; 6085 aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
5887 } 6086 }
5888 6087
5889 i = to_intel_crtc(crtc)->pipe; 6088 i = to_intel_crtc(crtc)->pipe;
@@ -5897,14 +6096,25 @@ static void ironlake_write_eld(struct drm_connector *connector,
5897 if (!i) { 6096 if (!i) {
5898 DRM_DEBUG_DRIVER("Audio directed to unknown port\n"); 6097 DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
5899 /* operate blindly on all ports */ 6098 /* operate blindly on all ports */
5900 eldv = GEN5_ELD_VALIDB; 6099 eldv = IBX_ELD_VALIDB;
5901 eldv |= GEN5_ELD_VALIDB << 4; 6100 eldv |= IBX_ELD_VALIDB << 4;
5902 eldv |= GEN5_ELD_VALIDB << 8; 6101 eldv |= IBX_ELD_VALIDB << 8;
5903 } else { 6102 } else {
5904 DRM_DEBUG_DRIVER("ELD on port %c\n", 'A' + i); 6103 DRM_DEBUG_DRIVER("ELD on port %c\n", 'A' + i);
5905 eldv = GEN5_ELD_VALIDB << ((i - 1) * 4); 6104 eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
6105 }
6106
6107 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
6108 DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
6109 eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
5906 } 6110 }
5907 6111
6112 if (intel_eld_uptodate(connector,
6113 aud_cntrl_st2, eldv,
6114 aud_cntl_st, IBX_ELD_ADDRESS,
6115 hdmiw_hdmiedid))
6116 return;
6117
5908 i = I915_READ(aud_cntrl_st2); 6118 i = I915_READ(aud_cntrl_st2);
5909 i &= ~eldv; 6119 i &= ~eldv;
5910 I915_WRITE(aud_cntrl_st2, i); 6120 I915_WRITE(aud_cntrl_st2, i);
@@ -5912,13 +6122,8 @@ static void ironlake_write_eld(struct drm_connector *connector,
5912 if (!eld[0]) 6122 if (!eld[0])
5913 return; 6123 return;
5914 6124
5915 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
5916 DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
5917 eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
5918 }
5919
5920 i = I915_READ(aud_cntl_st); 6125 i = I915_READ(aud_cntl_st);
5921 i &= ~GEN5_ELD_ADDRESS; 6126 i &= ~IBX_ELD_ADDRESS;
5922 I915_WRITE(aud_cntl_st, i); 6127 I915_WRITE(aud_cntl_st, i);
5923 6128
5924 len = min_t(uint8_t, eld[2], 21); /* 84 bytes of hw ELD buffer */ 6129 len = min_t(uint8_t, eld[2], 21); /* 84 bytes of hw ELD buffer */
@@ -6298,7 +6503,7 @@ static struct drm_display_mode load_detect_mode = {
6298 6503
6299static struct drm_framebuffer * 6504static struct drm_framebuffer *
6300intel_framebuffer_create(struct drm_device *dev, 6505intel_framebuffer_create(struct drm_device *dev,
6301 struct drm_mode_fb_cmd *mode_cmd, 6506 struct drm_mode_fb_cmd2 *mode_cmd,
6302 struct drm_i915_gem_object *obj) 6507 struct drm_i915_gem_object *obj)
6303{ 6508{
6304 struct intel_framebuffer *intel_fb; 6509 struct intel_framebuffer *intel_fb;
@@ -6340,7 +6545,7 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
6340 int depth, int bpp) 6545 int depth, int bpp)
6341{ 6546{
6342 struct drm_i915_gem_object *obj; 6547 struct drm_i915_gem_object *obj;
6343 struct drm_mode_fb_cmd mode_cmd; 6548 struct drm_mode_fb_cmd2 mode_cmd;
6344 6549
6345 obj = i915_gem_alloc_object(dev, 6550 obj = i915_gem_alloc_object(dev,
6346 intel_framebuffer_size_for_mode(mode, bpp)); 6551 intel_framebuffer_size_for_mode(mode, bpp));
@@ -6349,9 +6554,9 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
6349 6554
6350 mode_cmd.width = mode->hdisplay; 6555 mode_cmd.width = mode->hdisplay;
6351 mode_cmd.height = mode->vdisplay; 6556 mode_cmd.height = mode->vdisplay;
6352 mode_cmd.depth = depth; 6557 mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
6353 mode_cmd.bpp = bpp; 6558 bpp);
6354 mode_cmd.pitch = intel_framebuffer_pitch_for_width(mode_cmd.width, bpp); 6559 mode_cmd.pixel_format = 0;
6355 6560
6356 return intel_framebuffer_create(dev, &mode_cmd, obj); 6561 return intel_framebuffer_create(dev, &mode_cmd, obj);
6357} 6562}
@@ -6372,11 +6577,11 @@ mode_fits_in_fbdev(struct drm_device *dev,
6372 return NULL; 6577 return NULL;
6373 6578
6374 fb = &dev_priv->fbdev->ifb.base; 6579 fb = &dev_priv->fbdev->ifb.base;
6375 if (fb->pitch < intel_framebuffer_pitch_for_width(mode->hdisplay, 6580 if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
6376 fb->bits_per_pixel)) 6581 fb->bits_per_pixel))
6377 return NULL; 6582 return NULL;
6378 6583
6379 if (obj->base.size < mode->vdisplay * fb->pitch) 6584 if (obj->base.size < mode->vdisplay * fb->pitches[0])
6380 return NULL; 6585 return NULL;
6381 6586
6382 return fb; 6587 return fb;
@@ -7009,7 +7214,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
7009 goto out; 7214 goto out;
7010 7215
7011 /* Offset into the new buffer for cases of shared fbs between CRTCs */ 7216 /* Offset into the new buffer for cases of shared fbs between CRTCs */
7012 offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8; 7217 offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
7013 7218
7014 ret = BEGIN_LP_RING(6); 7219 ret = BEGIN_LP_RING(6);
7015 if (ret) 7220 if (ret)
@@ -7026,7 +7231,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
7026 OUT_RING(MI_NOOP); 7231 OUT_RING(MI_NOOP);
7027 OUT_RING(MI_DISPLAY_FLIP | 7232 OUT_RING(MI_DISPLAY_FLIP |
7028 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 7233 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
7029 OUT_RING(fb->pitch); 7234 OUT_RING(fb->pitches[0]);
7030 OUT_RING(obj->gtt_offset + offset); 7235 OUT_RING(obj->gtt_offset + offset);
7031 OUT_RING(MI_NOOP); 7236 OUT_RING(MI_NOOP);
7032 ADVANCE_LP_RING(); 7237 ADVANCE_LP_RING();
@@ -7050,7 +7255,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
7050 goto out; 7255 goto out;
7051 7256
7052 /* Offset into the new buffer for cases of shared fbs between CRTCs */ 7257 /* Offset into the new buffer for cases of shared fbs between CRTCs */
7053 offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8; 7258 offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
7054 7259
7055 ret = BEGIN_LP_RING(6); 7260 ret = BEGIN_LP_RING(6);
7056 if (ret) 7261 if (ret)
@@ -7064,7 +7269,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
7064 OUT_RING(MI_NOOP); 7269 OUT_RING(MI_NOOP);
7065 OUT_RING(MI_DISPLAY_FLIP_I915 | 7270 OUT_RING(MI_DISPLAY_FLIP_I915 |
7066 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 7271 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
7067 OUT_RING(fb->pitch); 7272 OUT_RING(fb->pitches[0]);
7068 OUT_RING(obj->gtt_offset + offset); 7273 OUT_RING(obj->gtt_offset + offset);
7069 OUT_RING(MI_NOOP); 7274 OUT_RING(MI_NOOP);
7070 7275
@@ -7097,7 +7302,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
7097 */ 7302 */
7098 OUT_RING(MI_DISPLAY_FLIP | 7303 OUT_RING(MI_DISPLAY_FLIP |
7099 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 7304 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
7100 OUT_RING(fb->pitch); 7305 OUT_RING(fb->pitches[0]);
7101 OUT_RING(obj->gtt_offset | obj->tiling_mode); 7306 OUT_RING(obj->gtt_offset | obj->tiling_mode);
7102 7307
7103 /* XXX Enabling the panel-fitter across page-flip is so far 7308 /* XXX Enabling the panel-fitter across page-flip is so far
@@ -7132,7 +7337,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
7132 7337
7133 OUT_RING(MI_DISPLAY_FLIP | 7338 OUT_RING(MI_DISPLAY_FLIP |
7134 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 7339 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
7135 OUT_RING(fb->pitch | obj->tiling_mode); 7340 OUT_RING(fb->pitches[0] | obj->tiling_mode);
7136 OUT_RING(obj->gtt_offset); 7341 OUT_RING(obj->gtt_offset);
7137 7342
7138 pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE; 7343 pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
@@ -7168,7 +7373,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
7168 goto out; 7373 goto out;
7169 7374
7170 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19)); 7375 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19));
7171 intel_ring_emit(ring, (fb->pitch | obj->tiling_mode)); 7376 intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
7172 intel_ring_emit(ring, (obj->gtt_offset)); 7377 intel_ring_emit(ring, (obj->gtt_offset));
7173 intel_ring_emit(ring, (MI_NOOP)); 7378 intel_ring_emit(ring, (MI_NOOP));
7174 intel_ring_advance(ring); 7379 intel_ring_advance(ring);
@@ -7594,7 +7799,7 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
7594 7799
7595int intel_framebuffer_init(struct drm_device *dev, 7800int intel_framebuffer_init(struct drm_device *dev,
7596 struct intel_framebuffer *intel_fb, 7801 struct intel_framebuffer *intel_fb,
7597 struct drm_mode_fb_cmd *mode_cmd, 7802 struct drm_mode_fb_cmd2 *mode_cmd,
7598 struct drm_i915_gem_object *obj) 7803 struct drm_i915_gem_object *obj)
7599{ 7804{
7600 int ret; 7805 int ret;
@@ -7602,21 +7807,25 @@ int intel_framebuffer_init(struct drm_device *dev,
7602 if (obj->tiling_mode == I915_TILING_Y) 7807 if (obj->tiling_mode == I915_TILING_Y)
7603 return -EINVAL; 7808 return -EINVAL;
7604 7809
7605 if (mode_cmd->pitch & 63) 7810 if (mode_cmd->pitches[0] & 63)
7606 return -EINVAL; 7811 return -EINVAL;
7607 7812
7608 switch (mode_cmd->bpp) { 7813 switch (mode_cmd->pixel_format) {
7609 case 8: 7814 case DRM_FORMAT_RGB332:
7610 case 16: 7815 case DRM_FORMAT_RGB565:
7611 /* Only pre-ILK can handle 5:5:5 */ 7816 case DRM_FORMAT_XRGB8888:
7612 if (mode_cmd->depth == 15 && !HAS_PCH_SPLIT(dev)) 7817 case DRM_FORMAT_ARGB8888:
7613 return -EINVAL; 7818 case DRM_FORMAT_XRGB2101010:
7819 case DRM_FORMAT_ARGB2101010:
7820 /* RGB formats are common across chipsets */
7614 break; 7821 break;
7615 7822 case DRM_FORMAT_YUYV:
7616 case 24: 7823 case DRM_FORMAT_UYVY:
7617 case 32: 7824 case DRM_FORMAT_YVYU:
7825 case DRM_FORMAT_VYUY:
7618 break; 7826 break;
7619 default: 7827 default:
7828 DRM_ERROR("unsupported pixel format\n");
7620 return -EINVAL; 7829 return -EINVAL;
7621 } 7830 }
7622 7831
@@ -7634,11 +7843,12 @@ int intel_framebuffer_init(struct drm_device *dev,
7634static struct drm_framebuffer * 7843static struct drm_framebuffer *
7635intel_user_framebuffer_create(struct drm_device *dev, 7844intel_user_framebuffer_create(struct drm_device *dev,
7636 struct drm_file *filp, 7845 struct drm_file *filp,
7637 struct drm_mode_fb_cmd *mode_cmd) 7846 struct drm_mode_fb_cmd2 *mode_cmd)
7638{ 7847{
7639 struct drm_i915_gem_object *obj; 7848 struct drm_i915_gem_object *obj;
7640 7849
7641 obj = to_intel_bo(drm_gem_object_lookup(dev, filp, mode_cmd->handle)); 7850 obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
7851 mode_cmd->handles[0]));
7642 if (&obj->base == NULL) 7852 if (&obj->base == NULL)
7643 return ERR_PTR(-ENOENT); 7853 return ERR_PTR(-ENOENT);
7644 7854
@@ -7995,7 +8205,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
7995 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); 8205 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
7996 I915_WRITE(GEN6_RP_CONTROL, 8206 I915_WRITE(GEN6_RP_CONTROL,
7997 GEN6_RP_MEDIA_TURBO | 8207 GEN6_RP_MEDIA_TURBO |
7998 GEN6_RP_USE_NORMAL_FREQ | 8208 GEN6_RP_MEDIA_HW_MODE |
7999 GEN6_RP_MEDIA_IS_GFX | 8209 GEN6_RP_MEDIA_IS_GFX |
8000 GEN6_RP_ENABLE | 8210 GEN6_RP_ENABLE |
8001 GEN6_RP_UP_BUSY_AVG | 8211 GEN6_RP_UP_BUSY_AVG |
@@ -8250,6 +8460,10 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
8250 8460
8251 I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE); 8461 I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
8252 8462
8463 I915_WRITE(IVB_CHICKEN3,
8464 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
8465 CHICKEN3_DGMG_DONE_FIX_DISABLE);
8466
8253 for_each_pipe(pipe) { 8467 for_each_pipe(pipe) {
8254 I915_WRITE(DSPCNTR(pipe), 8468 I915_WRITE(DSPCNTR(pipe),
8255 I915_READ(DSPCNTR(pipe)) | 8469 I915_READ(DSPCNTR(pipe)) |
@@ -8543,9 +8757,15 @@ static void intel_init_display(struct drm_device *dev)
8543 if (IS_IVYBRIDGE(dev)) { 8757 if (IS_IVYBRIDGE(dev)) {
8544 u32 ecobus; 8758 u32 ecobus;
8545 8759
8760 /* A small trick here - if the bios hasn't configured MT forcewake,
8761 * and if the device is in RC6, then force_wake_mt_get will not wake
8762 * the device and the ECOBUS read will return zero, which will be
8763 * (correctly) interpreted by the test below as MT forcewake being
8764 * disabled.
8765 */
8546 mutex_lock(&dev->struct_mutex); 8766 mutex_lock(&dev->struct_mutex);
8547 __gen6_gt_force_wake_mt_get(dev_priv); 8767 __gen6_gt_force_wake_mt_get(dev_priv);
8548 ecobus = I915_READ(ECOBUS); 8768 ecobus = I915_READ_NOTRACE(ECOBUS);
8549 __gen6_gt_force_wake_mt_put(dev_priv); 8769 __gen6_gt_force_wake_mt_put(dev_priv);
8550 mutex_unlock(&dev->struct_mutex); 8770 mutex_unlock(&dev->struct_mutex);
8551 8771
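A standalone sketch of the probe logic the new comment describes: take MT forcewake, read ECOBUS, and treat an all-zero read as MT forcewake being unconfigured. The bit name and values here are simulated stand-ins, not real hardware accesses:

#include <stdbool.h>
#include <stdio.h>

#define FORCEWAKE_MT_ENABLE (1 << 5) /* hypothetical bit position */

/* stands in for force_wake_mt_get + I915_READ_NOTRACE(ECOBUS) + put */
static unsigned int probe_ecobus(bool bios_configured_mt, bool in_rc6)
{
	if (!bios_configured_mt && in_rc6)
		return 0; /* device never woke, so the read returns zero */
	return FORCEWAKE_MT_ENABLE;
}

int main(void)
{
	unsigned int ecobus = probe_ecobus(false, true);

	if (ecobus & FORCEWAKE_MT_ENABLE)
		printf("using MT forcewake\n");
	else
		printf("MT forcewake unavailable, using plain forcewake\n");
	return 0;
}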
@@ -8577,6 +8797,7 @@ static void intel_init_display(struct drm_device *dev)
8577 } else if (IS_GEN6(dev)) { 8797 } else if (IS_GEN6(dev)) {
8578 if (SNB_READ_WM0_LATENCY()) { 8798 if (SNB_READ_WM0_LATENCY()) {
8579 dev_priv->display.update_wm = sandybridge_update_wm; 8799 dev_priv->display.update_wm = sandybridge_update_wm;
8800 dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
8580 } else { 8801 } else {
8581 DRM_DEBUG_KMS("Failed to read display plane latency. " 8802 DRM_DEBUG_KMS("Failed to read display plane latency. "
8582 "Disable CxSR\n"); 8803 "Disable CxSR\n");
@@ -8590,6 +8811,7 @@ static void intel_init_display(struct drm_device *dev)
8590 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; 8811 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
8591 if (SNB_READ_WM0_LATENCY()) { 8812 if (SNB_READ_WM0_LATENCY()) {
8592 dev_priv->display.update_wm = sandybridge_update_wm; 8813 dev_priv->display.update_wm = sandybridge_update_wm;
8814 dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
8593 } else { 8815 } else {
8594 DRM_DEBUG_KMS("Failed to read display plane latency. " 8816 DRM_DEBUG_KMS("Failed to read display plane latency. "
8595 "Disable CxSR\n"); 8817 "Disable CxSR\n");
@@ -8773,7 +8995,7 @@ static void i915_disable_vga(struct drm_device *dev)
8773void intel_modeset_init(struct drm_device *dev) 8995void intel_modeset_init(struct drm_device *dev)
8774{ 8996{
8775 struct drm_i915_private *dev_priv = dev->dev_private; 8997 struct drm_i915_private *dev_priv = dev->dev_private;
8776 int i; 8998 int i, ret;
8777 8999
8778 drm_mode_config_init(dev); 9000 drm_mode_config_init(dev);
8779 9001
@@ -8803,6 +9025,12 @@ void intel_modeset_init(struct drm_device *dev)
8803 9025
8804 for (i = 0; i < dev_priv->num_pipe; i++) { 9026 for (i = 0; i < dev_priv->num_pipe; i++) {
8805 intel_crtc_init(dev, i); 9027 intel_crtc_init(dev, i);
9028 if (HAS_PCH_SPLIT(dev)) {
9029 ret = intel_plane_init(dev, i);
9030 if (ret)
9031 DRM_ERROR("plane %d init failed: %d\n",
9032 i, ret);
9033 }
8806 } 9034 }
8807 9035
8808 /* Just disable it once at startup */ 9036 /* Just disable it once at startup */
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 92b041b66e4..db3b461ad41 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1926,6 +1926,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
1926 intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe); 1926 intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
1927 } 1927 }
1928 1928
1929 DP &= ~DP_AUDIO_OUTPUT_ENABLE;
1929 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN); 1930 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
1930 POSTING_READ(intel_dp->output_reg); 1931 POSTING_READ(intel_dp->output_reg);
1931 msleep(intel_dp->panel_power_down_delay); 1932 msleep(intel_dp->panel_power_down_delay);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index a1b4343814e..1348705faf6 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -26,6 +26,7 @@
26#define __INTEL_DRV_H__ 26#define __INTEL_DRV_H__
27 27
28#include <linux/i2c.h> 28#include <linux/i2c.h>
29#include "i915_drm.h"
29#include "i915_drv.h" 30#include "i915_drv.h"
30#include "drm_crtc.h" 31#include "drm_crtc.h"
31#include "drm_crtc_helper.h" 32#include "drm_crtc_helper.h"
@@ -39,7 +40,7 @@
39 ret__ = -ETIMEDOUT; \ 40 ret__ = -ETIMEDOUT; \
40 break; \ 41 break; \
41 } \ 42 } \
42 if (W && !(in_atomic() || in_dbg_master())) msleep(W); \ 43 if (W && drm_can_sleep()) msleep(W); \
43 } \ 44 } \
44 ret__; \ 45 ret__; \
45}) 46})
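Only the tail of the _wait_for() macro is visible in this hunk. For context, here is a simplified userspace re-implementation of the same poll-until-timeout pattern; it is a sketch, not the driver macro, though it uses the same GCC statement-expression style:

#include <errno.h>
#include <stdio.h>
#include <time.h>

#define wait_for(COND, MS) ({					\
	int ret__ = 0;						\
	int left__ = (MS);					\
	struct timespec ts__ = { 0, 1000000 }; /* 1 ms */	\
	while (!(COND)) {					\
		if (left__-- <= 0) {				\
			ret__ = -ETIMEDOUT;			\
			break;					\
		}						\
		nanosleep(&ts__, NULL);				\
	}							\
	ret__;							\
})

int main(void)
{
	int polls = 0;

	/* the condition becomes true on the fourth evaluation */
	int ret = wait_for(++polls > 3, 50);
	printf("ret=%d after %d polls\n", ret, polls); /* ret=0 after 4 polls */
	return 0;
}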
@@ -47,13 +48,6 @@
47#define wait_for(COND, MS) _wait_for(COND, MS, 1) 48#define wait_for(COND, MS) _wait_for(COND, MS, 1)
48#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0) 49#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
49 50
50#define MSLEEP(x) do { \
51 if (in_dbg_master()) \
52 mdelay(x); \
53 else \
54 msleep(x); \
55} while (0)
56
57#define KHz(x) (1000*x) 51#define KHz(x) (1000*x)
58#define MHz(x) KHz(1000*x) 52#define MHz(x) KHz(1000*x)
59 53
@@ -177,10 +171,32 @@ struct intel_crtc {
177 bool use_pll_a; 171 bool use_pll_a;
178}; 172};
179 173
174struct intel_plane {
175 struct drm_plane base;
176 enum pipe pipe;
177 struct drm_i915_gem_object *obj;
178 bool primary_disabled;
179 int max_downscale;
180 u32 lut_r[1024], lut_g[1024], lut_b[1024];
181 void (*update_plane)(struct drm_plane *plane,
182 struct drm_framebuffer *fb,
183 struct drm_i915_gem_object *obj,
184 int crtc_x, int crtc_y,
185 unsigned int crtc_w, unsigned int crtc_h,
186 uint32_t x, uint32_t y,
187 uint32_t src_w, uint32_t src_h);
188 void (*disable_plane)(struct drm_plane *plane);
189 int (*update_colorkey)(struct drm_plane *plane,
190 struct drm_intel_sprite_colorkey *key);
191 void (*get_colorkey)(struct drm_plane *plane,
192 struct drm_intel_sprite_colorkey *key);
193};
194
180#define to_intel_crtc(x) container_of(x, struct intel_crtc, base) 195#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
181#define to_intel_connector(x) container_of(x, struct intel_connector, base) 196#define to_intel_connector(x) container_of(x, struct intel_connector, base)
182#define to_intel_encoder(x) container_of(x, struct intel_encoder, base) 197#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
183#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) 198#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
199#define to_intel_plane(x) container_of(x, struct intel_plane, base)
184 200
185#define DIP_HEADER_SIZE 5 201#define DIP_HEADER_SIZE 5
186 202
@@ -290,6 +306,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
290extern bool intel_dpd_is_edp(struct drm_device *dev); 306extern bool intel_dpd_is_edp(struct drm_device *dev);
291extern void intel_edp_link_config(struct intel_encoder *, int *, int *); 307extern void intel_edp_link_config(struct intel_encoder *, int *, int *);
292extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder); 308extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder);
309extern int intel_plane_init(struct drm_device *dev, enum pipe pipe);
293 310
294/* intel_panel.c */ 311/* intel_panel.c */
295extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, 312extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
@@ -360,7 +377,7 @@ extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
360 377
361extern int intel_framebuffer_init(struct drm_device *dev, 378extern int intel_framebuffer_init(struct drm_device *dev,
362 struct intel_framebuffer *ifb, 379 struct intel_framebuffer *ifb,
363 struct drm_mode_fb_cmd *mode_cmd, 380 struct drm_mode_fb_cmd2 *mode_cmd,
364 struct drm_i915_gem_object *obj); 381 struct drm_i915_gem_object *obj);
365extern int intel_fbdev_init(struct drm_device *dev); 382extern int intel_fbdev_init(struct drm_device *dev);
366extern void intel_fbdev_fini(struct drm_device *dev); 383extern void intel_fbdev_fini(struct drm_device *dev);
@@ -380,9 +397,25 @@ extern int intel_overlay_attrs(struct drm_device *dev, void *data,
380extern void intel_fb_output_poll_changed(struct drm_device *dev); 397extern void intel_fb_output_poll_changed(struct drm_device *dev);
381extern void intel_fb_restore_mode(struct drm_device *dev); 398extern void intel_fb_restore_mode(struct drm_device *dev);
382 399
400extern void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
401 bool state);
402#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
403#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
404
383extern void intel_init_clock_gating(struct drm_device *dev); 405extern void intel_init_clock_gating(struct drm_device *dev);
384extern void intel_write_eld(struct drm_encoder *encoder, 406extern void intel_write_eld(struct drm_encoder *encoder,
385 struct drm_display_mode *mode); 407 struct drm_display_mode *mode);
386extern void intel_cpt_verify_modeset(struct drm_device *dev, int pipe); 408extern void intel_cpt_verify_modeset(struct drm_device *dev, int pipe);
387 409
410/* For use by IVB LP watermark workaround in intel_sprite.c */
411extern void sandybridge_update_wm(struct drm_device *dev);
412extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
413 uint32_t sprite_width,
414 int pixel_size);
415
416extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
417 struct drm_file *file_priv);
418extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
419 struct drm_file *file_priv);
420
388#endif /* __INTEL_DRV_H__ */ 421#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index ec49bae7338..571375a3eef 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -65,7 +65,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
65 struct drm_i915_private *dev_priv = dev->dev_private; 65 struct drm_i915_private *dev_priv = dev->dev_private;
66 struct fb_info *info; 66 struct fb_info *info;
67 struct drm_framebuffer *fb; 67 struct drm_framebuffer *fb;
68 struct drm_mode_fb_cmd mode_cmd; 68 struct drm_mode_fb_cmd2 mode_cmd;
69 struct drm_i915_gem_object *obj; 69 struct drm_i915_gem_object *obj;
70 struct device *device = &dev->pdev->dev; 70 struct device *device = &dev->pdev->dev;
71 int size, ret; 71 int size, ret;
@@ -77,11 +77,12 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
77 mode_cmd.width = sizes->surface_width; 77 mode_cmd.width = sizes->surface_width;
78 mode_cmd.height = sizes->surface_height; 78 mode_cmd.height = sizes->surface_height;
79 79
80 mode_cmd.bpp = sizes->surface_bpp; 80 mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((sizes->surface_bpp + 7) /
81 mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 7) / 8), 64); 81 8), 64);
82 mode_cmd.depth = sizes->surface_depth; 82 mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
83 sizes->surface_depth);
83 84
84 size = mode_cmd.pitch * mode_cmd.height; 85 size = mode_cmd.pitches[0] * mode_cmd.height;
85 size = ALIGN(size, PAGE_SIZE); 86 size = ALIGN(size, PAGE_SIZE);
86 obj = i915_gem_alloc_object(dev, size); 87 obj = i915_gem_alloc_object(dev, size);
87 if (!obj) { 88 if (!obj) {
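A worked example of the pitch and size math in the hunk above, for a hypothetical 1920x1080, 32 bpp fbdev surface with a 4 KiB page size:

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))
#define PAGE_SIZE 4096

int main(void)
{
	unsigned int width = 1920, height = 1080, bpp = 32;
	unsigned int pitch = ALIGN(width * ((bpp + 7) / 8), 64);
	unsigned int size = ALIGN(pitch * height, PAGE_SIZE);

	printf("pitch=%u size=%u\n", pitch, size); /* pitch=7680 size=8294400 */
	return 0;
}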
@@ -148,7 +149,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
148 149
149// memset(info->screen_base, 0, size); 150// memset(info->screen_base, 0, size);
150 151
151 drm_fb_helper_fill_fix(info, fb->pitch, fb->depth); 152 drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
152 drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height); 153 drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
153 154
154 info->pixmap.size = 64*1024; 155 info->pixmap.size = 64*1024;
@@ -269,8 +270,14 @@ void intel_fb_restore_mode(struct drm_device *dev)
269{ 270{
270 int ret; 271 int ret;
271 drm_i915_private_t *dev_priv = dev->dev_private; 272 drm_i915_private_t *dev_priv = dev->dev_private;
273 struct drm_mode_config *config = &dev->mode_config;
274 struct drm_plane *plane;
272 275
273 ret = drm_fb_helper_restore_fbdev_mode(&dev_priv->fbdev->helper); 276 ret = drm_fb_helper_restore_fbdev_mode(&dev_priv->fbdev->helper);
274 if (ret) 277 if (ret)
275 DRM_DEBUG("failed to restore crtc mode\n"); 278 DRM_DEBUG("failed to restore crtc mode\n");
279
280 /* Be sure to shut off any planes that may be active */
281 list_for_each_entry(plane, &config->plane_list, head)
282 plane->funcs->disable_plane(plane);
276} 283}
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index d4f5a0b2120..64541f7ef90 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -269,6 +269,10 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
269 struct drm_i915_private *dev_priv = dev->dev_private; 269 struct drm_i915_private *dev_priv = dev->dev_private;
270 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 270 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
271 u32 temp; 271 u32 temp;
272 u32 enable_bits = SDVO_ENABLE;
273
274 if (intel_hdmi->has_audio)
275 enable_bits |= SDVO_AUDIO_ENABLE;
272 276
273 temp = I915_READ(intel_hdmi->sdvox_reg); 277 temp = I915_READ(intel_hdmi->sdvox_reg);
274 278
@@ -281,9 +285,9 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
281 } 285 }
282 286
283 if (mode != DRM_MODE_DPMS_ON) { 287 if (mode != DRM_MODE_DPMS_ON) {
284 temp &= ~SDVO_ENABLE; 288 temp &= ~enable_bits;
285 } else { 289 } else {
286 temp |= SDVO_ENABLE; 290 temp |= enable_bits;
287 } 291 }
288 292
289 I915_WRITE(intel_hdmi->sdvox_reg, temp); 293 I915_WRITE(intel_hdmi->sdvox_reg, temp);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index ca70e2f1044..77e729d4e4f 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -414,6 +414,11 @@ static int init_render_ring(struct intel_ring_buffer *ring)
414 return ret; 414 return ret;
415 } 415 }
416 416
417 if (INTEL_INFO(dev)->gen >= 6) {
418 I915_WRITE(INSTPM,
419 INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING);
420 }
421
417 return ret; 422 return ret;
418} 423}
419 424
@@ -787,6 +792,17 @@ ring_add_request(struct intel_ring_buffer *ring,
787} 792}
788 793
789static bool 794static bool
795gen7_blt_ring_get_irq(struct intel_ring_buffer *ring)
796{
797 /* The BLT ring on IVB appears to have broken synchronization
798 * between the seqno write and the interrupt, so that the
799 * interrupt appears first. Returning false here makes
800 * i915_wait_request() do a polling loop, instead.
801 */
802 return false;
803}
804
805static bool
790gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag) 806gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
791{ 807{
792 struct drm_device *dev = ring->dev; 808 struct drm_device *dev = ring->dev;
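A sketch of the fallback the gen7_blt_ring_get_irq() comment relies on: when irq_get reports no usable interrupt, the waiter degrades to polling the seqno. Types and helpers here are stand-ins for illustration, not the driver's:

#include <stdbool.h>
#include <stdio.h>

struct ring {
	unsigned int seqno;
	bool (*irq_get)(struct ring *);
};

static bool broken_irq_get(struct ring *r) { (void)r; return false; }

static void wait_seqno(struct ring *r, unsigned int want)
{
	if (!r->irq_get(r)) {
		/* no reliable interrupt: busy-poll the seqno instead */
		while (r->seqno < want)
			r->seqno++; /* stands in for re-reading the status page */
	}
}

int main(void)
{
	struct ring blt = { .seqno = 0, .irq_get = broken_irq_get };

	wait_seqno(&blt, 3);
	printf("seqno=%u\n", blt.seqno); /* 3 */
	return 0;
}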
@@ -1119,7 +1135,16 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
1119 } 1135 }
1120 1136
1121 trace_i915_ring_wait_begin(ring); 1137 trace_i915_ring_wait_begin(ring);
1122 end = jiffies + 3 * HZ; 1138 if (drm_core_check_feature(dev, DRIVER_GEM))
1139 /* With GEM the hangcheck timer should kick us out of the loop,
1140 * leaving it early runs the risk of corrupting GEM state (due
1141 * to running on almost untested codepaths). But on resume
1142 * timers don't work yet, so prevent a complete hang in that
1143 * case by choosing an insanely large timeout. */
1144 end = jiffies + 60 * HZ;
1145 else
1146 end = jiffies + 3 * HZ;
1147
1123 do { 1148 do {
1124 ring->head = I915_READ_HEAD(ring); 1149 ring->head = I915_READ_HEAD(ring);
1125 ring->space = ring_space(ring); 1150 ring->space = ring_space(ring);
@@ -1552,5 +1577,8 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
1552 1577
1553 *ring = gen6_blt_ring; 1578 *ring = gen6_blt_ring;
1554 1579
1580 if (IS_GEN7(dev))
1581 ring->irq_get = gen7_blt_ring_get_irq;
1582
1555 return intel_init_ring_buffer(dev, ring); 1583 return intel_init_ring_buffer(dev, ring);
1556} 1584}
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
new file mode 100644
index 00000000000..d13989fda50
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -0,0 +1,668 @@
1/*
2 * Copyright © 2011 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 * Authors:
24 * Jesse Barnes <jbarnes@virtuousgeek.org>
25 *
26 * New plane/sprite handling.
27 *
28 * The older chips had a separate interface for programming plane-related
29 * registers; newer ones are much simpler and we can use the new DRM plane
30 * support.
31 */
32#include "drmP.h"
33#include "drm_crtc.h"
34#include "drm_fourcc.h"
35#include "intel_drv.h"
36#include "i915_drm.h"
37#include "i915_drv.h"
38
39static void
40ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
41 struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
42 unsigned int crtc_w, unsigned int crtc_h,
43 uint32_t x, uint32_t y,
44 uint32_t src_w, uint32_t src_h)
45{
46 struct drm_device *dev = plane->dev;
47 struct drm_i915_private *dev_priv = dev->dev_private;
48 struct intel_plane *intel_plane = to_intel_plane(plane);
49 int pipe = intel_plane->pipe;
50 u32 sprctl, sprscale = 0;
51 int pixel_size;
52
53 sprctl = I915_READ(SPRCTL(pipe));
54
55 /* Mask out pixel format bits in case we change it */
56 sprctl &= ~SPRITE_PIXFORMAT_MASK;
57 sprctl &= ~SPRITE_RGB_ORDER_RGBX;
58 sprctl &= ~SPRITE_YUV_BYTE_ORDER_MASK;
59
60 switch (fb->pixel_format) {
61 case DRM_FORMAT_XBGR8888:
62 sprctl |= SPRITE_FORMAT_RGBX888;
63 pixel_size = 4;
64 break;
65 case DRM_FORMAT_XRGB8888:
66 sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX;
67 pixel_size = 4;
68 break;
69 case DRM_FORMAT_YUYV:
70 sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YUYV;
71 pixel_size = 2;
72 break;
73 case DRM_FORMAT_YVYU:
74 sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YVYU;
75 pixel_size = 2;
76 break;
77 case DRM_FORMAT_UYVY:
78 sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_UYVY;
79 pixel_size = 2;
80 break;
81 case DRM_FORMAT_VYUY:
82 sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_VYUY;
83 pixel_size = 2;
84 break;
85 default:
86 DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n");
87 sprctl |= SPRITE_FORMAT_RGBX888; /* SPRITE_, not DVS_: this is the IVB sprite path */
88 pixel_size = 4;
89 break;
90 }
91
92 if (obj->tiling_mode != I915_TILING_NONE)
93 sprctl |= SPRITE_TILED;
94
95 /* trickle feed must be disabled */
96 sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
97 sprctl |= SPRITE_ENABLE;
98 sprctl |= SPRITE_DEST_KEY;
99
100 /* Sizes are 0 based */
101 src_w--;
102 src_h--;
103 crtc_w--;
104 crtc_h--;
105
106 intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size);
107
108 /*
109 * IVB workaround: must disable low power watermarks for at least
110 * one frame before enabling scaling. LP watermarks can be re-enabled
111 * when scaling is disabled.
112 */
113 if (crtc_w != src_w || crtc_h != src_h) {
114 dev_priv->sprite_scaling_enabled = true;
115 sandybridge_update_wm(dev);
116 intel_wait_for_vblank(dev, pipe);
117 sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
118 } else {
119 dev_priv->sprite_scaling_enabled = false;
120 /* potentially re-enable LP watermarks */
121 sandybridge_update_wm(dev);
122 }
123
124 I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
125 I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
126 if (obj->tiling_mode != I915_TILING_NONE) {
127 I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x);
128 } else {
129 unsigned long offset;
130
131 offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
132 I915_WRITE(SPRLINOFF(pipe), offset);
133 }
134 I915_WRITE(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
135 I915_WRITE(SPRSCALE(pipe), sprscale);
136 I915_WRITE(SPRCTL(pipe), sprctl);
137 I915_WRITE(SPRSURF(pipe), obj->gtt_offset);
138 POSTING_READ(SPRSURF(pipe));
139}
140
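The ordering mandated by the IVB workaround comment in ivb_update_plane() above, reduced to a minimal standalone sketch (stand-in helpers, not driver code): LP watermarks go off first, one frame scans out, and only then does the scaler turn on.

#include <stdbool.h>
#include <stdio.h>

static bool lp_wm_enabled = true;

static void update_wm(bool scaling) { lp_wm_enabled = !scaling; }
static void wait_for_vblank(void) { printf("vblank\n"); }

int main(void)
{
	update_wm(true);	/* 1. drop the LP watermarks */
	wait_for_vblank();	/* 2. let at least one frame scan out */
	printf("scaler on (lp_wm=%d)\n", lp_wm_enabled); /* 3. now enable scaling */
	return 0;
}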
141static void
142ivb_disable_plane(struct drm_plane *plane)
143{
144 struct drm_device *dev = plane->dev;
145 struct drm_i915_private *dev_priv = dev->dev_private;
146 struct intel_plane *intel_plane = to_intel_plane(plane);
147 int pipe = intel_plane->pipe;
148
149 I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE);
150 /* Can't leave the scaler enabled... */
151 I915_WRITE(SPRSCALE(pipe), 0);
152 /* Activate double buffered register update */
153 I915_WRITE(SPRSURF(pipe), 0);
154 POSTING_READ(SPRSURF(pipe));
155}
156
157static int
158ivb_update_colorkey(struct drm_plane *plane,
159 struct drm_intel_sprite_colorkey *key)
160{
161 struct drm_device *dev = plane->dev;
162 struct drm_i915_private *dev_priv = dev->dev_private;
163 struct intel_plane *intel_plane;
164 u32 sprctl;
165 int ret = 0;
166
167 intel_plane = to_intel_plane(plane);
168
169 I915_WRITE(SPRKEYVAL(intel_plane->pipe), key->min_value);
170 I915_WRITE(SPRKEYMAX(intel_plane->pipe), key->max_value);
171 I915_WRITE(SPRKEYMSK(intel_plane->pipe), key->channel_mask);
172
173 sprctl = I915_READ(SPRCTL(intel_plane->pipe));
174 sprctl &= ~(SPRITE_SOURCE_KEY | SPRITE_DEST_KEY);
175 if (key->flags & I915_SET_COLORKEY_DESTINATION)
176 sprctl |= SPRITE_DEST_KEY;
177 else if (key->flags & I915_SET_COLORKEY_SOURCE)
178 sprctl |= SPRITE_SOURCE_KEY;
179 I915_WRITE(SPRCTL(intel_plane->pipe), sprctl);
180
181 POSTING_READ(SPRKEYMSK(intel_plane->pipe));
182
183 return ret;
184}
185
186static void
187ivb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
188{
189 struct drm_device *dev = plane->dev;
190 struct drm_i915_private *dev_priv = dev->dev_private;
191 struct intel_plane *intel_plane;
192 u32 sprctl;
193
194 intel_plane = to_intel_plane(plane);
195
196 key->min_value = I915_READ(SPRKEYVAL(intel_plane->pipe));
197 key->max_value = I915_READ(SPRKEYMAX(intel_plane->pipe));
198 key->channel_mask = I915_READ(SPRKEYMSK(intel_plane->pipe));
199 key->flags = 0;
200
201 sprctl = I915_READ(SPRCTL(intel_plane->pipe));
202
203 if (sprctl & SPRITE_DEST_KEY)
204 key->flags = I915_SET_COLORKEY_DESTINATION;
205 else if (sprctl & SPRITE_SOURCE_KEY)
206 key->flags = I915_SET_COLORKEY_SOURCE;
207 else
208 key->flags = I915_SET_COLORKEY_NONE;
209}
210
211static void
212snb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
213 struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
214 unsigned int crtc_w, unsigned int crtc_h,
215 uint32_t x, uint32_t y,
216 uint32_t src_w, uint32_t src_h)
217{
218 struct drm_device *dev = plane->dev;
219 struct drm_i915_private *dev_priv = dev->dev_private;
220 struct intel_plane *intel_plane = to_intel_plane(plane);
221 int pipe = intel_plane->pipe, pixel_size;
222 u32 dvscntr, dvsscale = 0;
223
224 dvscntr = I915_READ(DVSCNTR(pipe));
225
226 /* Mask out pixel format bits in case we change it */
227 dvscntr &= ~DVS_PIXFORMAT_MASK;
228 dvscntr &= ~DVS_RGB_ORDER_RGBX;
229 dvscntr &= ~DVS_YUV_BYTE_ORDER_MASK;
230
231 switch (fb->pixel_format) {
232 case DRM_FORMAT_XBGR8888:
233 dvscntr |= DVS_FORMAT_RGBX888;
234 pixel_size = 4;
235 break;
236 case DRM_FORMAT_XRGB8888:
237 dvscntr |= DVS_FORMAT_RGBX888 | DVS_RGB_ORDER_RGBX;
238 pixel_size = 4;
239 break;
240 case DRM_FORMAT_YUYV:
241 dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YUYV;
242 pixel_size = 2;
243 break;
244 case DRM_FORMAT_YVYU:
245 dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YVYU;
246 pixel_size = 2;
247 break;
248 case DRM_FORMAT_UYVY:
249 dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_UYVY;
250 pixel_size = 2;
251 break;
252 case DRM_FORMAT_VYUY:
253 dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_VYUY;
254 pixel_size = 2;
255 break;
256 default:
257 DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n");
258 dvscntr |= DVS_FORMAT_RGBX888;
259 pixel_size = 4;
260 break;
261 }
262
263 if (obj->tiling_mode != I915_TILING_NONE)
264 dvscntr |= DVS_TILED;
265
266 /* trickle feed must be disabled */
267 dvscntr |= DVS_TRICKLE_FEED_DISABLE;
268 dvscntr |= DVS_ENABLE;
269
270 /* Sizes are 0 based */
271 src_w--;
272 src_h--;
273 crtc_w--;
274 crtc_h--;
275
276 intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size);
277
278 if (crtc_w != src_w || crtc_h != src_h)
279 dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
280
281 I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
282 I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
283 if (obj->tiling_mode != I915_TILING_NONE) {
284 I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x);
285 } else {
286 unsigned long offset;
287
288 offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
289 I915_WRITE(DVSLINOFF(pipe), offset);
290 }
291 I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
292 I915_WRITE(DVSSCALE(pipe), dvsscale);
293 I915_WRITE(DVSCNTR(pipe), dvscntr);
294 I915_WRITE(DVSSURF(pipe), obj->gtt_offset);
295 POSTING_READ(DVSSURF(pipe));
296}
297
298static void
299snb_disable_plane(struct drm_plane *plane)
300{
301 struct drm_device *dev = plane->dev;
302 struct drm_i915_private *dev_priv = dev->dev_private;
303 struct intel_plane *intel_plane = to_intel_plane(plane);
304 int pipe = intel_plane->pipe;
305
306 I915_WRITE(DVSCNTR(pipe), I915_READ(DVSCNTR(pipe)) & ~DVS_ENABLE);
307 /* Disable the scaler */
308 I915_WRITE(DVSSCALE(pipe), 0);
309 /* Flush double buffered register updates */
310 I915_WRITE(DVSSURF(pipe), 0);
311 POSTING_READ(DVSSURF(pipe));
312}
313
314static void
315intel_enable_primary(struct drm_crtc *crtc)
316{
317 struct drm_device *dev = crtc->dev;
318 struct drm_i915_private *dev_priv = dev->dev_private;
319 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
320 int reg = DSPCNTR(intel_crtc->plane);
321
322 I915_WRITE(reg, I915_READ(reg) | DISPLAY_PLANE_ENABLE);
323}
324
325static void
326intel_disable_primary(struct drm_crtc *crtc)
327{
328 struct drm_device *dev = crtc->dev;
329 struct drm_i915_private *dev_priv = dev->dev_private;
330 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
331 int reg = DSPCNTR(intel_crtc->plane);
332
333 I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE);
334}
335
336static int
337snb_update_colorkey(struct drm_plane *plane,
338 struct drm_intel_sprite_colorkey *key)
339{
340 struct drm_device *dev = plane->dev;
341 struct drm_i915_private *dev_priv = dev->dev_private;
342 struct intel_plane *intel_plane;
343 u32 dvscntr;
344 int ret = 0;
345
346 intel_plane = to_intel_plane(plane);
347
348 I915_WRITE(DVSKEYVAL(intel_plane->pipe), key->min_value);
349 I915_WRITE(DVSKEYMAX(intel_plane->pipe), key->max_value);
350 I915_WRITE(DVSKEYMSK(intel_plane->pipe), key->channel_mask);
351
352 dvscntr = I915_READ(DVSCNTR(intel_plane->pipe));
353 dvscntr &= ~(DVS_SOURCE_KEY | DVS_DEST_KEY);
354 if (key->flags & I915_SET_COLORKEY_DESTINATION)
355 dvscntr |= DVS_DEST_KEY;
356 else if (key->flags & I915_SET_COLORKEY_SOURCE)
357 dvscntr |= DVS_SOURCE_KEY;
358 I915_WRITE(DVSCNTR(intel_plane->pipe), dvscntr);
359
360 POSTING_READ(DVSKEYMSK(intel_plane->pipe));
361
362 return ret;
363}
364
365static void
366snb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
367{
368 struct drm_device *dev = plane->dev;
369 struct drm_i915_private *dev_priv = dev->dev_private;
370 struct intel_plane *intel_plane;
371 u32 dvscntr;
372
373 intel_plane = to_intel_plane(plane);
374
375 key->min_value = I915_READ(DVSKEYVAL(intel_plane->pipe));
376 key->max_value = I915_READ(DVSKEYMAX(intel_plane->pipe));
377 key->channel_mask = I915_READ(DVSKEYMSK(intel_plane->pipe));
378 key->flags = 0;
379
380 dvscntr = I915_READ(DVSCNTR(intel_plane->pipe));
381
382 if (dvscntr & DVS_DEST_KEY)
383 key->flags = I915_SET_COLORKEY_DESTINATION;
384 else if (dvscntr & DVS_SOURCE_KEY)
385 key->flags = I915_SET_COLORKEY_SOURCE;
386 else
387 key->flags = I915_SET_COLORKEY_NONE;
388}
389
390static int
391intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
392 struct drm_framebuffer *fb, int crtc_x, int crtc_y,
393 unsigned int crtc_w, unsigned int crtc_h,
394 uint32_t src_x, uint32_t src_y,
395 uint32_t src_w, uint32_t src_h)
396{
397 struct drm_device *dev = plane->dev;
398 struct drm_i915_private *dev_priv = dev->dev_private;
399 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
400 struct intel_plane *intel_plane = to_intel_plane(plane);
401 struct intel_framebuffer *intel_fb;
402 struct drm_i915_gem_object *obj, *old_obj;
403 int pipe = intel_plane->pipe;
404 int ret = 0;
405 int x = src_x >> 16, y = src_y >> 16;
406 int primary_w = crtc->mode.hdisplay, primary_h = crtc->mode.vdisplay;
407 bool disable_primary = false;
408
409 intel_fb = to_intel_framebuffer(fb);
410 obj = intel_fb->obj;
411
412 old_obj = intel_plane->obj;
413
414 /* Pipe must be running... */
415 if (!(I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE))
416 return -EINVAL;
417
418 if (crtc_x >= primary_w || crtc_y >= primary_h)
419 return -EINVAL;
420
421 /* Don't modify another pipe's plane */
422 if (intel_plane->pipe != intel_crtc->pipe)
423 return -EINVAL;
424
425 /*
426 * Clamp the width & height into the visible area. Note we don't
427 * try to scale the source if part of the visible region is offscreen.
428 * The caller must handle that by adjusting source offset and size.
429 */
430 if ((crtc_x < 0) && ((crtc_x + crtc_w) > 0)) {
431 crtc_w += crtc_x;
432 crtc_x = 0;
433 }
434 if ((crtc_x + crtc_w) <= 0) /* Nothing to display */
435 goto out;
436 if ((crtc_x + crtc_w) > primary_w)
437 crtc_w = primary_w - crtc_x;
438
439 if ((crtc_y < 0) && ((crtc_y + crtc_h) > 0)) {
440 crtc_h += crtc_y;
441 crtc_y = 0;
442 }
443 if ((crtc_y + crtc_h) <= 0) /* Nothing to display */
444 goto out;
445 if (crtc_y + crtc_h > primary_h)
446 crtc_h = primary_h - crtc_y;
447
448 if (!crtc_w || !crtc_h) /* Again, nothing to display */
449 goto out;
450
451 /*
452 * We can take a larger source and scale it down, but
453 * only so much... 16x is the max on SNB.
454 */
455 if (((src_w * src_h) / (crtc_w * crtc_h)) > intel_plane->max_downscale)
456 return -EINVAL;
457
458 /*
459 * If the sprite is completely covering the primary plane,
460 * we can disable the primary and save power.
461 */
462 if ((crtc_x == 0) && (crtc_y == 0) &&
463 (crtc_w == primary_w) && (crtc_h == primary_h))
464 disable_primary = true;
465
466 mutex_lock(&dev->struct_mutex);
467
468 ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
469 if (ret) {
470 DRM_ERROR("failed to pin object\n");
471 goto out_unlock;
472 }
473
474 intel_plane->obj = obj;
475
476 /*
477 * Be sure to re-enable the primary before the sprite is no longer
478 * covering it fully.
479 */
480 if (!disable_primary && intel_plane->primary_disabled) {
481 intel_enable_primary(crtc);
482 intel_plane->primary_disabled = false;
483 }
484
485 intel_plane->update_plane(plane, fb, obj, crtc_x, crtc_y,
486 crtc_w, crtc_h, x, y, src_w, src_h);
487
488 if (disable_primary) {
489 intel_disable_primary(crtc);
490 intel_plane->primary_disabled = true;
491 }
492
493 /* Unpin old obj after new one is active to avoid ugliness */
494 if (old_obj) {
495 /*
496 * It's fairly common to simply update the position of
497 * an existing object. In that case, we don't need to
498 * wait for vblank to avoid ugliness, we only need to
499 * do the pin & ref bookkeeping.
500 */
501 if (old_obj != obj) {
502 mutex_unlock(&dev->struct_mutex);
503 intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
504 mutex_lock(&dev->struct_mutex);
505 }
506 i915_gem_object_unpin(old_obj);
507 }
508
509out_unlock:
510 mutex_unlock(&dev->struct_mutex);
511out:
512 return ret;
513}
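/*
 * A sketch (illustrative names, not driver code) of the per-axis clamping
 * intel_update_plane() performs above: trim the destination rectangle to
 * [0, limit) and report how much of it remains visible.  The caller has
 * already rejected rectangles that start at or beyond 'limit', mirroring
 * the crtc_x >= primary_w check in the function.
 */
static unsigned int clamp_axis(int *pos, unsigned int len, int limit)
{
	if (*pos < 0 && *pos + (int)len > 0) {	/* partially off the low edge */
		len += *pos;
		*pos = 0;
	}
	if (*pos + (int)len <= 0)		/* fully off-screen */
		return 0;
	if (*pos + (int)len > limit)		/* spills past the high edge */
		len = limit - *pos;
	return len;
}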
514
515static int
516intel_disable_plane(struct drm_plane *plane)
517{
518 struct drm_device *dev = plane->dev;
519 struct intel_plane *intel_plane = to_intel_plane(plane);
520 int ret = 0;
521
522 if (intel_plane->primary_disabled) {
523 intel_enable_primary(plane->crtc);
524 intel_plane->primary_disabled = false;
525 }
526
527 intel_plane->disable_plane(plane);
528
529 if (!intel_plane->obj)
530 goto out;
531
532 mutex_lock(&dev->struct_mutex);
533 i915_gem_object_unpin(intel_plane->obj);
534 intel_plane->obj = NULL;
535 mutex_unlock(&dev->struct_mutex);
536out:
537
538 return ret;
539}
540
541static void intel_destroy_plane(struct drm_plane *plane)
542{
543 struct intel_plane *intel_plane = to_intel_plane(plane);
544 intel_disable_plane(plane);
545 drm_plane_cleanup(plane);
546 kfree(intel_plane);
547}
548
549int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
550 struct drm_file *file_priv)
551{
552 struct drm_intel_sprite_colorkey *set = data;
553 struct drm_i915_private *dev_priv = dev->dev_private;
554 struct drm_mode_object *obj;
555 struct drm_plane *plane;
556 struct intel_plane *intel_plane;
557 int ret = 0;
558
559 if (!dev_priv)
560 return -EINVAL;
561
562 /* Make sure we don't try to enable both src & dest simultaneously */
563 if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
564 return -EINVAL;
565
566 mutex_lock(&dev->mode_config.mutex);
567
568 obj = drm_mode_object_find(dev, set->plane_id, DRM_MODE_OBJECT_PLANE);
569 if (!obj) {
570 ret = -EINVAL;
571 goto out_unlock;
572 }
573
574 plane = obj_to_plane(obj);
575 intel_plane = to_intel_plane(plane);
576 ret = intel_plane->update_colorkey(plane, set);
577
578out_unlock:
579 mutex_unlock(&dev->mode_config.mutex);
580 return ret;
581}
582
583int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
584 struct drm_file *file_priv)
585{
586 struct drm_intel_sprite_colorkey *get = data;
587 struct drm_i915_private *dev_priv = dev->dev_private;
588 struct drm_mode_object *obj;
589 struct drm_plane *plane;
590 struct intel_plane *intel_plane;
591 int ret = 0;
592
593 if (!dev_priv)
594 return -EINVAL;
595
596 mutex_lock(&dev->mode_config.mutex);
597
598 obj = drm_mode_object_find(dev, get->plane_id, DRM_MODE_OBJECT_PLANE);
599 if (!obj) {
600 ret = -EINVAL;
601 goto out_unlock;
602 }
603
604 plane = obj_to_plane(obj);
605 intel_plane = to_intel_plane(plane);
606 intel_plane->get_colorkey(plane, get);
607
608out_unlock:
609 mutex_unlock(&dev->mode_config.mutex);
610 return ret;
611}
612
613static const struct drm_plane_funcs intel_plane_funcs = {
614 .update_plane = intel_update_plane,
615 .disable_plane = intel_disable_plane,
616 .destroy = intel_destroy_plane,
617};
618
619static uint32_t snb_plane_formats[] = {
620 DRM_FORMAT_XBGR8888,
621 DRM_FORMAT_XRGB8888,
622 DRM_FORMAT_YUYV,
623 DRM_FORMAT_YVYU,
624 DRM_FORMAT_UYVY,
625 DRM_FORMAT_VYUY,
626};
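/*
 * The table above lists DRM fourcc codes.  For illustration, each code
 * packs four ASCII characters little-endian, the same packing the
 * fourcc_code() macro in drm_fourcc.h performs; this is a standalone copy.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t fourcc(char a, char b, char c, char d)
{
	return (uint32_t)a | (uint32_t)b << 8 |
	       (uint32_t)c << 16 | (uint32_t)d << 24;
}

int main(void)
{
	printf("DRM_FORMAT_YUYV = 0x%08x\n", fourcc('Y', 'U', 'Y', 'V'));
	return 0;
}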
627
628int
629intel_plane_init(struct drm_device *dev, enum pipe pipe)
630{
631 struct intel_plane *intel_plane;
632 unsigned long possible_crtcs;
633 int ret;
634
635 if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
636 DRM_ERROR("new plane code only for SNB+\n");
637 return -ENODEV;
638 }
639
640 intel_plane = kzalloc(sizeof(struct intel_plane), GFP_KERNEL);
641 if (!intel_plane)
642 return -ENOMEM;
643
644 if (IS_GEN6(dev)) {
645 intel_plane->max_downscale = 16;
646 intel_plane->update_plane = snb_update_plane;
647 intel_plane->disable_plane = snb_disable_plane;
648 intel_plane->update_colorkey = snb_update_colorkey;
649 intel_plane->get_colorkey = snb_get_colorkey;
650 } else if (IS_GEN7(dev)) {
651 intel_plane->max_downscale = 2;
652 intel_plane->update_plane = ivb_update_plane;
653 intel_plane->disable_plane = ivb_disable_plane;
654 intel_plane->update_colorkey = ivb_update_colorkey;
655 intel_plane->get_colorkey = ivb_get_colorkey;
656 }
657
658 intel_plane->pipe = pipe;
659 possible_crtcs = (1 << pipe);
660 ret = drm_plane_init(dev, &intel_plane->base, possible_crtcs,
661 &intel_plane_funcs, snb_plane_formats,
662 ARRAY_SIZE(snb_plane_formats), false);
663 if (ret)
664 kfree(intel_plane);
665
666 return ret;
667}
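A hedged sketch of how the rest of the driver might consume intel_plane_init() at load time, one sprite (DVS) per pipe. The two-pipe loop and the caller name are illustrative assumptions; only intel_plane_init() itself comes from the code above.

static void example_setup_sprites(struct drm_device *dev)
{
	int pipe;

	/* SNB/IVB expose one sprite per pipe; failures are non-fatal */
	for (pipe = 0; pipe < 2; pipe++) {
		if (intel_plane_init(dev, pipe))
			DRM_DEBUG_DRIVER("no sprite plane on pipe %d\n", pipe);
	}
}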
668
diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c
index 33daa29eea6..f9a925d5881 100644
--- a/drivers/gpu/drm/mga/mga_drv.c
+++ b/drivers/gpu/drm/mga/mga_drv.c
@@ -44,6 +44,20 @@ static struct pci_device_id pciidlist[] = {
 	mga_PCI_IDS
 };
 
+static const struct file_operations mga_driver_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_open,
+	.release = drm_release,
+	.unlocked_ioctl = drm_ioctl,
+	.mmap = drm_mmap,
+	.poll = drm_poll,
+	.fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = mga_compat_ioctl,
+#endif
+	.llseek = noop_llseek,
+};
+
 static struct drm_driver driver = {
 	.driver_features =
 	    DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA |
@@ -64,20 +78,7 @@ static struct drm_driver driver = {
 	.reclaim_buffers = drm_core_reclaim_buffers,
 	.ioctls = mga_ioctls,
 	.dma_ioctl = mga_dma_buffers,
-	.fops = {
-		.owner = THIS_MODULE,
-		.open = drm_open,
-		.release = drm_release,
-		.unlocked_ioctl = drm_ioctl,
-		.mmap = drm_mmap,
-		.poll = drm_poll,
-		.fasync = drm_fasync,
-#ifdef CONFIG_COMPAT
-		.compat_ioctl = mga_compat_ioctl,
-#endif
-		.llseek = noop_llseek,
-	},
-
+	.fops = &mga_driver_fops,
 	.name = DRIVER_NAME,
 	.desc = DRIVER_DESC,
 	.date = DRIVER_DATE,
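The mga hunks above follow a tree-wide conversion in this series: the file_operations table moves out of struct drm_driver into a shared static const object referenced by pointer. A generic sketch of the pattern, with made-up names:

#include "drmP.h"

/* Before: every driver embedded a mutable copy of the ops in its driver
 * struct.  After: one immutable, shared table, referenced by pointer.
 */
static const struct file_operations example_driver_fops = {
	.owner		= THIS_MODULE,
	.open		= drm_open,
	.release	= drm_release,
	.unlocked_ioctl	= drm_ioctl,
};

static struct drm_driver example_driver = {
	.fops = &example_driver_fops,	/* was: an embedded struct */
};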
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 35ef5b1e356..9f27e3d9e69 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -9,9 +9,9 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
              nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \
              nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \
              nouveau_display.o nouveau_connector.o nouveau_fbcon.o \
-             nouveau_dp.o nouveau_ramht.o \
+             nouveau_hdmi.o nouveau_dp.o nouveau_ramht.o \
              nouveau_pm.o nouveau_volt.o nouveau_perf.o nouveau_temp.o \
-             nouveau_mm.o nouveau_vm.o \
+             nouveau_mm.o nouveau_vm.o nouveau_mxm.o nouveau_gpio.o \
              nv04_timer.o \
              nv04_mc.o nv40_mc.o nv50_mc.o \
              nv04_fb.o nv10_fb.o nv30_fb.o nv40_fb.o nv50_fb.o nvc0_fb.o \
@@ -19,9 +19,12 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
              nv04_graph.o nv10_graph.o nv20_graph.o \
              nv40_graph.o nv50_graph.o nvc0_graph.o \
              nv40_grctx.o nv50_grctx.o nvc0_grctx.o \
-             nv84_crypt.o \
+             nv84_crypt.o nv98_crypt.o \
              nva3_copy.o nvc0_copy.o \
              nv31_mpeg.o nv50_mpeg.o \
+             nv84_bsp.o \
+             nv84_vp.o \
+             nv98_ppp.o \
              nv04_instmem.o nv50_instmem.o nvc0_instmem.o \
              nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \
              nv04_crtc.o nv04_display.o nv04_cursor.o \
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 5fc201b49d3..e5cbead85e5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -27,6 +27,7 @@
 #include "nouveau_drv.h"
 #include "nouveau_hw.h"
 #include "nouveau_encoder.h"
+#include "nouveau_gpio.h"
 
 #include <linux/io-mapping.h>
 
@@ -34,9 +35,6 @@
 #define NV_CIO_CRE_44_HEADA 0x0
 #define NV_CIO_CRE_44_HEADB 0x3
 #define FEATURE_MOBILE 0x10	/* also FEATURE_QUADRO for BMP */
-#define LEGACY_I2C_CRT 0x80
-#define LEGACY_I2C_PANEL 0x81
-#define LEGACY_I2C_TV 0x82
 
 #define EDID1_LEN 128
 
@@ -723,115 +721,19 @@ static int dcb_entry_idx_from_crtchead(struct drm_device *dev)
 	return dcb_entry;
 }
 
-static int
-read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, int index, struct dcb_i2c_entry *i2c)
-{
-	uint8_t dcb_i2c_ver = dcb_version, headerlen = 0, entry_len = 4;
-	int i2c_entries = DCB_MAX_NUM_I2C_ENTRIES;
-	int recordoffset = 0, rdofs = 1, wrofs = 0;
-	uint8_t port_type = 0;
-
-	if (!i2ctable)
-		return -EINVAL;
-
-	if (dcb_version >= 0x30) {
-		if (i2ctable[0] != dcb_version)	/* necessary? */
-			NV_WARN(dev,
-				"DCB I2C table version mismatch (%02X vs %02X)\n",
-				i2ctable[0], dcb_version);
-		dcb_i2c_ver = i2ctable[0];
-		headerlen = i2ctable[1];
-		if (i2ctable[2] <= DCB_MAX_NUM_I2C_ENTRIES)
-			i2c_entries = i2ctable[2];
-		else
-			NV_WARN(dev,
-				"DCB I2C table has more entries than indexable "
-				"(%d entries, max %d)\n", i2ctable[2],
-				DCB_MAX_NUM_I2C_ENTRIES);
-		entry_len = i2ctable[3];
-		/* [4] is i2c_default_indices, read in parse_dcb_table() */
-	}
-	/*
-	 * It's your own fault if you call this function on a DCB 1.1 BIOS --
-	 * the test below is for DCB 1.2
-	 */
-	if (dcb_version < 0x14) {
-		recordoffset = 2;
-		rdofs = 0;
-		wrofs = 1;
-	}
-
-	if (index == 0xf)
-		return 0;
-	if (index >= i2c_entries) {
-		NV_ERROR(dev, "DCB I2C index too big (%d >= %d)\n",
-			 index, i2ctable[2]);
-		return -ENOENT;
-	}
-	if (i2ctable[headerlen + entry_len * index + 3] == 0xff) {
-		NV_ERROR(dev, "DCB I2C entry invalid\n");
-		return -EINVAL;
-	}
-
-	if (dcb_i2c_ver >= 0x30) {
-		port_type = i2ctable[headerlen + recordoffset + 3 + entry_len * index];
-
-		/*
-		 * Fixup for chips using same address offset for read and
-		 * write.
-		 */
-		if (port_type == 4)	/* seen on C51 */
-			rdofs = wrofs = 1;
-		if (port_type >= 5)	/* G80+ */
-			rdofs = wrofs = 0;
-	}
-
-	if (dcb_i2c_ver >= 0x40) {
-		if (port_type != 5 && port_type != 6)
-			NV_WARN(dev, "DCB I2C table has port type %d\n", port_type);
-
-		i2c->entry = ROM32(i2ctable[headerlen + recordoffset + entry_len * index]);
-	}
-
-	i2c->port_type = port_type;
-	i2c->read = i2ctable[headerlen + recordoffset + rdofs + entry_len * index];
-	i2c->write = i2ctable[headerlen + recordoffset + wrofs + entry_len * index];
-
-	return 0;
-}
-
 static struct nouveau_i2c_chan *
 init_i2c_device_find(struct drm_device *dev, int i2c_index)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct dcb_table *dcb = &dev_priv->vbios.dcb;
-
 	if (i2c_index == 0xff) {
+		struct drm_nouveau_private *dev_priv = dev->dev_private;
+		struct dcb_table *dcb = &dev_priv->vbios.dcb;
 		/* note: dcb_entry_idx_from_crtchead needs pre-script set-up */
-		int idx = dcb_entry_idx_from_crtchead(dev), shift = 0;
-		int default_indices = dcb->i2c_default_indices;
+		int idx = dcb_entry_idx_from_crtchead(dev);
 
+		i2c_index = NV_I2C_DEFAULT(0);
 		if (idx != 0x7f && dcb->entry[idx].i2c_upper_default)
-			shift = 4;
-
-		i2c_index = (default_indices >> shift) & 0xf;
+			i2c_index = NV_I2C_DEFAULT(1);
 	}
-	if (i2c_index == 0x80)	/* g80+ */
-		i2c_index = dcb->i2c_default_indices & 0xf;
-	else
-	if (i2c_index == 0x81)
-		i2c_index = (dcb->i2c_default_indices & 0xf0) >> 4;
-
-	if (i2c_index >= DCB_MAX_NUM_I2C_ENTRIES) {
-		NV_ERROR(dev, "invalid i2c_index 0x%x\n", i2c_index);
-		return NULL;
-	}
-
-	/* Make sure i2c table entry has been parsed, it may not
-	 * have been if this is a bus not referenced by a DCB encoder
-	 */
-	read_dcb_i2c_entry(dev, dcb->version, dcb->i2c_table,
-			   i2c_index, &dcb->i2c[i2c_index]);
 
 	return nouveau_i2c_find(dev, i2c_index);
 }
@@ -1199,13 +1101,9 @@ init_dp_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
 
 	switch (cond) {
 	case 0:
-	{
-		struct dcb_connector_table_entry *ent =
-			&bios->dcb.connector.entry[dcb->connector];
-
-		if (ent->type != DCB_CONNECTOR_eDP)
+		entry = dcb_conn(dev, dcb->connector);
+		if (!entry || entry[0] != DCB_CONNECTOR_eDP)
 			iexec->execute = false;
-	}
 		break;
 	case 1:
 	case 2:
@@ -3227,49 +3125,6 @@ init_8d(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
 	return 1;
 }
 
-static void
-init_gpio_unknv50(struct nvbios *bios, struct dcb_gpio_entry *gpio)
-{
-	const uint32_t nv50_gpio_ctl[2] = { 0xe100, 0xe28c };
-	u32 r, s, v;
-
-	/* Not a clue, needs de-magicing */
-	r = nv50_gpio_ctl[gpio->line >> 4];
-	s = (gpio->line & 0x0f);
-	v = bios_rd32(bios, r) & ~(0x00010001 << s);
-	switch ((gpio->entry & 0x06000000) >> 25) {
-	case 1:
-		v |= (0x00000001 << s);
-		break;
-	case 2:
-		v |= (0x00010000 << s);
-		break;
-	default:
-		break;
-	}
-
-	bios_wr32(bios, r, v);
-}
-
-static void
-init_gpio_unknvd0(struct nvbios *bios, struct dcb_gpio_entry *gpio)
-{
-	u32 v, i;
-
-	v = bios_rd32(bios, 0x00d610 + (gpio->line * 4));
-	v &= 0xffffff00;
-	v |= (gpio->entry & 0x00ff0000) >> 16;
-	bios_wr32(bios, 0x00d610 + (gpio->line * 4), v);
-
-	i = (gpio->entry & 0x1f000000) >> 24;
-	if (i) {
-		v = bios_rd32(bios, 0x00d640 + ((i - 1) * 4));
-		v &= 0xffffff00;
-		v |= gpio->line;
-		bios_wr32(bios, 0x00d640 + ((i - 1) * 4), v);
-	}
-}
-
 static int
 init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
 {
@@ -3282,35 +3137,8 @@ init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
 	 * each GPIO according to various values listed in each entry
 	 */
 
-	struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
-	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
-	int i;
-
-	if (dev_priv->card_type < NV_50) {
-		NV_ERROR(bios->dev, "INIT_GPIO on unsupported chipset\n");
-		return 1;
-	}
-
-	if (!iexec->execute)
-		return 1;
-
-	for (i = 0; i < bios->dcb.gpio.entries; i++) {
-		struct dcb_gpio_entry *gpio = &bios->dcb.gpio.entry[i];
-
-		BIOSLOG(bios, "0x%04X: Entry: 0x%08X\n", offset, gpio->entry);
-
-		BIOSLOG(bios, "0x%04X: set gpio 0x%02x, state %d\n",
-			offset, gpio->tag, gpio->state_default);
-
-		if (!bios->execute)
-			continue;
-
-		pgpio->set(bios->dev, gpio->tag, gpio->state_default);
-		if (dev_priv->card_type < NV_D0)
-			init_gpio_unknv50(bios, gpio);
-		else
-			init_gpio_unknvd0(bios, gpio);
-	}
+	if (iexec->execute && bios->execute)
+		nouveau_gpio_reset(bios->dev);
 
 	return 1;
 }
@@ -4407,18 +4235,6 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b
 		break;
 	}
 
-	/* Dell Latitude D620 reports a too-high value for the dual-link
-	 * transition freq, causing us to program the panel incorrectly.
-	 *
-	 * It doesn't appear the VBIOS actually uses its transition freq
-	 * (90000kHz), instead it uses the "Number of LVDS channels" field
-	 * out of the panel ID structure (http://www.spwg.org/).
-	 *
-	 * For the moment, a quirk will do :)
-	 */
-	if (nv_match_device(dev, 0x01d7, 0x1028, 0x01c2))
-		bios->fp.duallink_transition_clk = 80000;
-
 	/* set dual_link flag for EDID case */
 	if (pxclk && (chip_version < 0x25 || chip_version > 0x28))
 		bios->fp.dual_link = (pxclk >= bios->fp.duallink_transition_clk);
@@ -4541,7 +4357,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
 	NV_DEBUG_KMS(dev, "Searching for output entry for %d %d %d\n",
 		     dcbent->type, dcbent->location, dcbent->or);
 	for (i = 0; i < table[3]; i++) {
-		otable = ROMPTR(bios, table[table[1] + (i * table[2])]);
+		otable = ROMPTR(dev, table[table[1] + (i * table[2])]);
 		if (otable && bios_encoder_match(dcbent, ROM32(otable[0])))
 			break;
 	}
@@ -4719,7 +4535,7 @@ static struct pll_mapping nv84_pll_mapping[] = {
 	{ PLL_CORE  , 0x004028 },
 	{ PLL_SHADER, 0x004020 },
 	{ PLL_MEMORY, 0x004008 },
-	{ PLL_UNK05 , 0x004030 },
+	{ PLL_VDEC  , 0x004030 },
 	{ PLL_UNK41 , 0x00e818 },
 	{ PLL_VPLL0 , 0x614100 },
 	{ PLL_VPLL1 , 0x614900 },
@@ -5485,6 +5301,9 @@ bit_table(struct drm_device *dev, u8 id, struct bit_entry *bit)
 	struct nvbios *bios = &dev_priv->vbios;
 	u8 entries, *entry;
 
+	if (bios->type != NVBIOS_BIT)
+		return -ENODEV;
+
 	entries = bios->data[bios->offset + 10];
 	entry = &bios->data[bios->offset + 12];
 	while (entries--) {
@@ -5493,7 +5312,7 @@ bit_table(struct drm_device *dev, u8 id, struct bit_entry *bit)
 			bit->version = entry[1];
 			bit->length = ROM16(entry[2]);
 			bit->offset = ROM16(entry[4]);
-			bit->data = ROMPTR(bios, entry[4]);
+			bit->data = ROMPTR(dev, entry[4]);
 			return 0;
 		}
 
@@ -5598,10 +5417,6 @@ static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsi
 	uint16_t legacy_scripts_offset, legacy_i2c_offset;
 
 	/* load needed defaults in case we can't parse this info */
-	bios->dcb.i2c[0].write = NV_CIO_CRE_DDC_WR__INDEX;
-	bios->dcb.i2c[0].read = NV_CIO_CRE_DDC_STATUS__INDEX;
-	bios->dcb.i2c[1].write = NV_CIO_CRE_DDC0_WR__INDEX;
-	bios->dcb.i2c[1].read = NV_CIO_CRE_DDC0_STATUS__INDEX;
 	bios->digital_min_front_porch = 0x4b;
 	bios->fmaxvco = 256000;
 	bios->fminvco = 128000;
@@ -5709,14 +5524,6 @@ static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsi
 	bios->legacy.i2c_indices.crt = bios->data[legacy_i2c_offset];
 	bios->legacy.i2c_indices.tv = bios->data[legacy_i2c_offset + 1];
 	bios->legacy.i2c_indices.panel = bios->data[legacy_i2c_offset + 2];
-	if (bios->data[legacy_i2c_offset + 4])
-		bios->dcb.i2c[0].write = bios->data[legacy_i2c_offset + 4];
-	if (bios->data[legacy_i2c_offset + 5])
-		bios->dcb.i2c[0].read = bios->data[legacy_i2c_offset + 5];
-	if (bios->data[legacy_i2c_offset + 6])
-		bios->dcb.i2c[1].write = bios->data[legacy_i2c_offset + 6];
-	if (bios->data[legacy_i2c_offset + 7])
-		bios->dcb.i2c[1].read = bios->data[legacy_i2c_offset + 7];
 
 	if (bmplength > 74) {
 		bios->fmaxvco = ROM32(bmp[67]);
@@ -5767,286 +5574,128 @@ static uint16_t findstr(uint8_t *data, int n, const uint8_t *str, int len)
 	return 0;
 }
 
-static struct dcb_gpio_entry *
-new_gpio_entry(struct nvbios *bios)
-{
-	struct drm_device *dev = bios->dev;
-	struct dcb_gpio_table *gpio = &bios->dcb.gpio;
-
-	if (gpio->entries >= DCB_MAX_NUM_GPIO_ENTRIES) {
-		NV_ERROR(dev, "exceeded maximum number of gpio entries!!\n");
-		return NULL;
-	}
-
-	return &gpio->entry[gpio->entries++];
-}
-
-struct dcb_gpio_entry *
-nouveau_bios_gpio_entry(struct drm_device *dev, enum dcb_gpio_tag tag)
+void *
+dcb_table(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvbios *bios = &dev_priv->vbios;
-	int i;
-
-	for (i = 0; i < bios->dcb.gpio.entries; i++) {
-		if (bios->dcb.gpio.entry[i].tag != tag)
-			continue;
+	u8 *dcb = NULL;
 
-		return &bios->dcb.gpio.entry[i];
+	if (dev_priv->card_type > NV_04)
+		dcb = ROMPTR(dev, dev_priv->vbios.data[0x36]);
+	if (!dcb) {
+		NV_WARNONCE(dev, "No DCB data found in VBIOS\n");
+		return NULL;
 	}
 
-	return NULL;
-}
-
-static void
-parse_dcb_gpio_table(struct nvbios *bios)
-{
-	struct drm_device *dev = bios->dev;
-	struct dcb_gpio_entry *e;
-	u8 headerlen, entries, recordlen;
-	u8 *dcb, *gpio = NULL, *entry;
-	int i;
-
-	dcb = ROMPTR(bios, bios->data[0x36]);
+	if (dcb[0] >= 0x41) {
+		NV_WARNONCE(dev, "DCB version 0x%02x unknown\n", dcb[0]);
+		return NULL;
+	} else
 	if (dcb[0] >= 0x30) {
-		gpio = ROMPTR(bios, dcb[10]);
-		if (!gpio)
-			goto no_table;
-
-		headerlen = gpio[1];
-		entries = gpio[2];
-		recordlen = gpio[3];
+		if (ROM32(dcb[6]) == 0x4edcbdcb)
+			return dcb;
 	} else
-	if (dcb[0] >= 0x22 && dcb[-1] >= 0x13) {
-		gpio = ROMPTR(bios, dcb[-15]);
-		if (!gpio)
-			goto no_table;
-
-		headerlen = 3;
-		entries = gpio[2];
-		recordlen = gpio[1];
+	if (dcb[0] >= 0x20) {
+		if (ROM32(dcb[4]) == 0x4edcbdcb)
+			return dcb;
 	} else
-	if (dcb[0] >= 0x22) {
-		/* No GPIO table present, parse the TVDAC GPIO data. */
-		uint8_t *tvdac_gpio = &dcb[-5];
-
-		if (tvdac_gpio[0] & 1) {
-			e = new_gpio_entry(bios);
-			e->tag = DCB_GPIO_TVDAC0;
-			e->line = tvdac_gpio[1] >> 4;
-			e->invert = tvdac_gpio[0] & 2;
-		}
-
-		goto no_table;
+	if (dcb[0] >= 0x15) {
+		if (!memcmp(&dcb[-7], "DEV_REC", 7))
+			return dcb;
 	} else {
-		NV_DEBUG(dev, "no/unknown gpio table on DCB 0x%02x\n", dcb[0]);
-		goto no_table;
-	}
-
-	entry = gpio + headerlen;
-	for (i = 0; i < entries; i++, entry += recordlen) {
-		e = new_gpio_entry(bios);
-		if (!e)
-			break;
-
-		if (gpio[0] < 0x40) {
-			e->entry = ROM16(entry[0]);
-			e->tag = (e->entry & 0x07e0) >> 5;
-			if (e->tag == 0x3f) {
-				bios->dcb.gpio.entries--;
-				continue;
-			}
-
-			e->line = (e->entry & 0x001f);
-			e->invert = ((e->entry & 0xf800) >> 11) != 4;
-		} else {
-			e->entry = ROM32(entry[0]);
-			e->tag = (e->entry & 0x0000ff00) >> 8;
-			if (e->tag == 0xff) {
-				bios->dcb.gpio.entries--;
-				continue;
-			}
-
-			e->line = (e->entry & 0x0000001f) >> 0;
-			if (gpio[0] == 0x40) {
-				e->state_default = (e->entry & 0x01000000) >> 24;
-				e->state[0] = (e->entry & 0x18000000) >> 27;
-				e->state[1] = (e->entry & 0x60000000) >> 29;
-			} else {
-				e->state_default = (e->entry & 0x00000080) >> 7;
-				e->state[0] = (entry[4] >> 4) & 3;
-				e->state[1] = (entry[4] >> 6) & 3;
-			}
-		}
-	}
-
-no_table:
-	/* Apple iMac G4 NV18 */
-	if (nv_match_device(dev, 0x0189, 0x10de, 0x0010)) {
-		e = new_gpio_entry(bios);
-		if (e) {
-			e->tag = DCB_GPIO_TVDAC0;
-			e->line = 4;
-		}
-	}
-}
-
-struct dcb_connector_table_entry *
-nouveau_bios_connector_entry(struct drm_device *dev, int index)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvbios *bios = &dev_priv->vbios;
-	struct dcb_connector_table_entry *cte;
-
-	if (index >= bios->dcb.connector.entries)
-		return NULL;
-
-	cte = &bios->dcb.connector.entry[index];
-	if (cte->type == 0xff)
+		/*
+		 * v1.4 (some NV15/16, NV11+) seems the same as v1.5, but
+		 * always has the same single (crt) entry, even when tv-out
+		 * present, so the conclusion is this version cannot really
+		 * be used.
+		 *
+		 * v1.2 tables (some NV6/10, and NV15+) normally have the
+		 * same 5 entries, which are not specific to the card and so
+		 * no use.
+		 *
+		 * v1.2 does have an I2C table that read_dcb_i2c_table can
+		 * handle, but cards exist (nv11 in #14821) with a bad i2c
+		 * table pointer, so use the indices parsed in
+		 * parse_bmp_structure.
+		 *
+		 * v1.1 (NV5+, maybe some NV4) is entirely unhelpful
+		 */
+		NV_WARNONCE(dev, "No useful DCB data in VBIOS\n");
 		return NULL;
+	}
 
-	return cte;
+	NV_WARNONCE(dev, "DCB header validation failed\n");
+	return NULL;
 }
 
-static enum dcb_connector_type
-divine_connector_type(struct nvbios *bios, int index)
+void *
+dcb_outp(struct drm_device *dev, u8 idx)
 {
-	struct dcb_table *dcb = &bios->dcb;
-	unsigned encoders = 0, type = DCB_CONNECTOR_NONE;
-	int i;
-
-	for (i = 0; i < dcb->entries; i++) {
-		if (dcb->entry[i].connector == index)
-			encoders |= (1 << dcb->entry[i].type);
-	}
-
-	if (encoders & (1 << OUTPUT_DP)) {
-		if (encoders & (1 << OUTPUT_TMDS))
-			type = DCB_CONNECTOR_DP;
-		else
-			type = DCB_CONNECTOR_eDP;
-	} else
-	if (encoders & (1 << OUTPUT_TMDS)) {
-		if (encoders & (1 << OUTPUT_ANALOG))
-			type = DCB_CONNECTOR_DVI_I;
-		else
-			type = DCB_CONNECTOR_DVI_D;
-	} else
-	if (encoders & (1 << OUTPUT_ANALOG)) {
-		type = DCB_CONNECTOR_VGA;
+	u8 *dcb = dcb_table(dev);
+	if (dcb && dcb[0] >= 0x30) {
+		if (idx < dcb[2])
+			return dcb + dcb[1] + (idx * dcb[3]);
 	} else
-	if (encoders & (1 << OUTPUT_LVDS)) {
-		type = DCB_CONNECTOR_LVDS;
+	if (dcb && dcb[0] >= 0x20) {
+		u8 *i2c = ROMPTR(dev, dcb[2]);
+		u8 *ent = dcb + 8 + (idx * 8);
+		if (i2c && ent < i2c)
+			return ent;
 	} else
-	if (encoders & (1 << OUTPUT_TV)) {
-		type = DCB_CONNECTOR_TV_0;
+	if (dcb && dcb[0] >= 0x15) {
+		u8 *i2c = ROMPTR(dev, dcb[2]);
+		u8 *ent = dcb + 4 + (idx * 10);
+		if (i2c && ent < i2c)
+			return ent;
 	}
 
-	return type;
+	return NULL;
 }
 
-static void
-apply_dcb_connector_quirks(struct nvbios *bios, int idx)
-{
-	struct dcb_connector_table_entry *cte = &bios->dcb.connector.entry[idx];
-	struct drm_device *dev = bios->dev;
+int
+dcb_outp_foreach(struct drm_device *dev, void *data,
+		 int (*exec)(struct drm_device *, void *, int idx, u8 *outp))
+{
+	int ret, idx = -1;
+	u8 *outp = NULL;
+	while ((outp = dcb_outp(dev, ++idx))) {
+		if (ROM32(outp[0]) == 0x00000000)
+			break; /* seen on an NV11 with DCB v1.5 */
+		if (ROM32(outp[0]) == 0xffffffff)
+			break; /* seen on an NV17 with DCB v2.0 */
+
+		if ((outp[0] & 0x0f) == OUTPUT_UNUSED)
+			continue;
+		if ((outp[0] & 0x0f) == OUTPUT_EOL)
+			break;
 
-	/* Gigabyte NX85T */
-	if (nv_match_device(dev, 0x0421, 0x1458, 0x344c)) {
-		if (cte->type == DCB_CONNECTOR_HDMI_1)
-			cte->type = DCB_CONNECTOR_DVI_I;
+		ret = exec(dev, data, idx, outp);
+		if (ret)
+			return ret;
 	}
 
-	/* Gigabyte GV-NX86T512H */
-	if (nv_match_device(dev, 0x0402, 0x1458, 0x3455)) {
-		if (cte->type == DCB_CONNECTOR_HDMI_1)
-			cte->type = DCB_CONNECTOR_DVI_I;
-	}
+	return 0;
 }
 
-static const u8 hpd_gpio[16] = {
-	0xff, 0x07, 0x08, 0xff, 0xff, 0x51, 0x52, 0xff,
-	0xff, 0xff, 0xff, 0xff, 0xff, 0x5e, 0x5f, 0x60,
-};
-
-static void
-parse_dcb_connector_table(struct nvbios *bios)
+u8 *
+dcb_conntab(struct drm_device *dev)
 {
-	struct drm_device *dev = bios->dev;
-	struct dcb_connector_table *ct = &bios->dcb.connector;
-	struct dcb_connector_table_entry *cte;
-	uint8_t *conntab = &bios->data[bios->dcb.connector_table_ptr];
-	uint8_t *entry;
-	int i;
-
-	if (!bios->dcb.connector_table_ptr) {
-		NV_DEBUG_KMS(dev, "No DCB connector table present\n");
-		return;
-	}
-
-	NV_INFO(dev, "DCB connector table: VHER 0x%02x %d %d %d\n",
-		conntab[0], conntab[1], conntab[2], conntab[3]);
-	if ((conntab[0] != 0x30 && conntab[0] != 0x40) ||
-	    (conntab[3] != 2 && conntab[3] != 4)) {
-		NV_ERROR(dev, " Unknown! Please report.\n");
-		return;
+	u8 *dcb = dcb_table(dev);
+	if (dcb && dcb[0] >= 0x30 && dcb[1] >= 0x16) {
+		u8 *conntab = ROMPTR(dev, dcb[0x14]);
+		if (conntab && conntab[0] >= 0x30 && conntab[0] <= 0x40)
+			return conntab;
 	}
+	return NULL;
+}
 
-	ct->entries = conntab[2];
-
-	entry = conntab + conntab[1];
-	cte = &ct->entry[0];
-	for (i = 0; i < conntab[2]; i++, entry += conntab[3], cte++) {
-		cte->index = i;
-		if (conntab[3] == 2)
-			cte->entry = ROM16(entry[0]);
-		else
-			cte->entry = ROM32(entry[0]);
-
-		cte->type = (cte->entry & 0x000000ff) >> 0;
-		cte->index2 = (cte->entry & 0x00000f00) >> 8;
-
-		cte->gpio_tag = ffs((cte->entry & 0x07033000) >> 12);
-		cte->gpio_tag = hpd_gpio[cte->gpio_tag];
-
-		if (cte->type == 0xff)
-			continue;
-
-		apply_dcb_connector_quirks(bios, i);
-
-		NV_INFO(dev, " %d: 0x%08x: type 0x%02x idx %d tag 0x%02x\n",
-			i, cte->entry, cte->type, cte->index, cte->gpio_tag);
-
-		/* check for known types, fallback to guessing the type
-		 * from attached encoders if we hit an unknown.
-		 */
-		switch (cte->type) {
-		case DCB_CONNECTOR_VGA:
-		case DCB_CONNECTOR_TV_0:
-		case DCB_CONNECTOR_TV_1:
-		case DCB_CONNECTOR_TV_3:
-		case DCB_CONNECTOR_DVI_I:
-		case DCB_CONNECTOR_DVI_D:
-		case DCB_CONNECTOR_LVDS:
-		case DCB_CONNECTOR_LVDS_SPWG:
-		case DCB_CONNECTOR_DP:
-		case DCB_CONNECTOR_eDP:
-		case DCB_CONNECTOR_HDMI_0:
-		case DCB_CONNECTOR_HDMI_1:
-			break;
-		default:
-			cte->type = divine_connector_type(bios, cte->index);
-			NV_WARN(dev, "unknown type, using 0x%02x\n", cte->type);
-			break;
-		}
-
-		if (nouveau_override_conntype) {
-			int type = divine_connector_type(bios, cte->index);
-			if (type != cte->type)
-				NV_WARN(dev, " -> type 0x%02x\n", cte->type);
-		}
-
-	}
+u8 *
+dcb_conn(struct drm_device *dev, u8 idx)
+{
+	u8 *conntab = dcb_conntab(dev);
+	if (conntab && idx < conntab[2])
+		return conntab + conntab[1] + (idx * conntab[3]);
+	return NULL;
 }
 
 static struct dcb_entry *new_dcb_entry(struct dcb_table *dcb)
@@ -6079,8 +5728,7 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
 	entry->type = conn & 0xf;
 	entry->i2c_index = (conn >> 4) & 0xf;
 	entry->heads = (conn >> 8) & 0xf;
-	if (dcb->version >= 0x40)
-		entry->connector = (conn >> 12) & 0xf;
+	entry->connector = (conn >> 12) & 0xf;
 	entry->bus = (conn >> 16) & 0xf;
 	entry->location = (conn >> 20) & 0x3;
 	entry->or = (conn >> 24) & 0xf;
@@ -6252,25 +5900,6 @@ parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb,
 	return true;
 }
 
-static bool parse_dcb_entry(struct drm_device *dev, struct dcb_table *dcb,
-			    uint32_t conn, uint32_t conf)
-{
-	struct dcb_entry *entry = new_dcb_entry(dcb);
-	bool ret;
-
-	if (dcb->version >= 0x20)
-		ret = parse_dcb20_entry(dev, dcb, conn, conf, entry);
-	else
-		ret = parse_dcb15_entry(dev, dcb, conn, conf, entry);
-	if (!ret)
-		return ret;
-
-	read_dcb_i2c_entry(dev, dcb->version, dcb->i2c_table,
-			   entry->i2c_index, &dcb->i2c[entry->i2c_index]);
-
-	return true;
-}
-
 static
 void merge_like_dcb_entries(struct drm_device *dev, struct dcb_table *dcb)
 {
@@ -6431,154 +6060,118 @@ fabricate_dcb_encoder_table(struct drm_device *dev, struct nvbios *bios)
 #endif
 
 	/* Make up some sane defaults */
-	fabricate_dcb_output(dcb, OUTPUT_ANALOG, LEGACY_I2C_CRT, 1, 1);
+	fabricate_dcb_output(dcb, OUTPUT_ANALOG,
+			     bios->legacy.i2c_indices.crt, 1, 1);
 
 	if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
-		fabricate_dcb_output(dcb, OUTPUT_TV, LEGACY_I2C_TV,
+		fabricate_dcb_output(dcb, OUTPUT_TV,
+				     bios->legacy.i2c_indices.tv,
 				     all_heads, 0);
 
 	else if (bios->tmds.output0_script_ptr ||
 		 bios->tmds.output1_script_ptr)
-		fabricate_dcb_output(dcb, OUTPUT_TMDS, LEGACY_I2C_PANEL,
+		fabricate_dcb_output(dcb, OUTPUT_TMDS,
+				     bios->legacy.i2c_indices.panel,
 				     all_heads, 1);
 }
 
 static int
-parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
+parse_dcb_entry(struct drm_device *dev, void *data, int idx, u8 *outp)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct dcb_table *dcb = &bios->dcb;
-	uint16_t dcbptr = 0, i2ctabptr = 0;
-	uint8_t *dcbtable;
-	uint8_t headerlen = 0x4, entries = DCB_MAX_NUM_ENTRIES;
-	bool configblock = true;
-	int recordlength = 8, confofs = 4;
-	int i;
-
-	/* get the offset from 0x36 */
-	if (dev_priv->card_type > NV_04) {
-		dcbptr = ROM16(bios->data[0x36]);
-		if (dcbptr == 0x0000)
-			NV_WARN(dev, "No output data (DCB) found in BIOS\n");
-	}
-
-	/* this situation likely means a really old card, pre DCB */
-	if (dcbptr == 0x0) {
-		fabricate_dcb_encoder_table(dev, bios);
-		return 0;
-	}
-
-	dcbtable = &bios->data[dcbptr];
-
-	/* get DCB version */
-	dcb->version = dcbtable[0];
-	NV_TRACE(dev, "Found Display Configuration Block version %d.%d\n",
-		 dcb->version >> 4, dcb->version & 0xf);
-
-	if (dcb->version >= 0x20) { /* NV17+ */
-		uint32_t sig;
+	struct dcb_table *dcb = &dev_priv->vbios.dcb;
+	u32 conf = (dcb->version >= 0x20) ? ROM32(outp[4]) : ROM32(outp[6]);
+	u32 conn = ROM32(outp[0]);
+	bool ret;
 
-		if (dcb->version >= 0x30) { /* NV40+ */
-			headerlen = dcbtable[1];
-			entries = dcbtable[2];
-			recordlength = dcbtable[3];
-			i2ctabptr = ROM16(dcbtable[4]);
-			sig = ROM32(dcbtable[6]);
-			dcb->gpio_table_ptr = ROM16(dcbtable[10]);
-			dcb->connector_table_ptr = ROM16(dcbtable[20]);
-		} else {
-			i2ctabptr = ROM16(dcbtable[2]);
-			sig = ROM32(dcbtable[4]);
-			headerlen = 8;
-		}
+	if (apply_dcb_encoder_quirks(dev, idx, &conn, &conf)) {
+		struct dcb_entry *entry = new_dcb_entry(dcb);
 
-		if (sig != 0x4edcbdcb) {
-			NV_ERROR(dev, "Bad Display Configuration Block "
-				 "signature (%08X)\n", sig);
-			return -EINVAL;
-		}
-	} else if (dcb->version >= 0x15) { /* some NV11 and NV20 */
-		char sig[8] = { 0 };
+		NV_TRACEWARN(dev, "DCB outp %02d: %08x %08x\n", idx, conn, conf);
 
-		strncpy(sig, (char *)&dcbtable[-7], 7);
-		i2ctabptr = ROM16(dcbtable[2]);
-		recordlength = 10;
-		confofs = 6;
+		if (dcb->version >= 0x20)
+			ret = parse_dcb20_entry(dev, dcb, conn, conf, entry);
+		else
+			ret = parse_dcb15_entry(dev, dcb, conn, conf, entry);
+		if (!ret)
+			return 1; /* stop parsing */
 
-		if (strcmp(sig, "DEV_REC")) {
-			NV_ERROR(dev, "Bad Display Configuration Block "
-				 "signature (%s)\n", sig);
-			return -EINVAL;
-		}
-	} else {
-		/*
-		 * v1.4 (some NV15/16, NV11+) seems the same as v1.5, but always
-		 * has the same single (crt) entry, even when tv-out present, so
-		 * the conclusion is this version cannot really be used.
-		 * v1.2 tables (some NV6/10, and NV15+) normally have the same
-		 * 5 entries, which are not specific to the card and so no use.
-		 * v1.2 does have an I2C table that read_dcb_i2c_table can
-		 * handle, but cards exist (nv11 in #14821) with a bad i2c table
-		 * pointer, so use the indices parsed in parse_bmp_structure.
-		 * v1.1 (NV5+, maybe some NV4) is entirely unhelpful
+		/* Ignore the I2C index for on-chip TV-out, as there
+		 * are cards with bogus values (nv31m in bug 23212),
+		 * and it's otherwise useless.
 		 */
-		NV_TRACEWARN(dev, "No useful information in BIOS output table; "
-			     "adding all possible outputs\n");
-		fabricate_dcb_encoder_table(dev, bios);
-		return 0;
+		if (entry->type == OUTPUT_TV &&
+		    entry->location == DCB_LOC_ON_CHIP)
+			entry->i2c_index = 0x0f;
 	}
 
-	if (!i2ctabptr)
-		NV_WARN(dev, "No pointer to DCB I2C port table\n");
-	else {
-		dcb->i2c_table = &bios->data[i2ctabptr];
-		if (dcb->version >= 0x30)
-			dcb->i2c_default_indices = dcb->i2c_table[4];
+	return 0;
+}
 
-		/*
-		 * Parse the "management" I2C bus, used for hardware
-		 * monitoring and some external TMDS transmitters.
-		 */
-		if (dcb->version >= 0x22) {
-			int idx = (dcb->version >= 0x40 ?
-				   dcb->i2c_default_indices & 0xf :
-				   2);
+static void
+dcb_fake_connectors(struct nvbios *bios)
+{
+	struct dcb_table *dcbt = &bios->dcb;
+	u8 map[16] = { };
+	int i, idx = 0;
 
-			read_dcb_i2c_entry(dev, dcb->version, dcb->i2c_table,
-					   idx, &dcb->i2c[idx]);
-		}
+	/* heuristic: if we ever get a non-zero connector field, assume
+	 * that all the indices are valid and we don't need to fake them.
+	 */
+	for (i = 0; i < dcbt->entries; i++) {
+		if (dcbt->entry[i].connector)
+			return;
 	}
 
-	if (entries > DCB_MAX_NUM_ENTRIES)
-		entries = DCB_MAX_NUM_ENTRIES;
-
-	for (i = 0; i < entries; i++) {
-		uint32_t connection, config = 0;
-
-		connection = ROM32(dcbtable[headerlen + recordlength * i]);
-		if (configblock)
-			config = ROM32(dcbtable[headerlen + confofs + recordlength * i]);
-
-		/* seen on an NV11 with DCB v1.5 */
-		if (connection == 0x00000000)
-			break;
+	/* no useful connector info available, we need to make it up
+	 * ourselves. the rule here is: anything on the same i2c bus
+	 * is considered to be on the same connector. any output
+	 * without an associated i2c bus is assigned its own unique
+	 * connector index.
+	 */
+	for (i = 0; i < dcbt->entries; i++) {
+		u8 i2c = dcbt->entry[i].i2c_index;
+		if (i2c == 0x0f) {
+			dcbt->entry[i].connector = idx++;
+		} else {
+			if (!map[i2c])
+				map[i2c] = ++idx;
+			dcbt->entry[i].connector = map[i2c] - 1;
+		}
+	}
 
-		/* seen on an NV17 with DCB v2.0 */
-		if (connection == 0xffffffff)
-			break;
+	/* if we created more than one connector, destroy the connector
+	 * table - just in case it has random, rather than stub, entries.
+	 */
+	if (i > 1) {
+		u8 *conntab = dcb_conntab(bios->dev);
+		if (conntab)
+			conntab[0] = 0x00;
+	}
+}
 
-		if ((connection & 0x0000000f) == 0x0000000f)
-			continue;
+static int
+parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
+{
+	struct dcb_table *dcb = &bios->dcb;
+	u8 *dcbt, *conn;
+	int idx;
+
+	dcbt = dcb_table(dev);
+	if (!dcbt) {
+		/* handle pre-DCB boards */
+		if (bios->type == NVBIOS_BMP) {
+			fabricate_dcb_encoder_table(dev, bios);
+			return 0;
+		}
 
-		if (!apply_dcb_encoder_quirks(dev, i, &connection, &config))
-			continue;
+		return -EINVAL;
+	}
 
-		NV_TRACEWARN(dev, "Raw DCB entry %d: %08x %08x\n",
-			     dcb->entries, connection, config);
+	NV_TRACE(dev, "DCB version %d.%d\n", dcbt[0] >> 4, dcbt[0] & 0xf);
 
-		if (!parse_dcb_entry(dev, dcb, connection, config))
-			break;
-	}
+	dcb->version = dcbt[0];
+	dcb_outp_foreach(dev, NULL, parse_dcb_entry);
 
 	/*
 	 * apart from v2.1+ not being known for requiring merging, this
@@ -6590,77 +6183,19 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
 	if (!dcb->entries)
 		return -ENXIO;
 
-	parse_dcb_gpio_table(bios);
-	parse_dcb_connector_table(bios);
-	return 0;
-}
-
-static void
-fixup_legacy_connector(struct nvbios *bios)
-{
-	struct dcb_table *dcb = &bios->dcb;
-	int i, i2c, i2c_conn[DCB_MAX_NUM_I2C_ENTRIES] = { };
-
-	/*
-	 * DCB 3.0 also has the table in most cases, but there are some cards
-	 * where the table is filled with stub entries, and the DCB entry
-	 * indices are all 0.  We don't need the connector indices on pre-G80
-	 * chips (yet?) so limit the use to DCB 4.0 and above.
-	 */
-	if (dcb->version >= 0x40)
-		return;
-
-	dcb->connector.entries = 0;
-
-	/*
-	 * No known connector info before v3.0, so make it up. the rule here
-	 * is: anything on the same i2c bus is considered to be on the same
-	 * connector. any output without an associated i2c bus is assigned
-	 * its own unique connector index.
-	 */
-	for (i = 0; i < dcb->entries; i++) {
-		/*
-		 * Ignore the I2C index for on-chip TV-out, as there
-		 * are cards with bogus values (nv31m in bug 23212),
-		 * and it's otherwise useless.
-		 */
-		if (dcb->entry[i].type == OUTPUT_TV &&
-		    dcb->entry[i].location == DCB_LOC_ON_CHIP)
-			dcb->entry[i].i2c_index = 0xf;
-		i2c = dcb->entry[i].i2c_index;
-
-		if (i2c_conn[i2c]) {
-			dcb->entry[i].connector = i2c_conn[i2c] - 1;
-			continue;
+	/* dump connector table entries to log, if any exist */
+	idx = -1;
+	while ((conn = dcb_conn(dev, ++idx))) {
+		if (conn[0] != 0xff) {
+			NV_TRACE(dev, "DCB conn %02d: ", idx);
+			if (dcb_conntab(dev)[3] < 4)
+				printk("%04x\n", ROM16(conn[0]));
+			else
+				printk("%08x\n", ROM32(conn[0]));
 		}
-
-		dcb->entry[i].connector = dcb->connector.entries++;
-		if (i2c != 0xf)
-			i2c_conn[i2c] = dcb->connector.entries;
-	}
-
-	/* Fake the connector table as well as just connector indices */
-	for (i = 0; i < dcb->connector.entries; i++) {
-		dcb->connector.entry[i].index = i;
-		dcb->connector.entry[i].type = divine_connector_type(bios, i);
-		dcb->connector.entry[i].gpio_tag = 0xff;
-	}
-}
-
-static void
-fixup_legacy_i2c(struct nvbios *bios)
-{
-	struct dcb_table *dcb = &bios->dcb;
-	int i;
-
-	for (i = 0; i < dcb->entries; i++) {
-		if (dcb->entry[i].i2c_index == LEGACY_I2C_CRT)
-			dcb->entry[i].i2c_index = bios->legacy.i2c_indices.crt;
-		if (dcb->entry[i].i2c_index == LEGACY_I2C_PANEL)
-			dcb->entry[i].i2c_index = bios->legacy.i2c_indices.panel;
-		if (dcb->entry[i].i2c_index == LEGACY_I2C_TV)
-			dcb->entry[i].i2c_index = bios->legacy.i2c_indices.tv;
 	}
+	dcb_fake_connectors(bios);
+	return 0;
 }
 
 static int load_nv17_hwsq_ucode_entry(struct drm_device *dev, struct nvbios *bios, uint16_t hwsq_offset, int entry)
@@ -6879,19 +6414,6 @@ nouveau_run_vbios_init(struct drm_device *dev)
 	return ret;
 }
 
-static void
-nouveau_bios_i2c_devices_takedown(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvbios *bios = &dev_priv->vbios;
-	struct dcb_i2c_entry *entry;
-	int i;
-
-	entry = &bios->dcb.i2c[0];
-	for (i = 0; i < DCB_MAX_NUM_I2C_ENTRIES; i++, entry++)
-		nouveau_i2c_fini(dev, entry);
-}
-
 static bool
 nouveau_bios_posted(struct drm_device *dev)
 {
@@ -6928,12 +6450,17 @@ nouveau_bios_init(struct drm_device *dev)
 	if (ret)
 		return ret;
 
-	ret = parse_dcb_table(dev, bios);
+	ret = nouveau_i2c_init(dev);
 	if (ret)
 		return ret;
 
-	fixup_legacy_i2c(bios);
-	fixup_legacy_connector(bios);
+	ret = nouveau_mxm_init(dev);
+	if (ret)
+		return ret;
+
+	ret = parse_dcb_table(dev, bios);
+	if (ret)
+		return ret;
 
 	if (!bios->major_version)	/* we don't run version 0 bios */
 		return 0;
@@ -6971,5 +6498,6 @@
 void
 nouveau_bios_takedown(struct drm_device *dev)
 {
-	nouveau_bios_i2c_devices_takedown(dev);
+	nouveau_mxm_fini(dev);
+	nouveau_i2c_fini(dev);
 }
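The new dcb_conntab()/dcb_conn() helpers above index raw bytes straight out of the VBIOS image. A toy standalone model of that table layout, with made-up header and entry values, just to illustrate the indexing (byte 0 version, byte 1 header length, byte 2 entry count, byte 3 entry size):

#include <stdint.h>
#include <stdio.h>

static uint8_t *conn_entry(uint8_t *conntab, uint8_t idx)
{
	if (!conntab || idx >= conntab[2])
		return NULL;
	return conntab + conntab[1] + idx * conntab[3];
}

int main(void)
{
	/* version 0x40, 5-byte header, 2 entries of 4 bytes each */
	uint8_t tab[] = { 0x40, 5, 2, 4, 0,
			  0x46, 0x00, 0x00, 0x00,	/* entry 0 */
			  0x30, 0x01, 0x00, 0x00 };	/* entry 1 */
	printf("entry 1 type byte: 0x%02x\n", conn_entry(tab, 1)[0]);
	return 0;
}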
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 8adb69e4a6b..1e382ad5a2b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -34,9 +34,14 @@
 
 #define DCB_LOC_ON_CHIP 0
 
-#define ROM16(x) le16_to_cpu(*(uint16_t *)&(x))
-#define ROM32(x) le32_to_cpu(*(uint32_t *)&(x))
-#define ROMPTR(bios, x) (ROM16(x) ? &(bios)->data[ROM16(x)] : NULL)
+#define ROM16(x) le16_to_cpu(*(u16 *)&(x))
+#define ROM32(x) le32_to_cpu(*(u32 *)&(x))
+#define ROM48(x) ({ u8 *p = &(x); (u64)ROM16(p[4]) << 32 | ROM32(p[0]); })
+#define ROM64(x) le64_to_cpu(*(u64 *)&(x))
+#define ROMPTR(d,x) ({                                                         \
+	struct drm_nouveau_private *dev_priv = (d)->dev_private;               \
+	ROM16(x) ? &dev_priv->vbios.data[ROM16(x)] : NULL;                     \
+})
 
 struct bit_entry {
 	uint8_t id;
@@ -48,30 +53,12 @@ struct bit_entry {
 
 int bit_table(struct drm_device *, u8 id, struct bit_entry *);
 
-struct dcb_i2c_entry {
-	uint32_t entry;
-	uint8_t port_type;
-	uint8_t read, write;
-	struct nouveau_i2c_chan *chan;
-};
-
 enum dcb_gpio_tag {
 	DCB_GPIO_TVDAC0 = 0xc,
 	DCB_GPIO_TVDAC1 = 0x2d,
-};
-
-struct dcb_gpio_entry {
-	enum dcb_gpio_tag tag;
-	int line;
-	bool invert;
-	uint32_t entry;
-	uint8_t state_default;
-	uint8_t state[2];
-};
-
-struct dcb_gpio_table {
-	int entries;
-	struct dcb_gpio_entry entry[DCB_MAX_NUM_GPIO_ENTRIES];
+	DCB_GPIO_PWM_FAN = 0x9,
+	DCB_GPIO_FAN_SENSE = 0x3d,
+	DCB_GPIO_UNUSED = 0xff
 };
 
 enum dcb_connector_type {
@@ -90,20 +77,6 @@ enum dcb_connector_type {
 	DCB_CONNECTOR_NONE = 0xff
 };
 
-struct dcb_connector_table_entry {
-	uint8_t index;
-	uint32_t entry;
-	enum dcb_connector_type type;
-	uint8_t index2;
-	uint8_t gpio_tag;
-	void *drm;
-};
-
-struct dcb_connector_table {
-	int entries;
-	struct dcb_connector_table_entry entry[DCB_MAX_NUM_CONNECTOR_ENTRIES];
-};
-
 enum dcb_type {
 	OUTPUT_ANALOG = 0,
 	OUTPUT_TV = 1,
@@ -111,6 +84,7 @@ enum dcb_type {
 	OUTPUT_LVDS = 3,
 	OUTPUT_DP = 6,
 	OUTPUT_EOL = 14, /* DCB 4.0+, appears to be end-of-list */
+	OUTPUT_UNUSED = 15,
 	OUTPUT_ANY = -1
 };
 
@@ -155,18 +129,8 @@ struct dcb_entry {
 
 struct dcb_table {
 	uint8_t version;
-
 	int entries;
 	struct dcb_entry entry[DCB_MAX_NUM_ENTRIES];
-
-	uint8_t *i2c_table;
-	uint8_t i2c_default_indices;
-	struct dcb_i2c_entry i2c[DCB_MAX_NUM_I2C_ENTRIES];
-
-	uint16_t gpio_table_ptr;
-	struct dcb_gpio_table gpio;
-	uint16_t connector_table_ptr;
-	struct dcb_connector_table connector;
 };
 
 enum nouveau_or {
@@ -195,7 +159,7 @@ enum pll_types {
 	PLL_SHADER = 0x02,
 	PLL_UNK03  = 0x03,
 	PLL_MEMORY = 0x04,
-	PLL_UNK05  = 0x05,
+	PLL_VDEC   = 0x05,
 	PLL_UNK40  = 0x40,
 	PLL_UNK41  = 0x41,
 	PLL_UNK42  = 0x42,
@@ -333,4 +297,11 @@ struct nvbios {
 	} legacy;
 };
 
+void *dcb_table(struct drm_device *);
+void *dcb_outp(struct drm_device *, u8 idx);
+int dcb_outp_foreach(struct drm_device *, void *data,
+		     int (*)(struct drm_device *, void *, int idx, u8 *outp));
+u8 *dcb_conntab(struct drm_device *);
+u8 *dcb_conn(struct drm_device *, u8 idx);
+
 #endif
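The reworked ROMPTR above now takes the drm_device rather than the nvbios pointer. A plain-C approximation of what it does (not the kernel macro itself; 'image' stands in for dev_priv->vbios.data): resolve a little-endian 16-bit offset stored in the VBIOS image into a pointer inside that image, or NULL when the offset is zero.

#include <stdint.h>
#include <stddef.h>

static uint8_t *romptr(uint8_t *image, const uint8_t *loc)
{
	uint16_t off = (uint16_t)loc[0] | (uint16_t)loc[1] << 8;	/* ROM16() */
	return off ? image + off : NULL;
}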
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 7cc37e69086..724b41a2b9e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -28,6 +28,7 @@
  */
 
 #include "drmP.h"
+#include "ttm/ttm_page_alloc.h"
 
 #include "nouveau_drm.h"
 #include "nouveau_drv.h"
@@ -92,6 +93,7 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_bo *nvbo;
+	size_t acc_size;
 	int ret;
 
 	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
@@ -114,9 +116,12 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
 	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
 	nouveau_bo_placement_set(nvbo, flags, 0);
 
+	acc_size = ttm_bo_dma_acc_size(&dev_priv->ttm.bdev, size,
+				       sizeof(struct nouveau_bo));
+
 	ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
 			  ttm_bo_type_device, &nvbo->placement,
-			  align >> PAGE_SHIFT, 0, false, NULL, size,
+			  align >> PAGE_SHIFT, 0, false, NULL, acc_size,
 			  nouveau_bo_del_ttm);
 	if (ret) {
 		/* ttm will call nouveau_bo_del_ttm if it fails.. */
@@ -343,8 +348,10 @@ nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
 	*mem = val;
 }
 
-static struct ttm_backend *
-nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
+static struct ttm_tt *
+nouveau_ttm_tt_create(struct ttm_bo_device *bdev,
+		      unsigned long size, uint32_t page_flags,
+		      struct page *dummy_read_page)
 {
 	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
 	struct drm_device *dev = dev_priv->dev;
@@ -352,11 +359,13 @@ nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
352 switch (dev_priv->gart_info.type) { 359 switch (dev_priv->gart_info.type) {
353#if __OS_HAS_AGP 360#if __OS_HAS_AGP
354 case NOUVEAU_GART_AGP: 361 case NOUVEAU_GART_AGP:
355 return ttm_agp_backend_init(bdev, dev->agp->bridge); 362 return ttm_agp_tt_create(bdev, dev->agp->bridge,
363 size, page_flags, dummy_read_page);
356#endif 364#endif
357 case NOUVEAU_GART_PDMA: 365 case NOUVEAU_GART_PDMA:
358 case NOUVEAU_GART_HW: 366 case NOUVEAU_GART_HW:
359 return nouveau_sgdma_init_ttm(dev); 367 return nouveau_sgdma_create_ttm(bdev, size, page_flags,
368 dummy_read_page);
360 default: 369 default:
361 NV_ERROR(dev, "Unknown GART type %d\n", 370 NV_ERROR(dev, "Unknown GART type %d\n",
362 dev_priv->gart_info.type); 371 dev_priv->gart_info.type);
@@ -673,8 +682,7 @@ nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
673 if (mem->mem_type == TTM_PL_VRAM) 682 if (mem->mem_type == TTM_PL_VRAM)
674 nouveau_vm_map(vma, node); 683 nouveau_vm_map(vma, node);
675 else 684 else
676 nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT, 685 nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT, node);
677 node, node->pages);
678 686
679 return 0; 687 return 0;
680} 688}
@@ -801,19 +809,18 @@ out:
801static void 809static void
802nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem) 810nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
803{ 811{
804 struct nouveau_mem *node = new_mem->mm_node;
805 struct nouveau_bo *nvbo = nouveau_bo(bo); 812 struct nouveau_bo *nvbo = nouveau_bo(bo);
806 struct nouveau_vma *vma; 813 struct nouveau_vma *vma;
807 814
808 list_for_each_entry(vma, &nvbo->vma_list, head) { 815 list_for_each_entry(vma, &nvbo->vma_list, head) {
809 if (new_mem->mem_type == TTM_PL_VRAM) { 816 if (new_mem && new_mem->mem_type == TTM_PL_VRAM) {
810 nouveau_vm_map(vma, new_mem->mm_node); 817 nouveau_vm_map(vma, new_mem->mm_node);
811 } else 818 } else
812 if (new_mem->mem_type == TTM_PL_TT && 819 if (new_mem && new_mem->mem_type == TTM_PL_TT &&
813 nvbo->page_shift == vma->vm->spg_shift) { 820 nvbo->page_shift == vma->vm->spg_shift) {
814 nouveau_vm_map_sg(vma, 0, new_mem-> 821 nouveau_vm_map_sg(vma, 0, new_mem->
815 num_pages << PAGE_SHIFT, 822 num_pages << PAGE_SHIFT,
816 node, node->pages); 823 new_mem->mm_node);
817 } else { 824 } else {
818 nouveau_vm_unmap(vma); 825 nouveau_vm_unmap(vma);
819 } 826 }
@@ -1044,8 +1051,94 @@ nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
1044 nouveau_fence_unref(&old_fence); 1051 nouveau_fence_unref(&old_fence);
1045} 1052}
1046 1053
1054static int
1055nouveau_ttm_tt_populate(struct ttm_tt *ttm)
1056{
1057 struct ttm_dma_tt *ttm_dma = (void *)ttm;
1058 struct drm_nouveau_private *dev_priv;
1059 struct drm_device *dev;
1060 unsigned i;
1061 int r;
1062
1063 if (ttm->state != tt_unpopulated)
1064 return 0;
1065
1066 dev_priv = nouveau_bdev(ttm->bdev);
1067 dev = dev_priv->dev;
1068
1069#if __OS_HAS_AGP
1070 if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
1071 return ttm_agp_tt_populate(ttm);
1072 }
1073#endif
1074
1075#ifdef CONFIG_SWIOTLB
1076 if (swiotlb_nr_tbl()) {
1077 return ttm_dma_populate((void *)ttm, dev->dev);
1078 }
1079#endif
1080
1081 r = ttm_pool_populate(ttm);
1082 if (r) {
1083 return r;
1084 }
1085
1086 for (i = 0; i < ttm->num_pages; i++) {
1087 ttm_dma->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
1088 0, PAGE_SIZE,
1089 PCI_DMA_BIDIRECTIONAL);
1090 if (pci_dma_mapping_error(dev->pdev, ttm_dma->dma_address[i])) {
 1091 while (i--) {
1092 pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
1093 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
1094 ttm_dma->dma_address[i] = 0;
1095 }
1096 ttm_pool_unpopulate(ttm);
1097 return -EFAULT;
1098 }
1099 }
1100 return 0;
1101}
1102
1103static void
1104nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
1105{
1106 struct ttm_dma_tt *ttm_dma = (void *)ttm;
1107 struct drm_nouveau_private *dev_priv;
1108 struct drm_device *dev;
1109 unsigned i;
1110
1111 dev_priv = nouveau_bdev(ttm->bdev);
1112 dev = dev_priv->dev;
1113
1114#if __OS_HAS_AGP
1115 if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
1116 ttm_agp_tt_unpopulate(ttm);
1117 return;
1118 }
1119#endif
1120
1121#ifdef CONFIG_SWIOTLB
1122 if (swiotlb_nr_tbl()) {
1123 ttm_dma_unpopulate((void *)ttm, dev->dev);
1124 return;
1125 }
1126#endif
1127
1128 for (i = 0; i < ttm->num_pages; i++) {
1129 if (ttm_dma->dma_address[i]) {
1130 pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
1131 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
1132 }
1133 }
1134
1135 ttm_pool_unpopulate(ttm);
1136}
1137
1047struct ttm_bo_driver nouveau_bo_driver = { 1138struct ttm_bo_driver nouveau_bo_driver = {
1048 .create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry, 1139 .ttm_tt_create = &nouveau_ttm_tt_create,
1140 .ttm_tt_populate = &nouveau_ttm_tt_populate,
1141 .ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
1049 .invalidate_caches = nouveau_bo_invalidate_caches, 1142 .invalidate_caches = nouveau_bo_invalidate_caches,
1050 .init_mem_type = nouveau_bo_init_mem_type, 1143 .init_mem_type = nouveau_bo_init_mem_type,
1051 .evict_flags = nouveau_bo_evict_flags, 1144 .evict_flags = nouveau_bo_evict_flags,
@@ -1091,7 +1184,7 @@ nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
1091 nouveau_vm_map(vma, nvbo->bo.mem.mm_node); 1184 nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
1092 else 1185 else
1093 if (nvbo->bo.mem.mem_type == TTM_PL_TT) 1186 if (nvbo->bo.mem.mem_type == TTM_PL_TT)
1094 nouveau_vm_map_sg(vma, 0, size, node, node->pages); 1187 nouveau_vm_map_sg(vma, 0, size, node);
1095 1188
1096 list_add_tail(&vma->head, &nvbo->vma_list); 1189 list_add_tail(&vma->head, &nvbo->vma_list);
1097 vma->refcount = 1; 1190 vma->refcount = 1;
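
nouveau_ttm_tt_populate() above maps every page up front and must unwind all successful mappings if one fails. A self-contained model of that map-with-rollback idiom (map_one()/unmap_one() are stand-ins, and index 3 is forced to fail for demonstration); note the post-decrement, which unwinds index 0 as well:

#include <stdio.h>

static int map_one(unsigned i) { return i == 3 ? -1 : (int)(0x1000 + i); }
static void unmap_one(int h) { printf("unmap %#x\n", h); }

static int
populate(int *handles, unsigned n)
{
	unsigned i;

	for (i = 0; i < n; i++) {
		handles[i] = map_one(i);
		if (handles[i] < 0) {
			while (i--)		/* unwinds i-1 .. 0, inclusive */
				unmap_one(handles[i]);
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	int handles[8];
	return populate(handles, 8) ? 1 : 0;
}
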
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index bb6ec9ef867..a018defb762 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -187,6 +187,8 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
187 nouveau_dma_pre_init(chan); 187 nouveau_dma_pre_init(chan);
188 chan->user_put = 0x40; 188 chan->user_put = 0x40;
189 chan->user_get = 0x44; 189 chan->user_get = 0x44;
190 if (dev_priv->card_type >= NV_50)
191 chan->user_get_hi = 0x60;
190 192
191 /* disable the fifo caches */ 193 /* disable the fifo caches */
192 pfifo->reassign(dev, false); 194 pfifo->reassign(dev, false);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index cea6696b190..f3ce34be082 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -35,6 +35,7 @@
35#include "nouveau_encoder.h" 35#include "nouveau_encoder.h"
36#include "nouveau_crtc.h" 36#include "nouveau_crtc.h"
37#include "nouveau_connector.h" 37#include "nouveau_connector.h"
38#include "nouveau_gpio.h"
38#include "nouveau_hw.h" 39#include "nouveau_hw.h"
39 40
40static void nouveau_connector_hotplug(void *, int); 41static void nouveau_connector_hotplug(void *, int);
@@ -78,29 +79,11 @@ nouveau_encoder_connector_get(struct nouveau_encoder *encoder)
78 return NULL; 79 return NULL;
79} 80}
80 81
81/*TODO: This could use improvement, and learn to handle the fixed
82 * BIOS tables etc. It's fine currently, for its only user.
83 */
84int
85nouveau_connector_bpp(struct drm_connector *connector)
86{
87 struct nouveau_connector *nv_connector = nouveau_connector(connector);
88
89 if (nv_connector->edid && nv_connector->edid->revision >= 4) {
90 u8 bpc = ((nv_connector->edid->input & 0x70) >> 3) + 4;
91 if (bpc > 4)
92 return bpc;
93 }
94
95 return 18;
96}
97
98static void 82static void
99nouveau_connector_destroy(struct drm_connector *connector) 83nouveau_connector_destroy(struct drm_connector *connector)
100{ 84{
101 struct nouveau_connector *nv_connector = nouveau_connector(connector); 85 struct nouveau_connector *nv_connector = nouveau_connector(connector);
102 struct drm_nouveau_private *dev_priv; 86 struct drm_nouveau_private *dev_priv;
103 struct nouveau_gpio_engine *pgpio;
104 struct drm_device *dev; 87 struct drm_device *dev;
105 88
106 if (!nv_connector) 89 if (!nv_connector)
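
The removed nouveau_connector_bpp() duplicated a decode the DRM EDID parser already performs into connector->display_info.bpc. For reference, a sketch of that decode for EDID 1.4 digital inputs, where bits 6:4 of the input byte encode the per-channel depth (6..16 bpc in steps of 2):

#include <stdint.h>
#include <stdio.h>

static int
edid_input_to_bpc(uint8_t input, int revision)
{
	if (revision >= 4 && (input & 0x80)) {		/* digital input */
		uint8_t code = (input & 0x70) >> 4;	/* 1..6 */
		if (code >= 1 && code <= 6)
			return 4 + code * 2;	/* 6, 8, 10, 12, 14, 16 */
	}
	return 0;	/* undefined/unknown */
}

int main(void)
{
	printf("%d\n", edid_input_to_bpc(0xa0, 4));	/* code 2 -> 8 bpc */
	return 0;
}
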
@@ -110,10 +93,9 @@ nouveau_connector_destroy(struct drm_connector *connector)
110 dev_priv = dev->dev_private; 93 dev_priv = dev->dev_private;
111 NV_DEBUG_KMS(dev, "\n"); 94 NV_DEBUG_KMS(dev, "\n");
112 95
113 pgpio = &dev_priv->engine.gpio; 96 if (nv_connector->hpd != DCB_GPIO_UNUSED) {
114 if (pgpio->irq_unregister) { 97 nouveau_gpio_isr_del(dev, 0, nv_connector->hpd, 0xff,
115 pgpio->irq_unregister(dev, nv_connector->dcb->gpio_tag, 98 nouveau_connector_hotplug, connector);
116 nouveau_connector_hotplug, connector);
117 } 99 }
118 100
119 kfree(nv_connector->edid); 101 kfree(nv_connector->edid);
@@ -198,6 +180,10 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
198 return; 180 return;
199 nv_connector->detected_encoder = nv_encoder; 181 nv_connector->detected_encoder = nv_encoder;
200 182
183 if (dev_priv->card_type >= NV_50) {
184 connector->interlace_allowed = true;
185 connector->doublescan_allowed = true;
186 } else
201 if (nv_encoder->dcb->type == OUTPUT_LVDS || 187 if (nv_encoder->dcb->type == OUTPUT_LVDS ||
202 nv_encoder->dcb->type == OUTPUT_TMDS) { 188 nv_encoder->dcb->type == OUTPUT_TMDS) {
203 connector->doublescan_allowed = false; 189 connector->doublescan_allowed = false;
@@ -214,7 +200,7 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
214 connector->interlace_allowed = true; 200 connector->interlace_allowed = true;
215 } 201 }
216 202
217 if (nv_connector->dcb->type == DCB_CONNECTOR_DVI_I) { 203 if (nv_connector->type == DCB_CONNECTOR_DVI_I) {
218 drm_connector_property_set_value(connector, 204 drm_connector_property_set_value(connector,
219 dev->mode_config.dvi_i_subconnector_property, 205 dev->mode_config.dvi_i_subconnector_property,
220 nv_encoder->dcb->type == OUTPUT_TMDS ? 206 nv_encoder->dcb->type == OUTPUT_TMDS ?
@@ -397,7 +383,7 @@ nouveau_connector_force(struct drm_connector *connector)
397 struct nouveau_encoder *nv_encoder; 383 struct nouveau_encoder *nv_encoder;
398 int type; 384 int type;
399 385
400 if (nv_connector->dcb->type == DCB_CONNECTOR_DVI_I) { 386 if (nv_connector->type == DCB_CONNECTOR_DVI_I) {
401 if (connector->force == DRM_FORCE_ON_DIGITAL) 387 if (connector->force == DRM_FORCE_ON_DIGITAL)
402 type = OUTPUT_TMDS; 388 type = OUTPUT_TMDS;
403 else 389 else
@@ -420,15 +406,21 @@ static int
420nouveau_connector_set_property(struct drm_connector *connector, 406nouveau_connector_set_property(struct drm_connector *connector,
421 struct drm_property *property, uint64_t value) 407 struct drm_property *property, uint64_t value)
422{ 408{
409 struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
410 struct nouveau_display_engine *disp = &dev_priv->engine.display;
423 struct nouveau_connector *nv_connector = nouveau_connector(connector); 411 struct nouveau_connector *nv_connector = nouveau_connector(connector);
424 struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder; 412 struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
425 struct drm_encoder *encoder = to_drm_encoder(nv_encoder); 413 struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
426 struct drm_device *dev = connector->dev; 414 struct drm_device *dev = connector->dev;
415 struct nouveau_crtc *nv_crtc;
427 int ret; 416 int ret;
428 417
418 nv_crtc = NULL;
419 if (connector->encoder && connector->encoder->crtc)
420 nv_crtc = nouveau_crtc(connector->encoder->crtc);
421
429 /* Scaling mode */ 422 /* Scaling mode */
430 if (property == dev->mode_config.scaling_mode_property) { 423 if (property == dev->mode_config.scaling_mode_property) {
431 struct nouveau_crtc *nv_crtc = NULL;
432 bool modeset = false; 424 bool modeset = false;
433 425
434 switch (value) { 426 switch (value) {
@@ -454,8 +446,6 @@ nouveau_connector_set_property(struct drm_connector *connector,
454 modeset = true; 446 modeset = true;
455 nv_connector->scaling_mode = value; 447 nv_connector->scaling_mode = value;
456 448
457 if (connector->encoder && connector->encoder->crtc)
458 nv_crtc = nouveau_crtc(connector->encoder->crtc);
459 if (!nv_crtc) 449 if (!nv_crtc)
460 return 0; 450 return 0;
461 451
@@ -467,7 +457,7 @@ nouveau_connector_set_property(struct drm_connector *connector,
467 if (!ret) 457 if (!ret)
468 return -EINVAL; 458 return -EINVAL;
469 } else { 459 } else {
470 ret = nv_crtc->set_scale(nv_crtc, value, true); 460 ret = nv_crtc->set_scale(nv_crtc, true);
471 if (ret) 461 if (ret)
472 return ret; 462 return ret;
473 } 463 }
@@ -475,23 +465,58 @@ nouveau_connector_set_property(struct drm_connector *connector,
475 return 0; 465 return 0;
476 } 466 }
477 467
478 /* Dithering */ 468 /* Underscan */
479 if (property == dev->mode_config.dithering_mode_property) { 469 if (property == disp->underscan_property) {
480 struct nouveau_crtc *nv_crtc = NULL; 470 if (nv_connector->underscan != value) {
471 nv_connector->underscan = value;
472 if (!nv_crtc || !nv_crtc->set_scale)
473 return 0;
481 474
482 if (value == DRM_MODE_DITHERING_ON) 475 return nv_crtc->set_scale(nv_crtc, true);
483 nv_connector->use_dithering = true; 476 }
484 else 477
485 nv_connector->use_dithering = false; 478 return 0;
479 }
480
481 if (property == disp->underscan_hborder_property) {
482 if (nv_connector->underscan_hborder != value) {
483 nv_connector->underscan_hborder = value;
484 if (!nv_crtc || !nv_crtc->set_scale)
485 return 0;
486
487 return nv_crtc->set_scale(nv_crtc, true);
488 }
489
490 return 0;
491 }
492
493 if (property == disp->underscan_vborder_property) {
494 if (nv_connector->underscan_vborder != value) {
495 nv_connector->underscan_vborder = value;
496 if (!nv_crtc || !nv_crtc->set_scale)
497 return 0;
498
499 return nv_crtc->set_scale(nv_crtc, true);
500 }
501
502 return 0;
503 }
504
505 /* Dithering */
506 if (property == disp->dithering_mode) {
507 nv_connector->dithering_mode = value;
508 if (!nv_crtc || !nv_crtc->set_dither)
509 return 0;
486 510
487 if (connector->encoder && connector->encoder->crtc) 511 return nv_crtc->set_dither(nv_crtc, true);
488 nv_crtc = nouveau_crtc(connector->encoder->crtc); 512 }
489 513
514 if (property == disp->dithering_depth) {
515 nv_connector->dithering_depth = value;
490 if (!nv_crtc || !nv_crtc->set_dither) 516 if (!nv_crtc || !nv_crtc->set_dither)
491 return 0; 517 return 0;
492 518
493 return nv_crtc->set_dither(nv_crtc, nv_connector->use_dithering, 519 return nv_crtc->set_dither(nv_crtc, true);
494 true);
495 } 520 }
496 521
497 if (nv_encoder && nv_encoder->dcb->type == OUTPUT_TV) 522 if (nv_encoder && nv_encoder->dcb->type == OUTPUT_TV)
@@ -602,6 +627,46 @@ nouveau_connector_scaler_modes_add(struct drm_connector *connector)
602 return modes; 627 return modes;
603} 628}
604 629
630static void
631nouveau_connector_detect_depth(struct drm_connector *connector)
632{
633 struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
634 struct nouveau_connector *nv_connector = nouveau_connector(connector);
635 struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
636 struct nvbios *bios = &dev_priv->vbios;
637 struct drm_display_mode *mode = nv_connector->native_mode;
638 bool duallink;
639
640 /* if the edid is feeling nice enough to provide this info, use it */
641 if (nv_connector->edid && connector->display_info.bpc)
642 return;
643
644 /* if not, we're out of options unless we're LVDS, default to 6bpc */
645 connector->display_info.bpc = 6;
646 if (nv_encoder->dcb->type != OUTPUT_LVDS)
647 return;
648
649 /* LVDS: panel straps */
650 if (bios->fp_no_ddc) {
651 if (bios->fp.if_is_24bit)
652 connector->display_info.bpc = 8;
653 return;
654 }
655
656 /* LVDS: DDC panel, need to first determine the number of links to
657 * know which if_is_24bit flag to check...
658 */
659 if (nv_connector->edid &&
660 nv_connector->type == DCB_CONNECTOR_LVDS_SPWG)
661 duallink = ((u8 *)nv_connector->edid)[121] == 2;
662 else
663 duallink = mode->clock >= bios->fp.duallink_transition_clk;
664
665 if ((!duallink && (bios->fp.strapless_is_24bit & 1)) ||
666 ( duallink && (bios->fp.strapless_is_24bit & 2)))
667 connector->display_info.bpc = 8;
668}
669
605static int 670static int
606nouveau_connector_get_modes(struct drm_connector *connector) 671nouveau_connector_get_modes(struct drm_connector *connector)
607{ 672{
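
A compact model of the LVDS branch of nouveau_connector_detect_depth() above, with the strap tables reduced to plain parameters (all values illustrative): pick single versus dual link, then test the matching strapless_is_24bit bit.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static int
lvds_bpc(bool have_spwg_edid, bool edid_says_duallink,
	 int pixclk_khz, int duallink_transition_khz,
	 uint8_t strapless_is_24bit)
{
	bool duallink = have_spwg_edid ? edid_says_duallink
				       : pixclk_khz >= duallink_transition_khz;

	if ((!duallink && (strapless_is_24bit & 1)) ||
	    ( duallink && (strapless_is_24bit & 2)))
		return 8;
	return 6;
}

int main(void)
{
	/* 162 MHz panel, 120 MHz dual-link threshold, dual-link strap 24-bit */
	printf("%d bpc\n", lvds_bpc(false, false, 162000, 120000, 0x2));
	return 0;
}
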
@@ -631,6 +696,12 @@ nouveau_connector_get_modes(struct drm_connector *connector)
631 nv_connector->native_mode = drm_mode_duplicate(dev, &mode); 696 nv_connector->native_mode = drm_mode_duplicate(dev, &mode);
632 } 697 }
633 698
699 /* Determine display colour depth for everything except LVDS now,
700 * DP requires this before mode_valid() is called.
701 */
702 if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
703 nouveau_connector_detect_depth(connector);
704
634 /* Find the native mode if this is a digital panel, if we didn't 705 /* Find the native mode if this is a digital panel, if we didn't
635 * find any modes through DDC previously add the native mode to 706 * find any modes through DDC previously add the native mode to
636 * the list of modes. 707 * the list of modes.
@@ -646,12 +717,19 @@ nouveau_connector_get_modes(struct drm_connector *connector)
646 ret = 1; 717 ret = 1;
647 } 718 }
648 719
720 /* Determine LVDS colour depth, must happen after determining
721 * "native" mode as some VBIOS tables require us to use the
722 * pixel clock as part of the lookup...
723 */
724 if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
725 nouveau_connector_detect_depth(connector);
726
649 if (nv_encoder->dcb->type == OUTPUT_TV) 727 if (nv_encoder->dcb->type == OUTPUT_TV)
650 ret = get_slave_funcs(encoder)->get_modes(encoder, connector); 728 ret = get_slave_funcs(encoder)->get_modes(encoder, connector);
651 729
652 if (nv_connector->dcb->type == DCB_CONNECTOR_LVDS || 730 if (nv_connector->type == DCB_CONNECTOR_LVDS ||
653 nv_connector->dcb->type == DCB_CONNECTOR_LVDS_SPWG || 731 nv_connector->type == DCB_CONNECTOR_LVDS_SPWG ||
654 nv_connector->dcb->type == DCB_CONNECTOR_eDP) 732 nv_connector->type == DCB_CONNECTOR_eDP)
655 ret += nouveau_connector_scaler_modes_add(connector); 733 ret += nouveau_connector_scaler_modes_add(connector);
656 734
657 return ret; 735 return ret;
@@ -710,7 +788,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
710 case OUTPUT_DP: 788 case OUTPUT_DP:
711 max_clock = nv_encoder->dp.link_nr; 789 max_clock = nv_encoder->dp.link_nr;
712 max_clock *= nv_encoder->dp.link_bw; 790 max_clock *= nv_encoder->dp.link_bw;
713 clock = clock * nouveau_connector_bpp(connector) / 10; 791 clock = clock * (connector->display_info.bpc * 3) / 10;
714 break; 792 break;
715 default: 793 default:
716 BUG_ON(1); 794 BUG_ON(1);
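
The mode_valid() change above sizes a DP mode against link capacity using display_info.bpc. A worked example (mode and link numbers are illustrative): 4 lanes at a 270 MHz symbol rate against a 154 MHz pixel clock at 8 bpc, where the /10 reflects 8b/10b encoding (10 symbol bits per data byte).

#include <stdio.h>

int main(void)
{
	int link_nr = 4, link_bw = 270000;	/* kHz symbol rate per lane */
	int max_clock = link_nr * link_bw;	/* 1080000 */
	int mode_clock = 154000, bpc = 8;
	int clock = mode_clock * (bpc * 3) / 10;	/* 369600 */

	printf("%s\n", clock > max_clock ? "MODE_CLOCK_HIGH" : "MODE_OK");
	return 0;
}
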
@@ -768,96 +846,175 @@ nouveau_connector_funcs_lvds = {
768 .force = nouveau_connector_force 846 .force = nouveau_connector_force
769}; 847};
770 848
849static int
850drm_conntype_from_dcb(enum dcb_connector_type dcb)
851{
852 switch (dcb) {
853 case DCB_CONNECTOR_VGA : return DRM_MODE_CONNECTOR_VGA;
854 case DCB_CONNECTOR_TV_0 :
855 case DCB_CONNECTOR_TV_1 :
856 case DCB_CONNECTOR_TV_3 : return DRM_MODE_CONNECTOR_TV;
857 case DCB_CONNECTOR_DVI_I : return DRM_MODE_CONNECTOR_DVII;
858 case DCB_CONNECTOR_DVI_D : return DRM_MODE_CONNECTOR_DVID;
859 case DCB_CONNECTOR_LVDS :
860 case DCB_CONNECTOR_LVDS_SPWG: return DRM_MODE_CONNECTOR_LVDS;
861 case DCB_CONNECTOR_DP : return DRM_MODE_CONNECTOR_DisplayPort;
862 case DCB_CONNECTOR_eDP : return DRM_MODE_CONNECTOR_eDP;
863 case DCB_CONNECTOR_HDMI_0 :
864 case DCB_CONNECTOR_HDMI_1 : return DRM_MODE_CONNECTOR_HDMIA;
865 default:
866 break;
867 }
868
869 return DRM_MODE_CONNECTOR_Unknown;
870}
871
771struct drm_connector * 872struct drm_connector *
772nouveau_connector_create(struct drm_device *dev, int index) 873nouveau_connector_create(struct drm_device *dev, int index)
773{ 874{
774 const struct drm_connector_funcs *funcs = &nouveau_connector_funcs; 875 const struct drm_connector_funcs *funcs = &nouveau_connector_funcs;
775 struct drm_nouveau_private *dev_priv = dev->dev_private; 876 struct drm_nouveau_private *dev_priv = dev->dev_private;
776 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio; 877 struct nouveau_display_engine *disp = &dev_priv->engine.display;
777 struct nouveau_connector *nv_connector = NULL; 878 struct nouveau_connector *nv_connector = NULL;
778 struct dcb_connector_table_entry *dcb = NULL;
779 struct drm_connector *connector; 879 struct drm_connector *connector;
780 int type, ret = 0; 880 int type, ret = 0;
881 bool dummy;
781 882
782 NV_DEBUG_KMS(dev, "\n"); 883 NV_DEBUG_KMS(dev, "\n");
783 884
784 if (index >= dev_priv->vbios.dcb.connector.entries) 885 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
785 return ERR_PTR(-EINVAL); 886 nv_connector = nouveau_connector(connector);
786 887 if (nv_connector->index == index)
787 dcb = &dev_priv->vbios.dcb.connector.entry[index]; 888 return connector;
788 if (dcb->drm)
789 return dcb->drm;
790
791 switch (dcb->type) {
792 case DCB_CONNECTOR_VGA:
793 type = DRM_MODE_CONNECTOR_VGA;
794 break;
795 case DCB_CONNECTOR_TV_0:
796 case DCB_CONNECTOR_TV_1:
797 case DCB_CONNECTOR_TV_3:
798 type = DRM_MODE_CONNECTOR_TV;
799 break;
800 case DCB_CONNECTOR_DVI_I:
801 type = DRM_MODE_CONNECTOR_DVII;
802 break;
803 case DCB_CONNECTOR_DVI_D:
804 type = DRM_MODE_CONNECTOR_DVID;
805 break;
806 case DCB_CONNECTOR_HDMI_0:
807 case DCB_CONNECTOR_HDMI_1:
808 type = DRM_MODE_CONNECTOR_HDMIA;
809 break;
810 case DCB_CONNECTOR_LVDS:
811 case DCB_CONNECTOR_LVDS_SPWG:
812 type = DRM_MODE_CONNECTOR_LVDS;
813 funcs = &nouveau_connector_funcs_lvds;
814 break;
815 case DCB_CONNECTOR_DP:
816 type = DRM_MODE_CONNECTOR_DisplayPort;
817 break;
818 case DCB_CONNECTOR_eDP:
819 type = DRM_MODE_CONNECTOR_eDP;
820 break;
821 default:
822 NV_ERROR(dev, "unknown connector type: 0x%02x!!\n", dcb->type);
823 return ERR_PTR(-EINVAL);
824 } 889 }
825 890
826 nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL); 891 nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL);
827 if (!nv_connector) 892 if (!nv_connector)
828 return ERR_PTR(-ENOMEM); 893 return ERR_PTR(-ENOMEM);
829 nv_connector->dcb = dcb; 894
830 connector = &nv_connector->base; 895 connector = &nv_connector->base;
896 nv_connector->index = index;
897
898 /* attempt to parse vbios connector type and hotplug gpio */
899 nv_connector->dcb = dcb_conn(dev, index);
900 if (nv_connector->dcb) {
901 static const u8 hpd[16] = {
902 0xff, 0x07, 0x08, 0xff, 0xff, 0x51, 0x52, 0xff,
903 0xff, 0xff, 0xff, 0xff, 0xff, 0x5e, 0x5f, 0x60,
904 };
905
906 u32 entry = ROM16(nv_connector->dcb[0]);
907 if (dcb_conntab(dev)[3] >= 4)
908 entry |= (u32)ROM16(nv_connector->dcb[2]) << 16;
909
910 nv_connector->hpd = ffs((entry & 0x07033000) >> 12);
911 nv_connector->hpd = hpd[nv_connector->hpd];
912
913 nv_connector->type = nv_connector->dcb[0];
914 if (drm_conntype_from_dcb(nv_connector->type) ==
915 DRM_MODE_CONNECTOR_Unknown) {
916 NV_WARN(dev, "unknown connector type %02x\n",
917 nv_connector->type);
918 nv_connector->type = DCB_CONNECTOR_NONE;
919 }
831 920
832 /* defaults, will get overridden in detect() */ 921 /* Gigabyte NX85T */
833 connector->interlace_allowed = false; 922 if (nv_match_device(dev, 0x0421, 0x1458, 0x344c)) {
834 connector->doublescan_allowed = false; 923 if (nv_connector->type == DCB_CONNECTOR_HDMI_1)
924 nv_connector->type = DCB_CONNECTOR_DVI_I;
925 }
835 926
836 drm_connector_init(dev, connector, funcs, type); 927 /* Gigabyte GV-NX86T512H */
837 drm_connector_helper_add(connector, &nouveau_connector_helper_funcs); 928 if (nv_match_device(dev, 0x0402, 0x1458, 0x3455)) {
929 if (nv_connector->type == DCB_CONNECTOR_HDMI_1)
930 nv_connector->type = DCB_CONNECTOR_DVI_I;
931 }
932 } else {
933 nv_connector->type = DCB_CONNECTOR_NONE;
934 nv_connector->hpd = DCB_GPIO_UNUSED;
935 }
936
937 /* no vbios data, or an unknown dcb connector type - attempt to
938 * figure out something suitable ourselves
939 */
940 if (nv_connector->type == DCB_CONNECTOR_NONE) {
941 struct drm_nouveau_private *dev_priv = dev->dev_private;
942 struct dcb_table *dcbt = &dev_priv->vbios.dcb;
943 u32 encoders = 0;
944 int i;
945
946 for (i = 0; i < dcbt->entries; i++) {
947 if (dcbt->entry[i].connector == nv_connector->index)
948 encoders |= (1 << dcbt->entry[i].type);
949 }
838 950
839 /* Check if we need dithering enabled */ 951 if (encoders & (1 << OUTPUT_DP)) {
840 if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { 952 if (encoders & (1 << OUTPUT_TMDS))
841 bool dummy, is_24bit = false; 953 nv_connector->type = DCB_CONNECTOR_DP;
954 else
955 nv_connector->type = DCB_CONNECTOR_eDP;
956 } else
957 if (encoders & (1 << OUTPUT_TMDS)) {
958 if (encoders & (1 << OUTPUT_ANALOG))
959 nv_connector->type = DCB_CONNECTOR_DVI_I;
960 else
961 nv_connector->type = DCB_CONNECTOR_DVI_D;
962 } else
963 if (encoders & (1 << OUTPUT_ANALOG)) {
964 nv_connector->type = DCB_CONNECTOR_VGA;
965 } else
966 if (encoders & (1 << OUTPUT_LVDS)) {
967 nv_connector->type = DCB_CONNECTOR_LVDS;
968 } else
969 if (encoders & (1 << OUTPUT_TV)) {
970 nv_connector->type = DCB_CONNECTOR_TV_0;
971 }
972 }
842 973
843 ret = nouveau_bios_parse_lvds_table(dev, 0, &dummy, &is_24bit); 974 type = drm_conntype_from_dcb(nv_connector->type);
975 if (type == DRM_MODE_CONNECTOR_LVDS) {
976 ret = nouveau_bios_parse_lvds_table(dev, 0, &dummy, &dummy);
844 if (ret) { 977 if (ret) {
845 NV_ERROR(dev, "Error parsing LVDS table, disabling " 978 NV_ERROR(dev, "Error parsing LVDS table, disabling\n");
846 "LVDS\n"); 979 kfree(nv_connector);
847 goto fail; 980 return ERR_PTR(ret);
848 } 981 }
849 982
850 nv_connector->use_dithering = !is_24bit; 983 funcs = &nouveau_connector_funcs_lvds;
984 } else {
985 funcs = &nouveau_connector_funcs;
851 } 986 }
852 987
988 /* defaults, will get overridden in detect() */
989 connector->interlace_allowed = false;
990 connector->doublescan_allowed = false;
991
992 drm_connector_init(dev, connector, funcs, type);
993 drm_connector_helper_add(connector, &nouveau_connector_helper_funcs);
994
853 /* Init DVI-I specific properties */ 995 /* Init DVI-I specific properties */
854 if (dcb->type == DCB_CONNECTOR_DVI_I) { 996 if (nv_connector->type == DCB_CONNECTOR_DVI_I)
855 drm_mode_create_dvi_i_properties(dev);
856 drm_connector_attach_property(connector, dev->mode_config.dvi_i_subconnector_property, 0); 997 drm_connector_attach_property(connector, dev->mode_config.dvi_i_subconnector_property, 0);
857 drm_connector_attach_property(connector, dev->mode_config.dvi_i_select_subconnector_property, 0); 998
999 /* Add overscan compensation options to digital outputs */
1000 if (disp->underscan_property &&
1001 (nv_connector->type == DCB_CONNECTOR_DVI_D ||
1002 nv_connector->type == DCB_CONNECTOR_DVI_I ||
1003 nv_connector->type == DCB_CONNECTOR_HDMI_0 ||
1004 nv_connector->type == DCB_CONNECTOR_HDMI_1 ||
1005 nv_connector->type == DCB_CONNECTOR_DP)) {
1006 drm_connector_attach_property(connector,
1007 disp->underscan_property,
1008 UNDERSCAN_OFF);
1009 drm_connector_attach_property(connector,
1010 disp->underscan_hborder_property,
1011 0);
1012 drm_connector_attach_property(connector,
1013 disp->underscan_vborder_property,
1014 0);
858 } 1015 }
859 1016
860 switch (dcb->type) { 1017 switch (nv_connector->type) {
861 case DCB_CONNECTOR_VGA: 1018 case DCB_CONNECTOR_VGA:
862 if (dev_priv->card_type >= NV_50) { 1019 if (dev_priv->card_type >= NV_50) {
863 drm_connector_attach_property(connector, 1020 drm_connector_attach_property(connector,
@@ -876,32 +1033,32 @@ nouveau_connector_create(struct drm_device *dev, int index)
876 drm_connector_attach_property(connector, 1033 drm_connector_attach_property(connector,
877 dev->mode_config.scaling_mode_property, 1034 dev->mode_config.scaling_mode_property,
878 nv_connector->scaling_mode); 1035 nv_connector->scaling_mode);
879 drm_connector_attach_property(connector, 1036 if (disp->dithering_mode) {
880 dev->mode_config.dithering_mode_property, 1037 nv_connector->dithering_mode = DITHERING_MODE_AUTO;
881 nv_connector->use_dithering ? 1038 drm_connector_attach_property(connector,
882 DRM_MODE_DITHERING_ON : DRM_MODE_DITHERING_OFF); 1039 disp->dithering_mode,
1040 nv_connector->dithering_mode);
1041 }
1042 if (disp->dithering_depth) {
1043 nv_connector->dithering_depth = DITHERING_DEPTH_AUTO;
1044 drm_connector_attach_property(connector,
1045 disp->dithering_depth,
1046 nv_connector->dithering_depth);
1047 }
883 break; 1048 break;
884 } 1049 }
885 1050
886 if (nv_connector->dcb->gpio_tag != 0xff && pgpio->irq_register) { 1051 connector->polled = DRM_CONNECTOR_POLL_CONNECT;
887 pgpio->irq_register(dev, nv_connector->dcb->gpio_tag, 1052 if (nv_connector->hpd != DCB_GPIO_UNUSED) {
888 nouveau_connector_hotplug, connector); 1053 ret = nouveau_gpio_isr_add(dev, 0, nv_connector->hpd, 0xff,
889 1054 nouveau_connector_hotplug,
890 connector->polled = DRM_CONNECTOR_POLL_HPD; 1055 connector);
891 } else { 1056 if (ret == 0)
892 connector->polled = DRM_CONNECTOR_POLL_CONNECT; 1057 connector->polled = DRM_CONNECTOR_POLL_HPD;
893 } 1058 }
894 1059
895 drm_sysfs_connector_add(connector); 1060 drm_sysfs_connector_add(connector);
896 1061 return connector;
897 dcb->drm = connector;
898 return dcb->drm;
899
900fail:
901 drm_connector_cleanup(connector);
902 kfree(connector);
903 return ERR_PTR(ret);
904
905} 1062}
906 1063
907static void 1064static void
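
When the VBIOS gives no usable connector type, the creation path above infers one from the encoders routed to the connector. The priority order, modeled as a tiny standalone function (the enum values are illustrative placeholders for the OUTPUT_* defines):

#include <stdio.h>

enum { OUT_ANALOG, OUT_TV, OUT_TMDS, OUT_LVDS, OUT_DP };

static const char *
guess_connector(unsigned encoders)
{
	if (encoders & (1 << OUT_DP))
		return (encoders & (1 << OUT_TMDS)) ? "DP" : "eDP";
	if (encoders & (1 << OUT_TMDS))
		return (encoders & (1 << OUT_ANALOG)) ? "DVI-I" : "DVI-D";
	if (encoders & (1 << OUT_ANALOG))
		return "VGA";
	if (encoders & (1 << OUT_LVDS))
		return "LVDS";
	if (encoders & (1 << OUT_TV))
		return "TV";
	return "none";
}

int main(void)
{
	printf("%s\n", guess_connector((1 << OUT_TMDS) | (1 << OUT_ANALOG)));
	return 0;
}
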
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index 711b1e9203a..e4857021304 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -30,13 +30,43 @@
30#include "drm_edid.h" 30#include "drm_edid.h"
31#include "nouveau_i2c.h" 31#include "nouveau_i2c.h"
32 32
33enum nouveau_underscan_type {
34 UNDERSCAN_OFF,
35 UNDERSCAN_ON,
36 UNDERSCAN_AUTO,
37};
38
39/* the enum values specifically defined here match nv50/nvd0 hw values, and
40 * the code relies on this
41 */
42enum nouveau_dithering_mode {
43 DITHERING_MODE_OFF = 0x00,
44 DITHERING_MODE_ON = 0x01,
45 DITHERING_MODE_DYNAMIC2X2 = 0x10 | DITHERING_MODE_ON,
46 DITHERING_MODE_STATIC2X2 = 0x18 | DITHERING_MODE_ON,
47 DITHERING_MODE_TEMPORAL = 0x20 | DITHERING_MODE_ON,
48 DITHERING_MODE_AUTO
49};
50
51enum nouveau_dithering_depth {
52 DITHERING_DEPTH_6BPC = 0x00,
53 DITHERING_DEPTH_8BPC = 0x02,
54 DITHERING_DEPTH_AUTO
55};
56
33struct nouveau_connector { 57struct nouveau_connector {
34 struct drm_connector base; 58 struct drm_connector base;
59 enum dcb_connector_type type;
60 u8 index;
61 u8 *dcb;
62 u8 hpd;
35 63
36 struct dcb_connector_table_entry *dcb; 64 int dithering_mode;
37 65 int dithering_depth;
38 int scaling_mode; 66 int scaling_mode;
39 bool use_dithering; 67 enum nouveau_underscan_type underscan;
68 u32 underscan_hborder;
69 u32 underscan_vborder;
40 70
41 struct nouveau_encoder *detected_encoder; 71 struct nouveau_encoder *detected_encoder;
42 struct edid *edid; 72 struct edid *edid;
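
The dithering mode values above pack a method field on top of the ON bit so they can be written to nv50/nvd0 hardware directly; a quick standalone check of that composition:

#include <assert.h>

#define DITHERING_MODE_ON          0x01
#define DITHERING_MODE_DYNAMIC2X2 (0x10 | DITHERING_MODE_ON)	/* 0x11 */
#define DITHERING_MODE_STATIC2X2  (0x18 | DITHERING_MODE_ON)	/* 0x19 */
#define DITHERING_MODE_TEMPORAL   (0x20 | DITHERING_MODE_ON)	/* 0x21 */

int main(void)
{
	assert(DITHERING_MODE_DYNAMIC2X2 == 0x11);
	assert(DITHERING_MODE_STATIC2X2  == 0x19);
	assert(DITHERING_MODE_TEMPORAL   == 0x21);
	return 0;
}
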
diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h
index bf8e1289953..686f6b4a1da 100644
--- a/drivers/gpu/drm/nouveau/nouveau_crtc.h
+++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h
@@ -32,8 +32,6 @@ struct nouveau_crtc {
32 32
33 int index; 33 int index;
34 34
35 struct drm_display_mode *mode;
36
37 uint32_t dpms_saved_fp_control; 35 uint32_t dpms_saved_fp_control;
38 uint32_t fp_users; 36 uint32_t fp_users;
39 int saturation; 37 int saturation;
@@ -67,8 +65,8 @@ struct nouveau_crtc {
67 int depth; 65 int depth;
68 } lut; 66 } lut;
69 67
70 int (*set_dither)(struct nouveau_crtc *crtc, bool on, bool update); 68 int (*set_dither)(struct nouveau_crtc *crtc, bool update);
71 int (*set_scale)(struct nouveau_crtc *crtc, int mode, bool update); 69 int (*set_scale)(struct nouveau_crtc *crtc, bool update);
72}; 70};
73 71
74static inline struct nouveau_crtc *nouveau_crtc(struct drm_crtc *crtc) 72static inline struct nouveau_crtc *nouveau_crtc(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index 8e1592368cc..fa2ec491f6a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -44,7 +44,7 @@ nouveau_debugfs_channel_info(struct seq_file *m, void *data)
44 seq_printf(m, "channel id : %d\n", chan->id); 44 seq_printf(m, "channel id : %d\n", chan->id);
45 45
46 seq_printf(m, "cpu fifo state:\n"); 46 seq_printf(m, "cpu fifo state:\n");
47 seq_printf(m, "  base: 0x%08x\n", chan->pushbuf_base); 47 seq_printf(m, "  base: 0x%010llx\n", chan->pushbuf_base);
48 seq_printf(m, " max: 0x%08x\n", chan->dma.max << 2); 48 seq_printf(m, " max: 0x%08x\n", chan->dma.max << 2);
49 seq_printf(m, " cur: 0x%08x\n", chan->dma.cur << 2); 49 seq_printf(m, " cur: 0x%08x\n", chan->dma.cur << 2);
50 seq_printf(m, " put: 0x%08x\n", chan->dma.put << 2); 50 seq_printf(m, " put: 0x%08x\n", chan->dma.put << 2);
@@ -178,6 +178,7 @@ static struct drm_info_list nouveau_debugfs_list[] = {
178 { "memory", nouveau_debugfs_memory_info, 0, NULL }, 178 { "memory", nouveau_debugfs_memory_info, 0, NULL },
179 { "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL }, 179 { "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
180 { "ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL }, 180 { "ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL },
181 { "ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL },
181}; 182};
182#define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list) 183#define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)
183 184
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index b12fd2c8081..3cb52bc52b2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -32,6 +32,8 @@
32#include "nouveau_hw.h" 32#include "nouveau_hw.h"
33#include "nouveau_crtc.h" 33#include "nouveau_crtc.h"
34#include "nouveau_dma.h" 34#include "nouveau_dma.h"
35#include "nouveau_connector.h"
36#include "nouveau_gpio.h"
35#include "nv50_display.h" 37#include "nv50_display.h"
36 38
37static void 39static void
@@ -64,7 +66,7 @@ static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
64int 66int
65nouveau_framebuffer_init(struct drm_device *dev, 67nouveau_framebuffer_init(struct drm_device *dev,
66 struct nouveau_framebuffer *nv_fb, 68 struct nouveau_framebuffer *nv_fb,
67 struct drm_mode_fb_cmd *mode_cmd, 69 struct drm_mode_fb_cmd2 *mode_cmd,
68 struct nouveau_bo *nvbo) 70 struct nouveau_bo *nvbo)
69{ 71{
70 struct drm_nouveau_private *dev_priv = dev->dev_private; 72 struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -107,14 +109,14 @@ nouveau_framebuffer_init(struct drm_device *dev,
107 109
108 if (!tile_flags) { 110 if (!tile_flags) {
109 if (dev_priv->card_type < NV_D0) 111 if (dev_priv->card_type < NV_D0)
110 nv_fb->r_pitch = 0x00100000 | fb->pitch; 112 nv_fb->r_pitch = 0x00100000 | fb->pitches[0];
111 else 113 else
112 nv_fb->r_pitch = 0x01000000 | fb->pitch; 114 nv_fb->r_pitch = 0x01000000 | fb->pitches[0];
113 } else { 115 } else {
114 u32 mode = nvbo->tile_mode; 116 u32 mode = nvbo->tile_mode;
115 if (dev_priv->card_type >= NV_C0) 117 if (dev_priv->card_type >= NV_C0)
116 mode >>= 4; 118 mode >>= 4;
117 nv_fb->r_pitch = ((fb->pitch / 4) << 4) | mode; 119 nv_fb->r_pitch = ((fb->pitches[0] / 4) << 4) | mode;
118 } 120 }
119 } 121 }
120 122
@@ -124,13 +126,13 @@ nouveau_framebuffer_init(struct drm_device *dev,
124static struct drm_framebuffer * 126static struct drm_framebuffer *
125nouveau_user_framebuffer_create(struct drm_device *dev, 127nouveau_user_framebuffer_create(struct drm_device *dev,
126 struct drm_file *file_priv, 128 struct drm_file *file_priv,
127 struct drm_mode_fb_cmd *mode_cmd) 129 struct drm_mode_fb_cmd2 *mode_cmd)
128{ 130{
129 struct nouveau_framebuffer *nouveau_fb; 131 struct nouveau_framebuffer *nouveau_fb;
130 struct drm_gem_object *gem; 132 struct drm_gem_object *gem;
131 int ret; 133 int ret;
132 134
133 gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle); 135 gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
134 if (!gem) 136 if (!gem)
135 return ERR_PTR(-ENOENT); 137 return ERR_PTR(-ENOENT);
136 138
@@ -147,11 +149,186 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
147 return &nouveau_fb->base; 149 return &nouveau_fb->base;
148} 150}
149 151
150const struct drm_mode_config_funcs nouveau_mode_config_funcs = { 152static const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
151 .fb_create = nouveau_user_framebuffer_create, 153 .fb_create = nouveau_user_framebuffer_create,
152 .output_poll_changed = nouveau_fbcon_output_poll_changed, 154 .output_poll_changed = nouveau_fbcon_output_poll_changed,
153}; 155};
154 156
157
158struct drm_prop_enum_list {
159 u8 gen_mask;
160 int type;
161 char *name;
162};
163
164static struct drm_prop_enum_list underscan[] = {
165 { 6, UNDERSCAN_AUTO, "auto" },
166 { 6, UNDERSCAN_OFF, "off" },
167 { 6, UNDERSCAN_ON, "on" },
168 {}
169};
170
171static struct drm_prop_enum_list dither_mode[] = {
172 { 7, DITHERING_MODE_AUTO, "auto" },
173 { 7, DITHERING_MODE_OFF, "off" },
174 { 1, DITHERING_MODE_ON, "on" },
175 { 6, DITHERING_MODE_STATIC2X2, "static 2x2" },
176 { 6, DITHERING_MODE_DYNAMIC2X2, "dynamic 2x2" },
177 { 4, DITHERING_MODE_TEMPORAL, "temporal" },
178 {}
179};
180
181static struct drm_prop_enum_list dither_depth[] = {
182 { 6, DITHERING_DEPTH_AUTO, "auto" },
183 { 6, DITHERING_DEPTH_6BPC, "6 bpc" },
184 { 6, DITHERING_DEPTH_8BPC, "8 bpc" },
185 {}
186};
187
188#define PROP_ENUM(p,gen,n,list) do { \
189 struct drm_prop_enum_list *l = (list); \
190 int c = 0; \
191 while (l->gen_mask) { \
192 if (l->gen_mask & (1 << (gen))) \
193 c++; \
194 l++; \
195 } \
196 if (c) { \
197 p = drm_property_create(dev, DRM_MODE_PROP_ENUM, n, c); \
198 l = (list); \
199 c = 0; \
200 while (p && l->gen_mask) { \
201 if (l->gen_mask & (1 << (gen))) { \
202 drm_property_add_enum(p, c, l->type, l->name); \
203 c++; \
204 } \
205 l++; \
206 } \
207 } \
208} while (0)
209
210int
211nouveau_display_init(struct drm_device *dev)
212{
213 struct drm_nouveau_private *dev_priv = dev->dev_private;
214 struct nouveau_display_engine *disp = &dev_priv->engine.display;
215 struct drm_connector *connector;
216 int ret;
217
218 ret = disp->init(dev);
219 if (ret)
220 return ret;
221
222 drm_kms_helper_poll_enable(dev);
223
224 /* enable hotplug interrupts */
225 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
226 struct nouveau_connector *conn = nouveau_connector(connector);
227 nouveau_gpio_irq(dev, 0, conn->hpd, 0xff, true);
228 }
229
230 return ret;
231}
232
233void
234nouveau_display_fini(struct drm_device *dev)
235{
236 struct drm_nouveau_private *dev_priv = dev->dev_private;
237 struct nouveau_display_engine *disp = &dev_priv->engine.display;
238 struct drm_connector *connector;
239
240 /* disable hotplug interrupts */
241 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
242 struct nouveau_connector *conn = nouveau_connector(connector);
243 nouveau_gpio_irq(dev, 0, conn->hpd, 0xff, false);
244 }
245
246 drm_kms_helper_poll_disable(dev);
247 disp->fini(dev);
248}
249
250int
251nouveau_display_create(struct drm_device *dev)
252{
253 struct drm_nouveau_private *dev_priv = dev->dev_private;
254 struct nouveau_display_engine *disp = &dev_priv->engine.display;
255 int ret, gen;
256
257 drm_mode_config_init(dev);
258 drm_mode_create_scaling_mode_property(dev);
259 drm_mode_create_dvi_i_properties(dev);
260
261 if (dev_priv->card_type < NV_50)
262 gen = 0;
263 else
264 if (dev_priv->card_type < NV_D0)
265 gen = 1;
266 else
267 gen = 2;
268
269 PROP_ENUM(disp->dithering_mode, gen, "dithering mode", dither_mode);
270 PROP_ENUM(disp->dithering_depth, gen, "dithering depth", dither_depth);
271 PROP_ENUM(disp->underscan_property, gen, "underscan", underscan);
272
273 disp->underscan_hborder_property =
274 drm_property_create(dev, DRM_MODE_PROP_RANGE,
275 "underscan hborder", 2);
276 disp->underscan_hborder_property->values[0] = 0;
277 disp->underscan_hborder_property->values[1] = 128;
278
279 disp->underscan_vborder_property =
280 drm_property_create(dev, DRM_MODE_PROP_RANGE,
281 "underscan vborder", 2);
282 disp->underscan_vborder_property->values[0] = 0;
283 disp->underscan_vborder_property->values[1] = 128;
284
285 dev->mode_config.funcs = (void *)&nouveau_mode_config_funcs;
286 dev->mode_config.fb_base = pci_resource_start(dev->pdev, 1);
287
288 dev->mode_config.min_width = 0;
289 dev->mode_config.min_height = 0;
290 if (dev_priv->card_type < NV_10) {
291 dev->mode_config.max_width = 2048;
292 dev->mode_config.max_height = 2048;
293 } else
294 if (dev_priv->card_type < NV_50) {
295 dev->mode_config.max_width = 4096;
296 dev->mode_config.max_height = 4096;
297 } else {
298 dev->mode_config.max_width = 8192;
299 dev->mode_config.max_height = 8192;
300 }
301
302 drm_kms_helper_poll_init(dev);
303 drm_kms_helper_poll_disable(dev);
304
305 ret = disp->create(dev);
306 if (ret)
307 return ret;
308
309 if (dev->mode_config.num_crtc) {
310 ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
311 if (ret)
312 return ret;
313 }
314
315 return ret;
316}
317
318void
319nouveau_display_destroy(struct drm_device *dev)
320{
321 struct drm_nouveau_private *dev_priv = dev->dev_private;
322 struct nouveau_display_engine *disp = &dev_priv->engine.display;
323
324 drm_vblank_cleanup(dev);
325
326 disp->destroy(dev);
327
328 drm_kms_helper_poll_fini(dev);
329 drm_mode_config_cleanup(dev);
330}
331
155int 332int
156nouveau_vblank_enable(struct drm_device *dev, int crtc) 333nouveau_vblank_enable(struct drm_device *dev, int crtc)
157{ 334{
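
nouveau_display_create() above derives gen (0: pre-NV50, 1: NV50 family, 2: NVD0+) and PROP_ENUM() then exposes only the enum entries whose gen_mask covers that gen. A userspace model of the filter, with values copied from the dither_mode table above:

#include <stdio.h>

struct prop_enum { unsigned char gen_mask; int type; const char *name; };

static const struct prop_enum dither_mode[] = {
	{ 7, 0, "auto" }, { 7, 1, "off" }, { 1, 2, "on" },
	{ 6, 3, "static 2x2" }, { 6, 4, "dynamic 2x2" }, { 4, 5, "temporal" },
	{}
};

int main(void)
{
	int gen = 1;	/* NV50 family, as selected above */
	const struct prop_enum *l;

	for (l = dither_mode; l->gen_mask; l++)
		if (l->gen_mask & (1 << gen))
			printf("expose \"%s\"\n", l->name);
	return 0;
}
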
@@ -294,7 +471,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
294 /* Initialize a page flip struct */ 471 /* Initialize a page flip struct */
295 *s = (struct nouveau_page_flip_state) 472 *s = (struct nouveau_page_flip_state)
296 { { }, event, nouveau_crtc(crtc)->index, 473 { { }, event, nouveau_crtc(crtc)->index,
297 fb->bits_per_pixel, fb->pitch, crtc->x, crtc->y, 474 fb->bits_per_pixel, fb->pitches[0], crtc->x, crtc->y,
298 new_bo->bo.offset }; 475 new_bo->bo.offset };
299 476
300 /* Choose the channel the flip will be handled in */ 477 /* Choose the channel the flip will be handled in */
@@ -305,7 +482,10 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
305 482
306 /* Emit a page flip */ 483 /* Emit a page flip */
307 if (dev_priv->card_type >= NV_50) { 484 if (dev_priv->card_type >= NV_50) {
308 ret = nv50_display_flip_next(crtc, fb, chan); 485 if (dev_priv->card_type >= NV_D0)
486 ret = nvd0_display_flip_next(crtc, fb, chan, 0);
487 else
488 ret = nv50_display_flip_next(crtc, fb, chan);
309 if (ret) { 489 if (ret) {
310 nouveau_channel_put(&chan); 490 nouveau_channel_put(&chan);
311 goto fail_unreserve; 491 goto fail_unreserve;
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 00bc6eaad55..4c2e4e5925f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -134,11 +134,13 @@ OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords)
134 * -EBUSY if timeout exceeded 134 * -EBUSY if timeout exceeded
135 */ 135 */
136static inline int 136static inline int
137READ_GET(struct nouveau_channel *chan, uint32_t *prev_get, uint32_t *timeout) 137READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
138{ 138{
139 uint32_t val; 139 uint64_t val;
140 140
141 val = nvchan_rd32(chan, chan->user_get); 141 val = nvchan_rd32(chan, chan->user_get);
142 if (chan->user_get_hi)
143 val |= (uint64_t)nvchan_rd32(chan, chan->user_get_hi) << 32;
142 144
143 /* reset counter as long as GET is still advancing, this is 145 /* reset counter as long as GET is still advancing, this is
144 * to avoid misdetecting a GPU lockup if the GPU happens to 146 * to avoid misdetecting a GPU lockup if the GPU happens to
@@ -218,8 +220,8 @@ nv50_dma_push_wait(struct nouveau_channel *chan, int count)
218static int 220static int
219nv50_dma_wait(struct nouveau_channel *chan, int slots, int count) 221nv50_dma_wait(struct nouveau_channel *chan, int slots, int count)
220{ 222{
221 uint32_t cnt = 0, prev_get = 0; 223 uint64_t prev_get = 0;
222 int ret; 224 int ret, cnt = 0;
223 225
224 ret = nv50_dma_push_wait(chan, slots + 1); 226 ret = nv50_dma_push_wait(chan, slots + 1);
225 if (unlikely(ret)) 227 if (unlikely(ret))
@@ -261,8 +263,8 @@ nv50_dma_wait(struct nouveau_channel *chan, int slots, int count)
261int 263int
262nouveau_dma_wait(struct nouveau_channel *chan, int slots, int size) 264nouveau_dma_wait(struct nouveau_channel *chan, int slots, int size)
263{ 265{
264 uint32_t prev_get = 0, cnt = 0; 266 uint64_t prev_get = 0;
265 int get; 267 int cnt = 0, get;
266 268
267 if (chan->dma.ib_max) 269 if (chan->dma.ib_max)
268 return nv50_dma_wait(chan, slots, size); 270 return nv50_dma_wait(chan, slots, size);
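
READ_GET() above now folds the new user_get_hi register into a 64-bit GET pointer. A minimal sketch of the combine (the register window is mocked; offsets 0x44/0x60 follow the user_get/user_get_hi assignments in nouveau_channel.c):

#include <stdint.h>
#include <stdio.h>

static uint32_t regs[0x100 / 4];	/* mock fifo register window */

static uint32_t rd32(uint32_t off) { return regs[off / 4]; }

static uint64_t
read_get64(int has_hi)
{
	uint64_t val = rd32(0x44);		/* user_get */
	if (has_hi)				/* user_get_hi, NV50+ only */
		val |= (uint64_t)rd32(0x60) << 32;
	return val;
}

int main(void)
{
	regs[0x44 / 4] = 0x1000;
	regs[0x60 / 4] = 0x1;
	printf("0x%llx\n", (unsigned long long)read_get64(1));
	return 0;
}
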
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index de5efe71fef..9b93b703cea 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -29,6 +29,7 @@
29#include "nouveau_connector.h" 29#include "nouveau_connector.h"
30#include "nouveau_encoder.h" 30#include "nouveau_encoder.h"
31#include "nouveau_crtc.h" 31#include "nouveau_crtc.h"
32#include "nouveau_gpio.h"
32 33
33/****************************************************************************** 34/******************************************************************************
34 * aux channel util functions 35 * aux channel util functions
@@ -273,8 +274,6 @@ nouveau_dp_tu_update(struct drm_device *dev, int or, int link, u32 clk, u32 bpp)
273u8 * 274u8 *
274nouveau_dp_bios_data(struct drm_device *dev, struct dcb_entry *dcb, u8 **entry) 275nouveau_dp_bios_data(struct drm_device *dev, struct dcb_entry *dcb, u8 **entry)
275{ 276{
276 struct drm_nouveau_private *dev_priv = dev->dev_private;
277 struct nvbios *bios = &dev_priv->vbios;
278 struct bit_entry d; 277 struct bit_entry d;
279 u8 *table; 278 u8 *table;
280 int i; 279 int i;
@@ -289,7 +288,7 @@ nouveau_dp_bios_data(struct drm_device *dev, struct dcb_entry *dcb, u8 **entry)
289 return NULL; 288 return NULL;
290 } 289 }
291 290
292 table = ROMPTR(bios, d.data[0]); 291 table = ROMPTR(dev, d.data[0]);
293 if (!table) { 292 if (!table) {
294 NV_ERROR(dev, "displayport table pointer invalid\n"); 293 NV_ERROR(dev, "displayport table pointer invalid\n");
295 return NULL; 294 return NULL;
@@ -306,7 +305,7 @@ nouveau_dp_bios_data(struct drm_device *dev, struct dcb_entry *dcb, u8 **entry)
306 } 305 }
307 306
308 for (i = 0; i < table[3]; i++) { 307 for (i = 0; i < table[3]; i++) {
309 *entry = ROMPTR(bios, table[table[1] + (i * table[2])]); 308 *entry = ROMPTR(dev, table[table[1] + (i * table[2])]);
310 if (*entry && bios_encoder_match(dcb, ROM32((*entry)[0]))) 309 if (*entry && bios_encoder_match(dcb, ROM32((*entry)[0])))
311 return table; 310 return table;
312 } 311 }
@@ -336,7 +335,6 @@ struct dp_state {
336static void 335static void
337dp_set_link_config(struct drm_device *dev, struct dp_state *dp) 336dp_set_link_config(struct drm_device *dev, struct dp_state *dp)
338{ 337{
339 struct drm_nouveau_private *dev_priv = dev->dev_private;
340 int or = dp->or, link = dp->link; 338 int or = dp->or, link = dp->link;
341 u8 *entry, sink[2]; 339 u8 *entry, sink[2];
342 u32 dp_ctrl; 340 u32 dp_ctrl;
@@ -360,7 +358,7 @@ dp_set_link_config(struct drm_device *dev, struct dp_state *dp)
360 * table, that has (among other things) pointers to more scripts that 358 * table, that has (among other things) pointers to more scripts that
361 * need to be executed, this time depending on link speed. 359 * need to be executed, this time depending on link speed.
362 */ 360 */
363 entry = ROMPTR(&dev_priv->vbios, dp->entry[10]); 361 entry = ROMPTR(dev, dp->entry[10]);
364 if (entry) { 362 if (entry) {
365 if (dp->table[0] < 0x30) { 363 if (dp->table[0] < 0x30) {
366 while (dp->link_bw < (ROM16(entry[0]) * 10)) 364 while (dp->link_bw < (ROM16(entry[0]) * 10))
@@ -559,8 +557,6 @@ dp_link_train_eq(struct drm_device *dev, struct dp_state *dp)
559bool 557bool
560nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate) 558nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate)
561{ 559{
562 struct drm_nouveau_private *dev_priv = encoder->dev->dev_private;
563 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
564 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 560 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
565 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); 561 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
566 struct nouveau_connector *nv_connector = 562 struct nouveau_connector *nv_connector =
@@ -581,7 +577,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate)
581 577
582 dp.dcb = nv_encoder->dcb; 578 dp.dcb = nv_encoder->dcb;
583 dp.crtc = nv_crtc->index; 579 dp.crtc = nv_crtc->index;
584 dp.auxch = auxch->rd; 580 dp.auxch = auxch->drive;
585 dp.or = nv_encoder->or; 581 dp.or = nv_encoder->or;
586 dp.link = !(nv_encoder->dcb->sorconf.link & 1); 582 dp.link = !(nv_encoder->dcb->sorconf.link & 1);
587 dp.dpcd = nv_encoder->dp.dpcd; 583 dp.dpcd = nv_encoder->dp.dpcd;
@@ -590,7 +586,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate)
590 * we take during link training (DP_SET_POWER is one), we need 586 * we take during link training (DP_SET_POWER is one), we need
591 * to ignore them for the moment to avoid races. 587 * to ignore them for the moment to avoid races.
592 */ 588 */
593 pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, false); 589 nouveau_gpio_irq(dev, 0, nv_connector->hpd, 0xff, false);
594 590
595 /* enable down-spreading, if possible */ 591 /* enable down-spreading, if possible */
596 if (dp.table[1] >= 16) { 592 if (dp.table[1] >= 16) {
@@ -639,7 +635,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate)
639 nouveau_bios_run_init_table(dev, ROM16(dp.entry[8]), dp.dcb, dp.crtc); 635 nouveau_bios_run_init_table(dev, ROM16(dp.entry[8]), dp.dcb, dp.crtc);
640 636
641 /* re-enable hotplug detect */ 637 /* re-enable hotplug detect */
642 pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, true); 638 nouveau_gpio_irq(dev, 0, nv_connector->hpd, 0xff, true);
643 return true; 639 return true;
644} 640}
645 641
@@ -656,7 +652,7 @@ nouveau_dp_detect(struct drm_encoder *encoder)
656 if (!auxch) 652 if (!auxch)
657 return false; 653 return false;
658 654
659 ret = auxch_tx(dev, auxch->rd, 9, DP_DPCD_REV, dpcd, 8); 655 ret = auxch_tx(dev, auxch->drive, 9, DP_DPCD_REV, dpcd, 8);
660 if (ret) 656 if (ret)
661 return false; 657 return false;
662 658
@@ -684,7 +680,7 @@ int
684nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr, 680nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
685 uint8_t *data, int data_nr) 681 uint8_t *data, int data_nr)
686{ 682{
687 return auxch_tx(auxch->dev, auxch->rd, cmd, addr, data, data_nr); 683 return auxch_tx(auxch->dev, auxch->drive, cmd, addr, data, data_nr);
688} 684}
689 685
690static int 686static int
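
nouveau_dp_bios_data() above walks a VBIOS table whose header bytes give the first-entry offset, entry stride and entry count. An illustrative standalone model of that walk (the header layout is inferred from the code, the data bytes are made up, and the real code additionally resolves each entry through ROMPTR()):

#include <stdint.h>
#include <stdio.h>

/* table[1]: offset of first entry, table[2]: stride, table[3]: count */
static uint8_t *
table_entry(uint8_t *table, int i)
{
	if (i >= table[3])
		return NULL;
	return table + table[1] + i * table[2];
}

int main(void)
{
	uint8_t table[] = { 0x21, 0x04, 0x02, 0x03,	/* off=4 stride=2 cnt=3 */
			    0xaa, 0x01, 0xbb, 0x02, 0xcc, 0x03 };
	for (int i = 0; ; i++) {
		uint8_t *e = table_entry(table, i);
		if (!e)
			break;
		printf("entry %d: %02x %02x\n", i, e[0], e[1]);
	}
	return 0;
}
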
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 9791d13c9e3..e4a7cfe7898 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -124,6 +124,10 @@ MODULE_PARM_DESC(ctxfw, "Use external HUB/GPC ucode (fermi)\n");
124int nouveau_ctxfw; 124int nouveau_ctxfw;
125module_param_named(ctxfw, nouveau_ctxfw, int, 0400); 125module_param_named(ctxfw, nouveau_ctxfw, int, 0400);
126 126
 127MODULE_PARM_DESC(mxmdcb, "Sanitise DCB table according to MXM-SIS\n");
128int nouveau_mxmdcb = 1;
129module_param_named(mxmdcb, nouveau_mxmdcb, int, 0400);
130
127int nouveau_fbpercrtc; 131int nouveau_fbpercrtc;
128#if 0 132#if 0
129module_param_named(fbpercrtc, nouveau_fbpercrtc, int, 0400); 133module_param_named(fbpercrtc, nouveau_fbpercrtc, int, 0400);
@@ -178,8 +182,11 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
178 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 182 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
179 return 0; 183 return 0;
180 184
181 NV_INFO(dev, "Disabling fbcon acceleration...\n"); 185 NV_INFO(dev, "Disabling display...\n");
182 nouveau_fbcon_save_disable_accel(dev); 186 nouveau_display_fini(dev);
187
188 NV_INFO(dev, "Disabling fbcon...\n");
189 nouveau_fbcon_set_suspend(dev, 1);
183 190
184 NV_INFO(dev, "Unpinning framebuffer(s)...\n"); 191 NV_INFO(dev, "Unpinning framebuffer(s)...\n");
185 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 192 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -220,7 +227,7 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
220 227
221 ret = dev_priv->eng[e]->fini(dev, e, true); 228 ret = dev_priv->eng[e]->fini(dev, e, true);
222 if (ret) { 229 if (ret) {
223 NV_ERROR(dev, "... engine %d failed: %d\n", i, ret); 230 NV_ERROR(dev, "... engine %d failed: %d\n", e, ret);
224 goto out_abort; 231 goto out_abort;
225 } 232 }
226 } 233 }
@@ -246,10 +253,6 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
246 pci_set_power_state(pdev, PCI_D3hot); 253 pci_set_power_state(pdev, PCI_D3hot);
247 } 254 }
248 255
249 console_lock();
250 nouveau_fbcon_set_suspend(dev, 1);
251 console_unlock();
252 nouveau_fbcon_restore_accel(dev);
253 return 0; 256 return 0;
254 257
255out_abort: 258out_abort:
@@ -275,8 +278,6 @@ nouveau_pci_resume(struct pci_dev *pdev)
275 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 278 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
276 return 0; 279 return 0;
277 280
278 nouveau_fbcon_save_disable_accel(dev);
279
280 NV_INFO(dev, "We're back, enabling device...\n"); 281 NV_INFO(dev, "We're back, enabling device...\n");
281 pci_set_power_state(pdev, PCI_D0); 282 pci_set_power_state(pdev, PCI_D0);
282 pci_restore_state(pdev); 283 pci_restore_state(pdev);
@@ -296,8 +297,6 @@ nouveau_pci_resume(struct pci_dev *pdev)
296 if (ret) 297 if (ret)
297 return ret; 298 return ret;
298 299
299 nouveau_pm_resume(dev);
300
301 if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) { 300 if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
302 ret = nouveau_mem_init_agp(dev); 301 ret = nouveau_mem_init_agp(dev);
303 if (ret) { 302 if (ret) {
@@ -337,6 +336,8 @@ nouveau_pci_resume(struct pci_dev *pdev)
337 } 336 }
338 } 337 }
339 338
339 nouveau_pm_resume(dev);
340
340 NV_INFO(dev, "Restoring mode...\n"); 341 NV_INFO(dev, "Restoring mode...\n");
341 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 342 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
342 struct nouveau_framebuffer *nouveau_fb; 343 struct nouveau_framebuffer *nouveau_fb;
@@ -358,16 +359,10 @@ nouveau_pci_resume(struct pci_dev *pdev)
358 NV_ERROR(dev, "Could not pin/map cursor.\n"); 359 NV_ERROR(dev, "Could not pin/map cursor.\n");
359 } 360 }
360 361
361 engine->display.init(dev); 362 nouveau_fbcon_set_suspend(dev, 0);
362 363 nouveau_fbcon_zfill_all(dev);
363 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
364 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
365 u32 offset = nv_crtc->cursor.nvbo->bo.offset;
366 364
367 nv_crtc->cursor.set_offset(nv_crtc, offset); 365 nouveau_display_init(dev);
368 nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x,
369 nv_crtc->cursor_saved_y);
370 }
371 366
372 /* Force CLUT to get re-loaded during modeset */ 367 /* Force CLUT to get re-loaded during modeset */
373 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 368 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -376,18 +371,35 @@ nouveau_pci_resume(struct pci_dev *pdev)
 		nv_crtc->lut.depth = 0;
 	}
 
-	console_lock();
-	nouveau_fbcon_set_suspend(dev, 0);
-	console_unlock();
+	drm_helper_resume_force_mode(dev);
 
-	nouveau_fbcon_zfill_all(dev);
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+		u32 offset = nv_crtc->cursor.nvbo->bo.offset;
 
-	drm_helper_resume_force_mode(dev);
+		nv_crtc->cursor.set_offset(nv_crtc, offset);
+		nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x,
+					nv_crtc->cursor_saved_y);
+	}
 
-	nouveau_fbcon_restore_accel(dev);
 	return 0;
 }
 
+static const struct file_operations nouveau_driver_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_open,
+	.release = drm_release,
+	.unlocked_ioctl = drm_ioctl,
+	.mmap = nouveau_ttm_mmap,
+	.poll = drm_poll,
+	.fasync = drm_fasync,
+	.read = drm_read,
+#if defined(CONFIG_COMPAT)
+	.compat_ioctl = nouveau_compat_ioctl,
+#endif
+	.llseek = noop_llseek,
+};
+
 static struct drm_driver driver = {
 	.driver_features =
 		DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
@@ -413,21 +425,7 @@ static struct drm_driver driver = {
 	.disable_vblank = nouveau_vblank_disable,
 	.reclaim_buffers = drm_core_reclaim_buffers,
 	.ioctls = nouveau_ioctls,
-	.fops = {
-		.owner = THIS_MODULE,
-		.open = drm_open,
-		.release = drm_release,
-		.unlocked_ioctl = drm_ioctl,
-		.mmap = nouveau_ttm_mmap,
-		.poll = drm_poll,
-		.fasync = drm_fasync,
-		.read = drm_read,
-#if defined(CONFIG_COMPAT)
-		.compat_ioctl = nouveau_compat_ioctl,
-#endif
-		.llseek = noop_llseek,
-	},
-
+	.fops = &nouveau_driver_fops,
 	.gem_init_object = nouveau_gem_object_new,
 	.gem_free_object = nouveau_gem_object_del,
 	.gem_open_object = nouveau_gem_object_open,
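
The two hunks above are a mechanical conversion: the fops table embedded by value in drm_driver becomes one shared, read-only file_operations instance referenced by pointer. A minimal compilable sketch of the same before/after shape, with hypothetical names:

/* Sketch only (hypothetical names): the pattern the hunks above apply. */
struct ops {
	int (*open)(void);
};

/* before: mutable ops embedded by value in the driver object */
struct driver_old {
	struct ops fops;
};

/* after: one const instance, shared by reference */
static int my_open(void) { return 0; }

static const struct ops shared_fops = {
	.open = my_open,
};

struct driver_new {
	const struct ops *fops;
};

static struct driver_new drv = {
	.fops = &shared_fops,
};
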
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 4c0be3a4ed8..38134a9c757 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -163,6 +163,9 @@ enum nouveau_flags {
 #define NVOBJ_ENGINE_COPY0	3
 #define NVOBJ_ENGINE_COPY1	4
 #define NVOBJ_ENGINE_MPEG	5
+#define NVOBJ_ENGINE_PPP	NVOBJ_ENGINE_MPEG
+#define NVOBJ_ENGINE_BSP	6
+#define NVOBJ_ENGINE_VP		7
 #define NVOBJ_ENGINE_DISPLAY	15
 #define NVOBJ_ENGINE_NR		16
 
@@ -229,6 +232,7 @@ struct nouveau_channel {
 	/* mapping of the regs controlling the fifo */
 	void __iomem *user;
 	uint32_t user_get;
+	uint32_t user_get_hi;
 	uint32_t user_put;
 
 	/* Fencing */
@@ -246,7 +250,7 @@ struct nouveau_channel {
 	struct nouveau_gpuobj *pushbuf;
 	struct nouveau_bo *pushbuf_bo;
 	struct nouveau_vma pushbuf_vma;
-	uint32_t pushbuf_base;
+	uint64_t pushbuf_base;
 
 	/* Notifier memory */
 	struct nouveau_bo *notifier_bo;
@@ -393,24 +397,25 @@ struct nouveau_display_engine {
 	int (*early_init)(struct drm_device *);
 	void (*late_takedown)(struct drm_device *);
 	int (*create)(struct drm_device *);
-	int (*init)(struct drm_device *);
 	void (*destroy)(struct drm_device *);
+	int (*init)(struct drm_device *);
+	void (*fini)(struct drm_device *);
+
+	struct drm_property *dithering_mode;
+	struct drm_property *dithering_depth;
+	struct drm_property *underscan_property;
+	struct drm_property *underscan_hborder_property;
+	struct drm_property *underscan_vborder_property;
 };
 
 struct nouveau_gpio_engine {
-	void *priv;
-
+	spinlock_t lock;
+	struct list_head isr;
 	int (*init)(struct drm_device *);
-	void (*takedown)(struct drm_device *);
-
-	int (*get)(struct drm_device *, enum dcb_gpio_tag);
-	int (*set)(struct drm_device *, enum dcb_gpio_tag, int state);
-
-	int (*irq_register)(struct drm_device *, enum dcb_gpio_tag,
-			    void (*)(void *, int), void *);
-	void (*irq_unregister)(struct drm_device *, enum dcb_gpio_tag,
-			       void (*)(void *, int), void *);
-	bool (*irq_enable)(struct drm_device *, enum dcb_gpio_tag, bool on);
+	void (*fini)(struct drm_device *);
+	int (*drive)(struct drm_device *, int line, int dir, int out);
+	int (*sense)(struct drm_device *, int line);
+	void (*irq_enable)(struct drm_device *, int line, bool);
 };
 
 struct nouveau_pm_voltage_level {
@@ -484,7 +489,7 @@ struct nouveau_pm_level {
 	u32 copy;
 	u32 daemon;
 	u32 vdec;
-	u32 unk05;	/* nv50:nva3, roughly.. */
+	u32 dom6;
 	u32 unka0;	/* nva3:nvc0 */
 	u32 hub01;	/* nvc0- */
 	u32 hub06;	/* nvc0- */
@@ -518,6 +523,12 @@ struct nouveau_pm_memtimings {
 	int nr_timing;
 };
 
+struct nouveau_pm_fan {
+	u32 min_duty;
+	u32 max_duty;
+	u32 pwm_freq;
+};
+
 struct nouveau_pm_engine {
 	struct nouveau_pm_voltage voltage;
 	struct nouveau_pm_level perflvl[NOUVEAU_PM_MAX_LEVEL];
@@ -525,6 +536,8 @@ struct nouveau_pm_engine {
 	struct nouveau_pm_memtimings memtimings;
 	struct nouveau_pm_temp_sensor_constants sensor_constants;
 	struct nouveau_pm_threshold_temp threshold_temp;
+	struct nouveau_pm_fan fan;
+	u32 pwm_divisor;
 
 	struct nouveau_pm_level boot;
 	struct nouveau_pm_level *cur;
@@ -532,19 +545,14 @@ struct nouveau_pm_engine {
 	struct device *hwmon;
 	struct notifier_block acpi_nb;
 
-	int (*clock_get)(struct drm_device *, u32 id);
-	void *(*clock_pre)(struct drm_device *, struct nouveau_pm_level *,
-			   u32 id, int khz);
-	void (*clock_set)(struct drm_device *, void *);
-
 	int (*clocks_get)(struct drm_device *, struct nouveau_pm_level *);
 	void *(*clocks_pre)(struct drm_device *, struct nouveau_pm_level *);
-	void (*clocks_set)(struct drm_device *, void *);
+	int (*clocks_set)(struct drm_device *, void *);
 
 	int (*voltage_get)(struct drm_device *);
 	int (*voltage_set)(struct drm_device *, int voltage);
-	int (*fanspeed_get)(struct drm_device *);
-	int (*fanspeed_set)(struct drm_device *, int fanspeed);
+	int (*pwm_get)(struct drm_device *, int line, u32*, u32*);
+	int (*pwm_set)(struct drm_device *, int line, u32, u32);
 	int (*temp_get)(struct drm_device *);
 };
 
@@ -780,6 +788,8 @@ struct drm_nouveau_private {
 	struct nouveau_vm *chan_vm;
 
 	struct nvbios vbios;
+	u8 *mxms;
+	struct list_head i2c_ports;
 
 	struct nv04_mode_state mode_reg;
 	struct nv04_mode_state saved_reg;
@@ -850,6 +860,7 @@ extern char *nouveau_perflvl;
 extern int nouveau_perflvl_wr;
 extern int nouveau_msi;
 extern int nouveau_ctxfw;
+extern int nouveau_mxmdcb;
 
 extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state);
 extern int nouveau_pci_resume(struct pci_dev *pdev);
@@ -1000,7 +1011,10 @@ extern int nouveau_sgdma_init(struct drm_device *);
 extern void nouveau_sgdma_takedown(struct drm_device *);
 extern uint32_t nouveau_sgdma_get_physical(struct drm_device *,
 					   uint32_t offset);
-extern struct ttm_backend *nouveau_sgdma_init_ttm(struct drm_device *);
+extern struct ttm_tt *nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
+					       unsigned long size,
+					       uint32_t page_flags,
+					       struct page *dummy_read_page);
 
 /* nouveau_debugfs.c */
 #if defined(CONFIG_DRM_NOUVEAU_DEBUG)
@@ -1072,8 +1086,6 @@ extern int nouveau_run_vbios_init(struct drm_device *);
 extern void nouveau_bios_run_init_table(struct drm_device *, uint16_t table,
 					struct dcb_entry *, int crtc);
 extern void nouveau_bios_init_exec(struct drm_device *, uint16_t table);
-extern struct dcb_gpio_entry *nouveau_bios_gpio_entry(struct drm_device *,
-						      enum dcb_gpio_tag);
 extern struct dcb_connector_table_entry *
 nouveau_bios_connector_entry(struct drm_device *, int index);
 extern u32 get_pll_register(struct drm_device *, enum pll_types);
@@ -1091,11 +1103,18 @@ extern int call_lvds_script(struct drm_device *, struct dcb_entry *, int head,
 			    enum LVDS_script, int pxclk);
 bool bios_encoder_match(struct dcb_entry *, u32 hash);
 
+/* nouveau_mxm.c */
+int nouveau_mxm_init(struct drm_device *dev);
+void nouveau_mxm_fini(struct drm_device *dev);
+
 /* nouveau_ttm.c */
 int nouveau_ttm_global_init(struct drm_nouveau_private *);
 void nouveau_ttm_global_release(struct drm_nouveau_private *);
 int nouveau_ttm_mmap(struct file *, struct vm_area_struct *);
 
+/* nouveau_hdmi.c */
+void nouveau_hdmi_mode_set(struct drm_encoder *, struct drm_display_mode *);
+
 /* nouveau_dp.c */
 int nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
 		     uint8_t *data, int data_nr);
@@ -1222,6 +1241,9 @@ extern int nvc0_graph_isr_chid(struct drm_device *dev, u64 inst);
 /* nv84_crypt.c */
 extern int nv84_crypt_create(struct drm_device *);
 
+/* nv98_crypt.c */
+extern int nv98_crypt_create(struct drm_device *dev);
+
 /* nva3_copy.c */
 extern int nva3_copy_create(struct drm_device *dev);
 
@@ -1234,6 +1256,17 @@ extern int nv31_mpeg_create(struct drm_device *dev);
 /* nv50_mpeg.c */
 extern int nv50_mpeg_create(struct drm_device *dev);
 
+/* nv84_bsp.c */
+/* nv98_bsp.c */
+extern int nv84_bsp_create(struct drm_device *dev);
+
+/* nv84_vp.c */
+/* nv98_vp.c */
+extern int nv84_vp_create(struct drm_device *dev);
+
+/* nv98_ppp.c */
+extern int nv98_ppp_create(struct drm_device *dev);
+
 /* nv04_instmem.c */
 extern int nv04_instmem_init(struct drm_device *);
 extern void nv04_instmem_takedown(struct drm_device *);
@@ -1311,13 +1344,19 @@ extern int nv17_tv_create(struct drm_connector *, struct dcb_entry *);
 extern int nv04_display_early_init(struct drm_device *);
 extern void nv04_display_late_takedown(struct drm_device *);
 extern int nv04_display_create(struct drm_device *);
-extern int nv04_display_init(struct drm_device *);
 extern void nv04_display_destroy(struct drm_device *);
+extern int nv04_display_init(struct drm_device *);
+extern void nv04_display_fini(struct drm_device *);
 
 /* nvd0_display.c */
 extern int nvd0_display_create(struct drm_device *);
-extern int nvd0_display_init(struct drm_device *);
 extern void nvd0_display_destroy(struct drm_device *);
+extern int nvd0_display_init(struct drm_device *);
+extern void nvd0_display_fini(struct drm_device *);
+struct nouveau_bo *nvd0_display_crtc_sema(struct drm_device *, int crtc);
+void nvd0_display_flip_stop(struct drm_crtc *);
+int nvd0_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
+			   struct nouveau_channel *, u32 swap_interval);
 
 /* nv04_crtc.c */
 extern int nv04_crtc_create(struct drm_device *, int index);
@@ -1412,6 +1451,10 @@ extern int nouveau_gem_ioctl_info(struct drm_device *, void *,
 				  struct drm_file *);
 
 /* nouveau_display.c */
+int nouveau_display_create(struct drm_device *dev);
+void nouveau_display_destroy(struct drm_device *dev);
+int nouveau_display_init(struct drm_device *dev);
+void nouveau_display_fini(struct drm_device *dev);
 int nouveau_vblank_enable(struct drm_device *dev, int crtc);
 void nouveau_vblank_disable(struct drm_device *dev, int crtc);
 int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
@@ -1426,23 +1469,22 @@ int nouveau_display_dumb_destroy(struct drm_file *, struct drm_device *,
 				 uint32_t handle);
 
 /* nv10_gpio.c */
-int nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
-int nv10_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
+int nv10_gpio_init(struct drm_device *dev);
+void nv10_gpio_fini(struct drm_device *dev);
+int nv10_gpio_drive(struct drm_device *dev, int line, int dir, int out);
+int nv10_gpio_sense(struct drm_device *dev, int line);
+void nv10_gpio_irq_enable(struct drm_device *, int line, bool on);
 
 /* nv50_gpio.c */
 int nv50_gpio_init(struct drm_device *dev);
 void nv50_gpio_fini(struct drm_device *dev);
-int nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
-int nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
-int nvd0_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
-int nvd0_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
-int nv50_gpio_irq_register(struct drm_device *, enum dcb_gpio_tag,
-			   void (*)(void *, int), void *);
-void nv50_gpio_irq_unregister(struct drm_device *, enum dcb_gpio_tag,
-			      void (*)(void *, int), void *);
-bool nv50_gpio_irq_enable(struct drm_device *, enum dcb_gpio_tag, bool on);
-
-/* nv50_calc. */
+int nv50_gpio_drive(struct drm_device *dev, int line, int dir, int out);
+int nv50_gpio_sense(struct drm_device *dev, int line);
+void nv50_gpio_irq_enable(struct drm_device *, int line, bool on);
+int nvd0_gpio_drive(struct drm_device *dev, int line, int dir, int out);
+int nvd0_gpio_sense(struct drm_device *dev, int line);
+
+/* nv50_calc.c */
 int nv50_calc_pll(struct drm_device *, struct pll_lims *, int clk,
 		  int *N1, int *M1, int *N2, int *M2, int *P);
 int nva3_calc_pll(struct drm_device *, struct pll_lims *,
@@ -1565,6 +1607,13 @@ extern void nv_wo32(struct nouveau_gpuobj *, u32 offset, u32 val);
 #define NV_TRACEWARN(d, fmt, arg...) NV_PRINTK(KERN_NOTICE, d, fmt, ##arg)
 #define NV_TRACE(d, fmt, arg...) NV_PRINTK(KERN_INFO, d, fmt, ##arg)
 #define NV_WARN(d, fmt, arg...) NV_PRINTK(KERN_WARNING, d, fmt, ##arg)
+#define NV_WARNONCE(d, fmt, arg...) do {		\
+	static int _warned = 0;				\
+	if (!_warned) {					\
+		NV_WARN(d, fmt, ##arg);			\
+		_warned = 1;				\
+	}						\
+} while(0)
 
 /* nouveau_reg_debug bitmask */
 enum {
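
NV_WARNONCE above limits a warning to one emission per expansion site by hiding a static flag inside the macro body. A userspace re-creation of the same pattern (names are illustrative; ##__VA_ARGS__ is a GNU extension, like the kernel's arg... form):

#include <stdio.h>

/* Warn-once sketch: the static flag lives per macro *expansion site*,
 * so two different call sites each get one warning of their own. */
#define WARN_ONCE(fmt, ...) do {                     \
	static int _warned;                          \
	if (!_warned) {                              \
		fprintf(stderr, fmt, ##__VA_ARGS__); \
		_warned = 1;                         \
	}                                            \
} while (0)

int main(void)
{
	for (int i = 0; i < 3; i++)
		WARN_ONCE("printed once, i=%d\n", i); /* fires only for i == 0 */
	return 0;
}
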
diff --git a/drivers/gpu/drm/nouveau/nouveau_fb.h b/drivers/gpu/drm/nouveau/nouveau_fb.h
index 95c843e684b..f3fb649fe45 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fb.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fb.h
@@ -42,8 +42,6 @@ nouveau_framebuffer(struct drm_framebuffer *fb)
 	return container_of(fb, struct nouveau_framebuffer, base);
 }
 
-extern const struct drm_mode_config_funcs nouveau_mode_config_funcs;
-
 int nouveau_framebuffer_init(struct drm_device *dev, struct nouveau_framebuffer *nouveau_fb,
-			     struct drm_mode_fb_cmd *mode_cmd, struct nouveau_bo *nvbo);
+			     struct drm_mode_fb_cmd2 *mode_cmd, struct nouveau_bo *nvbo);
 #endif /* __NOUVEAU_FB_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 3a4cc32b9e4..9892218d745 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -36,6 +36,7 @@
 #include <linux/init.h>
 #include <linux/screen_info.h>
 #include <linux/vga_switcheroo.h>
+#include <linux/console.h>
 
 #include "drmP.h"
 #include "drm.h"
@@ -281,7 +282,7 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
 	struct nouveau_framebuffer *nouveau_fb;
 	struct nouveau_channel *chan;
 	struct nouveau_bo *nvbo;
-	struct drm_mode_fb_cmd mode_cmd;
+	struct drm_mode_fb_cmd2 mode_cmd;
 	struct pci_dev *pdev = dev->pdev;
 	struct device *device = &pdev->dev;
 	int size, ret;
@@ -289,12 +290,13 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
 	mode_cmd.width = sizes->surface_width;
 	mode_cmd.height = sizes->surface_height;
 
-	mode_cmd.bpp = sizes->surface_bpp;
-	mode_cmd.pitch = mode_cmd.width * (mode_cmd.bpp >> 3);
-	mode_cmd.pitch = roundup(mode_cmd.pitch, 256);
-	mode_cmd.depth = sizes->surface_depth;
+	mode_cmd.pitches[0] = mode_cmd.width * (sizes->surface_bpp >> 3);
+	mode_cmd.pitches[0] = roundup(mode_cmd.pitches[0], 256);
 
-	size = mode_cmd.pitch * mode_cmd.height;
+	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+							  sizes->surface_depth);
+
+	size = mode_cmd.pitches[0] * mode_cmd.height;
 	size = roundup(size, PAGE_SIZE);
 
 	ret = nouveau_gem_new(dev, size, 0, NOUVEAU_GEM_DOMAIN_VRAM,
@@ -369,7 +371,7 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
 	info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo);
 	info->screen_size = size;
 
-	drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
+	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
 	drm_fb_helper_fill_var(info, &nfbdev->helper, sizes->fb_width, sizes->fb_height);
 
 	/* Set aperture base/size for vesafb takeover */
@@ -547,7 +549,13 @@ void nouveau_fbcon_restore_accel(struct drm_device *dev)
547void nouveau_fbcon_set_suspend(struct drm_device *dev, int state) 549void nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
548{ 550{
549 struct drm_nouveau_private *dev_priv = dev->dev_private; 551 struct drm_nouveau_private *dev_priv = dev->dev_private;
552 console_lock();
553 if (state == 0)
554 nouveau_fbcon_save_disable_accel(dev);
550 fb_set_suspend(dev_priv->nfbdev->helper.fbdev, state); 555 fb_set_suspend(dev_priv->nfbdev->helper.fbdev, state);
556 if (state == 1)
557 nouveau_fbcon_restore_accel(dev);
558 console_unlock();
551} 559}
552 560
553void nouveau_fbcon_zfill_all(struct drm_device *dev) 561void nouveau_fbcon_zfill_all(struct drm_device *dev)
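
With this hunk, nouveau_fbcon_set_suspend() takes the console lock itself and keys the accel save/disable and restore helpers off the state argument, so the suspend/resume paths in nouveau_drv.c no longer juggle that ordering. What the save/disable/restore pair plausibly amounts to, as a standalone sketch (the flag value and names are illustrative, not the kernel's):

#include <stdio.h>

/* Sketch of the save/disable/restore flag dance: remember the flags
 * word, force a "no acceleration" bit during the transition, then put
 * the saved word back. */
#define HWACCEL_DISABLED 0x1

static unsigned saved_flags;

static void save_disable_accel(unsigned *flags)
{
	saved_flags = *flags;		/* remember pre-transition state */
	*flags |= HWACCEL_DISABLED;	/* draw via safe, unaccelerated paths */
}

static void restore_accel(unsigned *flags)
{
	*flags = saved_flags;
}

int main(void)
{
	unsigned flags = 0;
	save_disable_accel(&flags);
	printf("during transition: %#x\n", flags);	/* 0x1 */
	restore_accel(&flags);
	printf("after restore:     %#x\n", flags);	/* 0x0 */
	return 0;
}
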
diff --git a/drivers/gpu/drm/nouveau/nouveau_gpio.c b/drivers/gpu/drm/nouveau/nouveau_gpio.c
new file mode 100644
index 00000000000..a580cc62337
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_gpio.c
@@ -0,0 +1,400 @@
1/*
2 * Copyright 2011 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_i2c.h"
28#include "nouveau_gpio.h"
29
30static u8 *
31dcb_gpio_table(struct drm_device *dev)
32{
33 u8 *dcb = dcb_table(dev);
34 if (dcb) {
35 if (dcb[0] >= 0x30 && dcb[1] >= 0x0c)
36 return ROMPTR(dev, dcb[0x0a]);
37 if (dcb[0] >= 0x22 && dcb[-1] >= 0x13)
38 return ROMPTR(dev, dcb[-15]);
39 }
40 return NULL;
41}
42
43static u8 *
44dcb_gpio_entry(struct drm_device *dev, int idx, int ent, u8 *version)
45{
46 u8 *table = dcb_gpio_table(dev);
47 if (table) {
48 *version = table[0];
49 if (*version < 0x30 && ent < table[2])
50 return table + 3 + (ent * table[1]);
51 else if (ent < table[2])
52 return table + table[1] + (ent * table[3]);
53 }
54 return NULL;
55}
56
57int
58nouveau_gpio_drive(struct drm_device *dev, int idx, int line, int dir, int out)
59{
60 struct drm_nouveau_private *dev_priv = dev->dev_private;
61 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
62
63 return pgpio->drive ? pgpio->drive(dev, line, dir, out) : -ENODEV;
64}
65
66int
67nouveau_gpio_sense(struct drm_device *dev, int idx, int line)
68{
69 struct drm_nouveau_private *dev_priv = dev->dev_private;
70 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
71
72 return pgpio->sense ? pgpio->sense(dev, line) : -ENODEV;
73}
74
75int
76nouveau_gpio_find(struct drm_device *dev, int idx, u8 func, u8 line,
77 struct gpio_func *gpio)
78{
79 u8 *table, *entry, version;
80 int i = -1;
81
82 if (line == 0xff && func == 0xff)
83 return -EINVAL;
84
85 while ((entry = dcb_gpio_entry(dev, idx, ++i, &version))) {
86 if (version < 0x40) {
87 u16 data = ROM16(entry[0]);
88 *gpio = (struct gpio_func) {
89 .line = (data & 0x001f) >> 0,
90 .func = (data & 0x07e0) >> 5,
91 .log[0] = (data & 0x1800) >> 11,
92 .log[1] = (data & 0x6000) >> 13,
93 };
94 } else
95 if (version < 0x41) {
96 *gpio = (struct gpio_func) {
97 .line = entry[0] & 0x1f,
98 .func = entry[1],
99 .log[0] = (entry[3] & 0x18) >> 3,
100 .log[1] = (entry[3] & 0x60) >> 5,
101 };
102 } else {
103 *gpio = (struct gpio_func) {
104 .line = entry[0] & 0x3f,
105 .func = entry[1],
106 .log[0] = (entry[4] & 0x30) >> 4,
107 .log[1] = (entry[4] & 0xc0) >> 6,
108 };
109 }
110
111 if ((line == 0xff || line == gpio->line) &&
112 (func == 0xff || func == gpio->func))
113 return 0;
114 }
115
116 /* DCB 2.2, fixed TVDAC GPIO data */
117 if ((table = dcb_table(dev)) && table[0] >= 0x22) {
118 if (func == DCB_GPIO_TVDAC0) {
119 *gpio = (struct gpio_func) {
120 .func = DCB_GPIO_TVDAC0,
121 .line = table[-4] >> 4,
122 .log[0] = !!(table[-5] & 2),
123 .log[1] = !(table[-5] & 2),
124 };
125 return 0;
126 }
127 }
128
129 /* Apple iMac G4 NV18 */
130 if (nv_match_device(dev, 0x0189, 0x10de, 0x0010)) {
131 if (func == DCB_GPIO_TVDAC0) {
132 *gpio = (struct gpio_func) {
133 .func = DCB_GPIO_TVDAC0,
134 .line = 4,
135 .log[0] = 0,
136 .log[1] = 1,
137 };
138 return 0;
139 }
140 }
141
142 return -EINVAL;
143}
144
145int
146nouveau_gpio_set(struct drm_device *dev, int idx, u8 tag, u8 line, int state)
147{
148 struct gpio_func gpio;
149 int ret;
150
151 ret = nouveau_gpio_find(dev, idx, tag, line, &gpio);
152 if (ret == 0) {
153 int dir = !!(gpio.log[state] & 0x02);
154 int out = !!(gpio.log[state] & 0x01);
155 ret = nouveau_gpio_drive(dev, idx, gpio.line, dir, out);
156 }
157
158 return ret;
159}
160
161int
162nouveau_gpio_get(struct drm_device *dev, int idx, u8 tag, u8 line)
163{
164 struct gpio_func gpio;
165 int ret;
166
167 ret = nouveau_gpio_find(dev, idx, tag, line, &gpio);
168 if (ret == 0) {
169 ret = nouveau_gpio_sense(dev, idx, gpio.line);
170 if (ret >= 0)
171 ret = (ret == (gpio.log[1] & 1));
172 }
173
174 return ret;
175}
176
177int
178nouveau_gpio_irq(struct drm_device *dev, int idx, u8 tag, u8 line, bool on)
179{
180 struct drm_nouveau_private *dev_priv = dev->dev_private;
181 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
182 struct gpio_func gpio;
183 int ret;
184
185 ret = nouveau_gpio_find(dev, idx, tag, line, &gpio);
186 if (ret == 0) {
187 if (idx == 0 && pgpio->irq_enable)
188 pgpio->irq_enable(dev, gpio.line, on);
189 else
190 ret = -ENODEV;
191 }
192
193 return ret;
194}
195
196struct gpio_isr {
197 struct drm_device *dev;
198 struct list_head head;
199 struct work_struct work;
200 int idx;
201 struct gpio_func func;
202 void (*handler)(void *, int);
203 void *data;
204 bool inhibit;
205};
206
207static void
208nouveau_gpio_isr_bh(struct work_struct *work)
209{
210 struct gpio_isr *isr = container_of(work, struct gpio_isr, work);
211 struct drm_device *dev = isr->dev;
212 struct drm_nouveau_private *dev_priv = dev->dev_private;
213 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
214 unsigned long flags;
215 int state;
216
217 state = nouveau_gpio_get(dev, isr->idx, isr->func.func, isr->func.line);
218 if (state >= 0)
219 isr->handler(isr->data, state);
220
221 spin_lock_irqsave(&pgpio->lock, flags);
222 isr->inhibit = false;
223 spin_unlock_irqrestore(&pgpio->lock, flags);
224}
225
226void
227nouveau_gpio_isr(struct drm_device *dev, int idx, u32 line_mask)
228{
229 struct drm_nouveau_private *dev_priv = dev->dev_private;
230 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
231 struct gpio_isr *isr;
232
233 if (idx != 0)
234 return;
235
236 spin_lock(&pgpio->lock);
237 list_for_each_entry(isr, &pgpio->isr, head) {
238 if (line_mask & (1 << isr->func.line)) {
239 if (isr->inhibit)
240 continue;
241 isr->inhibit = true;
242 schedule_work(&isr->work);
243 }
244 }
245 spin_unlock(&pgpio->lock);
246}
247
248int
249nouveau_gpio_isr_add(struct drm_device *dev, int idx, u8 tag, u8 line,
250 void (*handler)(void *, int), void *data)
251{
252 struct drm_nouveau_private *dev_priv = dev->dev_private;
253 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
254 struct gpio_isr *isr;
255 unsigned long flags;
256 int ret;
257
258 isr = kzalloc(sizeof(*isr), GFP_KERNEL);
259 if (!isr)
260 return -ENOMEM;
261
262 ret = nouveau_gpio_find(dev, idx, tag, line, &isr->func);
263 if (ret) {
264 kfree(isr);
265 return ret;
266 }
267
268 INIT_WORK(&isr->work, nouveau_gpio_isr_bh);
269 isr->dev = dev;
270 isr->handler = handler;
271 isr->data = data;
272 isr->idx = idx;
273
274 spin_lock_irqsave(&pgpio->lock, flags);
275 list_add(&isr->head, &pgpio->isr);
276 spin_unlock_irqrestore(&pgpio->lock, flags);
277 return 0;
278}
279
280void
281nouveau_gpio_isr_del(struct drm_device *dev, int idx, u8 tag, u8 line,
282 void (*handler)(void *, int), void *data)
283{
284 struct drm_nouveau_private *dev_priv = dev->dev_private;
285 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
286 struct gpio_isr *isr, *tmp;
287 struct gpio_func func;
288 unsigned long flags;
289 LIST_HEAD(tofree);
290 int ret;
291
292 ret = nouveau_gpio_find(dev, idx, tag, line, &func);
293 if (ret == 0) {
294 spin_lock_irqsave(&pgpio->lock, flags);
295 list_for_each_entry_safe(isr, tmp, &pgpio->isr, head) {
296 if (memcmp(&isr->func, &func, sizeof(func)) ||
297 isr->idx != idx ||
298 isr->handler != handler || isr->data != data)
299 continue;
300 list_move(&isr->head, &tofree);
301 }
302 spin_unlock_irqrestore(&pgpio->lock, flags);
303
304 list_for_each_entry_safe(isr, tmp, &tofree, head) {
305 flush_work_sync(&isr->work);
306 kfree(isr);
307 }
308 }
309}
310
311int
312nouveau_gpio_create(struct drm_device *dev)
313{
314 struct drm_nouveau_private *dev_priv = dev->dev_private;
315 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
316
317 INIT_LIST_HEAD(&pgpio->isr);
318 spin_lock_init(&pgpio->lock);
319
320 return nouveau_gpio_init(dev);
321}
322
323void
324nouveau_gpio_destroy(struct drm_device *dev)
325{
326 struct drm_nouveau_private *dev_priv = dev->dev_private;
327 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
328
329 nouveau_gpio_fini(dev);
330 BUG_ON(!list_empty(&pgpio->isr));
331}
332
333int
334nouveau_gpio_init(struct drm_device *dev)
335{
336 struct drm_nouveau_private *dev_priv = dev->dev_private;
337 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
338 int ret = 0;
339
340 if (pgpio->init)
341 ret = pgpio->init(dev);
342
343 return ret;
344}
345
346void
347nouveau_gpio_fini(struct drm_device *dev)
348{
349 struct drm_nouveau_private *dev_priv = dev->dev_private;
350 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
351
352 if (pgpio->fini)
353 pgpio->fini(dev);
354}
355
356void
357nouveau_gpio_reset(struct drm_device *dev)
358{
359 struct drm_nouveau_private *dev_priv = dev->dev_private;
360 u8 *entry, version;
361 int ent = -1;
362
363 while ((entry = dcb_gpio_entry(dev, 0, ++ent, &version))) {
364 u8 func = 0xff, line, defs, unk0, unk1;
365 if (version >= 0x41) {
366 defs = !!(entry[0] & 0x80);
367 line = entry[0] & 0x3f;
368 func = entry[1];
369 unk0 = entry[2];
370 unk1 = entry[3] & 0x1f;
371 } else
372 if (version >= 0x40) {
373 line = entry[0] & 0x1f;
374 func = entry[1];
375 defs = !!(entry[3] & 0x01);
376 unk0 = !!(entry[3] & 0x02);
377 unk1 = !!(entry[3] & 0x04);
378 } else {
379 break;
380 }
381
382 if (func == 0xff)
383 continue;
384
385 nouveau_gpio_func_set(dev, func, defs);
386
387 if (dev_priv->card_type >= NV_D0) {
388 nv_mask(dev, 0x00d610 + (line * 4), 0xff, unk0);
389 if (unk1--)
390 nv_mask(dev, 0x00d640 + (unk1 * 4), 0xff, line);
391 } else
392 if (dev_priv->card_type >= NV_50) {
393 static const u32 regs[] = { 0xe100, 0xe28c };
394 u32 val = (unk1 << 16) | unk0;
395 u32 reg = regs[line >> 4]; line &= 0x0f;
396
397 nv_mask(dev, reg, 0x00010001 << line, val << line);
398 }
399 }
400}
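
nouveau_gpio_find() above decodes three DCB GPIO table layouts; for pre-4.0 tables the whole entry is a single 16-bit word. A standalone decoder for just that layout, mirroring the masks and shifts in the file (the sample word is arbitrary):

#include <stdint.h>
#include <stdio.h>

/* Pre-4.0 DCB GPIO entry: line[4:0], func[10:5], log0[12:11], log1[14:13]. */
struct gpio_fields { uint8_t line, func, log0, log1; };

static struct gpio_fields decode_gpio_v30(uint16_t data)
{
	struct gpio_fields g = {
		.line = (data & 0x001f) >> 0,
		.func = (data & 0x07e0) >> 5,
		.log0 = (data & 0x1800) >> 11,
		.log1 = (data & 0x6000) >> 13,
	};
	return g;
}

int main(void)
{
	struct gpio_fields g = decode_gpio_v30(0x2851); /* arbitrary sample word */
	printf("line=%u func=%u log[0]=%u log[1]=%u\n",
	       g.line, g.func, g.log0, g.log1);
	return 0;
}
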
diff --git a/drivers/gpu/drm/nouveau/nouveau_gpio.h b/drivers/gpu/drm/nouveau/nouveau_gpio.h
new file mode 100644
index 00000000000..64c5cb077ac
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_gpio.h
@@ -0,0 +1,71 @@
1/*
2 * Copyright 2011 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef __NOUVEAU_GPIO_H__
24#define __NOUVEAU_GPIO_H__
25
26struct gpio_func {
27 u8 func;
28 u8 line;
29 u8 log[2];
30};
31
32/* nouveau_gpio.c */
33int nouveau_gpio_create(struct drm_device *);
34void nouveau_gpio_destroy(struct drm_device *);
35int nouveau_gpio_init(struct drm_device *);
36void nouveau_gpio_fini(struct drm_device *);
37void nouveau_gpio_reset(struct drm_device *);
38int nouveau_gpio_drive(struct drm_device *, int idx, int line,
39 int dir, int out);
40int nouveau_gpio_sense(struct drm_device *, int idx, int line);
41int nouveau_gpio_find(struct drm_device *, int idx, u8 tag, u8 line,
42 struct gpio_func *);
43int nouveau_gpio_set(struct drm_device *, int idx, u8 tag, u8 line, int state);
44int nouveau_gpio_get(struct drm_device *, int idx, u8 tag, u8 line);
45int nouveau_gpio_irq(struct drm_device *, int idx, u8 tag, u8 line, bool on);
46void nouveau_gpio_isr(struct drm_device *, int idx, u32 mask);
47int nouveau_gpio_isr_add(struct drm_device *, int idx, u8 tag, u8 line,
48 void (*)(void *, int state), void *data);
49void nouveau_gpio_isr_del(struct drm_device *, int idx, u8 tag, u8 line,
50 void (*)(void *, int state), void *data);
51
52static inline bool
53nouveau_gpio_func_valid(struct drm_device *dev, u8 tag)
54{
55 struct gpio_func func;
56 return (nouveau_gpio_find(dev, 0, tag, 0xff, &func)) == 0;
57}
58
59static inline int
60nouveau_gpio_func_set(struct drm_device *dev, u8 tag, int state)
61{
62 return nouveau_gpio_set(dev, 0, tag, 0xff, state);
63}
64
65static inline int
66nouveau_gpio_func_get(struct drm_device *dev, u8 tag)
67{
68 return nouveau_gpio_get(dev, 0, tag, 0xff);
69}
70
71#endif
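
A sketch of how a caller might use the tag-based helpers declared above; DCB_GPIO_TVDAC0 is the function tag handled specially in nouveau_gpio.c, and error handling is elided:

/* Sketch only: look a function tag up in the DCB, drive it, read it back. */
static int example_gpio_usage(struct drm_device *dev)
{
	int sense;

	if (!nouveau_gpio_func_valid(dev, DCB_GPIO_TVDAC0))
		return -ENODEV;			/* no such function in the DCB */

	nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, 1);	/* drive logic state 1 */
	sense = nouveau_gpio_func_get(dev, DCB_GPIO_TVDAC0);
	return sense;				/* 1 if the line reads back set */
}
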
diff --git a/drivers/gpu/drm/nouveau/nouveau_hdmi.c b/drivers/gpu/drm/nouveau/nouveau_hdmi.c
new file mode 100644
index 00000000000..59ea1c14eca
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_hdmi.c
@@ -0,0 +1,258 @@
1/*
2 * Copyright 2011 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_connector.h"
28#include "nouveau_encoder.h"
29#include "nouveau_crtc.h"
30
31static bool
32hdmi_sor(struct drm_encoder *encoder)
33{
34 struct drm_nouveau_private *dev_priv = encoder->dev->dev_private;
35 if (dev_priv->chipset < 0xa3)
36 return false;
37 return true;
38}
39
40static inline u32
41hdmi_base(struct drm_encoder *encoder)
42{
43 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
44 struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
45 if (!hdmi_sor(encoder))
46 return 0x616500 + (nv_crtc->index * 0x800);
47 return 0x61c500 + (nv_encoder->or * 0x800);
48}
49
50static void
51hdmi_wr32(struct drm_encoder *encoder, u32 reg, u32 val)
52{
53 nv_wr32(encoder->dev, hdmi_base(encoder) + reg, val);
54}
55
56static u32
57hdmi_rd32(struct drm_encoder *encoder, u32 reg)
58{
59 return nv_rd32(encoder->dev, hdmi_base(encoder) + reg);
60}
61
62static u32
63hdmi_mask(struct drm_encoder *encoder, u32 reg, u32 mask, u32 val)
64{
65 u32 tmp = hdmi_rd32(encoder, reg);
66 hdmi_wr32(encoder, reg, (tmp & ~mask) | val);
67 return tmp;
68}
69
70static void
71nouveau_audio_disconnect(struct drm_encoder *encoder)
72{
73 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
74 struct drm_device *dev = encoder->dev;
75 u32 or = nv_encoder->or * 0x800;
76
77 if (hdmi_sor(encoder)) {
78 nv_mask(dev, 0x61c448 + or, 0x00000003, 0x00000000);
79 }
80}
81
82static void
83nouveau_audio_mode_set(struct drm_encoder *encoder,
84 struct drm_display_mode *mode)
85{
86 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
87 struct nouveau_connector *nv_connector;
88 struct drm_device *dev = encoder->dev;
89 u32 or = nv_encoder->or * 0x800;
90 int i;
91
92 nv_connector = nouveau_encoder_connector_get(nv_encoder);
93 if (!drm_detect_monitor_audio(nv_connector->edid)) {
94 nouveau_audio_disconnect(encoder);
95 return;
96 }
97
98 if (hdmi_sor(encoder)) {
99 nv_mask(dev, 0x61c448 + or, 0x00000001, 0x00000001);
100
101 drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
102 if (nv_connector->base.eld[0]) {
103 u8 *eld = nv_connector->base.eld;
104 for (i = 0; i < eld[2] * 4; i++)
105 nv_wr32(dev, 0x61c440 + or, (i << 8) | eld[i]);
106 for (i = eld[2] * 4; i < 0x60; i++)
107 nv_wr32(dev, 0x61c440 + or, (i << 8) | 0x00);
108 nv_mask(dev, 0x61c448 + or, 0x00000002, 0x00000002);
109 }
110 }
111}
112
113static void
114nouveau_hdmi_infoframe(struct drm_encoder *encoder, u32 ctrl, u8 *frame)
115{
116 /* calculate checksum for the infoframe */
117 u8 sum = 0, i;
118 for (i = 0; i < frame[2]; i++)
119 sum += frame[i];
120 frame[3] = 256 - sum;
121
122 /* disable infoframe, and write header */
123 hdmi_mask(encoder, ctrl + 0x00, 0x00000001, 0x00000000);
124 hdmi_wr32(encoder, ctrl + 0x08, *(u32 *)frame & 0xffffff);
125
126 /* register scans tell me the audio infoframe has only one set of
127 * subpack regs, according to tegra (gee nvidia, it'd be nice if we
128 * could get those docs too!), the hdmi block pads out the rest of
129 * the packet on its own.
130 */
131 if (ctrl == 0x020)
132 frame[2] = 6;
133
134 /* write out checksum and data, weird weird 7 byte register pairs */
135 for (i = 0; i < frame[2] + 1; i += 7) {
136 u32 rsubpack = ctrl + 0x0c + ((i / 7) * 8);
137 u32 *subpack = (u32 *)&frame[3 + i];
138 hdmi_wr32(encoder, rsubpack + 0, subpack[0]);
139 hdmi_wr32(encoder, rsubpack + 4, subpack[1] & 0xffffff);
140 }
141
142 /* enable the infoframe */
143 hdmi_mask(encoder, ctrl, 0x00000001, 0x00000001);
144}
145
146static void
147nouveau_hdmi_video_infoframe(struct drm_encoder *encoder,
148 struct drm_display_mode *mode)
149{
150 const u8 Y = 0, A = 0, B = 0, S = 0, C = 0, M = 0, R = 0;
151 const u8 ITC = 0, EC = 0, Q = 0, SC = 0, VIC = 0, PR = 0;
152 const u8 bar_top = 0, bar_bottom = 0, bar_left = 0, bar_right = 0;
153 u8 frame[20];
154
155 frame[0x00] = 0x82; /* AVI infoframe */
156 frame[0x01] = 0x02; /* version */
157 frame[0x02] = 0x0d; /* length */
158 frame[0x03] = 0x00;
159 frame[0x04] = (Y << 5) | (A << 4) | (B << 2) | S;
160 frame[0x05] = (C << 6) | (M << 4) | R;
161 frame[0x06] = (ITC << 7) | (EC << 4) | (Q << 2) | SC;
162 frame[0x07] = VIC;
163 frame[0x08] = PR;
164 frame[0x09] = bar_top & 0xff;
165 frame[0x0a] = bar_top >> 8;
166 frame[0x0b] = bar_bottom & 0xff;
167 frame[0x0c] = bar_bottom >> 8;
168 frame[0x0d] = bar_left & 0xff;
169 frame[0x0e] = bar_left >> 8;
170 frame[0x0f] = bar_right & 0xff;
171 frame[0x10] = bar_right >> 8;
172 frame[0x11] = 0x00;
173 frame[0x12] = 0x00;
174 frame[0x13] = 0x00;
175
176 nouveau_hdmi_infoframe(encoder, 0x020, frame);
177}
178
179static void
180nouveau_hdmi_audio_infoframe(struct drm_encoder *encoder,
181 struct drm_display_mode *mode)
182{
183 const u8 CT = 0x00, CC = 0x01, ceaSS = 0x00, SF = 0x00, FMT = 0x00;
184 const u8 CA = 0x00, DM_INH = 0, LSV = 0x00;
185 u8 frame[12];
186
187 frame[0x00] = 0x84; /* Audio infoframe */
188 frame[0x01] = 0x01; /* version */
189 frame[0x02] = 0x0a; /* length */
190 frame[0x03] = 0x00;
191 frame[0x04] = (CT << 4) | CC;
192 frame[0x05] = (SF << 2) | ceaSS;
193 frame[0x06] = FMT;
194 frame[0x07] = CA;
195 frame[0x08] = (DM_INH << 7) | (LSV << 3);
196 frame[0x09] = 0x00;
197 frame[0x0a] = 0x00;
198 frame[0x0b] = 0x00;
199
200 nouveau_hdmi_infoframe(encoder, 0x000, frame);
201}
202
203static void
204nouveau_hdmi_disconnect(struct drm_encoder *encoder)
205{
206 nouveau_audio_disconnect(encoder);
207
208 /* disable audio and avi infoframes */
209 hdmi_mask(encoder, 0x000, 0x00000001, 0x00000000);
210 hdmi_mask(encoder, 0x020, 0x00000001, 0x00000000);
211
212 /* disable hdmi */
213 hdmi_mask(encoder, 0x0a4, 0x40000000, 0x00000000);
214}
215
216void
217nouveau_hdmi_mode_set(struct drm_encoder *encoder,
218 struct drm_display_mode *mode)
219{
220 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
221 struct nouveau_connector *nv_connector;
222 struct drm_device *dev = encoder->dev;
223 u32 max_ac_packet, rekey;
224
225 nv_connector = nouveau_encoder_connector_get(nv_encoder);
226 if (!mode || !nv_connector || !nv_connector->edid ||
227 !drm_detect_hdmi_monitor(nv_connector->edid)) {
228 nouveau_hdmi_disconnect(encoder);
229 return;
230 }
231
232 nouveau_hdmi_video_infoframe(encoder, mode);
233 nouveau_hdmi_audio_infoframe(encoder, mode);
234
235 hdmi_mask(encoder, 0x0d0, 0x00070001, 0x00010001); /* SPARE, HW_CTS */
236 hdmi_mask(encoder, 0x068, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */
237 hdmi_mask(encoder, 0x078, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */
238
239 nv_mask(dev, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
240 nv_mask(dev, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
241 nv_mask(dev, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
242
243 /* value matches nvidia binary driver, and tegra constant */
244 rekey = 56;
245
246 max_ac_packet = mode->htotal - mode->hdisplay;
247 max_ac_packet -= rekey;
248 max_ac_packet -= 18; /* constant from tegra */
249 max_ac_packet /= 32;
250
251 /* enable hdmi */
252 hdmi_mask(encoder, 0x0a4, 0x5f1f003f, 0x40000000 | /* enable */
253 0x1f000000 | /* unknown */
254 max_ac_packet << 16 |
255 rekey);
256
257 nouveau_audio_mode_set(encoder, mode);
258}
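
nouveau_hdmi_infoframe() above fixes up frame[3] so the bytes it covers sum to zero modulo 256. A standalone check of that arithmetic, using the same loop bound as the driver:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t frame[20] = { 0x82, 0x02, 0x0d, 0x00 }; /* AVI header, zeroed payload */
	uint8_t sum = 0;
	int i;

	/* same checksum fixup as the driver: frame[2] bytes starting at 0 */
	for (i = 0; i < frame[2]; i++)
		sum += frame[i];
	frame[3] = 256 - sum;

	/* re-sum the covered bytes; the total must wrap to 0 */
	for (sum = 0, i = 0; i < frame[2]; i++)
		sum += frame[i];
	printf("checksum byte = 0x%02x, total mod 256 = %u\n", frame[3], sum);
	return 0;
}
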
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwsq.h b/drivers/gpu/drm/nouveau/nouveau_hwsq.h
new file mode 100644
index 00000000000..697687593a8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_hwsq.h
@@ -0,0 +1,115 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#ifndef __NOUVEAU_HWSQ_H__
26#define __NOUVEAU_HWSQ_H__
27
28struct hwsq_ucode {
29 u8 data[0x200];
30 union {
31 u8 *u08;
32 u16 *u16;
33 u32 *u32;
34 } ptr;
35 u16 len;
36
37 u32 reg;
38 u32 val;
39};
40
41static inline void
42hwsq_init(struct hwsq_ucode *hwsq)
43{
44 hwsq->ptr.u08 = hwsq->data;
45 hwsq->reg = 0xffffffff;
46 hwsq->val = 0xffffffff;
47}
48
49static inline void
50hwsq_fini(struct hwsq_ucode *hwsq)
51{
52 do {
53 *hwsq->ptr.u08++ = 0x7f;
54 hwsq->len = hwsq->ptr.u08 - hwsq->data;
55 } while (hwsq->len & 3);
56 hwsq->ptr.u08 = hwsq->data;
57}
58
59static inline void
60hwsq_usec(struct hwsq_ucode *hwsq, u8 usec)
61{
62 u32 shift = 0;
63 while (usec & ~3) {
64 usec >>= 2;
65 shift++;
66 }
67
68 *hwsq->ptr.u08++ = (shift << 2) | usec;
69}
70
71static inline void
72hwsq_setf(struct hwsq_ucode *hwsq, u8 flag, int val)
73{
74 flag += 0x80;
75 if (val >= 0)
76 flag += 0x20;
77 if (val >= 1)
78 flag += 0x20;
79 *hwsq->ptr.u08++ = flag;
80}
81
82static inline void
83hwsq_op5f(struct hwsq_ucode *hwsq, u8 v0, u8 v1)
84{
85 *hwsq->ptr.u08++ = 0x5f;
86 *hwsq->ptr.u08++ = v0;
87 *hwsq->ptr.u08++ = v1;
88}
89
90static inline void
91hwsq_wr32(struct hwsq_ucode *hwsq, u32 reg, u32 val)
92{
93 if (val != hwsq->val) {
94 if ((val & 0xffff0000) == (hwsq->val & 0xffff0000)) {
95 *hwsq->ptr.u08++ = 0x42;
96 *hwsq->ptr.u16++ = (val & 0x0000ffff);
97 } else {
98 *hwsq->ptr.u08++ = 0xe2;
99 *hwsq->ptr.u32++ = val;
100 }
101
102 hwsq->val = val;
103 }
104
105 if ((reg & 0xffff0000) == (hwsq->reg & 0xffff0000)) {
106 *hwsq->ptr.u08++ = 0x40;
107 *hwsq->ptr.u16++ = (reg & 0x0000ffff);
108 } else {
109 *hwsq->ptr.u08++ = 0xe0;
110 *hwsq->ptr.u32++ = reg;
111 }
112 hwsq->reg = reg;
113}
114
115#endif
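
A sketch of driving the ucode builder above: hwsq_wr32() emits the long opcode forms (0xe0/0xe2) when the upper 16 bits of the register or value change and the short forms (0x40/0x42) when they do not, hwsq_usec() encodes a delay as usec * 4^shift, and hwsq_fini() pads with 0x7f to a 32-bit boundary. Register addresses and values here are arbitrary examples; the snippet assumes the header above is includable as nouveau_hwsq.h:

#include "nouveau_hwsq.h"

static void build_example_ucode(struct hwsq_ucode *uc)
{
	hwsq_init(uc);
	hwsq_wr32(uc, 0x001234, 0x80000000);	/* first write: 0xe0/0xe2 long forms */
	hwsq_wr32(uc, 0x001238, 0x80000001);	/* same upper halves: 0x40/0x42 short forms */
	hwsq_usec(uc, 16);			/* 16us encodes exactly as 1 * 4^2 */
	hwsq_fini(uc);				/* pad with 0x7f; uc->len now set */
}
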
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
index d39b2202b19..820ae7f5204 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -29,262 +29,465 @@
29#include "nouveau_i2c.h" 29#include "nouveau_i2c.h"
30#include "nouveau_hw.h" 30#include "nouveau_hw.h"
31 31
32#define T_TIMEOUT 2200000
33#define T_RISEFALL 1000
34#define T_HOLD 5000
35
32static void 36static void
33nv04_i2c_setscl(void *data, int state) 37i2c_drive_scl(void *data, int state)
34{ 38{
35 struct nouveau_i2c_chan *i2c = data; 39 struct nouveau_i2c_chan *port = data;
36 struct drm_device *dev = i2c->dev; 40 if (port->type == 0) {
37 uint8_t val; 41 u8 val = NVReadVgaCrtc(port->dev, 0, port->drive);
38 42 if (state) val |= 0x20;
39 val = (NVReadVgaCrtc(dev, 0, i2c->wr) & 0xd0) | (state ? 0x20 : 0); 43 else val &= 0xdf;
40 NVWriteVgaCrtc(dev, 0, i2c->wr, val | 0x01); 44 NVWriteVgaCrtc(port->dev, 0, port->drive, val | 0x01);
45 } else
46 if (port->type == 4) {
47 nv_mask(port->dev, port->drive, 0x2f, state ? 0x21 : 0x01);
48 } else
49 if (port->type == 5) {
50 if (state) port->state |= 0x01;
51 else port->state &= 0xfe;
52 nv_wr32(port->dev, port->drive, 4 | port->state);
53 }
41} 54}
42 55
43static void 56static void
44nv04_i2c_setsda(void *data, int state) 57i2c_drive_sda(void *data, int state)
45{ 58{
46 struct nouveau_i2c_chan *i2c = data; 59 struct nouveau_i2c_chan *port = data;
47 struct drm_device *dev = i2c->dev; 60 if (port->type == 0) {
48 uint8_t val; 61 u8 val = NVReadVgaCrtc(port->dev, 0, port->drive);
49 62 if (state) val |= 0x10;
50 val = (NVReadVgaCrtc(dev, 0, i2c->wr) & 0xe0) | (state ? 0x10 : 0); 63 else val &= 0xef;
51 NVWriteVgaCrtc(dev, 0, i2c->wr, val | 0x01); 64 NVWriteVgaCrtc(port->dev, 0, port->drive, val | 0x01);
65 } else
66 if (port->type == 4) {
67 nv_mask(port->dev, port->drive, 0x1f, state ? 0x11 : 0x01);
68 } else
69 if (port->type == 5) {
70 if (state) port->state |= 0x02;
71 else port->state &= 0xfd;
72 nv_wr32(port->dev, port->drive, 4 | port->state);
73 }
52} 74}
53 75
 static int
-nv04_i2c_getscl(void *data)
+i2c_sense_scl(void *data)
 {
-	struct nouveau_i2c_chan *i2c = data;
-	struct drm_device *dev = i2c->dev;
-
-	return !!(NVReadVgaCrtc(dev, 0, i2c->rd) & 4);
+	struct nouveau_i2c_chan *port = data;
+	struct drm_nouveau_private *dev_priv = port->dev->dev_private;
+	if (port->type == 0) {
+		return !!(NVReadVgaCrtc(port->dev, 0, port->sense) & 0x04);
+	} else
+	if (port->type == 4) {
+		return !!(nv_rd32(port->dev, port->sense) & 0x00040000);
+	} else
+	if (port->type == 5) {
+		if (dev_priv->card_type < NV_D0)
+			return !!(nv_rd32(port->dev, port->sense) & 0x01);
+		else
+			return !!(nv_rd32(port->dev, port->sense) & 0x10);
+	}
+	return 0;
 }
 
 static int
-nv04_i2c_getsda(void *data)
+i2c_sense_sda(void *data)
 {
-	struct nouveau_i2c_chan *i2c = data;
-	struct drm_device *dev = i2c->dev;
-
-	return !!(NVReadVgaCrtc(dev, 0, i2c->rd) & 8);
+	struct nouveau_i2c_chan *port = data;
+	struct drm_nouveau_private *dev_priv = port->dev->dev_private;
+	if (port->type == 0) {
+		return !!(NVReadVgaCrtc(port->dev, 0, port->sense) & 0x08);
+	} else
+	if (port->type == 4) {
+		return !!(nv_rd32(port->dev, port->sense) & 0x00080000);
+	} else
+	if (port->type == 5) {
+		if (dev_priv->card_type < NV_D0)
+			return !!(nv_rd32(port->dev, port->sense) & 0x02);
+		else
+			return !!(nv_rd32(port->dev, port->sense) & 0x20);
+	}
+	return 0;
 }
 
 static void
-nv4e_i2c_setscl(void *data, int state)
+i2c_delay(struct nouveau_i2c_chan *port, u32 nsec)
 {
-	struct nouveau_i2c_chan *i2c = data;
-	struct drm_device *dev = i2c->dev;
-	uint8_t val;
-
-	val = (nv_rd32(dev, i2c->wr) & 0xd0) | (state ? 0x20 : 0);
-	nv_wr32(dev, i2c->wr, val | 0x01);
+	udelay((nsec + 500) / 1000);
 }
 
-static void
-nv4e_i2c_setsda(void *data, int state)
+static bool
+i2c_raise_scl(struct nouveau_i2c_chan *port)
 {
-	struct nouveau_i2c_chan *i2c = data;
-	struct drm_device *dev = i2c->dev;
-	uint8_t val;
-
-	val = (nv_rd32(dev, i2c->wr) & 0xe0) | (state ? 0x10 : 0);
-	nv_wr32(dev, i2c->wr, val | 0x01);
+	u32 timeout = T_TIMEOUT / T_RISEFALL;
+
+	i2c_drive_scl(port, 1);
+	do {
+		i2c_delay(port, T_RISEFALL);
+	} while (!i2c_sense_scl(port) && --timeout);
+
+	return timeout != 0;
 }
 
 static int
-nv4e_i2c_getscl(void *data)
+i2c_start(struct nouveau_i2c_chan *port)
 {
-	struct nouveau_i2c_chan *i2c = data;
-	struct drm_device *dev = i2c->dev;
+	int ret = 0;
+
+	port->state = i2c_sense_scl(port);
+	port->state |= i2c_sense_sda(port) << 1;
+	if (port->state != 3) {
+		i2c_drive_scl(port, 0);
+		i2c_drive_sda(port, 1);
+		if (!i2c_raise_scl(port))
+			ret = -EBUSY;
+	}
 
-	return !!((nv_rd32(dev, i2c->rd) >> 16) & 4);
+	i2c_drive_sda(port, 0);
+	i2c_delay(port, T_HOLD);
+	i2c_drive_scl(port, 0);
+	i2c_delay(port, T_HOLD);
+	return ret;
 }
 
-static int
-nv4e_i2c_getsda(void *data)
+static void
+i2c_stop(struct nouveau_i2c_chan *port)
 {
-	struct nouveau_i2c_chan *i2c = data;
-	struct drm_device *dev = i2c->dev;
-
-	return !!((nv_rd32(dev, i2c->rd) >> 16) & 8);
+	i2c_drive_scl(port, 0);
+	i2c_drive_sda(port, 0);
+	i2c_delay(port, T_RISEFALL);
+
+	i2c_drive_scl(port, 1);
+	i2c_delay(port, T_HOLD);
+	i2c_drive_sda(port, 1);
+	i2c_delay(port, T_HOLD);
 }
 
-static const uint32_t nv50_i2c_port[] = {
-	0x00e138, 0x00e150, 0x00e168, 0x00e180,
-	0x00e254, 0x00e274, 0x00e764, 0x00e780,
-	0x00e79c, 0x00e7b8
-};
-#define NV50_I2C_PORTS ARRAY_SIZE(nv50_i2c_port)
-
 static int
-nv50_i2c_getscl(void *data)
+i2c_bitw(struct nouveau_i2c_chan *port, int sda)
 {
-	struct nouveau_i2c_chan *i2c = data;
-	struct drm_device *dev = i2c->dev;
+	i2c_drive_sda(port, sda);
+	i2c_delay(port, T_RISEFALL);
 
-	return !!(nv_rd32(dev, i2c->rd) & 1);
-}
+	if (!i2c_raise_scl(port))
+		return -ETIMEDOUT;
+	i2c_delay(port, T_HOLD);
 
+	i2c_drive_scl(port, 0);
+	i2c_delay(port, T_HOLD);
+	return 0;
+}
 
 static int
-nv50_i2c_getsda(void *data)
+i2c_bitr(struct nouveau_i2c_chan *port)
 {
-	struct nouveau_i2c_chan *i2c = data;
-	struct drm_device *dev = i2c->dev;
+	int sda;
+
+	i2c_drive_sda(port, 1);
+	i2c_delay(port, T_RISEFALL);
 
-	return !!(nv_rd32(dev, i2c->rd) & 2);
+	if (!i2c_raise_scl(port))
+		return -ETIMEDOUT;
+	i2c_delay(port, T_HOLD);
+
+	sda = i2c_sense_sda(port);
+
+	i2c_drive_scl(port, 0);
+	i2c_delay(port, T_HOLD);
+	return sda;
 }
 
-static void
-nv50_i2c_setscl(void *data, int state)
+static int
+i2c_get_byte(struct nouveau_i2c_chan *port, u8 *byte, bool last)
 {
-	struct nouveau_i2c_chan *i2c = data;
+	int i, bit;
+
+	*byte = 0;
+	for (i = 7; i >= 0; i--) {
+		bit = i2c_bitr(port);
+		if (bit < 0)
+			return bit;
+		*byte |= bit << i;
+	}
 
-	nv_wr32(i2c->dev, i2c->wr, 4 | (i2c->data ? 2 : 0) | (state ? 1 : 0));
+	return i2c_bitw(port, last ? 1 : 0);
 }
 
-static void
-nv50_i2c_setsda(void *data, int state)
+static int
+i2c_put_byte(struct nouveau_i2c_chan *port, u8 byte)
 {
-	struct nouveau_i2c_chan *i2c = data;
+	int i, ret;
+	for (i = 7; i >= 0; i--) {
+		ret = i2c_bitw(port, !!(byte & (1 << i)));
+		if (ret < 0)
+			return ret;
+	}
 
-	nv_mask(i2c->dev, i2c->wr, 0x00000006, 4 | (state ? 2 : 0));
-	i2c->data = state;
+	ret = i2c_bitr(port);
+	if (ret == 1) /* nack */
+		ret = -EIO;
+	return ret;
 }
 
 static int
-nvd0_i2c_getscl(void *data)
+i2c_addr(struct nouveau_i2c_chan *port, struct i2c_msg *msg)
 {
-	struct nouveau_i2c_chan *i2c = data;
-	return !!(nv_rd32(i2c->dev, i2c->rd) & 0x10);
+	u32 addr = msg->addr << 1;
+	if (msg->flags & I2C_M_RD)
+		addr |= 1;
+	return i2c_put_byte(port, addr);
 }
 
 static int
-nvd0_i2c_getsda(void *data)
+i2c_bit_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
 {
-	struct nouveau_i2c_chan *i2c = data;
-	return !!(nv_rd32(i2c->dev, i2c->rd) & 0x20);
+	struct nouveau_i2c_chan *port = (struct nouveau_i2c_chan *)adap;
+	struct i2c_msg *msg = msgs;
+	int ret = 0, mcnt = num;
+
+	while (!ret && mcnt--) {
+		u8 remaining = msg->len;
+		u8 *ptr = msg->buf;
+
+		ret = i2c_start(port);
+		if (ret == 0)
+			ret = i2c_addr(port, msg);
+
+		if (msg->flags & I2C_M_RD) {
+			while (!ret && remaining--)
+				ret = i2c_get_byte(port, ptr++, !remaining);
+		} else {
+			while (!ret && remaining--)
+				ret = i2c_put_byte(port, *ptr++);
+		}
+
+		msg++;
+	}
+
+	i2c_stop(port);
+	return (ret < 0) ? ret : num;
 }
 
169int 274static u32
170nouveau_i2c_init(struct drm_device *dev, struct dcb_i2c_entry *entry, int index) 275i2c_bit_func(struct i2c_adapter *adap)
171{ 276{
172 struct drm_nouveau_private *dev_priv = dev->dev_private; 277 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
173 struct nouveau_i2c_chan *i2c; 278}
174 int ret; 279
280const struct i2c_algorithm i2c_bit_algo = {
281 .master_xfer = i2c_bit_xfer,
282 .functionality = i2c_bit_func
283};
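
The helpers above implement a conventional open-drain I2C master in software: a start condition, MSB-first data bits with an ACK slot after every byte, and a stop condition, all paced by the T_RISEFALL/T_HOLD delays. As a hedged sketch, this is how a caller would drive such a port through the core i2c layer; the port index 2 and slave address 0x50 are illustrative only, not taken from this patch:

static int
example_read_byte(struct drm_device *dev, u8 offset, u8 *val)
{
        struct nouveau_i2c_chan *port = nouveau_i2c_find(dev, 2);
        struct i2c_msg msgs[] = {
                { .addr = 0x50, .flags = 0, .len = 1, .buf = &offset },
                { .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = val },
        };

        if (!port)
                return -ENODEV;

        /* for bit-banged ports, i2c_transfer() lands in i2c_bit_xfer() above */
        return i2c_transfer(&port->adapter, msgs, 2) == 2 ? 0 : -EIO;
}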
284
285static const uint32_t nv50_i2c_port[] = {
286 0x00e138, 0x00e150, 0x00e168, 0x00e180,
287 0x00e254, 0x00e274, 0x00e764, 0x00e780,
288 0x00e79c, 0x00e7b8
289};
175 290
176 if (entry->chan) 291static u8 *
177 return -EEXIST; 292i2c_table(struct drm_device *dev, u8 *version)
293{
294 u8 *dcb = dcb_table(dev), *i2c = NULL;
295 if (dcb) {
296 if (dcb[0] >= 0x15)
297 i2c = ROMPTR(dev, dcb[2]);
298 if (dcb[0] >= 0x30)
299 i2c = ROMPTR(dev, dcb[4]);
300 }
178 301
179 if (dev_priv->card_type >= NV_50 && 302 /* early revisions had no version number, use dcb version */
180 dev_priv->card_type <= NV_C0 && entry->read >= NV50_I2C_PORTS) { 303 if (i2c) {
181 NV_ERROR(dev, "unknown i2c port %d\n", entry->read); 304 *version = dcb[0];
182 return -EINVAL; 305 if (*version >= 0x30)
306 *version = i2c[0];
183 } 307 }
184 308
185 i2c = kzalloc(sizeof(*i2c), GFP_KERNEL); 309 return i2c;
186 if (i2c == NULL) 310}
187 return -ENOMEM; 311
188 312int
189 switch (entry->port_type) { 313nouveau_i2c_init(struct drm_device *dev)
190 case 0: 314{
191 i2c->bit.setsda = nv04_i2c_setsda; 315 struct drm_nouveau_private *dev_priv = dev->dev_private;
192 i2c->bit.setscl = nv04_i2c_setscl; 316 struct nvbios *bios = &dev_priv->vbios;
193 i2c->bit.getsda = nv04_i2c_getsda; 317 struct nouveau_i2c_chan *port;
194 i2c->bit.getscl = nv04_i2c_getscl; 318 u8 *i2c, *entry, legacy[2][4] = {};
195 i2c->rd = entry->read; 319 u8 version, entries, recordlen;
196 i2c->wr = entry->write; 320 int ret, i;
197 break; 321
198 case 4: 322 INIT_LIST_HEAD(&dev_priv->i2c_ports);
199 i2c->bit.setsda = nv4e_i2c_setsda; 323
200 i2c->bit.setscl = nv4e_i2c_setscl; 324 i2c = i2c_table(dev, &version);
201 i2c->bit.getsda = nv4e_i2c_getsda; 325 if (!i2c) {
202 i2c->bit.getscl = nv4e_i2c_getscl; 326 u8 *bmp = &bios->data[bios->offset];
203 i2c->rd = 0x600800 + entry->read; 327 if (bios->type != NVBIOS_BMP)
204 i2c->wr = 0x600800 + entry->write; 328 return -ENODEV;
205 break; 329
206 case 5: 330 legacy[0][0] = NV_CIO_CRE_DDC_WR__INDEX;
207 i2c->bit.setsda = nv50_i2c_setsda; 331 legacy[0][1] = NV_CIO_CRE_DDC_STATUS__INDEX;
208 i2c->bit.setscl = nv50_i2c_setscl; 332 legacy[1][0] = NV_CIO_CRE_DDC0_WR__INDEX;
209 if (dev_priv->card_type < NV_D0) { 333 legacy[1][1] = NV_CIO_CRE_DDC0_STATUS__INDEX;
210 i2c->bit.getsda = nv50_i2c_getsda; 334
211 i2c->bit.getscl = nv50_i2c_getscl; 335 /* BMP (from v4.0) has i2c info in the structure, it's in a
212 i2c->rd = nv50_i2c_port[entry->read]; 336 * fixed location on earlier VBIOS
213 i2c->wr = i2c->rd; 337 */
214 } else { 338 if (bmp[5] < 4)
215 i2c->bit.getsda = nvd0_i2c_getsda; 339 i2c = &bios->data[0x48];
216 i2c->bit.getscl = nvd0_i2c_getscl; 340 else
217 i2c->rd = 0x00d014 + (entry->read * 0x20); 341 i2c = &bmp[0x36];
218 i2c->wr = i2c->rd; 342
219 } 343 if (i2c[4]) legacy[0][0] = i2c[4];
220 break; 344 if (i2c[5]) legacy[0][1] = i2c[5];
221 case 6: 345 if (i2c[6]) legacy[1][0] = i2c[6];
222 i2c->rd = entry->read; 346 if (i2c[7]) legacy[1][1] = i2c[7];
223 i2c->wr = entry->write;
224 break;
225 default:
226 NV_ERROR(dev, "DCB I2C port type %d unknown\n",
227 entry->port_type);
228 kfree(i2c);
229 return -EINVAL;
230 } 347 }
231 348
232 snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), 349 if (i2c && version >= 0x30) {
233 "nouveau-%s-%d", pci_name(dev->pdev), index); 350 entry = i2c[1] + i2c;
234 i2c->adapter.owner = THIS_MODULE; 351 entries = i2c[2];
235 i2c->adapter.dev.parent = &dev->pdev->dev; 352 recordlen = i2c[3];
236 i2c->dev = dev; 353 } else
237 i2c_set_adapdata(&i2c->adapter, i2c); 354 if (i2c) {
238 355 entry = i2c;
239 if (entry->port_type < 6) { 356 entries = 16;
240 i2c->adapter.algo_data = &i2c->bit; 357 recordlen = 4;
241 i2c->bit.udelay = 40;
242 i2c->bit.timeout = usecs_to_jiffies(5000);
243 i2c->bit.data = i2c;
244 ret = i2c_bit_add_bus(&i2c->adapter);
245 } else { 358 } else {
246 i2c->adapter.algo = &nouveau_dp_i2c_algo; 359 entry = legacy[0];
247 ret = i2c_add_adapter(&i2c->adapter); 360 entries = 2;
361 recordlen = 4;
248 } 362 }
249 363
250 if (ret) { 364 for (i = 0; i < entries; i++, entry += recordlen) {
251 NV_ERROR(dev, "Failed to register i2c %d\n", index); 365 port = kzalloc(sizeof(*port), GFP_KERNEL);
252 kfree(i2c); 366 if (port == NULL) {
253 return ret; 367 nouveau_i2c_fini(dev);
368 return -ENOMEM;
369 }
370
371 port->type = entry[3];
372 if (version < 0x30) {
373 port->type &= 0x07;
374 if (port->type == 0x07)
375 port->type = 0xff;
376 }
377
378 if (port->type == 0xff) {
379 kfree(port);
380 continue;
381 }
382
383 switch (port->type) {
384 case 0: /* NV04:NV50 */
385 port->drive = entry[0];
386 port->sense = entry[1];
387 port->adapter.algo = &i2c_bit_algo;
388 break;
389 case 4: /* NV4E */
390 port->drive = 0x600800 + entry[1];
391 port->sense = port->drive;
392 port->adapter.algo = &i2c_bit_algo;
393 break;
394 case 5: /* NV50- */
395 port->drive = entry[0] & 0x0f;
396 if (dev_priv->card_type < NV_D0) {
397 if (port->drive >= ARRAY_SIZE(nv50_i2c_port))
398 break;
399 port->drive = nv50_i2c_port[port->drive];
400 port->sense = port->drive;
401 } else {
402 port->drive = 0x00d014 + (port->drive * 0x20);
403 port->sense = port->drive;
404 }
405 port->adapter.algo = &i2c_bit_algo;
406 break;
407 case 6: /* NV50- DP AUX */
408 port->drive = entry[0];
409 port->sense = port->drive;
410 port->adapter.algo = &nouveau_dp_i2c_algo;
411 break;
412 default:
413 break;
414 }
415
416 if (!port->adapter.algo) {
417 NV_ERROR(dev, "I2C%d: type %d index %x/%x unknown\n",
418 i, port->type, port->drive, port->sense);
419 kfree(port);
420 continue;
421 }
422
423 snprintf(port->adapter.name, sizeof(port->adapter.name),
424 "nouveau-%s-%d", pci_name(dev->pdev), i);
425 port->adapter.owner = THIS_MODULE;
426 port->adapter.dev.parent = &dev->pdev->dev;
427 port->dev = dev;
428 port->index = i;
429 port->dcb = ROM32(entry[0]);
430 i2c_set_adapdata(&port->adapter, i2c);
431
432 ret = i2c_add_adapter(&port->adapter);
433 if (ret) {
434 NV_ERROR(dev, "I2C%d: failed register: %d\n", i, ret);
435 kfree(port);
436 continue;
437 }
438
439 list_add_tail(&port->head, &dev_priv->i2c_ports);
254 } 440 }
255 441
256 entry->chan = i2c;
257 return 0; 442 return 0;
258} 443}
259 444
260void 445void
261nouveau_i2c_fini(struct drm_device *dev, struct dcb_i2c_entry *entry) 446nouveau_i2c_fini(struct drm_device *dev)
262{ 447{
263 if (!entry->chan) 448 struct drm_nouveau_private *dev_priv = dev->dev_private;
264 return; 449 struct nouveau_i2c_chan *port, *tmp;
265 450
266 i2c_del_adapter(&entry->chan->adapter); 451 list_for_each_entry_safe(port, tmp, &dev_priv->i2c_ports, head) {
267 kfree(entry->chan); 452 i2c_del_adapter(&port->adapter);
268 entry->chan = NULL; 453 kfree(port);
454 }
269} 455}
270 456
271struct nouveau_i2c_chan * 457struct nouveau_i2c_chan *
272nouveau_i2c_find(struct drm_device *dev, int index) 458nouveau_i2c_find(struct drm_device *dev, u8 index)
273{ 459{
274 struct drm_nouveau_private *dev_priv = dev->dev_private; 460 struct drm_nouveau_private *dev_priv = dev->dev_private;
275 struct dcb_i2c_entry *i2c = &dev_priv->vbios.dcb.i2c[index]; 461 struct nouveau_i2c_chan *port;
462
463 if (index == NV_I2C_DEFAULT(0) ||
464 index == NV_I2C_DEFAULT(1)) {
465 u8 version, *i2c = i2c_table(dev, &version);
466 if (i2c && version >= 0x30) {
467 if (index == NV_I2C_DEFAULT(0))
468 index = (i2c[4] & 0x0f);
469 else
470 index = (i2c[4] & 0xf0) >> 4;
471 } else {
472 index = 2;
473 }
474 }
276 475
277 if (index >= DCB_MAX_NUM_I2C_ENTRIES) 476 list_for_each_entry(port, &dev_priv->i2c_ports, head) {
278 return NULL; 477 if (port->index == index)
478 break;
479 }
279 480
280 if (dev_priv->card_type >= NV_50 && (i2c->entry & 0x00000100)) { 481 if (&port->head == &dev_priv->i2c_ports)
281 uint32_t reg = 0xe500, val; 482 return NULL;
282 483
283 if (i2c->port_type == 6) { 484 if (dev_priv->card_type >= NV_50 && (port->dcb & 0x00000100)) {
284 reg += i2c->read * 0x50; 485 u32 reg = 0x00e500, val;
486 if (port->type == 6) {
487 reg += port->drive * 0x50;
285 val = 0x2002; 488 val = 0x2002;
286 } else { 489 } else {
287 reg += ((i2c->entry & 0x1e00) >> 9) * 0x50; 490 reg += ((port->dcb & 0x1e00) >> 9) * 0x50;
288 val = 0xe001; 491 val = 0xe001;
289 } 492 }
290 493
@@ -294,9 +497,7 @@ nouveau_i2c_find(struct drm_device *dev, int index)
294 nv_mask(dev, reg + 0x00, 0x0000f003, val); 497 nv_mask(dev, reg + 0x00, 0x0000f003, val);
295 } 498 }
296 499
297 if (!i2c->chan && nouveau_i2c_init(dev, i2c, index)) 500 return port;
298 return NULL;
299 return i2c->chan;
300} 501}
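
Note the NV_I2C_DEFAULT(n) handling above: a caller may pass a magic index (0x80 + n, defined in nouveau_i2c.h below) which is translated to a real port number via byte 4 of a v3.0+ i2c table, falling back to port 2 on older VBIOS. A minimal usage sketch:

        /* resolve whichever port the VBIOS nominates as the primary bus */
        struct nouveau_i2c_chan *ddc = nouveau_i2c_find(dev, NV_I2C_DEFAULT(0));
        if (ddc)
                NV_DEBUG(dev, "default i2c bus is port %d\n", ddc->index);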
301 502
302bool 503bool
@@ -331,9 +532,13 @@ nouveau_i2c_identify(struct drm_device *dev, const char *what,
331 struct nouveau_i2c_chan *i2c = nouveau_i2c_find(dev, index); 532 struct nouveau_i2c_chan *i2c = nouveau_i2c_find(dev, index);
332 int i; 533 int i;
333 534
334 NV_DEBUG(dev, "Probing %ss on I2C bus: %d\n", what, index); 535 if (!i2c) {
536 NV_DEBUG(dev, "No bus when probing %s on %d\n", what, index);
537 return -ENODEV;
538 }
335 539
336 for (i = 0; i2c && info[i].addr; i++) { 540 NV_DEBUG(dev, "Probing %ss on I2C bus: %d\n", what, i2c->index);
541 for (i = 0; info[i].addr; i++) {
337 if (nouveau_probe_i2c_addr(i2c, info[i].addr) && 542 if (nouveau_probe_i2c_addr(i2c, info[i].addr) &&
338 (!match || match(i2c, &info[i]))) { 543 (!match || match(i2c, &info[i]))) {
339 NV_INFO(dev, "Detected %s: %s\n", what, info[i].type); 544 NV_INFO(dev, "Detected %s: %s\n", what, info[i].type);
@@ -342,6 +547,5 @@ nouveau_i2c_identify(struct drm_device *dev, const char *what,
342 } 547 }
343 548
344 NV_DEBUG(dev, "No devices found.\n"); 549 NV_DEBUG(dev, "No devices found.\n");
345
346 return -ENODEV; 550 return -ENODEV;
347} 551}
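
nouveau_i2c_identify() probes each address in an i2c_board_info table on the given bus, returning the table index of the first device that responds (and passes the optional match() callback, which may be NULL), or -ENODEV. A sketch under assumed names; the encoder table and bus index here are hypothetical:

static struct i2c_board_info example_encoders[] = {
        { I2C_BOARD_INFO("example-encoder", 0x38) },
        { }
};

        int type = nouveau_i2c_identify(dev, "TV encoder",
                                        example_encoders, NULL, 1);
        if (type >= 0)
                NV_INFO(dev, "using %s\n", example_encoders[type].type);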
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.h b/drivers/gpu/drm/nouveau/nouveau_i2c.h
index 422b62fd827..4d2e4e9031b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.h
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.h
@@ -27,20 +27,25 @@
27#include <linux/i2c-algo-bit.h> 27#include <linux/i2c-algo-bit.h>
28#include "drm_dp_helper.h" 28#include "drm_dp_helper.h"
29 29
30struct dcb_i2c_entry; 30#define NV_I2C_PORT(n) (0x00 + (n))
31#define NV_I2C_PORT_NUM 0x10
32#define NV_I2C_DEFAULT(n) (0x80 + (n))
31 33
32struct nouveau_i2c_chan { 34struct nouveau_i2c_chan {
33 struct i2c_adapter adapter; 35 struct i2c_adapter adapter;
34 struct drm_device *dev; 36 struct drm_device *dev;
35 struct i2c_algo_bit_data bit; 37 struct list_head head;
36 unsigned rd; 38 u8 index;
37 unsigned wr; 39 u8 type;
38 unsigned data; 40 u32 dcb;
41 u32 drive;
42 u32 sense;
43 u32 state;
39}; 44};
40 45
41int nouveau_i2c_init(struct drm_device *, struct dcb_i2c_entry *, int index); 46int nouveau_i2c_init(struct drm_device *);
42void nouveau_i2c_fini(struct drm_device *, struct dcb_i2c_entry *); 47void nouveau_i2c_fini(struct drm_device *);
43struct nouveau_i2c_chan *nouveau_i2c_find(struct drm_device *, int index); 48struct nouveau_i2c_chan *nouveau_i2c_find(struct drm_device *, u8 index);
44bool nouveau_probe_i2c_addr(struct nouveau_i2c_chan *i2c, int addr); 49bool nouveau_probe_i2c_addr(struct nouveau_i2c_chan *i2c, int addr);
45int nouveau_i2c_identify(struct drm_device *dev, const char *what, 50int nouveau_i2c_identify(struct drm_device *dev, const char *what,
46 struct i2c_board_info *info, 51 struct i2c_board_info *info,
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 36bec480770..c3a5745e9c7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -407,6 +407,12 @@ nouveau_mem_vram_init(struct drm_device *dev)
407 ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits)); 407 ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
408 if (ret) 408 if (ret)
409 return ret; 409 return ret;
410 ret = pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
411 if (ret) {
412 /* Reset to default value. */
413 pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(32));
414 }
415
410 416
411 ret = nouveau_ttm_global_init(dev_priv); 417 ret = nouveau_ttm_global_init(dev_priv);
412 if (ret) 418 if (ret)
@@ -638,10 +644,10 @@ nouveau_mem_timing_init(struct drm_device *dev)
638 return; 644 return;
639 645
640 if (P.version == 1) 646 if (P.version == 1)
641 hdr = (struct nouveau_pm_tbl_header *) ROMPTR(bios, P.data[4]); 647 hdr = (struct nouveau_pm_tbl_header *) ROMPTR(dev, P.data[4]);
642 else 648 else
643 if (P.version == 2) 649 if (P.version == 2)
644 hdr = (struct nouveau_pm_tbl_header *) ROMPTR(bios, P.data[8]); 650 hdr = (struct nouveau_pm_tbl_header *) ROMPTR(dev, P.data[8]);
645 else { 651 else {
646 NV_WARN(dev, "unknown mem for BIT P %d\n", P.version); 652 NV_WARN(dev, "unknown mem for BIT P %d\n", P.version);
647 } 653 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_mxm.c b/drivers/gpu/drm/nouveau/nouveau_mxm.c
new file mode 100644
index 00000000000..8bccddf4eff
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_mxm.c
@@ -0,0 +1,677 @@
1/*
2 * Copyright 2011 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <linux/acpi.h>
26
27#include "drmP.h"
28#include "nouveau_drv.h"
29
30#define MXM_DBG(dev, fmt, args...) NV_DEBUG((dev), "MXM: " fmt, ##args)
31#define MXM_MSG(dev, fmt, args...) NV_INFO((dev), "MXM: " fmt, ##args)
32
33static u8 *
34mxms_data(struct drm_device *dev)
35{
36 struct drm_nouveau_private *dev_priv = dev->dev_private;
37 return dev_priv->mxms;
38
39}
40
41static u16
42mxms_version(struct drm_device *dev)
43{
44 u8 *mxms = mxms_data(dev);
45 u16 version = (mxms[4] << 8) | mxms[5];
 46	switch (version) {
47 case 0x0200:
48 case 0x0201:
49 case 0x0300:
50 return version;
51 default:
52 break;
53 }
54
55 MXM_DBG(dev, "unknown version %d.%d\n", mxms[4], mxms[5]);
56 return 0x0000;
57}
58
59static u16
60mxms_headerlen(struct drm_device *dev)
61{
62 return 8;
63}
64
65static u16
66mxms_structlen(struct drm_device *dev)
67{
68 return *(u16 *)&mxms_data(dev)[6];
69}
70
71static bool
72mxms_checksum(struct drm_device *dev)
73{
74 u16 size = mxms_headerlen(dev) + mxms_structlen(dev);
75 u8 *mxms = mxms_data(dev), sum = 0;
76 while (size--)
77 sum += *mxms++;
78 if (sum) {
79 MXM_DBG(dev, "checksum invalid\n");
80 return false;
81 }
82 return true;
83}
84
85static bool
86mxms_valid(struct drm_device *dev)
87{
88 u8 *mxms = mxms_data(dev);
89 if (*(u32 *)mxms != 0x5f4d584d) {
90 MXM_DBG(dev, "signature invalid\n");
91 return false;
92 }
93
94 if (!mxms_version(dev) || !mxms_checksum(dev))
95 return false;
96
97 return true;
98}
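
mxms_valid() accepts a shadowed SIS only if it begins with the little-endian magic 0x5f4d584d (the ASCII signature "MXM_") and all header plus structure bytes sum to zero modulo 256, the vendor having stored a compensating checksum byte. An illustrative standalone check of the same two invariants:

static bool
example_sis_ok(const u8 *sis, u16 len)
{
        u8 sum = 0;
        u16 i;

        if (sis[0] != 'M' || sis[1] != 'X' || sis[2] != 'M' || sis[3] != '_')
                return false;

        for (i = 0; i < len; i++)
                sum += sis[i];

        return sum == 0;
}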
99
100static bool
101mxms_foreach(struct drm_device *dev, u8 types,
102 bool (*exec)(struct drm_device *, u8 *, void *), void *info)
103{
104 u8 *mxms = mxms_data(dev);
105 u8 *desc = mxms + mxms_headerlen(dev);
106 u8 *fini = desc + mxms_structlen(dev) - 1;
107 while (desc < fini) {
108 u8 type = desc[0] & 0x0f;
109 u8 headerlen = 0;
110 u8 recordlen = 0;
111 u8 entries = 0;
112
113 switch (type) {
114 case 0: /* Output Device Structure */
115 if (mxms_version(dev) >= 0x0300)
116 headerlen = 8;
117 else
118 headerlen = 6;
119 break;
120 case 1: /* System Cooling Capability Structure */
121 case 2: /* Thermal Structure */
122 case 3: /* Input Power Structure */
123 headerlen = 4;
124 break;
125 case 4: /* GPIO Device Structure */
126 headerlen = 4;
127 recordlen = 2;
128 entries = (ROM32(desc[0]) & 0x01f00000) >> 20;
129 break;
130 case 5: /* Vendor Specific Structure */
131 headerlen = 8;
132 break;
133 case 6: /* Backlight Control Structure */
134 if (mxms_version(dev) >= 0x0300) {
135 headerlen = 4;
136 recordlen = 8;
137 entries = (desc[1] & 0xf0) >> 4;
138 } else {
139 headerlen = 8;
140 }
141 break;
142 case 7: /* Fan Control Structure */
143 headerlen = 8;
144 recordlen = 4;
145 entries = desc[1] & 0x07;
146 break;
147 default:
148 MXM_DBG(dev, "unknown descriptor type %d\n", type);
149 return false;
150 }
151
152 if ((drm_debug & DRM_UT_DRIVER) && (exec == NULL)) {
 153			static const char *mxms_desc_name[] = {
154 "ODS", "SCCS", "TS", "IPS",
155 "GSD", "VSS", "BCS", "FCS",
156 };
157 u8 *dump = desc;
158 int i, j;
159
160 MXM_DBG(dev, "%4s: ", mxms_desc_name[type]);
161 for (j = headerlen - 1; j >= 0; j--)
162 printk("%02x", dump[j]);
163 printk("\n");
164 dump += headerlen;
165
166 for (i = 0; i < entries; i++, dump += recordlen) {
167 MXM_DBG(dev, " ");
168 for (j = recordlen - 1; j >= 0; j--)
169 printk("%02x", dump[j]);
170 printk("\n");
171 }
172 }
173
174 if (types & (1 << type)) {
175 if (!exec(dev, desc, info))
176 return false;
177 }
178
179 desc += headerlen + (entries * recordlen);
180 }
181
182 return true;
183}
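
mxms_foreach() is the central walker: it works out each descriptor's header/record geometry from its type, hex-dumps it when invoked with exec == NULL under drm_debug, and calls the callback for descriptor types selected by the 'types' bitmask (bit n selects type n). A sketch of a callback that counts Output Device Structures (type 0, hence mask 0x01):

static bool
example_count_odev(struct drm_device *dev, u8 *desc, void *info)
{
        (*(int *)info)++;
        return true;    /* returning false would abort the walk */
}

        int count = 0;
        mxms_foreach(dev, 0x01, example_count_odev, &count);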
184
185static u8 *
186mxm_table(struct drm_device *dev, u8 *size)
187{
188 struct bit_entry x;
189
190 if (bit_table(dev, 'x', &x)) {
191 MXM_DBG(dev, "BIT 'x' table not present\n");
192 return NULL;
193 }
194
195 if (x.version != 1 || x.length < 3) {
196 MXM_MSG(dev, "BIT x table %d/%d unknown\n",
197 x.version, x.length);
198 return NULL;
199 }
200
201 *size = x.length;
202 return x.data;
203}
204
205/* These map MXM v2.x digital connection values to the appropriate SOR/link,
206 * hopefully they're correct for all boards within the same chipset...
207 *
208 * MXM v3.x VBIOS are nicer and provide pointers to these tables.
209 */
210static u8 nv84_sor_map[16] = {
211 0x00, 0x12, 0x22, 0x11, 0x32, 0x31, 0x11, 0x31,
212 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
213};
214
215static u8 nv92_sor_map[16] = {
216 0x00, 0x12, 0x22, 0x11, 0x32, 0x31, 0x11, 0x31,
217 0x11, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
218};
219
220static u8 nv94_sor_map[16] = {
221 0x00, 0x14, 0x24, 0x11, 0x34, 0x31, 0x11, 0x31,
222 0x11, 0x31, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00
223};
224
225static u8 nv96_sor_map[16] = {
226 0x00, 0x14, 0x24, 0x00, 0x34, 0x00, 0x11, 0x31,
227 0x11, 0x31, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00
228};
229
230static u8 nv98_sor_map[16] = {
231 0x00, 0x14, 0x12, 0x11, 0x00, 0x31, 0x11, 0x31,
232 0x11, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
233};
234
235static u8
236mxm_sor_map(struct drm_device *dev, u8 conn)
237{
238 struct drm_nouveau_private *dev_priv = dev->dev_private;
239 u8 len, *mxm = mxm_table(dev, &len);
240 if (mxm && len >= 6) {
241 u8 *map = ROMPTR(dev, mxm[4]);
242 if (map) {
243 if (map[0] == 0x10) {
244 if (conn < map[3])
245 return map[map[1] + conn];
246 return 0x00;
247 }
248
249 MXM_MSG(dev, "unknown sor map 0x%02x\n", map[0]);
250 }
251 }
252
253 if (dev_priv->chipset == 0x84 || dev_priv->chipset == 0x86)
254 return nv84_sor_map[conn];
255 if (dev_priv->chipset == 0x92)
256 return nv92_sor_map[conn];
257 if (dev_priv->chipset == 0x94)
258 return nv94_sor_map[conn];
259 if (dev_priv->chipset == 0x96)
260 return nv96_sor_map[conn];
261 if (dev_priv->chipset == 0x98)
262 return nv98_sor_map[conn];
263
264 MXM_MSG(dev, "missing sor map\n");
265 return 0x00;
266}
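
Each map byte packs the SOR index in its low nibble and the link selector in bits 4:5; mxm_match_dcb() below compares these against dcb bits 24:27 and sorconf.link respectively. A worked decode:

        /* e.g. nv84_sor_map[1] == 0x12: digital connection 1 routes to
         * SOR 2 via link 1
         */
        u8 map  = mxm_sor_map(dev, 1);
        u8 sor  = map & 0x0f;           /* 2 */
        u8 link = (map & 0x30) >> 4;    /* 1 */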
267
268static u8
269mxm_ddc_map(struct drm_device *dev, u8 port)
270{
271 u8 len, *mxm = mxm_table(dev, &len);
272 if (mxm && len >= 8) {
273 u8 *map = ROMPTR(dev, mxm[6]);
274 if (map) {
275 if (map[0] == 0x10) {
276 if (port < map[3])
277 return map[map[1] + port];
278 return 0x00;
279 }
280
281 MXM_MSG(dev, "unknown ddc map 0x%02x\n", map[0]);
282 }
283 }
284
285 /* v2.x: directly write port as dcb i2cidx */
286 return (port << 4) | port;
287}
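
The returned byte carries the DCB i2c index in its low nibble and the aux channel index in its high nibble; mxm_dcb_sanitise_entry() below keeps one or the other depending on whether the output is DisplayPort. A worked instance of the v2.x fallback:

        /* MXM v2.x, ddc port 1: (1 << 4) | 1 == 0x11, i.e. i2c index 1
         * in the low nibble, aux index 1 in the high nibble
         */
        u8 i2cidx = mxm_ddc_map(dev, 1);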
288
289struct mxms_odev {
290 u8 outp_type;
291 u8 conn_type;
292 u8 ddc_port;
293 u8 dig_conn;
294};
295
296static void
297mxms_output_device(struct drm_device *dev, u8 *pdata, struct mxms_odev *desc)
298{
299 u64 data = ROM32(pdata[0]);
300 if (mxms_version(dev) >= 0x0300)
301 data |= (u64)ROM16(pdata[4]) << 32;
302
303 desc->outp_type = (data & 0x00000000000000f0ULL) >> 4;
304 desc->ddc_port = (data & 0x0000000000000f00ULL) >> 8;
305 desc->conn_type = (data & 0x000000000001f000ULL) >> 12;
306 desc->dig_conn = (data & 0x0000000000780000ULL) >> 19;
307}
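
mxms_output_device() unpacks the little-endian descriptor (40 bits on v3.x) into its fields. A worked example with a hypothetical descriptor:

        /* bytes { 0x20, 0x21, 0x10, 0x00 } give data = 0x00102120:
         *   outp_type = (data & 0xf0) >> 4      = 0x2 (TMDS)
         *   ddc_port  = (data & 0xf00) >> 8     = 0x1
         *   conn_type = (data & 0x1f000) >> 12  = 0x02 (HDMI)
         *   dig_conn  = (data & 0x780000) >> 19 = 0x2
         */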
308
309struct context {
310 u32 *outp;
311 struct mxms_odev desc;
312};
313
314static bool
315mxm_match_tmds_partner(struct drm_device *dev, u8 *data, void *info)
316{
317 struct context *ctx = info;
318 struct mxms_odev desc;
319
320 mxms_output_device(dev, data, &desc);
321 if (desc.outp_type == 2 &&
322 desc.dig_conn == ctx->desc.dig_conn)
323 return false;
324 return true;
325}
326
327static bool
328mxm_match_dcb(struct drm_device *dev, u8 *data, void *info)
329{
330 struct context *ctx = info;
331 u64 desc = *(u64 *)data;
332
333 mxms_output_device(dev, data, &ctx->desc);
334
335 /* match dcb encoder type to mxm-ods device type */
336 if ((ctx->outp[0] & 0x0000000f) != ctx->desc.outp_type)
337 return true;
338
339 /* digital output, have some extra stuff to match here, there's a
340 * table in the vbios that provides a mapping from the mxm digital
341 * connection enum values to SOR/link
342 */
343 if ((desc & 0x00000000000000f0) >= 0x20) {
344 /* check against sor index */
345 u8 link = mxm_sor_map(dev, ctx->desc.dig_conn);
346 if ((ctx->outp[0] & 0x0f000000) != (link & 0x0f) << 24)
347 return true;
348
349 /* check dcb entry has a compatible link field */
350 link = (link & 0x30) >> 4;
351 if ((link & ((ctx->outp[1] & 0x00000030) >> 4)) != link)
352 return true;
353 }
354
355 /* mark this descriptor accounted for by setting invalid device type,
 356	 * except of course some manufacturers don't follow specs properly and
357 * we need to avoid killing off the TMDS function on DP connectors
358 * if MXM-SIS is missing an entry for it.
359 */
360 data[0] &= ~0xf0;
361 if (ctx->desc.outp_type == 6 && ctx->desc.conn_type == 6 &&
362 mxms_foreach(dev, 0x01, mxm_match_tmds_partner, ctx)) {
363 data[0] |= 0x20; /* modify descriptor to match TMDS now */
364 } else {
365 data[0] |= 0xf0;
366 }
367
368 return false;
369}
370
371static int
372mxm_dcb_sanitise_entry(struct drm_device *dev, void *data, int idx, u8 *dcbe)
373{
374 struct context ctx = { .outp = (u32 *)dcbe };
375 u8 type, i2cidx, link;
376 u8 *conn;
377
378 /* look for an output device structure that matches this dcb entry.
379 * if one isn't found, disable it.
380 */
381 if (mxms_foreach(dev, 0x01, mxm_match_dcb, &ctx)) {
382 MXM_DBG(dev, "disable %d: 0x%08x 0x%08x\n",
383 idx, ctx.outp[0], ctx.outp[1]);
384 ctx.outp[0] |= 0x0000000f;
385 return 0;
386 }
387
388 /* modify the output's ddc/aux port, there's a pointer to a table
389 * with the mapping from mxm ddc/aux port to dcb i2c_index in the
390 * vbios mxm table
391 */
392 i2cidx = mxm_ddc_map(dev, ctx.desc.ddc_port);
393 if ((ctx.outp[0] & 0x0000000f) != OUTPUT_DP)
394 i2cidx = (i2cidx & 0x0f) << 4;
395 else
396 i2cidx = (i2cidx & 0xf0);
397
398 if (i2cidx != 0xf0) {
399 ctx.outp[0] &= ~0x000000f0;
400 ctx.outp[0] |= i2cidx;
401 }
402
403 /* override dcb sorconf.link, based on what mxm data says */
404 switch (ctx.desc.outp_type) {
405 case 0x00: /* Analog CRT */
406 case 0x01: /* Analog TV/HDTV */
407 break;
408 default:
409 link = mxm_sor_map(dev, ctx.desc.dig_conn) & 0x30;
410 ctx.outp[1] &= ~0x00000030;
411 ctx.outp[1] |= link;
412 break;
413 }
414
415 /* we may need to fixup various other vbios tables based on what
416 * the descriptor says the connector type should be.
417 *
418 * in a lot of cases, the vbios tables will claim DVI-I is possible,
419 * and the mxm data says the connector is really HDMI. another
420 * common example is DP->eDP.
421 */
422 conn = dcb_conn(dev, (ctx.outp[0] & 0x0000f000) >> 12);
423 type = conn[0];
424 switch (ctx.desc.conn_type) {
425 case 0x01: /* LVDS */
426 ctx.outp[1] |= 0x00000004; /* use_power_scripts */
427 /* XXX: modify default link width in LVDS table */
428 break;
429 case 0x02: /* HDMI */
430 type = DCB_CONNECTOR_HDMI_1;
431 break;
432 case 0x03: /* DVI-D */
433 type = DCB_CONNECTOR_DVI_D;
434 break;
435 case 0x0e: /* eDP, falls through to DPint */
436 ctx.outp[1] |= 0x00010000;
437 case 0x07: /* DP internal, wtf is this?? HP8670w */
438 ctx.outp[1] |= 0x00000004; /* use_power_scripts? */
439 type = DCB_CONNECTOR_eDP;
440 break;
441 default:
442 break;
443 }
444
445 if (mxms_version(dev) >= 0x0300)
446 conn[0] = type;
447
448 return 0;
449}
450
451static bool
452mxm_show_unmatched(struct drm_device *dev, u8 *data, void *info)
453{
454 u64 desc = *(u64 *)data;
455 if ((desc & 0xf0) != 0xf0)
456 MXM_MSG(dev, "unmatched output device 0x%016llx\n", desc);
457 return true;
458}
459
460static void
461mxm_dcb_sanitise(struct drm_device *dev)
462{
463 u8 *dcb = dcb_table(dev);
464 if (!dcb || dcb[0] != 0x40) {
465 MXM_DBG(dev, "unsupported DCB version\n");
466 return;
467 }
468
469 dcb_outp_foreach(dev, NULL, mxm_dcb_sanitise_entry);
470 mxms_foreach(dev, 0x01, mxm_show_unmatched, NULL);
471}
472
473static bool
474mxm_shadow_rom_fetch(struct nouveau_i2c_chan *i2c, u8 addr,
475 u8 offset, u8 size, u8 *data)
476{
477 struct i2c_msg msgs[] = {
478 { .addr = addr, .flags = 0, .len = 1, .buf = &offset },
479 { .addr = addr, .flags = I2C_M_RD, .len = size, .buf = data, },
480 };
481
482 return i2c_transfer(&i2c->adapter, msgs, 2) == 2;
483}
484
485static bool
486mxm_shadow_rom(struct drm_device *dev, u8 version)
487{
488 struct drm_nouveau_private *dev_priv = dev->dev_private;
489 struct nouveau_i2c_chan *i2c = NULL;
490 u8 i2cidx, mxms[6], addr, size;
491
492 i2cidx = mxm_ddc_map(dev, 1 /* LVDS_DDC */) & 0x0f;
493 if (i2cidx < 0x0f)
494 i2c = nouveau_i2c_find(dev, i2cidx);
495 if (!i2c)
496 return false;
497
498 addr = 0x54;
499 if (!mxm_shadow_rom_fetch(i2c, addr, 0, 6, mxms)) {
500 addr = 0x56;
501 if (!mxm_shadow_rom_fetch(i2c, addr, 0, 6, mxms))
502 return false;
503 }
504
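	/* point dev_priv->mxms at the 6-byte stack copy first, purely so
	 * mxms_headerlen()/mxms_structlen() can size the real allocation
	 */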
505 dev_priv->mxms = mxms;
506 size = mxms_headerlen(dev) + mxms_structlen(dev);
507 dev_priv->mxms = kmalloc(size, GFP_KERNEL);
508
509 if (dev_priv->mxms &&
510 mxm_shadow_rom_fetch(i2c, addr, 0, size, dev_priv->mxms))
511 return true;
512
513 kfree(dev_priv->mxms);
514 dev_priv->mxms = NULL;
515 return false;
516}
517
518#if defined(CONFIG_ACPI)
519static bool
520mxm_shadow_dsm(struct drm_device *dev, u8 version)
521{
522 struct drm_nouveau_private *dev_priv = dev->dev_private;
523 static char muid[] = {
524 0x00, 0xA4, 0x04, 0x40, 0x7D, 0x91, 0xF2, 0x4C,
525 0xB8, 0x9C, 0x79, 0xB6, 0x2F, 0xD5, 0x56, 0x65
526 };
527 u32 mxms_args[] = { 0x00000000 };
528 union acpi_object args[4] = {
529 /* _DSM MUID */
530 { .buffer.type = 3,
531 .buffer.length = sizeof(muid),
532 .buffer.pointer = muid,
533 },
534 /* spec says this can be zero to mean "highest revision", but
535 * of course there's at least one bios out there which fails
536 * unless you pass in exactly the version it supports..
537 */
538 { .integer.type = ACPI_TYPE_INTEGER,
539 .integer.value = (version & 0xf0) << 4 | (version & 0x0f),
540 },
541 /* MXMS function */
542 { .integer.type = ACPI_TYPE_INTEGER,
543 .integer.value = 0x00000010,
544 },
545 /* Pointer to MXMS arguments */
546 { .buffer.type = ACPI_TYPE_BUFFER,
547 .buffer.length = sizeof(mxms_args),
548 .buffer.pointer = (char *)mxms_args,
549 },
550 };
551 struct acpi_object_list list = { ARRAY_SIZE(args), args };
552 struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL };
553 union acpi_object *obj;
554 acpi_handle handle;
555 int ret;
556
557 handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
558 if (!handle)
559 return false;
560
561 ret = acpi_evaluate_object(handle, "_DSM", &list, &retn);
562 if (ret) {
563 MXM_DBG(dev, "DSM MXMS failed: %d\n", ret);
564 return false;
565 }
566
567 obj = retn.pointer;
568 if (obj->type == ACPI_TYPE_BUFFER) {
569 dev_priv->mxms = kmemdup(obj->buffer.pointer,
570 obj->buffer.length, GFP_KERNEL);
571 } else
572 if (obj->type == ACPI_TYPE_INTEGER) {
573 MXM_DBG(dev, "DSM MXMS returned 0x%llx\n", obj->integer.value);
574 }
575
576 kfree(obj);
577 return dev_priv->mxms != NULL;
578}
579#endif
580
581#if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE)
582
583#define WMI_WMMX_GUID "F6CB5C3C-9CAE-4EBD-B577-931EA32A2CC0"
584
585static bool
586mxm_shadow_wmi(struct drm_device *dev, u8 version)
587{
588 struct drm_nouveau_private *dev_priv = dev->dev_private;
589 u32 mxms_args[] = { 0x534D584D /* MXMS */, version, 0 };
590 struct acpi_buffer args = { sizeof(mxms_args), mxms_args };
591 struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL };
592 union acpi_object *obj;
593 acpi_status status;
594
595 if (!wmi_has_guid(WMI_WMMX_GUID))
596 return false;
597
598 status = wmi_evaluate_method(WMI_WMMX_GUID, 0, 0, &args, &retn);
599 if (ACPI_FAILURE(status)) {
600 MXM_DBG(dev, "WMMX MXMS returned %d\n", status);
601 return false;
602 }
603
604 obj = retn.pointer;
605 if (obj->type == ACPI_TYPE_BUFFER) {
606 dev_priv->mxms = kmemdup(obj->buffer.pointer,
607 obj->buffer.length, GFP_KERNEL);
608 }
609
610 kfree(obj);
611 return dev_priv->mxms != NULL;
612}
613#endif
614
615struct mxm_shadow_h {
616 const char *name;
617 bool (*exec)(struct drm_device *, u8 version);
618} _mxm_shadow[] = {
619 { "ROM", mxm_shadow_rom },
620#if defined(CONFIG_ACPI)
621 { "DSM", mxm_shadow_dsm },
622#endif
623#if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE)
624 { "WMI", mxm_shadow_wmi },
625#endif
626 {}
627};
628
629static int
630mxm_shadow(struct drm_device *dev, u8 version)
631{
632 struct drm_nouveau_private *dev_priv = dev->dev_private;
633 struct mxm_shadow_h *shadow = _mxm_shadow;
634 do {
635 MXM_DBG(dev, "checking %s\n", shadow->name);
636 if (shadow->exec(dev, version)) {
637 if (mxms_valid(dev))
638 return 0;
639 kfree(dev_priv->mxms);
640 dev_priv->mxms = NULL;
641 }
642 } while ((++shadow)->name);
643 return -ENOENT;
644}
645
646int
647nouveau_mxm_init(struct drm_device *dev)
648{
649 u8 mxm_size, *mxm = mxm_table(dev, &mxm_size);
650 if (!mxm || !mxm[0]) {
651 MXM_MSG(dev, "no VBIOS data, nothing to do\n");
652 return 0;
653 }
654
655 MXM_MSG(dev, "BIOS version %d.%d\n", mxm[0] >> 4, mxm[0] & 0x0f);
656
657 if (mxm_shadow(dev, mxm[0])) {
658 MXM_MSG(dev, "failed to locate valid SIS\n");
659 return -EINVAL;
660 }
661
662 MXM_MSG(dev, "MXMS Version %d.%d\n",
663 mxms_version(dev) >> 8, mxms_version(dev) & 0xff);
664 mxms_foreach(dev, 0, NULL, NULL);
665
666 if (nouveau_mxmdcb)
667 mxm_dcb_sanitise(dev);
668 return 0;
669}
670
671void
672nouveau_mxm_fini(struct drm_device *dev)
673{
674 struct drm_nouveau_private *dev_priv = dev->dev_private;
675 kfree(dev_priv->mxms);
676 dev_priv->mxms = NULL;
677}
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index 6abdbe6530a..2ef883c4bbc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -115,7 +115,7 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
115 struct drm_nouveau_private *dev_priv = dev->dev_private; 115 struct drm_nouveau_private *dev_priv = dev->dev_private;
116 struct nouveau_gpuobj *nobj = NULL; 116 struct nouveau_gpuobj *nobj = NULL;
117 struct drm_mm_node *mem; 117 struct drm_mm_node *mem;
118 uint32_t offset; 118 uint64_t offset;
119 int target, ret; 119 int target, ret;
120 120
121 mem = drm_mm_search_free_in_range(&chan->notifier_heap, size, 0, 121 mem = drm_mm_search_free_in_range(&chan->notifier_heap, size, 0,
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index 960c0ae0c0c..cc419fae794 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -723,14 +723,14 @@ nvc0_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_vm *vm)
723 nv_wo32(chan->ramin, 0x020c, 0x000000ff); 723 nv_wo32(chan->ramin, 0x020c, 0x000000ff);
724 724
725 /* map display semaphore buffers into channel's vm */ 725 /* map display semaphore buffers into channel's vm */
726 if (dev_priv->card_type >= NV_D0) 726 for (i = 0; i < dev->mode_config.num_crtc; i++) {
727 return 0; 727 struct nouveau_bo *bo;
728 728 if (dev_priv->card_type >= NV_D0)
729 for (i = 0; i < 2; i++) { 729 bo = nvd0_display_crtc_sema(dev, i);
730 struct nv50_display_crtc *dispc = &nv50_display(dev)->crtc[i]; 730 else
731 731 bo = nv50_display(dev)->crtc[i].sem.bo;
732 ret = nouveau_bo_vma_add(dispc->sem.bo, chan->vm, 732
733 &chan->dispc_vma[i]); 733 ret = nouveau_bo_vma_add(bo, chan->vm, &chan->dispc_vma[i]);
734 if (ret) 734 if (ret)
735 return ret; 735 return ret;
736 } 736 }
@@ -879,9 +879,14 @@ nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
879 879
880 NV_DEBUG(dev, "ch%d\n", chan->id); 880 NV_DEBUG(dev, "ch%d\n", chan->id);
881 881
882 if (dev_priv->card_type >= NV_50 && dev_priv->card_type <= NV_C0) { 882 if (dev_priv->card_type >= NV_D0) {
883 for (i = 0; i < dev->mode_config.num_crtc; i++) {
884 struct nouveau_bo *bo = nvd0_display_crtc_sema(dev, i);
885 nouveau_bo_vma_del(bo, &chan->dispc_vma[i]);
886 }
887 } else
888 if (dev_priv->card_type >= NV_50) {
883 struct nv50_display *disp = nv50_display(dev); 889 struct nv50_display *disp = nv50_display(dev);
884
885 for (i = 0; i < dev->mode_config.num_crtc; i++) { 890 for (i = 0; i < dev->mode_config.num_crtc; i++) {
886 struct nv50_display_crtc *dispc = &disp->crtc[i]; 891 struct nv50_display_crtc *dispc = &disp->crtc[i];
887 nouveau_bo_vma_del(dispc->sem.bo, &chan->dispc_vma[i]); 892 nouveau_bo_vma_del(dispc->sem.bo, &chan->dispc_vma[i]);
diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c
index 33d03fbf00d..58f497343ce 100644
--- a/drivers/gpu/drm/nouveau/nouveau_perf.c
+++ b/drivers/gpu/drm/nouveau/nouveau_perf.c
@@ -41,7 +41,7 @@ legacy_perf_init(struct drm_device *dev)
41 return; 41 return;
42 } 42 }
43 43
44 perf = ROMPTR(bios, bmp[0x73]); 44 perf = ROMPTR(dev, bmp[0x73]);
45 if (!perf) { 45 if (!perf) {
46 NV_DEBUG(dev, "No memclock table pointer found.\n"); 46 NV_DEBUG(dev, "No memclock table pointer found.\n");
47 return; 47 return;
@@ -87,7 +87,7 @@ nouveau_perf_timing(struct drm_device *dev, struct bit_entry *P,
87 * ramcfg to select the correct subentry 87 * ramcfg to select the correct subentry
88 */ 88 */
89 if (P->version == 2) { 89 if (P->version == 2) {
90 u8 *tmap = ROMPTR(bios, P->data[4]); 90 u8 *tmap = ROMPTR(dev, P->data[4]);
91 if (!tmap) { 91 if (!tmap) {
92 NV_DEBUG(dev, "no timing map pointer\n"); 92 NV_DEBUG(dev, "no timing map pointer\n");
93 return NULL; 93 return NULL;
@@ -140,7 +140,6 @@ nouveau_perf_voltage(struct drm_device *dev, struct bit_entry *P,
140 struct nouveau_pm_level *perflvl) 140 struct nouveau_pm_level *perflvl)
141{ 141{
142 struct drm_nouveau_private *dev_priv = dev->dev_private; 142 struct drm_nouveau_private *dev_priv = dev->dev_private;
143 struct nvbios *bios = &dev_priv->vbios;
144 u8 *vmap; 143 u8 *vmap;
145 int id; 144 int id;
146 145
@@ -165,7 +164,7 @@ nouveau_perf_voltage(struct drm_device *dev, struct bit_entry *P,
165 return; 164 return;
166 } 165 }
167 166
168 vmap = ROMPTR(bios, P->data[32]); 167 vmap = ROMPTR(dev, P->data[32]);
169 if (!vmap) { 168 if (!vmap) {
170 NV_DEBUG(dev, "volt map table pointer invalid\n"); 169 NV_DEBUG(dev, "volt map table pointer invalid\n");
171 return; 170 return;
@@ -200,12 +199,14 @@ nouveau_perf_init(struct drm_device *dev)
200 return; 199 return;
201 } 200 }
202 201
203 perf = ROMPTR(bios, P.data[0]); 202 perf = ROMPTR(dev, P.data[0]);
204 version = perf[0]; 203 version = perf[0];
205 headerlen = perf[1]; 204 headerlen = perf[1];
206 if (version < 0x40) { 205 if (version < 0x40) {
207 recordlen = perf[3] + (perf[4] * perf[5]); 206 recordlen = perf[3] + (perf[4] * perf[5]);
208 entries = perf[2]; 207 entries = perf[2];
208
209 pm->pwm_divisor = ROM16(perf[6]);
209 } else { 210 } else {
210 recordlen = perf[2] + (perf[3] * perf[4]); 211 recordlen = perf[2] + (perf[3] * perf[4]);
211 entries = perf[5]; 212 entries = perf[5];
@@ -216,7 +217,7 @@ nouveau_perf_init(struct drm_device *dev)
216 return; 217 return;
217 } 218 }
218 219
219 perf = ROMPTR(bios, bios->data[bios->offset + 0x94]); 220 perf = ROMPTR(dev, bios->data[bios->offset + 0x94]);
220 if (!perf) { 221 if (!perf) {
221 NV_DEBUG(dev, "perf table pointer invalid\n"); 222 NV_DEBUG(dev, "perf table pointer invalid\n");
222 return; 223 return;
@@ -283,7 +284,6 @@ nouveau_perf_init(struct drm_device *dev)
283 perflvl->memory = ROM16(entry[11]) * 1000; 284 perflvl->memory = ROM16(entry[11]) * 1000;
284 else 285 else
285 perflvl->memory = ROM16(entry[11]) * 2000; 286 perflvl->memory = ROM16(entry[11]) * 2000;
286
287 break; 287 break;
288 case 0x25: 288 case 0x25:
289 perflvl->fanspeed = entry[4]; 289 perflvl->fanspeed = entry[4];
@@ -300,8 +300,8 @@ nouveau_perf_init(struct drm_device *dev)
300 perflvl->core = ROM16(entry[8]) * 1000; 300 perflvl->core = ROM16(entry[8]) * 1000;
301 perflvl->shader = ROM16(entry[10]) * 1000; 301 perflvl->shader = ROM16(entry[10]) * 1000;
302 perflvl->memory = ROM16(entry[12]) * 1000; 302 perflvl->memory = ROM16(entry[12]) * 1000;
303 /*XXX: confirm on 0x35 */ 303 perflvl->vdec = ROM16(entry[16]) * 1000;
304 perflvl->unk05 = ROM16(entry[16]) * 1000; 304 perflvl->dom6 = ROM16(entry[20]) * 1000;
305 break; 305 break;
306 case 0x40: 306 case 0x40:
307#define subent(n) (ROM16(entry[perf[2] + ((n) * perf[3])]) & 0xfff) * 1000 307#define subent(n) (ROM16(entry[perf[2] + ((n) * perf[3])]) & 0xfff) * 1000
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
index a539fd25792..9064d7f1979 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.c
@@ -26,6 +26,7 @@
26 26
27#include "nouveau_drv.h" 27#include "nouveau_drv.h"
28#include "nouveau_pm.h" 28#include "nouveau_pm.h"
29#include "nouveau_gpio.h"
29 30
30#ifdef CONFIG_ACPI 31#ifdef CONFIG_ACPI
31#include <linux/acpi.h> 32#include <linux/acpi.h>
@@ -35,22 +36,95 @@
35#include <linux/hwmon-sysfs.h> 36#include <linux/hwmon-sysfs.h>
36 37
37static int 38static int
38nouveau_pm_clock_set(struct drm_device *dev, struct nouveau_pm_level *perflvl, 39nouveau_pwmfan_get(struct drm_device *dev)
39 u8 id, u32 khz)
40{ 40{
41 struct drm_nouveau_private *dev_priv = dev->dev_private; 41 struct drm_nouveau_private *dev_priv = dev->dev_private;
42 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 42 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
43 void *pre_state; 43 struct gpio_func gpio;
44 u32 divs, duty;
45 int ret;
44 46
45 if (khz == 0) 47 if (!pm->pwm_get)
46 return 0; 48 return -ENODEV;
49
50 ret = nouveau_gpio_find(dev, 0, DCB_GPIO_PWM_FAN, 0xff, &gpio);
51 if (ret == 0) {
52 ret = pm->pwm_get(dev, gpio.line, &divs, &duty);
53 if (ret == 0) {
54 divs = max(divs, duty);
55 if (dev_priv->card_type <= NV_40 || (gpio.log[0] & 1))
56 duty = divs - duty;
57 return (duty * 100) / divs;
58 }
59
60 return nouveau_gpio_func_get(dev, gpio.func) * 100;
61 }
62
63 return -ENODEV;
64}
65
66static int
67nouveau_pwmfan_set(struct drm_device *dev, int percent)
68{
69 struct drm_nouveau_private *dev_priv = dev->dev_private;
70 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
71 struct gpio_func gpio;
72 u32 divs, duty;
73 int ret;
74
75 if (!pm->pwm_set)
76 return -ENODEV;
77
78 ret = nouveau_gpio_find(dev, 0, DCB_GPIO_PWM_FAN, 0xff, &gpio);
79 if (ret == 0) {
80 divs = pm->pwm_divisor;
81 if (pm->fan.pwm_freq) {
82 /*XXX: PNVIO clock more than likely... */
83 divs = 135000 / pm->fan.pwm_freq;
84 if (dev_priv->chipset < 0xa3)
85 divs /= 4;
86 }
87
88 duty = ((divs * percent) + 99) / 100;
89 if (dev_priv->card_type <= NV_40 || (gpio.log[0] & 1))
90 duty = divs - duty;
47 91
48 pre_state = pm->clock_pre(dev, perflvl, id, khz); 92 return pm->pwm_set(dev, gpio.line, divs, duty);
49 if (IS_ERR(pre_state)) 93 }
50 return PTR_ERR(pre_state); 94
95 return -ENODEV;
96}
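
The divisor comes either from the perf table (pm->pwm_divisor) or from the fan's requested PWM frequency, and the duty cycle is rounded up before the optional polarity inversion. A worked example, assuming a hypothetical pwm_freq of 2700Hz on a chipset >= 0xa3:

        /* divs = 135000 / 2700 = 50 (no extra /4 on >= 0xa3)
         * percent = 33: duty = ((50 * 33) + 99) / 100 = 17 (rounded up)
         * inverted logic (log[0] & 1): duty = 50 - 17 = 33
         */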
97
98static int
99nouveau_pm_perflvl_aux(struct drm_device *dev, struct nouveau_pm_level *perflvl,
100 struct nouveau_pm_level *a, struct nouveau_pm_level *b)
101{
102 struct drm_nouveau_private *dev_priv = dev->dev_private;
103 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
104 int ret;
105
106 /*XXX: not on all boards, we should control based on temperature
107 * on recent boards.. or maybe on some other factor we don't
108 * know about?
109 */
110 if (a->fanspeed && b->fanspeed && b->fanspeed > a->fanspeed) {
111 ret = nouveau_pwmfan_set(dev, perflvl->fanspeed);
112 if (ret && ret != -ENODEV) {
113 NV_ERROR(dev, "fanspeed set failed: %d\n", ret);
114 return ret;
115 }
116 }
117
118 if (pm->voltage.supported && pm->voltage_set) {
119 if (perflvl->volt_min && b->volt_min > a->volt_min) {
120 ret = pm->voltage_set(dev, perflvl->volt_min);
121 if (ret) {
122 NV_ERROR(dev, "voltage set failed: %d\n", ret);
123 return ret;
124 }
125 }
126 }
51 127
52 if (pre_state)
53 pm->clock_set(dev, pre_state);
54 return 0; 128 return 0;
55} 129}
56 130
@@ -59,31 +133,24 @@ nouveau_pm_perflvl_set(struct drm_device *dev, struct nouveau_pm_level *perflvl)
59{ 133{
60 struct drm_nouveau_private *dev_priv = dev->dev_private; 134 struct drm_nouveau_private *dev_priv = dev->dev_private;
61 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 135 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
136 void *state;
62 int ret; 137 int ret;
63 138
64 if (perflvl == pm->cur) 139 if (perflvl == pm->cur)
65 return 0; 140 return 0;
66 141
67 if (pm->voltage.supported && pm->voltage_set && perflvl->volt_min) { 142 ret = nouveau_pm_perflvl_aux(dev, perflvl, pm->cur, perflvl);
68 ret = pm->voltage_set(dev, perflvl->volt_min); 143 if (ret)
69 if (ret) { 144 return ret;
70 NV_ERROR(dev, "voltage_set %d failed: %d\n",
71 perflvl->volt_min, ret);
72 }
73 }
74 145
75 if (pm->clocks_pre) { 146 state = pm->clocks_pre(dev, perflvl);
76 void *state = pm->clocks_pre(dev, perflvl); 147 if (IS_ERR(state))
77 if (IS_ERR(state)) 148 return PTR_ERR(state);
78 return PTR_ERR(state); 149 pm->clocks_set(dev, state);
79 pm->clocks_set(dev, state); 150
80 } else 151 ret = nouveau_pm_perflvl_aux(dev, perflvl, perflvl, pm->cur);
81 if (pm->clock_set) { 152 if (ret)
82 nouveau_pm_clock_set(dev, perflvl, PLL_CORE, perflvl->core); 153 return ret;
83 nouveau_pm_clock_set(dev, perflvl, PLL_SHADER, perflvl->shader);
84 nouveau_pm_clock_set(dev, perflvl, PLL_MEMORY, perflvl->memory);
85 nouveau_pm_clock_set(dev, perflvl, PLL_UNK05, perflvl->unk05);
86 }
87 154
88 pm->cur = perflvl; 155 pm->cur = perflvl;
89 return 0; 156 return 0;
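
Ordering note on the two nouveau_pm_perflvl_aux() calls bracketing the clock change:

        /* aux(dev, new, cur, new) before clocks_set() fires only when the
         * target exceeds the current level, i.e. it raises fan/voltage;
         * aux(dev, new, new, cur) afterwards only lowers them -- the new
         * clocks are never run on settings below what they require
         */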
@@ -130,28 +197,9 @@ nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
130 197
131 memset(perflvl, 0, sizeof(*perflvl)); 198 memset(perflvl, 0, sizeof(*perflvl));
132 199
133 if (pm->clocks_get) { 200 ret = pm->clocks_get(dev, perflvl);
134 ret = pm->clocks_get(dev, perflvl); 201 if (ret)
135 if (ret) 202 return ret;
136 return ret;
137 } else
138 if (pm->clock_get) {
139 ret = pm->clock_get(dev, PLL_CORE);
140 if (ret > 0)
141 perflvl->core = ret;
142
143 ret = pm->clock_get(dev, PLL_MEMORY);
144 if (ret > 0)
145 perflvl->memory = ret;
146
147 ret = pm->clock_get(dev, PLL_SHADER);
148 if (ret > 0)
149 perflvl->shader = ret;
150
151 ret = pm->clock_get(dev, PLL_UNK05);
152 if (ret > 0)
153 perflvl->unk05 = ret;
154 }
155 203
156 if (pm->voltage.supported && pm->voltage_get) { 204 if (pm->voltage.supported && pm->voltage_get) {
157 ret = pm->voltage_get(dev); 205 ret = pm->voltage_get(dev);
@@ -161,6 +209,10 @@ nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
161 } 209 }
162 } 210 }
163 211
212 ret = nouveau_pwmfan_get(dev);
213 if (ret > 0)
214 perflvl->fanspeed = ret;
215
164 return 0; 216 return 0;
165} 217}
166 218
@@ -412,6 +464,172 @@ static SENSOR_DEVICE_ATTR(update_rate, S_IRUGO,
412 nouveau_hwmon_show_update_rate, 464 nouveau_hwmon_show_update_rate,
413 NULL, 0); 465 NULL, 0);
414 466
467static ssize_t
468nouveau_hwmon_show_fan0_input(struct device *d, struct device_attribute *attr,
469 char *buf)
470{
471 struct drm_device *dev = dev_get_drvdata(d);
472 struct drm_nouveau_private *dev_priv = dev->dev_private;
473 struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
474 struct gpio_func gpio;
475 u32 cycles, cur, prev;
476 u64 start;
477 int ret;
478
479 ret = nouveau_gpio_find(dev, 0, DCB_GPIO_FAN_SENSE, 0xff, &gpio);
480 if (ret)
481 return ret;
482
483 /* Monitor the GPIO input 0x3b for 250ms.
484 * When the fan spins, it changes the value of GPIO FAN_SENSE.
485 * We get 4 changes (0 -> 1 -> 0 -> 1 -> [...]) per complete rotation.
486 */
487 start = ptimer->read(dev);
488 prev = nouveau_gpio_sense(dev, 0, gpio.line);
489 cycles = 0;
490 do {
491 cur = nouveau_gpio_sense(dev, 0, gpio.line);
492 if (prev != cur) {
493 cycles++;
494 prev = cur;
495 }
496
497 usleep_range(500, 1000); /* supports 0 < rpm < 7500 */
498 } while (ptimer->read(dev) - start < 250000000);
499
500 /* interpolate to get rpm */
501 return sprintf(buf, "%i\n", cycles / 4 * 4 * 60);
502}
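
With four level changes per fan rotation, the 250ms window converts to rpm as (cycles / 4) * 4 * 60; the integer division truncates to whole rotations first. A worked example:

        /* cycles = 125 edges counted in 250ms:
         *   rotations in the window = 125 / 4 = 31
         *   rpm = 31 * 4 (windows/sec) * 60 (sec/min) = 7440
         */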
503static SENSOR_DEVICE_ATTR(fan0_input, S_IRUGO, nouveau_hwmon_show_fan0_input,
504 NULL, 0);
505
506static ssize_t
507nouveau_hwmon_get_pwm0(struct device *d, struct device_attribute *a, char *buf)
508{
509 struct drm_device *dev = dev_get_drvdata(d);
510 int ret;
511
512 ret = nouveau_pwmfan_get(dev);
513 if (ret < 0)
514 return ret;
515
516 return sprintf(buf, "%i\n", ret);
517}
518
519static ssize_t
520nouveau_hwmon_set_pwm0(struct device *d, struct device_attribute *a,
521 const char *buf, size_t count)
522{
523 struct drm_device *dev = dev_get_drvdata(d);
524 struct drm_nouveau_private *dev_priv = dev->dev_private;
525 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
526 int ret = -ENODEV;
527 long value;
528
529 if (nouveau_perflvl_wr != 7777)
530 return -EPERM;
531
532 if (strict_strtol(buf, 10, &value) == -EINVAL)
533 return -EINVAL;
534
535 if (value < pm->fan.min_duty)
536 value = pm->fan.min_duty;
537 if (value > pm->fan.max_duty)
538 value = pm->fan.max_duty;
539
540 ret = nouveau_pwmfan_set(dev, value);
541 if (ret)
542 return ret;
543
544 return count;
545}
546
547static SENSOR_DEVICE_ATTR(pwm0, S_IRUGO | S_IWUSR,
548 nouveau_hwmon_get_pwm0,
549 nouveau_hwmon_set_pwm0, 0);
550
551static ssize_t
552nouveau_hwmon_get_pwm0_min(struct device *d,
553 struct device_attribute *a, char *buf)
554{
555 struct drm_device *dev = dev_get_drvdata(d);
556 struct drm_nouveau_private *dev_priv = dev->dev_private;
557 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
558
559 return sprintf(buf, "%i\n", pm->fan.min_duty);
560}
561
562static ssize_t
563nouveau_hwmon_set_pwm0_min(struct device *d, struct device_attribute *a,
564 const char *buf, size_t count)
565{
566 struct drm_device *dev = dev_get_drvdata(d);
567 struct drm_nouveau_private *dev_priv = dev->dev_private;
568 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
569 long value;
570
571 if (strict_strtol(buf, 10, &value) == -EINVAL)
572 return -EINVAL;
573
574 if (value < 0)
575 value = 0;
576
577 if (pm->fan.max_duty - value < 10)
578 value = pm->fan.max_duty - 10;
579
580 if (value < 10)
581 pm->fan.min_duty = 10;
582 else
583 pm->fan.min_duty = value;
584
585 return count;
586}
587
588static SENSOR_DEVICE_ATTR(pwm0_min, S_IRUGO | S_IWUSR,
589 nouveau_hwmon_get_pwm0_min,
590 nouveau_hwmon_set_pwm0_min, 0);
591
592static ssize_t
593nouveau_hwmon_get_pwm0_max(struct device *d,
594 struct device_attribute *a, char *buf)
595{
596 struct drm_device *dev = dev_get_drvdata(d);
597 struct drm_nouveau_private *dev_priv = dev->dev_private;
598 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
599
600 return sprintf(buf, "%i\n", pm->fan.max_duty);
601}
602
603static ssize_t
604nouveau_hwmon_set_pwm0_max(struct device *d, struct device_attribute *a,
605 const char *buf, size_t count)
606{
607 struct drm_device *dev = dev_get_drvdata(d);
608 struct drm_nouveau_private *dev_priv = dev->dev_private;
609 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
610 long value;
611
612 if (strict_strtol(buf, 10, &value) == -EINVAL)
613 return -EINVAL;
614
615 if (value < 0)
616 value = 0;
617
618 if (value - pm->fan.min_duty < 10)
619 value = pm->fan.min_duty + 10;
620
621 if (value > 100)
622 pm->fan.max_duty = 100;
623 else
624 pm->fan.max_duty = value;
625
626 return count;
627}
628
629static SENSOR_DEVICE_ATTR(pwm0_max, S_IRUGO | S_IWUSR,
630 nouveau_hwmon_get_pwm0_max,
631 nouveau_hwmon_set_pwm0_max, 0);
632
415static struct attribute *hwmon_attributes[] = { 633static struct attribute *hwmon_attributes[] = {
416 &sensor_dev_attr_temp1_input.dev_attr.attr, 634 &sensor_dev_attr_temp1_input.dev_attr.attr,
417 &sensor_dev_attr_temp1_max.dev_attr.attr, 635 &sensor_dev_attr_temp1_max.dev_attr.attr,
@@ -420,20 +638,36 @@ static struct attribute *hwmon_attributes[] = {
420 &sensor_dev_attr_update_rate.dev_attr.attr, 638 &sensor_dev_attr_update_rate.dev_attr.attr,
421 NULL 639 NULL
422}; 640};
641static struct attribute *hwmon_fan_rpm_attributes[] = {
642 &sensor_dev_attr_fan0_input.dev_attr.attr,
643 NULL
644};
645static struct attribute *hwmon_pwm_fan_attributes[] = {
646 &sensor_dev_attr_pwm0.dev_attr.attr,
647 &sensor_dev_attr_pwm0_min.dev_attr.attr,
648 &sensor_dev_attr_pwm0_max.dev_attr.attr,
649 NULL
650};
423 651
424static const struct attribute_group hwmon_attrgroup = { 652static const struct attribute_group hwmon_attrgroup = {
425 .attrs = hwmon_attributes, 653 .attrs = hwmon_attributes,
426}; 654};
655static const struct attribute_group hwmon_fan_rpm_attrgroup = {
656 .attrs = hwmon_fan_rpm_attributes,
657};
658static const struct attribute_group hwmon_pwm_fan_attrgroup = {
659 .attrs = hwmon_pwm_fan_attributes,
660};
427#endif 661#endif
428 662
429static int 663static int
430nouveau_hwmon_init(struct drm_device *dev) 664nouveau_hwmon_init(struct drm_device *dev)
431{ 665{
432#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
433 struct drm_nouveau_private *dev_priv = dev->dev_private; 666 struct drm_nouveau_private *dev_priv = dev->dev_private;
434 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 667 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
668#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
435 struct device *hwmon_dev; 669 struct device *hwmon_dev;
436 int ret; 670 int ret = 0;
437 671
438 if (!pm->temp_get) 672 if (!pm->temp_get)
439 return -ENODEV; 673 return -ENODEV;
@@ -446,17 +680,46 @@ nouveau_hwmon_init(struct drm_device *dev)
446 return ret; 680 return ret;
447 } 681 }
448 dev_set_drvdata(hwmon_dev, dev); 682 dev_set_drvdata(hwmon_dev, dev);
683
684 /* default sysfs entries */
449 ret = sysfs_create_group(&dev->pdev->dev.kobj, &hwmon_attrgroup); 685 ret = sysfs_create_group(&dev->pdev->dev.kobj, &hwmon_attrgroup);
450 if (ret) { 686 if (ret) {
451 NV_ERROR(dev, 687 if (ret)
452 "Unable to create hwmon sysfs file: %d\n", ret); 688 goto error;
453 hwmon_device_unregister(hwmon_dev); 689 }
454 return ret; 690
691 /* if the card has a pwm fan */
692 /*XXX: incorrect, need better detection for this, some boards have
693 * the gpio entries for pwm fan control even when there's no
694 * actual fan connected to it... therm table? */
695 if (nouveau_pwmfan_get(dev) >= 0) {
696 ret = sysfs_create_group(&dev->pdev->dev.kobj,
697 &hwmon_pwm_fan_attrgroup);
698 if (ret)
699 goto error;
700 }
701
702 /* if the card can read the fan rpm */
703 if (nouveau_gpio_func_valid(dev, DCB_GPIO_FAN_SENSE)) {
704 ret = sysfs_create_group(&dev->pdev->dev.kobj,
705 &hwmon_fan_rpm_attrgroup);
706 if (ret)
707 goto error;
455 } 708 }
456 709
457 pm->hwmon = hwmon_dev; 710 pm->hwmon = hwmon_dev;
458#endif 711
712 return 0;
713
714error:
715 NV_ERROR(dev, "Unable to create some hwmon sysfs files: %d\n", ret);
716 hwmon_device_unregister(hwmon_dev);
717 pm->hwmon = NULL;
718 return ret;
719#else
720 pm->hwmon = NULL;
459 return 0; 721 return 0;
722#endif
460} 723}
461 724
462static void 725static void
@@ -468,6 +731,9 @@ nouveau_hwmon_fini(struct drm_device *dev)
468 731
469 if (pm->hwmon) { 732 if (pm->hwmon) {
470 sysfs_remove_group(&dev->pdev->dev.kobj, &hwmon_attrgroup); 733 sysfs_remove_group(&dev->pdev->dev.kobj, &hwmon_attrgroup);
734 sysfs_remove_group(&dev->pdev->dev.kobj, &hwmon_pwm_fan_attrgroup);
735 sysfs_remove_group(&dev->pdev->dev.kobj, &hwmon_fan_rpm_attrgroup);
736
471 hwmon_device_unregister(pm->hwmon); 737 hwmon_device_unregister(pm->hwmon);
472 } 738 }
473#endif 739#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.h b/drivers/gpu/drm/nouveau/nouveau_pm.h
index 8ac02cdd03a..2f8e14fbcff 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.h
@@ -47,29 +47,33 @@ void nouveau_mem_timing_init(struct drm_device *);
47void nouveau_mem_timing_fini(struct drm_device *); 47void nouveau_mem_timing_fini(struct drm_device *);
48 48
49/* nv04_pm.c */ 49/* nv04_pm.c */
50int nv04_pm_clock_get(struct drm_device *, u32 id); 50int nv04_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
51void *nv04_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *, 51void *nv04_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
52 u32 id, int khz); 52int nv04_pm_clocks_set(struct drm_device *, void *);
53void nv04_pm_clock_set(struct drm_device *, void *);
54 53
55/* nv40_pm.c */ 54/* nv40_pm.c */
56int nv40_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *); 55int nv40_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
57void *nv40_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *); 56void *nv40_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
58void nv40_pm_clocks_set(struct drm_device *, void *); 57int nv40_pm_clocks_set(struct drm_device *, void *);
58int nv40_pm_pwm_get(struct drm_device *, int, u32 *, u32 *);
59int nv40_pm_pwm_set(struct drm_device *, int, u32, u32);
59 60
60/* nv50_pm.c */ 61/* nv50_pm.c */
61int nv50_pm_clock_get(struct drm_device *, u32 id); 62int nv50_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
62void *nv50_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *, 63void *nv50_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
63 u32 id, int khz); 64int nv50_pm_clocks_set(struct drm_device *, void *);
64void nv50_pm_clock_set(struct drm_device *, void *); 65int nv50_pm_pwm_get(struct drm_device *, int, u32 *, u32 *);
66int nv50_pm_pwm_set(struct drm_device *, int, u32, u32);
65 67
66/* nva3_pm.c */ 68/* nva3_pm.c */
67int nva3_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *); 69int nva3_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
68void *nva3_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *); 70void *nva3_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
69void nva3_pm_clocks_set(struct drm_device *, void *); 71int nva3_pm_clocks_set(struct drm_device *, void *);
70 72
71/* nvc0_pm.c */ 73/* nvc0_pm.c */
72int nvc0_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *); 74int nvc0_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
75void *nvc0_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
76int nvc0_pm_clocks_set(struct drm_device *, void *);
73 77
74/* nouveau_temp.c */ 78/* nouveau_temp.c */
75void nouveau_temp_init(struct drm_device *dev); 79void nouveau_temp_init(struct drm_device *dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index c8a463b76c8..47f245edf53 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -8,91 +8,30 @@
8#define NV_CTXDMA_PAGE_MASK (NV_CTXDMA_PAGE_SIZE - 1) 8#define NV_CTXDMA_PAGE_MASK (NV_CTXDMA_PAGE_SIZE - 1)
9 9
10struct nouveau_sgdma_be { 10struct nouveau_sgdma_be {
11	struct ttm_backend backend;	 11	/* this has to be the first field so populate/unpopulate in
	 12	 * nouveau_bo.c works properly, otherwise they would have to move here
13 */
14 struct ttm_dma_tt ttm;
12 struct drm_device *dev; 15 struct drm_device *dev;
13
14 dma_addr_t *pages;
15 unsigned nr_pages;
16 bool unmap_pages;
17
18 u64 offset; 16 u64 offset;
19 bool bound;
20}; 17};
21 18
22static int
23nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
24 struct page **pages, struct page *dummy_read_page,
25 dma_addr_t *dma_addrs)
26{
27 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
28 struct drm_device *dev = nvbe->dev;
29 int i;
30
31 NV_DEBUG(nvbe->dev, "num_pages = %ld\n", num_pages);
32
33 nvbe->pages = dma_addrs;
34 nvbe->nr_pages = num_pages;
35 nvbe->unmap_pages = true;
36
37	/* this code path isn't called and is incorrect anyway */
38 if (0) { /* dma_addrs[0] != DMA_ERROR_CODE) { */
39 nvbe->unmap_pages = false;
40 return 0;
41 }
42
43 for (i = 0; i < num_pages; i++) {
44 nvbe->pages[i] = pci_map_page(dev->pdev, pages[i], 0,
45 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
46 if (pci_dma_mapping_error(dev->pdev, nvbe->pages[i])) {
47 nvbe->nr_pages = --i;
48 be->func->clear(be);
49 return -EFAULT;
50 }
51 }
52
53 return 0;
54}
55
56static void 19static void
57nouveau_sgdma_clear(struct ttm_backend *be) 20nouveau_sgdma_destroy(struct ttm_tt *ttm)
58{ 21{
59 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; 22 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
60 struct drm_device *dev = nvbe->dev;
61
62 if (nvbe->bound)
63 be->func->unbind(be);
64 23
65 if (nvbe->unmap_pages) { 24 if (ttm) {
66 while (nvbe->nr_pages--) {
67 pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
68 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
69 }
70 nvbe->unmap_pages = false;
71 }
72
73 nvbe->pages = NULL;
74}
75
76static void
77nouveau_sgdma_destroy(struct ttm_backend *be)
78{
79 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
80
81 if (be) {
82 NV_DEBUG(nvbe->dev, "\n"); 25 NV_DEBUG(nvbe->dev, "\n");
83 26 ttm_dma_tt_fini(&nvbe->ttm);
84 if (nvbe) { 27 kfree(nvbe);
85 if (nvbe->pages)
86 be->func->clear(be);
87 kfree(nvbe);
88 }
89 } 28 }
90} 29}
91 30
92static int 31static int
93nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem) 32nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
94{ 33{
95 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; 34 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
96 struct drm_device *dev = nvbe->dev; 35 struct drm_device *dev = nvbe->dev;
97 struct drm_nouveau_private *dev_priv = dev->dev_private; 36 struct drm_nouveau_private *dev_priv = dev->dev_private;
98 struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma; 37 struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
@@ -102,8 +41,8 @@ nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
102 41
103 nvbe->offset = mem->start << PAGE_SHIFT; 42 nvbe->offset = mem->start << PAGE_SHIFT;
104 pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2; 43 pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
105 for (i = 0; i < nvbe->nr_pages; i++) { 44 for (i = 0; i < ttm->num_pages; i++) {
106 dma_addr_t dma_offset = nvbe->pages[i]; 45 dma_addr_t dma_offset = nvbe->ttm.dma_address[i];
107 uint32_t offset_l = lower_32_bits(dma_offset); 46 uint32_t offset_l = lower_32_bits(dma_offset);
108 47
109 for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) { 48 for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
@@ -112,14 +51,13 @@ nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
112 } 51 }
113 } 52 }
114 53
115 nvbe->bound = true;
116 return 0; 54 return 0;
117} 55}
118 56
119static int 57static int
120nv04_sgdma_unbind(struct ttm_backend *be) 58nv04_sgdma_unbind(struct ttm_tt *ttm)
121{ 59{
122 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; 60 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
123 struct drm_device *dev = nvbe->dev; 61 struct drm_device *dev = nvbe->dev;
124 struct drm_nouveau_private *dev_priv = dev->dev_private; 62 struct drm_nouveau_private *dev_priv = dev->dev_private;
125 struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma; 63 struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
@@ -127,22 +65,19 @@ nv04_sgdma_unbind(struct ttm_backend *be)
127 65
128 NV_DEBUG(dev, "\n"); 66 NV_DEBUG(dev, "\n");
129 67
130 if (!nvbe->bound) 68 if (ttm->state != tt_bound)
131 return 0; 69 return 0;
132 70
133 pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2; 71 pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
134 for (i = 0; i < nvbe->nr_pages; i++) { 72 for (i = 0; i < ttm->num_pages; i++) {
135 for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) 73 for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
136 nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000); 74 nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
137 } 75 }
138 76
139 nvbe->bound = false;
140 return 0; 77 return 0;
141} 78}
142 79
143static struct ttm_backend_func nv04_sgdma_backend = { 80static struct ttm_backend_func nv04_sgdma_backend = {
144 .populate = nouveau_sgdma_populate,
145 .clear = nouveau_sgdma_clear,
146 .bind = nv04_sgdma_bind, 81 .bind = nv04_sgdma_bind,
147 .unbind = nv04_sgdma_unbind, 82 .unbind = nv04_sgdma_unbind,
148 .destroy = nouveau_sgdma_destroy 83 .destroy = nouveau_sgdma_destroy
@@ -161,14 +96,14 @@ nv41_sgdma_flush(struct nouveau_sgdma_be *nvbe)
161} 96}
162 97
163static int 98static int
164nv41_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem) 99nv41_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
165{ 100{
166 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; 101 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
167 struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private; 102 struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
168 struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma; 103 struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
169 dma_addr_t *list = nvbe->pages; 104 dma_addr_t *list = nvbe->ttm.dma_address;
170 u32 pte = mem->start << 2; 105 u32 pte = mem->start << 2;
171 u32 cnt = nvbe->nr_pages; 106 u32 cnt = ttm->num_pages;
172 107
173 nvbe->offset = mem->start << PAGE_SHIFT; 108 nvbe->offset = mem->start << PAGE_SHIFT;
174 109
@@ -178,18 +113,17 @@ nv41_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
178 } 113 }
179 114
180 nv41_sgdma_flush(nvbe); 115 nv41_sgdma_flush(nvbe);
181 nvbe->bound = true;
182 return 0; 116 return 0;
183} 117}
184 118
185static int 119static int
186nv41_sgdma_unbind(struct ttm_backend *be) 120nv41_sgdma_unbind(struct ttm_tt *ttm)
187{ 121{
188 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; 122 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
189 struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private; 123 struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
190 struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma; 124 struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
191 u32 pte = (nvbe->offset >> 12) << 2; 125 u32 pte = (nvbe->offset >> 12) << 2;
192 u32 cnt = nvbe->nr_pages; 126 u32 cnt = ttm->num_pages;
193 127
194 while (cnt--) { 128 while (cnt--) {
195 nv_wo32(pgt, pte, 0x00000000); 129 nv_wo32(pgt, pte, 0x00000000);
@@ -197,24 +131,22 @@ nv41_sgdma_unbind(struct ttm_backend *be)
197 } 131 }
198 132
199 nv41_sgdma_flush(nvbe); 133 nv41_sgdma_flush(nvbe);
200 nvbe->bound = false;
201 return 0; 134 return 0;
202} 135}
203 136
204static struct ttm_backend_func nv41_sgdma_backend = { 137static struct ttm_backend_func nv41_sgdma_backend = {
205 .populate = nouveau_sgdma_populate,
206 .clear = nouveau_sgdma_clear,
207 .bind = nv41_sgdma_bind, 138 .bind = nv41_sgdma_bind,
208 .unbind = nv41_sgdma_unbind, 139 .unbind = nv41_sgdma_unbind,
209 .destroy = nouveau_sgdma_destroy 140 .destroy = nouveau_sgdma_destroy
210}; 141};
211 142
212static void 143static void
213nv44_sgdma_flush(struct nouveau_sgdma_be *nvbe) 144nv44_sgdma_flush(struct ttm_tt *ttm)
214{ 145{
146 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
215 struct drm_device *dev = nvbe->dev; 147 struct drm_device *dev = nvbe->dev;
216 148
217 nv_wr32(dev, 0x100814, (nvbe->nr_pages - 1) << 12); 149 nv_wr32(dev, 0x100814, (ttm->num_pages - 1) << 12);
218 nv_wr32(dev, 0x100808, nvbe->offset | 0x20); 150 nv_wr32(dev, 0x100808, nvbe->offset | 0x20);
219 if (!nv_wait(dev, 0x100808, 0x00000001, 0x00000001)) 151 if (!nv_wait(dev, 0x100808, 0x00000001, 0x00000001))
220 NV_ERROR(dev, "gart flush timeout: 0x%08x\n", 152 NV_ERROR(dev, "gart flush timeout: 0x%08x\n",
@@ -273,14 +205,14 @@ nv44_sgdma_fill(struct nouveau_gpuobj *pgt, dma_addr_t *list, u32 base, u32 cnt)
273} 205}
274 206
275static int 207static int
276nv44_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem) 208nv44_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
277{ 209{
278 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; 210 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
279 struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private; 211 struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
280 struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma; 212 struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
281 dma_addr_t *list = nvbe->pages; 213 dma_addr_t *list = nvbe->ttm.dma_address;
282 u32 pte = mem->start << 2, tmp[4]; 214 u32 pte = mem->start << 2, tmp[4];
283 u32 cnt = nvbe->nr_pages; 215 u32 cnt = ttm->num_pages;
284 int i; 216 int i;
285 217
286 nvbe->offset = mem->start << PAGE_SHIFT; 218 nvbe->offset = mem->start << PAGE_SHIFT;
@@ -308,19 +240,18 @@ nv44_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
308 if (cnt) 240 if (cnt)
309 nv44_sgdma_fill(pgt, list, pte, cnt); 241 nv44_sgdma_fill(pgt, list, pte, cnt);
310 242
311 nv44_sgdma_flush(nvbe); 243 nv44_sgdma_flush(ttm);
312 nvbe->bound = true;
313 return 0; 244 return 0;
314} 245}
315 246
316static int 247static int
317nv44_sgdma_unbind(struct ttm_backend *be) 248nv44_sgdma_unbind(struct ttm_tt *ttm)
318{ 249{
319 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; 250 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
320 struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private; 251 struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
321 struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma; 252 struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
322 u32 pte = (nvbe->offset >> 12) << 2; 253 u32 pte = (nvbe->offset >> 12) << 2;
323 u32 cnt = nvbe->nr_pages; 254 u32 cnt = ttm->num_pages;
324 255
325 if (pte & 0x0000000c) { 256 if (pte & 0x0000000c) {
326 u32 max = 4 - ((pte >> 2) & 0x3); 257 u32 max = 4 - ((pte >> 2) & 0x3);
@@ -342,55 +273,47 @@ nv44_sgdma_unbind(struct ttm_backend *be)
342 if (cnt) 273 if (cnt)
343 nv44_sgdma_fill(pgt, NULL, pte, cnt); 274 nv44_sgdma_fill(pgt, NULL, pte, cnt);
344 275
345 nv44_sgdma_flush(nvbe); 276 nv44_sgdma_flush(ttm);
346 nvbe->bound = false;
347 return 0; 277 return 0;
348} 278}
349 279
350static struct ttm_backend_func nv44_sgdma_backend = { 280static struct ttm_backend_func nv44_sgdma_backend = {
351 .populate = nouveau_sgdma_populate,
352 .clear = nouveau_sgdma_clear,
353 .bind = nv44_sgdma_bind, 281 .bind = nv44_sgdma_bind,
354 .unbind = nv44_sgdma_unbind, 282 .unbind = nv44_sgdma_unbind,
355 .destroy = nouveau_sgdma_destroy 283 .destroy = nouveau_sgdma_destroy
356}; 284};
357 285
358static int 286static int
359nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem) 287nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
360{ 288{
361 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; 289 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
362 struct nouveau_mem *node = mem->mm_node; 290 struct nouveau_mem *node = mem->mm_node;
291
363 /* noop: bound in move_notify() */ 292 /* noop: bound in move_notify() */
364 node->pages = nvbe->pages; 293 node->pages = nvbe->ttm.dma_address;
365 nvbe->pages = (dma_addr_t *)node;
366 nvbe->bound = true;
367 return 0; 294 return 0;
368} 295}
369 296
370static int 297static int
371nv50_sgdma_unbind(struct ttm_backend *be) 298nv50_sgdma_unbind(struct ttm_tt *ttm)
372{ 299{
373 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
374 struct nouveau_mem *node = (struct nouveau_mem *)nvbe->pages;
375 /* noop: unbound in move_notify() */ 300 /* noop: unbound in move_notify() */
376 nvbe->pages = node->pages;
377 node->pages = NULL;
378 nvbe->bound = false;
379 return 0; 301 return 0;
380} 302}
381 303
382static struct ttm_backend_func nv50_sgdma_backend = { 304static struct ttm_backend_func nv50_sgdma_backend = {
383 .populate = nouveau_sgdma_populate,
384 .clear = nouveau_sgdma_clear,
385 .bind = nv50_sgdma_bind, 305 .bind = nv50_sgdma_bind,
386 .unbind = nv50_sgdma_unbind, 306 .unbind = nv50_sgdma_unbind,
387 .destroy = nouveau_sgdma_destroy 307 .destroy = nouveau_sgdma_destroy
388}; 308};
389 309
390struct ttm_backend * 310struct ttm_tt *
391nouveau_sgdma_init_ttm(struct drm_device *dev) 311nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
312 unsigned long size, uint32_t page_flags,
313 struct page *dummy_read_page)
392{ 314{
393 struct drm_nouveau_private *dev_priv = dev->dev_private; 315 struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
316 struct drm_device *dev = dev_priv->dev;
394 struct nouveau_sgdma_be *nvbe; 317 struct nouveau_sgdma_be *nvbe;
395 318
396 nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL); 319 nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
@@ -398,9 +321,13 @@ nouveau_sgdma_init_ttm(struct drm_device *dev)
398 return NULL; 321 return NULL;
399 322
400 nvbe->dev = dev; 323 nvbe->dev = dev;
324 nvbe->ttm.ttm.func = dev_priv->gart_info.func;
401 325
402 nvbe->backend.func = dev_priv->gart_info.func; 326 if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page)) {
403 return &nvbe->backend; 327 kfree(nvbe);
328 return NULL;
329 }
330 return &nvbe->ttm.ttm;
404} 331}
405 332
406int 333int
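
The whole conversion leans on C struct layout: struct ttm_dma_tt begins with a struct ttm_tt, and nouveau_sgdma_be begins with the ttm_dma_tt, so the ttm_tt pointer TTM hands back can be cast straight to the driver object (this is what the "first field" comment above is guarding). The idiom in isolation, with stand-in types:

        struct base { int b; };                     /* plays struct ttm_tt */
        struct mid  { struct base base; int m; };   /* plays struct ttm_dma_tt */
        struct leaf { struct mid mid; int l; };     /* plays nouveau_sgdma_be */

        static struct leaf *leaf_from_base(struct base *b)
        {
                /* legal only while 'mid' stays the first member of 'leaf' and
                 * 'base' the first member of 'mid': all three share an address */
                return (struct leaf *)b;
        }
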
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index d8831ab42bb..f5e98910d17 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -36,6 +36,7 @@
36#include "nouveau_drm.h" 36#include "nouveau_drm.h"
37#include "nouveau_fbcon.h" 37#include "nouveau_fbcon.h"
38#include "nouveau_ramht.h" 38#include "nouveau_ramht.h"
39#include "nouveau_gpio.h"
39#include "nouveau_pm.h" 40#include "nouveau_pm.h"
40#include "nv50_display.h" 41#include "nv50_display.h"
41 42
@@ -80,16 +81,12 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
80 engine->display.early_init = nv04_display_early_init; 81 engine->display.early_init = nv04_display_early_init;
81 engine->display.late_takedown = nv04_display_late_takedown; 82 engine->display.late_takedown = nv04_display_late_takedown;
82 engine->display.create = nv04_display_create; 83 engine->display.create = nv04_display_create;
83 engine->display.init = nv04_display_init;
84 engine->display.destroy = nv04_display_destroy; 84 engine->display.destroy = nv04_display_destroy;
85 engine->gpio.init = nouveau_stub_init; 85 engine->display.init = nv04_display_init;
86 engine->gpio.takedown = nouveau_stub_takedown; 86 engine->display.fini = nv04_display_fini;
87 engine->gpio.get = NULL; 87 engine->pm.clocks_get = nv04_pm_clocks_get;
88 engine->gpio.set = NULL; 88 engine->pm.clocks_pre = nv04_pm_clocks_pre;
89 engine->gpio.irq_enable = NULL; 89 engine->pm.clocks_set = nv04_pm_clocks_set;
90 engine->pm.clock_get = nv04_pm_clock_get;
91 engine->pm.clock_pre = nv04_pm_clock_pre;
92 engine->pm.clock_set = nv04_pm_clock_set;
93 engine->vram.init = nouveau_mem_detect; 90 engine->vram.init = nouveau_mem_detect;
94 engine->vram.takedown = nouveau_stub_takedown; 91 engine->vram.takedown = nouveau_stub_takedown;
95 engine->vram.flags_valid = nouveau_mem_flags_valid; 92 engine->vram.flags_valid = nouveau_mem_flags_valid;
@@ -129,16 +126,14 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
129 engine->display.early_init = nv04_display_early_init; 126 engine->display.early_init = nv04_display_early_init;
130 engine->display.late_takedown = nv04_display_late_takedown; 127 engine->display.late_takedown = nv04_display_late_takedown;
131 engine->display.create = nv04_display_create; 128 engine->display.create = nv04_display_create;
132 engine->display.init = nv04_display_init;
133 engine->display.destroy = nv04_display_destroy; 129 engine->display.destroy = nv04_display_destroy;
134 engine->gpio.init = nouveau_stub_init; 130 engine->display.init = nv04_display_init;
135 engine->gpio.takedown = nouveau_stub_takedown; 131 engine->display.fini = nv04_display_fini;
136 engine->gpio.get = nv10_gpio_get; 132 engine->gpio.drive = nv10_gpio_drive;
137 engine->gpio.set = nv10_gpio_set; 133 engine->gpio.sense = nv10_gpio_sense;
138 engine->gpio.irq_enable = NULL; 134 engine->pm.clocks_get = nv04_pm_clocks_get;
139 engine->pm.clock_get = nv04_pm_clock_get; 135 engine->pm.clocks_pre = nv04_pm_clocks_pre;
140 engine->pm.clock_pre = nv04_pm_clock_pre; 136 engine->pm.clocks_set = nv04_pm_clocks_set;
141 engine->pm.clock_set = nv04_pm_clock_set;
142 engine->vram.init = nouveau_mem_detect; 137 engine->vram.init = nouveau_mem_detect;
143 engine->vram.takedown = nouveau_stub_takedown; 138 engine->vram.takedown = nouveau_stub_takedown;
144 engine->vram.flags_valid = nouveau_mem_flags_valid; 139 engine->vram.flags_valid = nouveau_mem_flags_valid;
@@ -178,16 +173,14 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
178 engine->display.early_init = nv04_display_early_init; 173 engine->display.early_init = nv04_display_early_init;
179 engine->display.late_takedown = nv04_display_late_takedown; 174 engine->display.late_takedown = nv04_display_late_takedown;
180 engine->display.create = nv04_display_create; 175 engine->display.create = nv04_display_create;
181 engine->display.init = nv04_display_init;
182 engine->display.destroy = nv04_display_destroy; 176 engine->display.destroy = nv04_display_destroy;
183 engine->gpio.init = nouveau_stub_init; 177 engine->display.init = nv04_display_init;
184 engine->gpio.takedown = nouveau_stub_takedown; 178 engine->display.fini = nv04_display_fini;
185 engine->gpio.get = nv10_gpio_get; 179 engine->gpio.drive = nv10_gpio_drive;
186 engine->gpio.set = nv10_gpio_set; 180 engine->gpio.sense = nv10_gpio_sense;
187 engine->gpio.irq_enable = NULL; 181 engine->pm.clocks_get = nv04_pm_clocks_get;
188 engine->pm.clock_get = nv04_pm_clock_get; 182 engine->pm.clocks_pre = nv04_pm_clocks_pre;
189 engine->pm.clock_pre = nv04_pm_clock_pre; 183 engine->pm.clocks_set = nv04_pm_clocks_set;
190 engine->pm.clock_set = nv04_pm_clock_set;
191 engine->vram.init = nouveau_mem_detect; 184 engine->vram.init = nouveau_mem_detect;
192 engine->vram.takedown = nouveau_stub_takedown; 185 engine->vram.takedown = nouveau_stub_takedown;
193 engine->vram.flags_valid = nouveau_mem_flags_valid; 186 engine->vram.flags_valid = nouveau_mem_flags_valid;
@@ -227,16 +220,14 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
227 engine->display.early_init = nv04_display_early_init; 220 engine->display.early_init = nv04_display_early_init;
228 engine->display.late_takedown = nv04_display_late_takedown; 221 engine->display.late_takedown = nv04_display_late_takedown;
229 engine->display.create = nv04_display_create; 222 engine->display.create = nv04_display_create;
230 engine->display.init = nv04_display_init;
231 engine->display.destroy = nv04_display_destroy; 223 engine->display.destroy = nv04_display_destroy;
232 engine->gpio.init = nouveau_stub_init; 224 engine->display.init = nv04_display_init;
233 engine->gpio.takedown = nouveau_stub_takedown; 225 engine->display.fini = nv04_display_fini;
234 engine->gpio.get = nv10_gpio_get; 226 engine->gpio.drive = nv10_gpio_drive;
235 engine->gpio.set = nv10_gpio_set; 227 engine->gpio.sense = nv10_gpio_sense;
236 engine->gpio.irq_enable = NULL; 228 engine->pm.clocks_get = nv04_pm_clocks_get;
237 engine->pm.clock_get = nv04_pm_clock_get; 229 engine->pm.clocks_pre = nv04_pm_clocks_pre;
238 engine->pm.clock_pre = nv04_pm_clock_pre; 230 engine->pm.clocks_set = nv04_pm_clocks_set;
239 engine->pm.clock_set = nv04_pm_clock_set;
240 engine->pm.voltage_get = nouveau_voltage_gpio_get; 231 engine->pm.voltage_get = nouveau_voltage_gpio_get;
241 engine->pm.voltage_set = nouveau_voltage_gpio_set; 232 engine->pm.voltage_set = nouveau_voltage_gpio_set;
242 engine->vram.init = nouveau_mem_detect; 233 engine->vram.init = nouveau_mem_detect;
@@ -279,19 +270,22 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
279 engine->display.early_init = nv04_display_early_init; 270 engine->display.early_init = nv04_display_early_init;
280 engine->display.late_takedown = nv04_display_late_takedown; 271 engine->display.late_takedown = nv04_display_late_takedown;
281 engine->display.create = nv04_display_create; 272 engine->display.create = nv04_display_create;
282 engine->display.init = nv04_display_init;
283 engine->display.destroy = nv04_display_destroy; 273 engine->display.destroy = nv04_display_destroy;
284 engine->gpio.init = nouveau_stub_init; 274 engine->display.init = nv04_display_init;
285 engine->gpio.takedown = nouveau_stub_takedown; 275 engine->display.fini = nv04_display_fini;
286 engine->gpio.get = nv10_gpio_get; 276 engine->gpio.init = nv10_gpio_init;
287 engine->gpio.set = nv10_gpio_set; 277 engine->gpio.fini = nv10_gpio_fini;
288 engine->gpio.irq_enable = NULL; 278 engine->gpio.drive = nv10_gpio_drive;
279 engine->gpio.sense = nv10_gpio_sense;
280 engine->gpio.irq_enable = nv10_gpio_irq_enable;
289 engine->pm.clocks_get = nv40_pm_clocks_get; 281 engine->pm.clocks_get = nv40_pm_clocks_get;
290 engine->pm.clocks_pre = nv40_pm_clocks_pre; 282 engine->pm.clocks_pre = nv40_pm_clocks_pre;
291 engine->pm.clocks_set = nv40_pm_clocks_set; 283 engine->pm.clocks_set = nv40_pm_clocks_set;
292 engine->pm.voltage_get = nouveau_voltage_gpio_get; 284 engine->pm.voltage_get = nouveau_voltage_gpio_get;
293 engine->pm.voltage_set = nouveau_voltage_gpio_set; 285 engine->pm.voltage_set = nouveau_voltage_gpio_set;
294 engine->pm.temp_get = nv40_temp_get; 286 engine->pm.temp_get = nv40_temp_get;
287 engine->pm.pwm_get = nv40_pm_pwm_get;
288 engine->pm.pwm_set = nv40_pm_pwm_set;
295 engine->vram.init = nouveau_mem_detect; 289 engine->vram.init = nouveau_mem_detect;
296 engine->vram.takedown = nouveau_stub_takedown; 290 engine->vram.takedown = nouveau_stub_takedown;
297 engine->vram.flags_valid = nouveau_mem_flags_valid; 291 engine->vram.flags_valid = nouveau_mem_flags_valid;
@@ -334,14 +328,13 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
334 engine->display.early_init = nv50_display_early_init; 328 engine->display.early_init = nv50_display_early_init;
335 engine->display.late_takedown = nv50_display_late_takedown; 329 engine->display.late_takedown = nv50_display_late_takedown;
336 engine->display.create = nv50_display_create; 330 engine->display.create = nv50_display_create;
337 engine->display.init = nv50_display_init;
338 engine->display.destroy = nv50_display_destroy; 331 engine->display.destroy = nv50_display_destroy;
332 engine->display.init = nv50_display_init;
333 engine->display.fini = nv50_display_fini;
339 engine->gpio.init = nv50_gpio_init; 334 engine->gpio.init = nv50_gpio_init;
340 engine->gpio.takedown = nv50_gpio_fini; 335 engine->gpio.fini = nv50_gpio_fini;
341 engine->gpio.get = nv50_gpio_get; 336 engine->gpio.drive = nv50_gpio_drive;
342 engine->gpio.set = nv50_gpio_set; 337 engine->gpio.sense = nv50_gpio_sense;
343 engine->gpio.irq_register = nv50_gpio_irq_register;
344 engine->gpio.irq_unregister = nv50_gpio_irq_unregister;
345 engine->gpio.irq_enable = nv50_gpio_irq_enable; 338 engine->gpio.irq_enable = nv50_gpio_irq_enable;
346 switch (dev_priv->chipset) { 339 switch (dev_priv->chipset) {
347 case 0x84: 340 case 0x84:
@@ -354,9 +347,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
354 case 0xaa: 347 case 0xaa:
355 case 0xac: 348 case 0xac:
356 case 0x50: 349 case 0x50:
357 engine->pm.clock_get = nv50_pm_clock_get; 350 engine->pm.clocks_get = nv50_pm_clocks_get;
358 engine->pm.clock_pre = nv50_pm_clock_pre; 351 engine->pm.clocks_pre = nv50_pm_clocks_pre;
359 engine->pm.clock_set = nv50_pm_clock_set; 352 engine->pm.clocks_set = nv50_pm_clocks_set;
360 break; 353 break;
361 default: 354 default:
362 engine->pm.clocks_get = nva3_pm_clocks_get; 355 engine->pm.clocks_get = nva3_pm_clocks_get;
@@ -370,6 +363,8 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
370 engine->pm.temp_get = nv84_temp_get; 363 engine->pm.temp_get = nv84_temp_get;
371 else 364 else
372 engine->pm.temp_get = nv40_temp_get; 365 engine->pm.temp_get = nv40_temp_get;
366 engine->pm.pwm_get = nv50_pm_pwm_get;
367 engine->pm.pwm_set = nv50_pm_pwm_set;
373 engine->vram.init = nv50_vram_init; 368 engine->vram.init = nv50_vram_init;
374 engine->vram.takedown = nv50_vram_fini; 369 engine->vram.takedown = nv50_vram_fini;
375 engine->vram.get = nv50_vram_new; 370 engine->vram.get = nv50_vram_new;
@@ -407,14 +402,13 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
407 engine->display.early_init = nv50_display_early_init; 402 engine->display.early_init = nv50_display_early_init;
408 engine->display.late_takedown = nv50_display_late_takedown; 403 engine->display.late_takedown = nv50_display_late_takedown;
409 engine->display.create = nv50_display_create; 404 engine->display.create = nv50_display_create;
410 engine->display.init = nv50_display_init;
411 engine->display.destroy = nv50_display_destroy; 405 engine->display.destroy = nv50_display_destroy;
406 engine->display.init = nv50_display_init;
407 engine->display.fini = nv50_display_fini;
412 engine->gpio.init = nv50_gpio_init; 408 engine->gpio.init = nv50_gpio_init;
413 engine->gpio.takedown = nouveau_stub_takedown; 409 engine->gpio.fini = nv50_gpio_fini;
414 engine->gpio.get = nv50_gpio_get; 410 engine->gpio.drive = nv50_gpio_drive;
415 engine->gpio.set = nv50_gpio_set; 411 engine->gpio.sense = nv50_gpio_sense;
416 engine->gpio.irq_register = nv50_gpio_irq_register;
417 engine->gpio.irq_unregister = nv50_gpio_irq_unregister;
418 engine->gpio.irq_enable = nv50_gpio_irq_enable; 412 engine->gpio.irq_enable = nv50_gpio_irq_enable;
419 engine->vram.init = nvc0_vram_init; 413 engine->vram.init = nvc0_vram_init;
420 engine->vram.takedown = nv50_vram_fini; 414 engine->vram.takedown = nv50_vram_fini;
@@ -423,8 +417,12 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
423 engine->vram.flags_valid = nvc0_vram_flags_valid; 417 engine->vram.flags_valid = nvc0_vram_flags_valid;
424 engine->pm.temp_get = nv84_temp_get; 418 engine->pm.temp_get = nv84_temp_get;
425 engine->pm.clocks_get = nvc0_pm_clocks_get; 419 engine->pm.clocks_get = nvc0_pm_clocks_get;
420 engine->pm.clocks_pre = nvc0_pm_clocks_pre;
421 engine->pm.clocks_set = nvc0_pm_clocks_set;
426 engine->pm.voltage_get = nouveau_voltage_gpio_get; 422 engine->pm.voltage_get = nouveau_voltage_gpio_get;
427 engine->pm.voltage_set = nouveau_voltage_gpio_set; 423 engine->pm.voltage_set = nouveau_voltage_gpio_set;
424 engine->pm.pwm_get = nv50_pm_pwm_get;
425 engine->pm.pwm_set = nv50_pm_pwm_set;
428 break; 426 break;
429 case 0xd0: 427 case 0xd0:
430 engine->instmem.init = nvc0_instmem_init; 428 engine->instmem.init = nvc0_instmem_init;
@@ -457,21 +455,23 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
457 engine->display.early_init = nouveau_stub_init; 455 engine->display.early_init = nouveau_stub_init;
458 engine->display.late_takedown = nouveau_stub_takedown; 456 engine->display.late_takedown = nouveau_stub_takedown;
459 engine->display.create = nvd0_display_create; 457 engine->display.create = nvd0_display_create;
460 engine->display.init = nvd0_display_init;
461 engine->display.destroy = nvd0_display_destroy; 458 engine->display.destroy = nvd0_display_destroy;
459 engine->display.init = nvd0_display_init;
460 engine->display.fini = nvd0_display_fini;
462 engine->gpio.init = nv50_gpio_init; 461 engine->gpio.init = nv50_gpio_init;
463 engine->gpio.takedown = nouveau_stub_takedown; 462 engine->gpio.fini = nv50_gpio_fini;
464 engine->gpio.get = nvd0_gpio_get; 463 engine->gpio.drive = nvd0_gpio_drive;
465 engine->gpio.set = nvd0_gpio_set; 464 engine->gpio.sense = nvd0_gpio_sense;
466 engine->gpio.irq_register = nv50_gpio_irq_register;
467 engine->gpio.irq_unregister = nv50_gpio_irq_unregister;
468 engine->gpio.irq_enable = nv50_gpio_irq_enable; 465 engine->gpio.irq_enable = nv50_gpio_irq_enable;
469 engine->vram.init = nvc0_vram_init; 466 engine->vram.init = nvc0_vram_init;
470 engine->vram.takedown = nv50_vram_fini; 467 engine->vram.takedown = nv50_vram_fini;
471 engine->vram.get = nvc0_vram_new; 468 engine->vram.get = nvc0_vram_new;
472 engine->vram.put = nv50_vram_del; 469 engine->vram.put = nv50_vram_del;
473 engine->vram.flags_valid = nvc0_vram_flags_valid; 470 engine->vram.flags_valid = nvc0_vram_flags_valid;
471 engine->pm.temp_get = nv84_temp_get;
474 engine->pm.clocks_get = nvc0_pm_clocks_get; 472 engine->pm.clocks_get = nvc0_pm_clocks_get;
473 engine->pm.clocks_pre = nvc0_pm_clocks_pre;
474 engine->pm.clocks_set = nvc0_pm_clocks_set;
475 engine->pm.voltage_get = nouveau_voltage_gpio_get; 475 engine->pm.voltage_get = nouveau_voltage_gpio_get;
476 engine->pm.voltage_set = nouveau_voltage_gpio_set; 476 engine->pm.voltage_set = nouveau_voltage_gpio_set;
477 break; 477 break;
@@ -615,7 +615,7 @@ nouveau_card_init(struct drm_device *dev)
615 goto out_gart; 615 goto out_gart;
616 616
617 /* PGPIO */ 617 /* PGPIO */
618 ret = engine->gpio.init(dev); 618 ret = nouveau_gpio_create(dev);
619 if (ret) 619 if (ret)
620 goto out_mc; 620 goto out_mc;
621 621
@@ -648,6 +648,7 @@ nouveau_card_init(struct drm_device *dev)
648 nv50_graph_create(dev); 648 nv50_graph_create(dev);
649 break; 649 break;
650 case NV_C0: 650 case NV_C0:
651 case NV_D0:
651 nvc0_graph_create(dev); 652 nvc0_graph_create(dev);
652 break; 653 break;
653 default: 654 default:
@@ -663,6 +664,11 @@ nouveau_card_init(struct drm_device *dev)
663 case 0xa0: 664 case 0xa0:
664 nv84_crypt_create(dev); 665 nv84_crypt_create(dev);
665 break; 666 break;
667 case 0x98:
668 case 0xaa:
669 case 0xac:
670 nv98_crypt_create(dev);
671 break;
666 } 672 }
667 673
668 switch (dev_priv->card_type) { 674 switch (dev_priv->card_type) {
@@ -684,15 +690,25 @@ nouveau_card_init(struct drm_device *dev)
684 break; 690 break;
685 } 691 }
686 692
693 if (dev_priv->chipset >= 0xa3 || dev_priv->chipset == 0x98) {
694 nv84_bsp_create(dev);
695 nv84_vp_create(dev);
696 nv98_ppp_create(dev);
697 } else
698 if (dev_priv->chipset >= 0x84) {
699 nv50_mpeg_create(dev);
700 nv84_bsp_create(dev);
701 nv84_vp_create(dev);
702 } else
703 if (dev_priv->chipset >= 0x50) {
704 nv50_mpeg_create(dev);
705 } else
687 if (dev_priv->card_type == NV_40 || 706 if (dev_priv->card_type == NV_40 ||
688 dev_priv->chipset == 0x31 || 707 dev_priv->chipset == 0x31 ||
689 dev_priv->chipset == 0x34 || 708 dev_priv->chipset == 0x34 ||
690 dev_priv->chipset == 0x36) 709 dev_priv->chipset == 0x36) {
691 nv31_mpeg_create(dev); 710 nv31_mpeg_create(dev);
692 else 711 }
693 if (dev_priv->card_type == NV_50 &&
694 (dev_priv->chipset < 0x98 || dev_priv->chipset == 0xa0))
695 nv50_mpeg_create(dev);
696 712
697 for (e = 0; e < NVOBJ_ENGINE_NR; e++) { 713 for (e = 0; e < NVOBJ_ENGINE_NR; e++) {
698 if (dev_priv->eng[e]) { 714 if (dev_priv->eng[e]) {
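
The reworked ladder above picks video-decode engines by generation: VP3-class parts (0x98 or >= 0xa3) get BSP/VP/PPP, earlier NV84+ parts get MPEG plus BSP/VP, plain NV50 keeps only MPEG, and the NV31/NV40 family keeps the old MPEG block. The same selection, condensed (chipset cutoffs copied from the hunk; names are descriptive, not the driver's):

        enum decode_engines {
                BSP_VP_PPP,     /* nv84_bsp + nv84_vp + nv98_ppp */
                MPEG_BSP_VP,    /* nv50_mpeg + nv84_bsp + nv84_vp */
                MPEG_NV50,      /* nv50_mpeg only */
                MPEG_NV31,      /* nv31_mpeg */
                DECODE_NONE,
        };

        static enum decode_engines pick_decode(int is_nv40_family, int chipset)
        {
                if (chipset >= 0xa3 || chipset == 0x98)
                        return BSP_VP_PPP;
                if (chipset >= 0x84)
                        return MPEG_BSP_VP;
                if (chipset >= 0x50)
                        return MPEG_NV50;
                if (is_nv40_family || chipset == 0x31 ||
                    chipset == 0x34 || chipset == 0x36)
                        return MPEG_NV31;
                return DECODE_NONE;
        }
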
@@ -712,27 +728,7 @@ nouveau_card_init(struct drm_device *dev)
712 if (ret) 728 if (ret)
713 goto out_fifo; 729 goto out_fifo;
714 730
715 /* initialise general modesetting */ 731 ret = nouveau_display_create(dev);
716 drm_mode_config_init(dev);
717 drm_mode_create_scaling_mode_property(dev);
718 drm_mode_create_dithering_property(dev);
719 dev->mode_config.funcs = (void *)&nouveau_mode_config_funcs;
720 dev->mode_config.fb_base = pci_resource_start(dev->pdev, 1);
721 dev->mode_config.min_width = 0;
722 dev->mode_config.min_height = 0;
723 if (dev_priv->card_type < NV_10) {
724 dev->mode_config.max_width = 2048;
725 dev->mode_config.max_height = 2048;
726 } else
727 if (dev_priv->card_type < NV_50) {
728 dev->mode_config.max_width = 4096;
729 dev->mode_config.max_height = 4096;
730 } else {
731 dev->mode_config.max_width = 8192;
732 dev->mode_config.max_height = 8192;
733 }
734
735 ret = engine->display.create(dev);
736 if (ret) 732 if (ret)
737 goto out_irq; 733 goto out_irq;
738 734
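
The removed modesetting block is not lost: it moves into the new nouveau_display_create() helper, which keeps the card-generation scanout limits in one place. Those limits restated as a tiny helper (the cutoffs are copied from the removed lines; the NV_10/NV_50 numeric values are assumptions about the card_type enum):

        #define EX_NV_10 0x10   /* assumed value of nouveau card_type NV_10 */
        #define EX_NV_50 0x50   /* assumed value of nouveau card_type NV_50 */

        static void max_scanout(int card_type, int *w, int *h)
        {
                if (card_type < EX_NV_10)
                        *w = *h = 2048;
                else if (card_type < EX_NV_50)
                        *w = *h = 4096;
                else
                        *w = *h = 8192;
        }
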
@@ -752,12 +748,11 @@ nouveau_card_init(struct drm_device *dev)
752 } 748 }
753 749
754 if (dev->mode_config.num_crtc) { 750 if (dev->mode_config.num_crtc) {
755 ret = drm_vblank_init(dev, dev->mode_config.num_crtc); 751 ret = nouveau_display_init(dev);
756 if (ret) 752 if (ret)
757 goto out_chan; 753 goto out_chan;
758 754
759 nouveau_fbcon_init(dev); 755 nouveau_fbcon_init(dev);
760 drm_kms_helper_poll_init(dev);
761 } 756 }
762 757
763 return 0; 758 return 0;
@@ -768,7 +763,7 @@ out_fence:
768 nouveau_fence_fini(dev); 763 nouveau_fence_fini(dev);
769out_disp: 764out_disp:
770 nouveau_backlight_exit(dev); 765 nouveau_backlight_exit(dev);
771 engine->display.destroy(dev); 766 nouveau_display_destroy(dev);
772out_irq: 767out_irq:
773 nouveau_irq_fini(dev); 768 nouveau_irq_fini(dev);
774out_fifo: 769out_fifo:
@@ -788,7 +783,7 @@ out_engine:
788out_timer: 783out_timer:
789 engine->timer.takedown(dev); 784 engine->timer.takedown(dev);
790out_gpio: 785out_gpio:
791 engine->gpio.takedown(dev); 786 nouveau_gpio_destroy(dev);
792out_mc: 787out_mc:
793 engine->mc.takedown(dev); 788 engine->mc.takedown(dev);
794out_gart: 789out_gart:
@@ -818,9 +813,8 @@ static void nouveau_card_takedown(struct drm_device *dev)
818 int e; 813 int e;
819 814
820 if (dev->mode_config.num_crtc) { 815 if (dev->mode_config.num_crtc) {
821 drm_kms_helper_poll_fini(dev);
822 nouveau_fbcon_fini(dev); 816 nouveau_fbcon_fini(dev);
823 drm_vblank_cleanup(dev); 817 nouveau_display_fini(dev);
824 } 818 }
825 819
826 if (dev_priv->channel) { 820 if (dev_priv->channel) {
@@ -829,8 +823,7 @@ static void nouveau_card_takedown(struct drm_device *dev)
829 } 823 }
830 824
831 nouveau_backlight_exit(dev); 825 nouveau_backlight_exit(dev);
832 engine->display.destroy(dev); 826 nouveau_display_destroy(dev);
833 drm_mode_config_cleanup(dev);
834 827
835 if (!dev_priv->noaccel) { 828 if (!dev_priv->noaccel) {
836 engine->fifo.takedown(dev); 829 engine->fifo.takedown(dev);
@@ -843,7 +836,7 @@ static void nouveau_card_takedown(struct drm_device *dev)
843 } 836 }
844 engine->fb.takedown(dev); 837 engine->fb.takedown(dev);
845 engine->timer.takedown(dev); 838 engine->timer.takedown(dev);
846 engine->gpio.takedown(dev); 839 nouveau_gpio_destroy(dev);
847 engine->mc.takedown(dev); 840 engine->mc.takedown(dev);
848 engine->display.late_takedown(dev); 841 engine->display.late_takedown(dev);
849 842
@@ -1110,13 +1103,11 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
1110 dev_priv->noaccel = !!nouveau_noaccel; 1103 dev_priv->noaccel = !!nouveau_noaccel;
1111 if (nouveau_noaccel == -1) { 1104 if (nouveau_noaccel == -1) {
1112 switch (dev_priv->chipset) { 1105 switch (dev_priv->chipset) {
1113#if 0 1106 case 0xd9: /* known broken */
1114 case 0xXX: /* known broken */
1115 NV_INFO(dev, "acceleration disabled by default, pass " 1107 NV_INFO(dev, "acceleration disabled by default, pass "
1116 "noaccel=0 to force enable\n"); 1108 "noaccel=0 to force enable\n");
1117 dev_priv->noaccel = true; 1109 dev_priv->noaccel = true;
1118 break; 1110 break;
1119#endif
1120 default: 1111 default:
1121 dev_priv->noaccel = false; 1112 dev_priv->noaccel = false;
1122 break; 1113 break;
@@ -1238,7 +1229,7 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
1238 getparam->value = 1; 1229 getparam->value = 1;
1239 break; 1230 break;
1240 case NOUVEAU_GETPARAM_HAS_PAGEFLIP: 1231 case NOUVEAU_GETPARAM_HAS_PAGEFLIP:
1241 getparam->value = dev_priv->card_type < NV_D0; 1232 getparam->value = 1;
1242 break; 1233 break;
1243 case NOUVEAU_GETPARAM_GRAPH_UNITS: 1234 case NOUVEAU_GETPARAM_GRAPH_UNITS:
1244 /* NV40 and NV50 versions are quite different, but register 1235 /* NV40 and NV50 versions are quite different, but register
diff --git a/drivers/gpu/drm/nouveau/nouveau_temp.c b/drivers/gpu/drm/nouveau/nouveau_temp.c
index 5a46446dd5a..0f5a3016055 100644
--- a/drivers/gpu/drm/nouveau/nouveau_temp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_temp.c
@@ -55,6 +55,10 @@ nouveau_temp_vbios_parse(struct drm_device *dev, u8 *temp)
55 temps->down_clock = 100; 55 temps->down_clock = 100;
56 temps->fan_boost = 90; 56 temps->fan_boost = 90;
57 57
58 /* Set the default range for the pwm fan */
59 pm->fan.min_duty = 30;
60 pm->fan.max_duty = 100;
61
58 /* Set the known default values to setup the temperature sensor */ 62 /* Set the known default values to setup the temperature sensor */
59 if (dev_priv->card_type >= NV_40) { 63 if (dev_priv->card_type >= NV_40) {
60 switch (dev_priv->chipset) { 64 switch (dev_priv->chipset) {
@@ -156,11 +160,26 @@ nouveau_temp_vbios_parse(struct drm_device *dev, u8 *temp)
156 case 0x13: 160 case 0x13:
157 sensor->slope_div = value; 161 sensor->slope_div = value;
158 break; 162 break;
163 case 0x22:
164 pm->fan.min_duty = value & 0xff;
165 pm->fan.max_duty = (value & 0xff00) >> 8;
166 break;
167 case 0x26:
168 pm->fan.pwm_freq = value;
169 break;
159 } 170 }
160 temp += recordlen; 171 temp += recordlen;
161 } 172 }
162 173
163 nouveau_temp_safety_checks(dev); 174 nouveau_temp_safety_checks(dev);
175
176 /* check the fan min/max settings */
177 if (pm->fan.min_duty < 10)
178 pm->fan.min_duty = 10;
179 if (pm->fan.max_duty > 100)
180 pm->fan.max_duty = 100;
181 if (pm->fan.max_duty < pm->fan.min_duty)
182 pm->fan.max_duty = pm->fan.min_duty;
164} 183}
165 184
166static int 185static int
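
The block added after nouveau_temp_safety_checks() clamps whatever the VBIOS supplied into a safe envelope: a 10% floor so the fan never stalls, a 100% ceiling, and max never below min. The same invariant as a standalone helper:

        /* clamp VBIOS fan duty limits to [10, 100] percent with min <= max */
        static void clamp_fan_range(int *min_duty, int *max_duty)
        {
                if (*min_duty < 10)
                        *min_duty = 10;
                if (*max_duty > 100)
                        *max_duty = 100;
                if (*max_duty < *min_duty)
                        *max_duty = *min_duty;
        }
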
@@ -267,8 +286,6 @@ probe_monitoring_device(struct nouveau_i2c_chan *i2c,
267static void 286static void
268nouveau_temp_probe_i2c(struct drm_device *dev) 287nouveau_temp_probe_i2c(struct drm_device *dev)
269{ 288{
270 struct drm_nouveau_private *dev_priv = dev->dev_private;
271 struct dcb_table *dcb = &dev_priv->vbios.dcb;
272 struct i2c_board_info info[] = { 289 struct i2c_board_info info[] = {
273 { I2C_BOARD_INFO("w83l785ts", 0x2d) }, 290 { I2C_BOARD_INFO("w83l785ts", 0x2d) },
274 { I2C_BOARD_INFO("w83781d", 0x2d) }, 291 { I2C_BOARD_INFO("w83781d", 0x2d) },
@@ -277,11 +294,9 @@ nouveau_temp_probe_i2c(struct drm_device *dev)
277 { I2C_BOARD_INFO("lm99", 0x4c) }, 294 { I2C_BOARD_INFO("lm99", 0x4c) },
278 { } 295 { }
279 }; 296 };
280 int idx = (dcb->version >= 0x40 ?
281 dcb->i2c_default_indices & 0xf : 2);
282 297
283 nouveau_i2c_identify(dev, "monitoring device", info, 298 nouveau_i2c_identify(dev, "monitoring device", info,
284 probe_monitoring_device, idx); 299 probe_monitoring_device, NV_I2C_DEFAULT(0));
285} 300}
286 301
287void 302void
@@ -297,9 +312,9 @@ nouveau_temp_init(struct drm_device *dev)
297 return; 312 return;
298 313
299 if (P.version == 1) 314 if (P.version == 1)
300 temp = ROMPTR(bios, P.data[12]); 315 temp = ROMPTR(dev, P.data[12]);
301 else if (P.version == 2) 316 else if (P.version == 2)
302 temp = ROMPTR(bios, P.data[16]); 317 temp = ROMPTR(dev, P.data[16]);
303 else 318 else
304 NV_WARN(dev, "unknown temp for BIT P %d\n", P.version); 319 NV_WARN(dev, "unknown temp for BIT P %d\n", P.version);
305 320
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.c b/drivers/gpu/drm/nouveau/nouveau_vm.c
index ef0832b29ad..2bf6c0350b4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.c
@@ -78,9 +78,10 @@ nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
78 78
79void 79void
80nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length, 80nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
81 struct nouveau_mem *mem, dma_addr_t *list) 81 struct nouveau_mem *mem)
82{ 82{
83 struct nouveau_vm *vm = vma->vm; 83 struct nouveau_vm *vm = vma->vm;
84 dma_addr_t *list = mem->pages;
84 int big = vma->node->type != vm->spg_shift; 85 int big = vma->node->type != vm->spg_shift;
85 u32 offset = vma->node->offset + (delta >> 12); 86 u32 offset = vma->node->offset + (delta >> 12);
86 u32 bits = vma->node->type - 12; 87 u32 bits = vma->node->type - 12;
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.h b/drivers/gpu/drm/nouveau/nouveau_vm.h
index 6ce995f7797..4fb6e728734 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.h
@@ -89,7 +89,7 @@ void nouveau_vm_map_at(struct nouveau_vma *, u64 offset, struct nouveau_mem *);
89void nouveau_vm_unmap(struct nouveau_vma *); 89void nouveau_vm_unmap(struct nouveau_vma *);
90void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length); 90void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length);
91void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length, 91void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
92 struct nouveau_mem *, dma_addr_t *); 92 struct nouveau_mem *);
93 93
94/* nv50_vm.c */ 94/* nv50_vm.c */
95void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde, 95void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
diff --git a/drivers/gpu/drm/nouveau/nouveau_volt.c b/drivers/gpu/drm/nouveau/nouveau_volt.c
index 86d03e15735..b010cb997b3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_volt.c
+++ b/drivers/gpu/drm/nouveau/nouveau_volt.c
@@ -26,6 +26,7 @@
26 26
27#include "nouveau_drv.h" 27#include "nouveau_drv.h"
28#include "nouveau_pm.h" 28#include "nouveau_pm.h"
29#include "nouveau_gpio.h"
29 30
30static const enum dcb_gpio_tag vidtag[] = { 0x04, 0x05, 0x06, 0x1a, 0x73 }; 31static const enum dcb_gpio_tag vidtag[] = { 0x04, 0x05, 0x06, 0x1a, 0x73 };
31static int nr_vidtag = sizeof(vidtag) / sizeof(vidtag[0]); 32static int nr_vidtag = sizeof(vidtag) / sizeof(vidtag[0]);
@@ -34,7 +35,6 @@ int
34nouveau_voltage_gpio_get(struct drm_device *dev) 35nouveau_voltage_gpio_get(struct drm_device *dev)
35{ 36{
36 struct drm_nouveau_private *dev_priv = dev->dev_private; 37 struct drm_nouveau_private *dev_priv = dev->dev_private;
37 struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio;
38 struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage; 38 struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage;
39 u8 vid = 0; 39 u8 vid = 0;
40 int i; 40 int i;
@@ -43,7 +43,7 @@ nouveau_voltage_gpio_get(struct drm_device *dev)
43 if (!(volt->vid_mask & (1 << i))) 43 if (!(volt->vid_mask & (1 << i)))
44 continue; 44 continue;
45 45
46 vid |= gpio->get(dev, vidtag[i]) << i; 46 vid |= nouveau_gpio_func_get(dev, vidtag[i]) << i;
47 } 47 }
48 48
49 return nouveau_volt_lvl_lookup(dev, vid); 49 return nouveau_volt_lvl_lookup(dev, vid);
@@ -53,7 +53,6 @@ int
53nouveau_voltage_gpio_set(struct drm_device *dev, int voltage) 53nouveau_voltage_gpio_set(struct drm_device *dev, int voltage)
54{ 54{
55 struct drm_nouveau_private *dev_priv = dev->dev_private; 55 struct drm_nouveau_private *dev_priv = dev->dev_private;
56 struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio;
57 struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage; 56 struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage;
58 int vid, i; 57 int vid, i;
59 58
@@ -65,7 +64,7 @@ nouveau_voltage_gpio_set(struct drm_device *dev, int voltage)
65 if (!(volt->vid_mask & (1 << i))) 64 if (!(volt->vid_mask & (1 << i)))
66 continue; 65 continue;
67 66
68 gpio->set(dev, vidtag[i], !!(vid & (1 << i))); 67 nouveau_gpio_func_set(dev, vidtag[i], !!(vid & (1 << i)));
69 } 68 }
70 69
71 return 0; 70 return 0;
@@ -117,10 +116,10 @@ nouveau_volt_init(struct drm_device *dev)
117 return; 116 return;
118 117
119 if (P.version == 1) 118 if (P.version == 1)
120 volt = ROMPTR(bios, P.data[16]); 119 volt = ROMPTR(dev, P.data[16]);
121 else 120 else
122 if (P.version == 2) 121 if (P.version == 2)
123 volt = ROMPTR(bios, P.data[12]); 122 volt = ROMPTR(dev, P.data[12]);
124 else { 123 else {
125 NV_WARN(dev, "unknown volt for BIT P %d\n", P.version); 124 NV_WARN(dev, "unknown volt for BIT P %d\n", P.version);
126 } 125 }
@@ -130,7 +129,7 @@ nouveau_volt_init(struct drm_device *dev)
130 return; 129 return;
131 } 130 }
132 131
133 volt = ROMPTR(bios, bios->data[bios->offset + 0x98]); 132 volt = ROMPTR(dev, bios->data[bios->offset + 0x98]);
134 } 133 }
135 134
136 if (!volt) { 135 if (!volt) {
@@ -194,7 +193,7 @@ nouveau_volt_init(struct drm_device *dev)
194 return; 193 return;
195 } 194 }
196 195
197 if (!nouveau_bios_gpio_entry(dev, vidtag[i])) { 196 if (!nouveau_gpio_func_valid(dev, vidtag[i])) {
198 NV_DEBUG(dev, "vid bit %d has no gpio tag\n", i); 197 NV_DEBUG(dev, "vid bit %d has no gpio tag\n", i);
199 return; 198 return;
200 } 199 }
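
Both voltage helpers treat the VID as a bit field scattered across GPIO lines: the getter ORs each sensed line into its bit position, the setter drives each line from the matching bit of the chosen VID. The read direction as a self-contained sketch (gpio_read() is a hypothetical stand-in for nouveau_gpio_func_get()):

        /* hypothetical: returns the 0/1 state of the GPIO with this tag */
        extern int gpio_read(int tag);

        static unsigned read_vid(const int *vidtag, unsigned vid_mask, int nr)
        {
                unsigned vid = 0;
                int i;

                for (i = 0; i < nr; i++) {
                        if (!(vid_mask & (1u << i)))
                                continue;       /* VID bit not wired on this board */
                        vid |= (unsigned)gpio_read(vidtag[i]) << i;
                }
                return vid;
        }
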
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index 5e45398a9e2..728d07584d3 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -364,7 +364,7 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
364 regp->CRTC[NV_CIO_CR_VRE_INDEX] = 1 << 5 | XLATE(vertEnd, 0, NV_CIO_CR_VRE_3_0); 364 regp->CRTC[NV_CIO_CR_VRE_INDEX] = 1 << 5 | XLATE(vertEnd, 0, NV_CIO_CR_VRE_3_0);
365 regp->CRTC[NV_CIO_CR_VDE_INDEX] = vertDisplay; 365 regp->CRTC[NV_CIO_CR_VDE_INDEX] = vertDisplay;
366 /* framebuffer can be larger than crtc scanout area. */ 366 /* framebuffer can be larger than crtc scanout area. */
367 regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = fb->pitch / 8; 367 regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = fb->pitches[0] / 8;
368 regp->CRTC[NV_CIO_CR_ULINE_INDEX] = 0x00; 368 regp->CRTC[NV_CIO_CR_ULINE_INDEX] = 0x00;
369 regp->CRTC[NV_CIO_CR_VBS_INDEX] = vertBlankStart; 369 regp->CRTC[NV_CIO_CR_VBS_INDEX] = vertBlankStart;
370 regp->CRTC[NV_CIO_CR_VBE_INDEX] = vertBlankEnd; 370 regp->CRTC[NV_CIO_CR_VBE_INDEX] = vertBlankEnd;
@@ -377,9 +377,9 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
377 377
378 /* framebuffer can be larger than crtc scanout area. */ 378 /* framebuffer can be larger than crtc scanout area. */
379 regp->CRTC[NV_CIO_CRE_RPC0_INDEX] = 379 regp->CRTC[NV_CIO_CRE_RPC0_INDEX] =
380 XLATE(fb->pitch / 8, 8, NV_CIO_CRE_RPC0_OFFSET_10_8); 380 XLATE(fb->pitches[0] / 8, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
381 regp->CRTC[NV_CIO_CRE_42] = 381 regp->CRTC[NV_CIO_CRE_42] =
382 XLATE(fb->pitch / 8, 11, NV_CIO_CRE_42_OFFSET_11); 382 XLATE(fb->pitches[0] / 8, 11, NV_CIO_CRE_42_OFFSET_11);
383 regp->CRTC[NV_CIO_CRE_RPC1_INDEX] = mode->crtc_hdisplay < 1280 ? 383 regp->CRTC[NV_CIO_CRE_RPC1_INDEX] = mode->crtc_hdisplay < 1280 ?
384 MASK(NV_CIO_CRE_RPC1_LARGE) : 0x00; 384 MASK(NV_CIO_CRE_RPC1_LARGE) : 0x00;
385 regp->CRTC[NV_CIO_CRE_LSR_INDEX] = XLATE(horizBlankEnd, 6, NV_CIO_CRE_LSR_HBE_6) | 385 regp->CRTC[NV_CIO_CRE_LSR_INDEX] = XLATE(horizBlankEnd, 6, NV_CIO_CRE_LSR_HBE_6) |
@@ -835,18 +835,18 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
835 NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_GENERAL_CONTROL, 835 NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_GENERAL_CONTROL,
836 regp->ramdac_gen_ctrl); 836 regp->ramdac_gen_ctrl);
837 837
838 regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = drm_fb->pitch >> 3; 838 regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = drm_fb->pitches[0] >> 3;
839 regp->CRTC[NV_CIO_CRE_RPC0_INDEX] = 839 regp->CRTC[NV_CIO_CRE_RPC0_INDEX] =
840 XLATE(drm_fb->pitch >> 3, 8, NV_CIO_CRE_RPC0_OFFSET_10_8); 840 XLATE(drm_fb->pitches[0] >> 3, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
841 regp->CRTC[NV_CIO_CRE_42] = 841 regp->CRTC[NV_CIO_CRE_42] =
842 XLATE(drm_fb->pitch / 8, 11, NV_CIO_CRE_42_OFFSET_11); 842 XLATE(drm_fb->pitches[0] / 8, 11, NV_CIO_CRE_42_OFFSET_11);
843 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_RPC0_INDEX); 843 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_RPC0_INDEX);
844 crtc_wr_cio_state(crtc, regp, NV_CIO_CR_OFFSET_INDEX); 844 crtc_wr_cio_state(crtc, regp, NV_CIO_CR_OFFSET_INDEX);
845 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_42); 845 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_42);
846 846
847 /* Update the framebuffer location. */ 847 /* Update the framebuffer location. */
848 regp->fb_start = nv_crtc->fb.offset & ~3; 848 regp->fb_start = nv_crtc->fb.offset & ~3;
849 regp->fb_start += (y * drm_fb->pitch) + (x * drm_fb->bits_per_pixel / 8); 849 regp->fb_start += (y * drm_fb->pitches[0]) + (x * drm_fb->bits_per_pixel / 8);
850 nv_set_crtc_base(dev, nv_crtc->index, regp->fb_start); 850 nv_set_crtc_base(dev, nv_crtc->index, regp->fb_start);
851 851
852 /* Update the arbitration parameters. */ 852 /* Update the arbitration parameters. */
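
The pitch to pitches[0] rename follows the multi-plane framebuffer API; for the packed formats this CRTC drives there is only plane 0, and the scanout start is still base + y * pitch + x * bytes-per-pixel, with the low two address bits masked as the hardware requires. That computation in isolation (arithmetic taken from the hunk):

        /* scanout start for a packed single-plane framebuffer */
        static unsigned fb_start(unsigned base, unsigned pitch_bytes,
                                 int x, int y, int bits_per_pixel)
        {
                return (base & ~3u) + y * pitch_bytes + x * (bits_per_pixel / 8);
        }
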
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c
index e000455e06d..8300266ffae 100644
--- a/drivers/gpu/drm/nouveau/nv04_dac.c
+++ b/drivers/gpu/drm/nouveau/nv04_dac.c
@@ -32,6 +32,7 @@
32#include "nouveau_connector.h" 32#include "nouveau_connector.h"
33#include "nouveau_crtc.h" 33#include "nouveau_crtc.h"
34#include "nouveau_hw.h" 34#include "nouveau_hw.h"
35#include "nouveau_gpio.h"
35#include "nvreg.h" 36#include "nvreg.h"
36 37
37int nv04_dac_output_offset(struct drm_encoder *encoder) 38int nv04_dac_output_offset(struct drm_encoder *encoder)
@@ -220,7 +221,6 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
220{ 221{
221 struct drm_device *dev = encoder->dev; 222 struct drm_device *dev = encoder->dev;
222 struct drm_nouveau_private *dev_priv = dev->dev_private; 223 struct drm_nouveau_private *dev_priv = dev->dev_private;
223 struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio;
224 struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb; 224 struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
225 uint32_t sample, testval, regoffset = nv04_dac_output_offset(encoder); 225 uint32_t sample, testval, regoffset = nv04_dac_output_offset(encoder);
226 uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput, 226 uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput,
@@ -252,11 +252,11 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
252 nvWriteMC(dev, NV_PBUS_POWERCTRL_4, saved_powerctrl_4 & 0xffffffcf); 252 nvWriteMC(dev, NV_PBUS_POWERCTRL_4, saved_powerctrl_4 & 0xffffffcf);
253 } 253 }
254 254
255 saved_gpio1 = gpio->get(dev, DCB_GPIO_TVDAC1); 255 saved_gpio1 = nouveau_gpio_func_get(dev, DCB_GPIO_TVDAC1);
256 saved_gpio0 = gpio->get(dev, DCB_GPIO_TVDAC0); 256 saved_gpio0 = nouveau_gpio_func_get(dev, DCB_GPIO_TVDAC0);
257 257
258 gpio->set(dev, DCB_GPIO_TVDAC1, dcb->type == OUTPUT_TV); 258 nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, dcb->type == OUTPUT_TV);
259 gpio->set(dev, DCB_GPIO_TVDAC0, dcb->type == OUTPUT_TV); 259 nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, dcb->type == OUTPUT_TV);
260 260
261 msleep(4); 261 msleep(4);
262 262
@@ -306,8 +306,8 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
306 nvWriteMC(dev, NV_PBUS_POWERCTRL_4, saved_powerctrl_4); 306 nvWriteMC(dev, NV_PBUS_POWERCTRL_4, saved_powerctrl_4);
307 nvWriteMC(dev, NV_PBUS_POWERCTRL_2, saved_powerctrl_2); 307 nvWriteMC(dev, NV_PBUS_POWERCTRL_2, saved_powerctrl_2);
308 308
309 gpio->set(dev, DCB_GPIO_TVDAC1, saved_gpio1); 309 nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, saved_gpio1);
310 gpio->set(dev, DCB_GPIO_TVDAC0, saved_gpio0); 310 nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, saved_gpio0);
311 311
312 return sample; 312 return sample;
313} 313}
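
Note the bracket around the load detect: both TV-DAC GPIOs are sampled, forced to reflect whether the encoder under test is a TV output, and restored afterwards so the probe leaves no trace. The shape of that save/flip/restore bracket (gpio_get()/gpio_set() are stand-ins for the nouveau_gpio_func_* calls):

        extern int  gpio_get(int tag);
        extern void gpio_set(int tag, int on);

        static unsigned probe_with_gpios(int tag0, int tag1, int testing_tv,
                                         unsigned (*sample)(void *), void *ctx)
        {
                int saved0 = gpio_get(tag0);
                int saved1 = gpio_get(tag1);
                unsigned result;

                gpio_set(tag0, testing_tv);
                gpio_set(tag1, testing_tv);
                result = sample(ctx);           /* the actual load detect */
                gpio_set(tag0, saved0);
                gpio_set(tag1, saved1);
                return result;
        }
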
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index 12098bf839c..2258746016f 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -289,6 +289,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
289 struct nouveau_connector *nv_connector = nouveau_crtc_connector_get(nv_crtc); 289 struct nouveau_connector *nv_connector = nouveau_crtc_connector_get(nv_crtc);
290 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 290 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
291 struct drm_display_mode *output_mode = &nv_encoder->mode; 291 struct drm_display_mode *output_mode = &nv_encoder->mode;
292 struct drm_connector *connector = &nv_connector->base;
292 uint32_t mode_ratio, panel_ratio; 293 uint32_t mode_ratio, panel_ratio;
293 294
294 NV_DEBUG_KMS(dev, "Output mode on CRTC %d:\n", nv_crtc->index); 295 NV_DEBUG_KMS(dev, "Output mode on CRTC %d:\n", nv_crtc->index);
@@ -340,10 +341,15 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
340 output_mode->clock > 165000) 341 output_mode->clock > 165000)
341 regp->fp_control |= (2 << 24); 342 regp->fp_control |= (2 << 24);
342 if (nv_encoder->dcb->type == OUTPUT_LVDS) { 343 if (nv_encoder->dcb->type == OUTPUT_LVDS) {
343 bool duallink, dummy; 344 bool duallink = false, dummy;
345 if (nv_connector->edid &&
346 nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
347 duallink = (((u8 *)nv_connector->edid)[121] == 2);
348 } else {
349 nouveau_bios_parse_lvds_table(dev, output_mode->clock,
350 &duallink, &dummy);
351 }
344 352
345 nouveau_bios_parse_lvds_table(dev, output_mode->clock,
346 &duallink, &dummy);
347 if (duallink) 353 if (duallink)
348 regp->fp_control |= (8 << 28); 354 regp->fp_control |= (8 << 28);
349 } else 355 } else
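
For SPWG-class LVDS panels the duallink decision now comes from the EDID rather than the VBIOS table: byte 121 of an SPWG EDID carries the channel count, so a value of 2 means dual link. A guarded restatement (offset taken from the hunk; treating the EDID as a raw byte block of at least 128 bytes is an assumption):

        /* SPWG LVDS EDIDs store the channel count at byte 121 */
        static int spwg_edid_duallink(const unsigned char *edid, unsigned len)
        {
                if (!edid || len < 128)
                        return 0;
                return edid[121] == 2;
        }
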
@@ -407,7 +413,9 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
407 } 413 }
408 414
409 /* Output property. */ 415 /* Output property. */
410 if (nv_connector->use_dithering) { 416 if ((nv_connector->dithering_mode == DITHERING_MODE_ON) ||
417 (nv_connector->dithering_mode == DITHERING_MODE_AUTO &&
418 encoder->crtc->fb->depth > connector->display_info.bpc * 3)) {
411 if (dev_priv->chipset == 0x11) 419 if (dev_priv->chipset == 0x11)
412 regp->dither = savep->dither | 0x00010000; 420 regp->dither = savep->dither | 0x00010000;
413 else { 421 else {
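
The rewritten test honours the new connector property: dither when forced on, or in auto mode when the framebuffer holds more depth than the panel can display (display_info.bpc is bits per colour channel, hence the factor of three). The predicate on its own (enum values mirror the DITHERING_MODE_* names used in the hunk):

        enum { DITHER_OFF, DITHER_ON, DITHER_AUTO };    /* mirrors DITHERING_MODE_* */

        static int want_dither(int mode, int fb_depth, int panel_bpc)
        {
                return mode == DITHER_ON ||
                       (mode == DITHER_AUTO && fb_depth > panel_bpc * 3);
        }
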
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c
index 6bd8518d7b2..7047d37e8da 100644
--- a/drivers/gpu/drm/nouveau/nv04_display.c
+++ b/drivers/gpu/drm/nouveau/nv04_display.c
@@ -243,6 +243,11 @@ nv04_display_init(struct drm_device *dev)
243 return 0; 243 return 0;
244} 244}
245 245
246void
247nv04_display_fini(struct drm_device *dev)
248{
249}
250
246static void 251static void
247nv04_vblank_crtc0_isr(struct drm_device *dev) 252nv04_vblank_crtc0_isr(struct drm_device *dev)
248{ 253{
diff --git a/drivers/gpu/drm/nouveau/nv04_pm.c b/drivers/gpu/drm/nouveau/nv04_pm.c
index 9ae92a87b8c..6e7589918fa 100644
--- a/drivers/gpu/drm/nouveau/nv04_pm.c
+++ b/drivers/gpu/drm/nouveau/nv04_pm.c
@@ -27,68 +27,111 @@
27#include "nouveau_hw.h" 27#include "nouveau_hw.h"
28#include "nouveau_pm.h" 28#include "nouveau_pm.h"
29 29
30struct nv04_pm_state { 30int
31nv04_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
32{
33 int ret;
34
35 ret = nouveau_hw_get_clock(dev, PLL_CORE);
36 if (ret < 0)
37 return ret;
38 perflvl->core = ret;
39
40 ret = nouveau_hw_get_clock(dev, PLL_MEMORY);
41 if (ret < 0)
42 return ret;
43 perflvl->memory = ret;
44
45 return 0;
46}
47
48struct nv04_pm_clock {
31 struct pll_lims pll; 49 struct pll_lims pll;
32 struct nouveau_pll_vals calc; 50 struct nouveau_pll_vals calc;
33}; 51};
34 52
35int 53struct nv04_pm_state {
36nv04_pm_clock_get(struct drm_device *dev, u32 id) 54 struct nv04_pm_clock core;
55 struct nv04_pm_clock memory;
56};
57
58static int
59calc_pll(struct drm_device *dev, u32 id, int khz, struct nv04_pm_clock *clk)
37{ 60{
38 return nouveau_hw_get_clock(dev, id); 61 int ret;
62
63 ret = get_pll_limits(dev, id, &clk->pll);
64 if (ret)
65 return ret;
66
67 ret = nouveau_calc_pll_mnp(dev, &clk->pll, khz, &clk->calc);
68 if (!ret)
69 return -EINVAL;
70
71 return 0;
39} 72}
40 73
41void * 74void *
42nv04_pm_clock_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl, 75nv04_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
43 u32 id, int khz)
44{ 76{
45 struct nv04_pm_state *state; 77 struct nv04_pm_state *info;
46 int ret; 78 int ret;
47 79
48 state = kzalloc(sizeof(*state), GFP_KERNEL); 80 info = kzalloc(sizeof(*info), GFP_KERNEL);
49 if (!state) 81 if (!info)
50 return ERR_PTR(-ENOMEM); 82 return ERR_PTR(-ENOMEM);
51 83
52 ret = get_pll_limits(dev, id, &state->pll); 84 ret = calc_pll(dev, PLL_CORE, perflvl->core, &info->core);
53 if (ret) { 85 if (ret)
54 kfree(state); 86 goto error;
55 return (ret == -ENOENT) ? NULL : ERR_PTR(ret);
56 }
57 87
58 ret = nouveau_calc_pll_mnp(dev, &state->pll, khz, &state->calc); 88 if (perflvl->memory) {
59 if (!ret) { 89 ret = calc_pll(dev, PLL_MEMORY, perflvl->memory, &info->memory);
60 kfree(state); 90 if (ret)
61 return ERR_PTR(-EINVAL); 91 goto error;
62 } 92 }
63 93
64 return state; 94 return info;
95error:
96 kfree(info);
97 return ERR_PTR(ret);
65} 98}
66 99
67void 100static void
68nv04_pm_clock_set(struct drm_device *dev, void *pre_state) 101prog_pll(struct drm_device *dev, struct nv04_pm_clock *clk)
69{ 102{
70 struct drm_nouveau_private *dev_priv = dev->dev_private; 103 struct drm_nouveau_private *dev_priv = dev->dev_private;
71 struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer; 104 u32 reg = clk->pll.reg;
72 struct nv04_pm_state *state = pre_state;
73 u32 reg = state->pll.reg;
74 105
75 /* thank the insane nouveau_hw_setpll() interface for this */ 106 /* thank the insane nouveau_hw_setpll() interface for this */
76 if (dev_priv->card_type >= NV_40) 107 if (dev_priv->card_type >= NV_40)
77 reg += 4; 108 reg += 4;
78 109
79 nouveau_hw_setpll(dev, reg, &state->calc); 110 nouveau_hw_setpll(dev, reg, &clk->calc);
111}
112
113int
114nv04_pm_clocks_set(struct drm_device *dev, void *pre_state)
115{
116 struct drm_nouveau_private *dev_priv = dev->dev_private;
117 struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
118 struct nv04_pm_state *state = pre_state;
119
120 prog_pll(dev, &state->core);
80 121
81 if (dev_priv->card_type < NV_30 && reg == NV_PRAMDAC_MPLL_COEFF) { 122 if (state->memory.pll.reg) {
82 if (dev_priv->card_type == NV_20) 123 prog_pll(dev, &state->memory);
83 nv_mask(dev, 0x1002c4, 0, 1 << 20); 124 if (dev_priv->card_type < NV_30) {
125 if (dev_priv->card_type == NV_20)
126 nv_mask(dev, 0x1002c4, 0, 1 << 20);
84 127
85 /* Reset the DLLs */ 128 /* Reset the DLLs */
86 nv_mask(dev, 0x1002c0, 0, 1 << 8); 129 nv_mask(dev, 0x1002c0, 0, 1 << 8);
130 }
87 } 131 }
88 132
89 if (reg == NV_PRAMDAC_NVPLL_COEFF) 133 ptimer->init(dev);
90 ptimer->init(dev);
91 134
92 kfree(state); 135 kfree(state);
136 return 0;
93} 137}
94
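The reworked nv04 path folds the old per-PLL pre/set hooks into a single clocks_pre()/clocks_set() pair: every fallible step (limit lookup, coefficient calculation) happens in pre, and set only programs registers from the saved state before freeing it. Note that calc_pll() has to invert nouveau_calc_pll_mnp()'s return convention, which reports 0 on failure, hence the `if (!ret) return -EINVAL`. A minimal standalone sketch of the same shape, with a hypothetical calc() standing in for the BIOS limit and PLL code:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct clk_state { int core_coef; int mem_coef; };

/* stand-in for calc_pll(); pretend any positive target is reachable */
static int calc(int khz, int *coef)
{
	if (khz <= 0)
		return -EINVAL;
	*coef = khz / 27000 + 1;	/* toy coefficient, not real PLL maths */
	return 0;
}

/* "pre": every fallible step happens here, nothing touches hardware */
static struct clk_state *clocks_pre(int core_khz, int mem_khz)
{
	struct clk_state *info = calloc(1, sizeof(*info));

	if (!info)
		return NULL;
	if (calc(core_khz, &info->core_coef))
		goto error;
	if (mem_khz && calc(mem_khz, &info->mem_coef))
		goto error;
	return info;
error:
	free(info);
	return NULL;
}

/* "set": commit the precomputed state, then free it, like clocks_set() */
static void clocks_set(struct clk_state *info)
{
	printf("core coef %d\n", info->core_coef);
	if (info->mem_coef)
		printf("mem  coef %d\n", info->mem_coef);
	free(info);
}

int main(void)
{
	struct clk_state *info = clocks_pre(270000, 324000);

	if (info)
		clocks_set(info);
	return 0;
}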
diff --git a/drivers/gpu/drm/nouveau/nv04_timer.c b/drivers/gpu/drm/nouveau/nv04_timer.c
index 263301b809d..55c945290e5 100644
--- a/drivers/gpu/drm/nouveau/nv04_timer.c
+++ b/drivers/gpu/drm/nouveau/nv04_timer.c
@@ -2,6 +2,7 @@
2#include "drm.h" 2#include "drm.h"
3#include "nouveau_drv.h" 3#include "nouveau_drv.h"
4#include "nouveau_drm.h" 4#include "nouveau_drm.h"
5#include "nouveau_hw.h"
5 6
6int 7int
7nv04_timer_init(struct drm_device *dev) 8nv04_timer_init(struct drm_device *dev)
@@ -17,7 +18,7 @@ nv04_timer_init(struct drm_device *dev)
17 18
18 /* determine base clock for timer source */ 19 /* determine base clock for timer source */
19 if (dev_priv->chipset < 0x40) { 20 if (dev_priv->chipset < 0x40) {
20 n = dev_priv->engine.pm.clock_get(dev, PLL_CORE); 21 n = nouveau_hw_get_clock(dev, PLL_CORE);
21 } else 22 } else
22 if (dev_priv->chipset == 0x40) { 23 if (dev_priv->chipset == 0x40) {
23 /*XXX: figure this out */ 24 /*XXX: figure this out */
diff --git a/drivers/gpu/drm/nouveau/nv10_gpio.c b/drivers/gpu/drm/nouveau/nv10_gpio.c
index 007fc29e2f8..550ad3fcf0a 100644
--- a/drivers/gpu/drm/nouveau/nv10_gpio.c
+++ b/drivers/gpu/drm/nouveau/nv10_gpio.c
@@ -27,66 +27,97 @@
27#include "drmP.h" 27#include "drmP.h"
28#include "nouveau_drv.h" 28#include "nouveau_drv.h"
29#include "nouveau_hw.h" 29#include "nouveau_hw.h"
30#include "nouveau_gpio.h"
30 31
31static bool 32int
32get_gpio_location(struct dcb_gpio_entry *ent, uint32_t *reg, uint32_t *shift, 33nv10_gpio_sense(struct drm_device *dev, int line)
33 uint32_t *mask)
34{ 34{
35 if (ent->line < 2) { 35 if (line < 2) {
36 *reg = NV_PCRTC_GPIO; 36 line = line * 16;
37 *shift = ent->line * 16; 37 line = NVReadCRTC(dev, 0, NV_PCRTC_GPIO) >> line;
38 *mask = 0x11; 38 return !!(line & 0x0100);
39 39 } else
40 } else if (ent->line < 10) { 40 if (line < 10) {
41 *reg = NV_PCRTC_GPIO_EXT; 41 line = (line - 2) * 4;
42 *shift = (ent->line - 2) * 4; 42 line = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT) >> line;
43 *mask = 0x3; 43 return !!(line & 0x04);
44 } else
45 if (line < 14) {
46 line = (line - 10) * 4;
47 line = NVReadCRTC(dev, 0, NV_PCRTC_850) >> line;
48 return !!(line & 0x04);
49 }
44 50
45 } else if (ent->line < 14) { 51 return -EINVAL;
46 *reg = NV_PCRTC_850; 52}
47 *shift = (ent->line - 10) * 4;
48 *mask = 0x3;
49 53
54int
55nv10_gpio_drive(struct drm_device *dev, int line, int dir, int out)
56{
57 u32 reg, mask, data;
58
59 if (line < 2) {
60 line = line * 16;
61 reg = NV_PCRTC_GPIO;
62 mask = 0x00000011;
63 data = (dir << 4) | out;
64 } else
65 if (line < 10) {
66 line = (line - 2) * 4;
67 reg = NV_PCRTC_GPIO_EXT;
 68 mask = 0x00000003;
69 data = (dir << 1) | out;
70 } else
71 if (line < 14) {
72 line = (line - 10) * 4;
73 reg = NV_PCRTC_850;
74 mask = 0x00000003;
75 data = (dir << 1) | out;
50 } else { 76 } else {
51 return false; 77 return -EINVAL;
52 } 78 }
53 79
54 return true; 80 mask = NVReadCRTC(dev, 0, reg) & ~(mask << line);
81 NVWriteCRTC(dev, 0, reg, mask | (data << line));
82 return 0;
55} 83}
56 84
57int 85void
58nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag) 86nv10_gpio_irq_enable(struct drm_device *dev, int line, bool on)
59{ 87{
60 struct dcb_gpio_entry *ent = nouveau_bios_gpio_entry(dev, tag); 88 u32 mask = 0x00010001 << line;
61 uint32_t reg, shift, mask, value;
62 89
63 if (!ent) 90 nv_wr32(dev, 0x001104, mask);
64 return -ENODEV; 91 nv_mask(dev, 0x001144, mask, on ? mask : 0);
92}
65 93
66 if (!get_gpio_location(ent, &reg, &shift, &mask)) 94static void
67 return -ENODEV; 95nv10_gpio_isr(struct drm_device *dev)
96{
97 u32 intr = nv_rd32(dev, 0x1104);
98 u32 hi = (intr & 0x0000ffff) >> 0;
99 u32 lo = (intr & 0xffff0000) >> 16;
68 100
69 value = NVReadCRTC(dev, 0, reg) >> shift; 101 nouveau_gpio_isr(dev, 0, hi | lo);
70 102
71 return (ent->invert ? 1 : 0) ^ (value & 1); 103 nv_wr32(dev, 0x001104, intr);
72} 104}
73 105
74int 106int
75nv10_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state) 107nv10_gpio_init(struct drm_device *dev)
76{ 108{
77 struct dcb_gpio_entry *ent = nouveau_bios_gpio_entry(dev, tag); 109 nv_wr32(dev, 0x001140, 0x00000000);
78 uint32_t reg, shift, mask, value; 110 nv_wr32(dev, 0x001100, 0xffffffff);
79 111 nv_wr32(dev, 0x001144, 0x00000000);
80 if (!ent) 112 nv_wr32(dev, 0x001104, 0xffffffff);
81 return -ENODEV; 113 nouveau_irq_register(dev, 28, nv10_gpio_isr); /* PBUS */
82
83 if (!get_gpio_location(ent, &reg, &shift, &mask))
84 return -ENODEV;
85
86 value = ((ent->invert ? 1 : 0) ^ (state ? 1 : 0)) << shift;
87 mask = ~(mask << shift);
88
89 NVWriteCRTC(dev, 0, reg, value | (NVReadCRTC(dev, 0, reg) & mask));
90
91 return 0; 114 return 0;
92} 115}
116
117void
118nv10_gpio_fini(struct drm_device *dev)
119{
120 nv_wr32(dev, 0x001140, 0x00000000);
121 nv_wr32(dev, 0x001144, 0x00000000);
122 nouveau_irq_unregister(dev, 28);
123}
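The rewritten nv10 code drops the get_gpio_location() helper and open-codes the mapping, but the underlying layout is unchanged: lines 0-1 live as 16-bit-strided fields in NV_PCRTC_GPIO, lines 2-9 as 4-bit fields in NV_PCRTC_GPIO_EXT, and lines 10-13 as 4-bit fields in NV_PCRTC_850; sense() tests bit 8 of the first group's field and bit 2 of the others. A standalone sketch of the mapping (the register offsets here are illustrative; the driver goes through NVReadCRTC()/NVWriteCRTC()):

#include <stdio.h>

enum { REG_GPIO = 0x600818, REG_GPIO_EXT = 0x60081c, REG_850 = 0x600850 };

static int gpio_location(int line, unsigned *reg, int *shift, unsigned *sense)
{
	if (line < 2) {
		*reg = REG_GPIO;     *shift = line * 16;       *sense = 0x0100;
	} else if (line < 10) {
		*reg = REG_GPIO_EXT; *shift = (line - 2) * 4;  *sense = 0x0004;
	} else if (line < 14) {
		*reg = REG_850;      *shift = (line - 10) * 4; *sense = 0x0004;
	} else {
		return -1;
	}
	return 0;
}

int main(void)
{
	static const int lines[] = { 0, 1, 2, 9, 10, 13 };
	unsigned reg, sense;
	int i, shift;

	for (i = 0; i < 6; i++) {
		gpio_location(lines[i], &reg, &shift, &sense);
		printf("line %2d -> reg 0x%06x shift %2d sense bit 0x%04x\n",
		       lines[i], reg, shift, sense);
	}
	return 0;
}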
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index 3900cebba56..696d7e7dc2a 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -30,6 +30,7 @@
30#include "nouveau_encoder.h" 30#include "nouveau_encoder.h"
31#include "nouveau_connector.h" 31#include "nouveau_connector.h"
32#include "nouveau_crtc.h" 32#include "nouveau_crtc.h"
33#include "nouveau_gpio.h"
33#include "nouveau_hw.h" 34#include "nouveau_hw.h"
34#include "nv17_tv.h" 35#include "nv17_tv.h"
35 36
@@ -37,7 +38,6 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
37{ 38{
38 struct drm_device *dev = encoder->dev; 39 struct drm_device *dev = encoder->dev;
39 struct drm_nouveau_private *dev_priv = dev->dev_private; 40 struct drm_nouveau_private *dev_priv = dev->dev_private;
40 struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio;
41 uint32_t testval, regoffset = nv04_dac_output_offset(encoder); 41 uint32_t testval, regoffset = nv04_dac_output_offset(encoder);
42 uint32_t gpio0, gpio1, fp_htotal, fp_hsync_start, fp_hsync_end, 42 uint32_t gpio0, gpio1, fp_htotal, fp_hsync_start, fp_hsync_end,
43 fp_control, test_ctrl, dacclk, ctv_14, ctv_1c, ctv_6c; 43 fp_control, test_ctrl, dacclk, ctv_14, ctv_1c, ctv_6c;
@@ -53,8 +53,8 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
53 head = (dacclk & 0x100) >> 8; 53 head = (dacclk & 0x100) >> 8;
54 54
55 /* Save the previous state. */ 55 /* Save the previous state. */
56 gpio1 = gpio->get(dev, DCB_GPIO_TVDAC1); 56 gpio1 = nouveau_gpio_func_get(dev, DCB_GPIO_TVDAC1);
57 gpio0 = gpio->get(dev, DCB_GPIO_TVDAC0); 57 gpio0 = nouveau_gpio_func_get(dev, DCB_GPIO_TVDAC0);
58 fp_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL); 58 fp_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL);
59 fp_hsync_start = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START); 59 fp_hsync_start = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START);
60 fp_hsync_end = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END); 60 fp_hsync_end = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END);
@@ -65,8 +65,8 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
65 ctv_6c = NVReadRAMDAC(dev, head, 0x680c6c); 65 ctv_6c = NVReadRAMDAC(dev, head, 0x680c6c);
66 66
67 /* Prepare the DAC for load detection. */ 67 /* Prepare the DAC for load detection. */
68 gpio->set(dev, DCB_GPIO_TVDAC1, true); 68 nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, true);
69 gpio->set(dev, DCB_GPIO_TVDAC0, true); 69 nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, true);
70 70
71 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, 1343); 71 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, 1343);
72 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, 1047); 72 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, 1047);
@@ -111,8 +111,8 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
111 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, fp_hsync_end); 111 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, fp_hsync_end);
112 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, fp_hsync_start); 112 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, fp_hsync_start);
113 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, fp_htotal); 113 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, fp_htotal);
114 gpio->set(dev, DCB_GPIO_TVDAC1, gpio1); 114 nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, gpio1);
115 gpio->set(dev, DCB_GPIO_TVDAC0, gpio0); 115 nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, gpio0);
116 116
117 return sample; 117 return sample;
118} 118}
@@ -357,8 +357,6 @@ static bool nv17_tv_mode_fixup(struct drm_encoder *encoder,
357static void nv17_tv_dpms(struct drm_encoder *encoder, int mode) 357static void nv17_tv_dpms(struct drm_encoder *encoder, int mode)
358{ 358{
359 struct drm_device *dev = encoder->dev; 359 struct drm_device *dev = encoder->dev;
360 struct drm_nouveau_private *dev_priv = dev->dev_private;
361 struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio;
362 struct nv17_tv_state *regs = &to_tv_enc(encoder)->state; 360 struct nv17_tv_state *regs = &to_tv_enc(encoder)->state;
363 struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); 361 struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
364 362
@@ -383,8 +381,8 @@ static void nv17_tv_dpms(struct drm_encoder *encoder, int mode)
383 381
384 nv_load_ptv(dev, regs, 200); 382 nv_load_ptv(dev, regs, 200);
385 383
386 gpio->set(dev, DCB_GPIO_TVDAC1, mode == DRM_MODE_DPMS_ON); 384 nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, mode == DRM_MODE_DPMS_ON);
387 gpio->set(dev, DCB_GPIO_TVDAC0, mode == DRM_MODE_DPMS_ON); 385 nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, mode == DRM_MODE_DPMS_ON);
388 386
389 nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON); 387 nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON);
390} 388}
diff --git a/drivers/gpu/drm/nouveau/nv40_pm.c b/drivers/gpu/drm/nouveau/nv40_pm.c
index e676b0d5347..c7615381c5d 100644
--- a/drivers/gpu/drm/nouveau/nv40_pm.c
+++ b/drivers/gpu/drm/nouveau/nv40_pm.c
@@ -222,7 +222,7 @@ nv40_pm_gr_idle(void *data)
222 return true; 222 return true;
223} 223}
224 224
225void 225int
226nv40_pm_clocks_set(struct drm_device *dev, void *pre_state) 226nv40_pm_clocks_set(struct drm_device *dev, void *pre_state)
227{ 227{
228 struct drm_nouveau_private *dev_priv = dev->dev_private; 228 struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -231,7 +231,7 @@ nv40_pm_clocks_set(struct drm_device *dev, void *pre_state)
231 struct bit_entry M; 231 struct bit_entry M;
232 u32 crtc_mask = 0; 232 u32 crtc_mask = 0;
233 u8 sr1[2]; 233 u8 sr1[2];
234 int i; 234 int i, ret = -EAGAIN;
235 235
236 /* determine which CRTCs are active, fetch VGA_SR1 for each */ 236 /* determine which CRTCs are active, fetch VGA_SR1 for each */
237 for (i = 0; i < 2; i++) { 237 for (i = 0; i < 2; i++) {
@@ -263,6 +263,8 @@ nv40_pm_clocks_set(struct drm_device *dev, void *pre_state)
263 if (!nv_wait_cb(dev, nv40_pm_gr_idle, dev)) 263 if (!nv_wait_cb(dev, nv40_pm_gr_idle, dev))
264 goto resume; 264 goto resume;
265 265
266 ret = 0;
267
266 /* set engine clocks */ 268 /* set engine clocks */
267 nv_mask(dev, 0x00c040, 0x00000333, 0x00000000); 269 nv_mask(dev, 0x00c040, 0x00000333, 0x00000000);
268 nv_wr32(dev, 0x004004, info->npll_coef); 270 nv_wr32(dev, 0x004004, info->npll_coef);
@@ -345,4 +347,48 @@ resume:
345 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); 347 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
346 348
347 kfree(info); 349 kfree(info);
350 return ret;
351}
352
353int
354nv40_pm_pwm_get(struct drm_device *dev, int line, u32 *divs, u32 *duty)
355{
356 if (line == 2) {
357 u32 reg = nv_rd32(dev, 0x0010f0);
358 if (reg & 0x80000000) {
359 *duty = (reg & 0x7fff0000) >> 16;
360 *divs = (reg & 0x00007fff);
361 return 0;
362 }
363 } else
364 if (line == 9) {
365 u32 reg = nv_rd32(dev, 0x0015f4);
366 if (reg & 0x80000000) {
367 *divs = nv_rd32(dev, 0x0015f8);
368 *duty = (reg & 0x7fffffff);
369 return 0;
370 }
371 } else {
372 NV_ERROR(dev, "unknown pwm ctrl for gpio %d\n", line);
373 return -ENODEV;
374 }
375
376 return -EINVAL;
377}
378
379int
380nv40_pm_pwm_set(struct drm_device *dev, int line, u32 divs, u32 duty)
381{
382 if (line == 2) {
383 nv_wr32(dev, 0x0010f0, 0x80000000 | (duty << 16) | divs);
384 } else
385 if (line == 9) {
386 nv_wr32(dev, 0x0015f8, divs);
387 nv_wr32(dev, 0x0015f4, duty | 0x80000000);
388 } else {
389 NV_ERROR(dev, "unknown pwm ctrl for gpio %d\n", line);
390 return -ENODEV;
391 }
392
393 return 0;
348} 394}
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index 882080e0b4f..8f6c2ace3ad 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -132,33 +132,42 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
132} 132}
133 133
134static int 134static int
135nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool on, bool update) 135nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
136{ 136{
137 struct drm_device *dev = nv_crtc->base.dev; 137 struct nouveau_channel *evo = nv50_display(nv_crtc->base.dev)->master;
138 struct nouveau_channel *evo = nv50_display(dev)->master; 138 struct nouveau_connector *nv_connector;
139 int ret; 139 struct drm_connector *connector;
140 140 int head = nv_crtc->index, ret;
141 NV_DEBUG_KMS(dev, "\n"); 141 u32 mode = 0x00;
142 142
143 ret = RING_SPACE(evo, 2 + (update ? 2 : 0)); 143 nv_connector = nouveau_crtc_connector_get(nv_crtc);
144 if (ret) { 144 connector = &nv_connector->base;
145 NV_ERROR(dev, "no space while setting dither\n"); 145 if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) {
146 return ret; 146 if (nv_crtc->base.fb->depth > connector->display_info.bpc * 3)
147 mode = DITHERING_MODE_DYNAMIC2X2;
148 } else {
149 mode = nv_connector->dithering_mode;
147 } 150 }
148 151
149 BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, DITHER_CTRL), 1); 152 if (nv_connector->dithering_depth == DITHERING_DEPTH_AUTO) {
150 if (on) 153 if (connector->display_info.bpc >= 8)
151 OUT_RING(evo, NV50_EVO_CRTC_DITHER_CTRL_ON); 154 mode |= DITHERING_DEPTH_8BPC;
152 else 155 } else {
153 OUT_RING(evo, NV50_EVO_CRTC_DITHER_CTRL_OFF); 156 mode |= nv_connector->dithering_depth;
157 }
154 158
155 if (update) { 159 ret = RING_SPACE(evo, 2 + (update ? 2 : 0));
156 BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1); 160 if (ret == 0) {
157 OUT_RING(evo, 0); 161 BEGIN_RING(evo, 0, NV50_EVO_CRTC(head, DITHER_CTRL), 1);
158 FIRE_RING(evo); 162 OUT_RING (evo, mode);
163 if (update) {
164 BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
165 OUT_RING (evo, 0);
166 FIRE_RING (evo);
167 }
159 } 168 }
160 169
161 return 0; 170 return ret;
162} 171}
163 172
164struct nouveau_connector * 173struct nouveau_connector *
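Both the nv04 and nv50 paths now share the same AUTO dithering policy: dither only when the framebuffer carries more colour resolution than the panel reports, i.e. fb depth greater than bpc * 3. A small sketch of the decision:

#include <stdbool.h>
#include <stdio.h>

enum dither_mode { DITHER_OFF, DITHER_ON, DITHER_AUTO };

/* AUTO enables dithering only when the fb has more colour resolution
 * than the panel, e.g. a depth-24 fb driving a 6bpc (18-bit) panel */
static bool want_dither(enum dither_mode mode, int fb_depth, int panel_bpc)
{
	if (mode == DITHER_AUTO)
		return fb_depth > panel_bpc * 3;
	return mode == DITHER_ON;
}

int main(void)
{
	printf("24bpp fb, 6bpc panel: %d\n", want_dither(DITHER_AUTO, 24, 6));
	printf("24bpp fb, 8bpc panel: %d\n", want_dither(DITHER_AUTO, 24, 8));
	return 0;
}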
@@ -180,80 +189,103 @@ nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc)
180} 189}
181 190
182static int 191static int
183nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, int scaling_mode, bool update) 192nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
184{ 193{
185 struct nouveau_connector *nv_connector = 194 struct nouveau_connector *nv_connector;
186 nouveau_crtc_connector_get(nv_crtc); 195 struct drm_crtc *crtc = &nv_crtc->base;
187 struct drm_device *dev = nv_crtc->base.dev; 196 struct drm_device *dev = crtc->dev;
188 struct nouveau_channel *evo = nv50_display(dev)->master; 197 struct nouveau_channel *evo = nv50_display(dev)->master;
189 struct drm_display_mode *native_mode = NULL; 198 struct drm_display_mode *umode = &crtc->mode;
190 struct drm_display_mode *mode = &nv_crtc->base.mode; 199 struct drm_display_mode *omode;
191 uint32_t outX, outY, horiz, vert; 200 int scaling_mode, ret;
192 int ret; 201 u32 ctrl = 0, oX, oY;
193 202
194 NV_DEBUG_KMS(dev, "\n"); 203 NV_DEBUG_KMS(dev, "\n");
195 204
196 switch (scaling_mode) { 205 nv_connector = nouveau_crtc_connector_get(nv_crtc);
197 case DRM_MODE_SCALE_NONE: 206 if (!nv_connector || !nv_connector->native_mode) {
198 break; 207 NV_ERROR(dev, "no native mode, forcing panel scaling\n");
199 default: 208 scaling_mode = DRM_MODE_SCALE_NONE;
200 if (!nv_connector || !nv_connector->native_mode) { 209 } else {
201 NV_ERROR(dev, "No native mode, forcing panel scaling\n"); 210 scaling_mode = nv_connector->scaling_mode;
202 scaling_mode = DRM_MODE_SCALE_NONE; 211 }
212
 213 /* start off at the resolution we programmed the crtc for; this
214 * effectively handles NONE/FULL scaling
215 */
216 if (scaling_mode != DRM_MODE_SCALE_NONE)
217 omode = nv_connector->native_mode;
218 else
219 omode = umode;
220
221 oX = omode->hdisplay;
222 oY = omode->vdisplay;
223 if (omode->flags & DRM_MODE_FLAG_DBLSCAN)
224 oY *= 2;
225
 226 /* add overscan compensation if necessary; this keeps the aspect
227 * ratio the same as the backend mode unless overridden by the
228 * user setting both hborder and vborder properties.
229 */
230 if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON ||
231 (nv_connector->underscan == UNDERSCAN_AUTO &&
232 nv_connector->edid &&
233 drm_detect_hdmi_monitor(nv_connector->edid)))) {
234 u32 bX = nv_connector->underscan_hborder;
235 u32 bY = nv_connector->underscan_vborder;
236 u32 aspect = (oY << 19) / oX;
237
238 if (bX) {
239 oX -= (bX * 2);
240 if (bY) oY -= (bY * 2);
241 else oY = ((oX * aspect) + (aspect / 2)) >> 19;
203 } else { 242 } else {
204 native_mode = nv_connector->native_mode; 243 oX -= (oX >> 4) + 32;
244 if (bY) oY -= (bY * 2);
245 else oY = ((oX * aspect) + (aspect / 2)) >> 19;
205 } 246 }
206 break;
207 } 247 }
208 248
249 /* handle CENTER/ASPECT scaling, taking into account the areas
250 * removed already for overscan compensation
251 */
209 switch (scaling_mode) { 252 switch (scaling_mode) {
253 case DRM_MODE_SCALE_CENTER:
254 oX = min((u32)umode->hdisplay, oX);
255 oY = min((u32)umode->vdisplay, oY);
256 /* fall-through */
210 case DRM_MODE_SCALE_ASPECT: 257 case DRM_MODE_SCALE_ASPECT:
211 horiz = (native_mode->hdisplay << 19) / mode->hdisplay; 258 if (oY < oX) {
212 vert = (native_mode->vdisplay << 19) / mode->vdisplay; 259 u32 aspect = (umode->hdisplay << 19) / umode->vdisplay;
213 260 oX = ((oY * aspect) + (aspect / 2)) >> 19;
214 if (vert > horiz) {
215 outX = (mode->hdisplay * horiz) >> 19;
216 outY = (mode->vdisplay * horiz) >> 19;
217 } else { 261 } else {
218 outX = (mode->hdisplay * vert) >> 19; 262 u32 aspect = (umode->vdisplay << 19) / umode->hdisplay;
219 outY = (mode->vdisplay * vert) >> 19; 263 oY = ((oX * aspect) + (aspect / 2)) >> 19;
220 } 264 }
221 break; 265 break;
222 case DRM_MODE_SCALE_FULLSCREEN:
223 outX = native_mode->hdisplay;
224 outY = native_mode->vdisplay;
225 break;
226 case DRM_MODE_SCALE_CENTER:
227 case DRM_MODE_SCALE_NONE:
228 default: 266 default:
229 outX = mode->hdisplay;
230 outY = mode->vdisplay;
231 break; 267 break;
232 } 268 }
233 269
234 ret = RING_SPACE(evo, update ? 7 : 5); 270 if (umode->hdisplay != oX || umode->vdisplay != oY ||
271 umode->flags & DRM_MODE_FLAG_INTERLACE ||
272 umode->flags & DRM_MODE_FLAG_DBLSCAN)
273 ctrl |= NV50_EVO_CRTC_SCALE_CTRL_ACTIVE;
274
275 ret = RING_SPACE(evo, 5);
235 if (ret) 276 if (ret)
236 return ret; 277 return ret;
237 278
238 /* Got a better name for SCALER_ACTIVE? */
239 /* One day i've got to really figure out why this is needed. */
240 BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_CTRL), 1); 279 BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_CTRL), 1);
241 if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) || 280 OUT_RING (evo, ctrl);
242 (mode->flags & DRM_MODE_FLAG_INTERLACE) ||
243 mode->hdisplay != outX || mode->vdisplay != outY) {
244 OUT_RING(evo, NV50_EVO_CRTC_SCALE_CTRL_ACTIVE);
245 } else {
246 OUT_RING(evo, NV50_EVO_CRTC_SCALE_CTRL_INACTIVE);
247 }
248
249 BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_RES1), 2); 281 BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_RES1), 2);
250 OUT_RING(evo, outY << 16 | outX); 282 OUT_RING (evo, oY << 16 | oX);
251 OUT_RING(evo, outY << 16 | outX); 283 OUT_RING (evo, oY << 16 | oX);
252 284
253 if (update) { 285 if (update) {
254 BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1); 286 nv50_display_flip_stop(crtc);
255 OUT_RING(evo, 0); 287 nv50_display_sync(dev);
256 FIRE_RING(evo); 288 nv50_display_flip_next(crtc, crtc->fb, NULL);
257 } 289 }
258 290
259 return 0; 291 return 0;
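The border maths works in .19 fixed point: the aspect ratio is captured before the horizontal border is subtracted, then the new height is derived from it with round-to-nearest (the + aspect/2 term). Worked through for a 1920x1080 mode with a hypothetical 32-pixel hborder and no vborder:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t oX = 1920, oY = 1080;
	uint32_t aspect = (oY << 19) / oX;          /* 0.5625 in .19 fixed point */
	uint32_t bX = 32;                           /* hypothetical hborder */

	oX -= bX * 2;
	oY = ((oX * aspect) + (aspect / 2)) >> 19;  /* round to nearest */
	printf("underscanned: %ux%u\n", oX, oY);    /* prints 1856x1044 */
	return 0;
}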
@@ -333,7 +365,6 @@ nv50_crtc_destroy(struct drm_crtc *crtc)
333 nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo); 365 nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
334 nouveau_bo_unmap(nv_crtc->cursor.nvbo); 366 nouveau_bo_unmap(nv_crtc->cursor.nvbo);
335 nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); 367 nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
336 kfree(nv_crtc->mode);
337 kfree(nv_crtc); 368 kfree(nv_crtc);
338} 369}
339 370
@@ -441,39 +472,6 @@ nv50_crtc_dpms(struct drm_crtc *crtc, int mode)
441{ 472{
442} 473}
443 474
444static int
445nv50_crtc_wait_complete(struct drm_crtc *crtc)
446{
447 struct drm_device *dev = crtc->dev;
448 struct drm_nouveau_private *dev_priv = dev->dev_private;
449 struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
450 struct nv50_display *disp = nv50_display(dev);
451 struct nouveau_channel *evo = disp->master;
452 u64 start;
453 int ret;
454
455 ret = RING_SPACE(evo, 6);
456 if (ret)
457 return ret;
458 BEGIN_RING(evo, 0, 0x0084, 1);
459 OUT_RING (evo, 0x80000000);
460 BEGIN_RING(evo, 0, 0x0080, 1);
461 OUT_RING (evo, 0);
462 BEGIN_RING(evo, 0, 0x0084, 1);
463 OUT_RING (evo, 0x00000000);
464
465 nv_wo32(disp->ntfy, 0x000, 0x00000000);
466 FIRE_RING (evo);
467
468 start = ptimer->read(dev);
469 do {
470 if (nv_ro32(disp->ntfy, 0x000))
471 return 0;
472 } while (ptimer->read(dev) - start < 2000000000ULL);
473
474 return -EBUSY;
475}
476
477static void 475static void
478nv50_crtc_prepare(struct drm_crtc *crtc) 476nv50_crtc_prepare(struct drm_crtc *crtc)
479{ 477{
@@ -497,7 +495,7 @@ nv50_crtc_commit(struct drm_crtc *crtc)
497 495
498 nv50_crtc_blank(nv_crtc, false); 496 nv50_crtc_blank(nv_crtc, false);
499 drm_vblank_post_modeset(dev, nv_crtc->index); 497 drm_vblank_post_modeset(dev, nv_crtc->index);
500 nv50_crtc_wait_complete(crtc); 498 nv50_display_sync(dev);
501 nv50_display_flip_next(crtc, crtc->fb, NULL); 499 nv50_display_flip_next(crtc, crtc->fb, NULL);
502} 500}
503 501
@@ -593,90 +591,76 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
593} 591}
594 592
595static int 593static int
596nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, 594nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
597 struct drm_display_mode *adjusted_mode, int x, int y, 595 struct drm_display_mode *mode, int x, int y,
598 struct drm_framebuffer *old_fb) 596 struct drm_framebuffer *old_fb)
599{ 597{
600 struct drm_device *dev = crtc->dev; 598 struct drm_device *dev = crtc->dev;
601 struct nouveau_channel *evo = nv50_display(dev)->master; 599 struct nouveau_channel *evo = nv50_display(dev)->master;
602 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 600 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
603 struct nouveau_connector *nv_connector = NULL; 601 u32 head = nv_crtc->index * 0x400;
604 uint32_t hsync_dur, vsync_dur, hsync_start_to_end, vsync_start_to_end; 602 u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1;
605 uint32_t hunk1, vunk1, vunk2a, vunk2b; 603 u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1;
604 u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks;
605 u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks;
606 u32 vblan2e = 0, vblan2s = 1;
606 int ret; 607 int ret;
607 608
608 /* Find the connector attached to this CRTC */ 609 /* hw timing description looks like this:
609 nv_connector = nouveau_crtc_connector_get(nv_crtc); 610 *
610 611 * <sync> <back porch> <---------display---------> <front porch>
611 *nv_crtc->mode = *adjusted_mode; 612 * ______
612 613 * |____________|---------------------------|____________|
613 NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); 614 *
615 * ^ synce ^ blanke ^ blanks ^ active
616 *
617 * interlaced modes also have 2 additional values pointing at the end
618 * and start of the next field's blanking period.
619 */
614 620
615 hsync_dur = adjusted_mode->hsync_end - adjusted_mode->hsync_start; 621 hactive = mode->htotal;
616 vsync_dur = adjusted_mode->vsync_end - adjusted_mode->vsync_start; 622 hsynce = mode->hsync_end - mode->hsync_start - 1;
617 hsync_start_to_end = adjusted_mode->htotal - adjusted_mode->hsync_start; 623 hbackp = mode->htotal - mode->hsync_end;
618 vsync_start_to_end = adjusted_mode->vtotal - adjusted_mode->vsync_start; 624 hblanke = hsynce + hbackp;
619 /* I can't give this a proper name, anyone else can? */ 625 hfrontp = mode->hsync_start - mode->hdisplay;
620 hunk1 = adjusted_mode->htotal - 626 hblanks = mode->htotal - hfrontp - 1;
621 adjusted_mode->hsync_start + adjusted_mode->hdisplay; 627
622 vunk1 = adjusted_mode->vtotal - 628 vactive = mode->vtotal * vscan / ilace;
623 adjusted_mode->vsync_start + adjusted_mode->vdisplay; 629 vsynce = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1;
624 /* Another strange value, this time only for interlaced adjusted_modes. */ 630 vbackp = (mode->vtotal - mode->vsync_end) * vscan / ilace;
625 vunk2a = 2 * adjusted_mode->vtotal - 631 vblanke = vsynce + vbackp;
626 adjusted_mode->vsync_start + adjusted_mode->vdisplay; 632 vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
627 vunk2b = adjusted_mode->vtotal - 633 vblanks = vactive - vfrontp - 1;
628 adjusted_mode->vsync_start + adjusted_mode->vtotal; 634 if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
629 635 vblan2e = vactive + vsynce + vbackp;
630 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { 636 vblan2s = vblan2e + (mode->vdisplay * vscan / ilace);
631 vsync_dur /= 2; 637 vactive = (vactive * 2) + 1;
632 vsync_start_to_end /= 2;
633 vunk1 /= 2;
634 vunk2a /= 2;
635 vunk2b /= 2;
636 /* magic */
637 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) {
638 vsync_start_to_end -= 1;
639 vunk1 -= 1;
640 vunk2a -= 1;
641 vunk2b -= 1;
642 }
643 } 638 }
644 639
645 ret = RING_SPACE(evo, 17); 640 ret = RING_SPACE(evo, 18);
646 if (ret) 641 if (ret == 0) {
647 return ret; 642 BEGIN_RING(evo, 0, 0x0804 + head, 2);
648 643 OUT_RING (evo, 0x00800000 | mode->clock);
649 BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CLOCK), 2); 644 OUT_RING (evo, (ilace == 2) ? 2 : 0);
650 OUT_RING(evo, adjusted_mode->clock | 0x800000); 645 BEGIN_RING(evo, 0, 0x0810 + head, 6);
651 OUT_RING(evo, (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 0); 646 OUT_RING (evo, 0x00000000); /* border colour */
652 647 OUT_RING (evo, (vactive << 16) | hactive);
653 BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, DISPLAY_START), 5); 648 OUT_RING (evo, ( vsynce << 16) | hsynce);
654 OUT_RING(evo, 0); 649 OUT_RING (evo, (vblanke << 16) | hblanke);
655 OUT_RING(evo, (adjusted_mode->vtotal << 16) | adjusted_mode->htotal); 650 OUT_RING (evo, (vblanks << 16) | hblanks);
656 OUT_RING(evo, (vsync_dur - 1) << 16 | (hsync_dur - 1)); 651 OUT_RING (evo, (vblan2e << 16) | vblan2s);
657 OUT_RING(evo, (vsync_start_to_end - 1) << 16 | 652 BEGIN_RING(evo, 0, 0x082c + head, 1);
658 (hsync_start_to_end - 1)); 653 OUT_RING (evo, 0x00000000);
659 OUT_RING(evo, (vunk1 - 1) << 16 | (hunk1 - 1)); 654 BEGIN_RING(evo, 0, 0x0900 + head, 1);
660 655 OUT_RING (evo, 0x00000311); /* makes sync channel work */
661 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { 656 BEGIN_RING(evo, 0, 0x08c8 + head, 1);
662 BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, UNK0824), 1); 657 OUT_RING (evo, (umode->vdisplay << 16) | umode->hdisplay);
663 OUT_RING(evo, (vunk2b - 1) << 16 | (vunk2a - 1)); 658 BEGIN_RING(evo, 0, 0x08d4 + head, 1);
664 } else { 659 OUT_RING (evo, 0x00000000); /* screen position */
665 OUT_RING(evo, 0);
666 OUT_RING(evo, 0);
667 } 660 }
668 661
669 BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, UNK082C), 1); 662 nv_crtc->set_dither(nv_crtc, false);
670 OUT_RING(evo, 0); 663 nv_crtc->set_scale(nv_crtc, false);
671
672 /* This is the actual resolution of the mode. */
673 BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, REAL_RES), 1);
674 OUT_RING(evo, (mode->vdisplay << 16) | mode->hdisplay);
675 BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_CENTER_OFFSET), 1);
676 OUT_RING(evo, NV50_EVO_CRTC_SCALE_CENTER_OFFSET_VAL(0, 0));
677
678 nv_crtc->set_dither(nv_crtc, nv_connector->use_dithering, false);
679 nv_crtc->set_scale(nv_crtc, nv_connector->scaling_mode, false);
680 664
681 return nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false); 665 return nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false);
682} 666}
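With the unk-named values gone, the timing words can be checked by hand against the comment's diagram. A standalone sketch running the new formulas over a standard VESA 1024x768@60 mode (progressive, no doublescan, so the ilace and vscan factors drop out):

#include <stdio.h>

struct mode {
	int clock, hdisplay, hsync_start, hsync_end, htotal;
	int vdisplay, vsync_start, vsync_end, vtotal;
};

int main(void)
{
	/* VESA 1024x768@60 as a worked example */
	struct mode m = { 65000, 1024, 1048, 1184, 1344,
				  768,  771,  777,  806 };

	int hactive = m.htotal;
	int hsynce  = m.hsync_end - m.hsync_start - 1;	/* 135 */
	int hbackp  = m.htotal - m.hsync_end;		/* 160 */
	int hblanke = hsynce + hbackp;			/* 295 */
	int hfrontp = m.hsync_start - m.hdisplay;	/* 24 */
	int hblanks = m.htotal - hfrontp - 1;		/* 1319 */

	int vactive = m.vtotal;
	int vsynce  = m.vsync_end - m.vsync_start - 1;	/* 5 */
	int vbackp  = m.vtotal - m.vsync_end;		/* 29 */
	int vblanke = vsynce + vbackp;			/* 34 */
	int vfrontp = m.vsync_start - m.vdisplay;	/* 3 */
	int vblanks = vactive - vfrontp - 1;		/* 802 */

	printf("h: active %d synce %d blanke %d blanks %d\n",
	       hactive, hsynce, hblanke, hblanks);
	printf("v: active %d synce %d blanke %d blanks %d\n",
	       vactive, vsynce, vblanke, vblanks);
	return 0;
}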
@@ -692,7 +676,7 @@ nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
692 if (ret) 676 if (ret)
693 return ret; 677 return ret;
694 678
695 ret = nv50_crtc_wait_complete(crtc); 679 ret = nv50_display_sync(crtc->dev);
696 if (ret) 680 if (ret)
697 return ret; 681 return ret;
698 682
@@ -711,7 +695,7 @@ nv50_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
711 if (ret) 695 if (ret)
712 return ret; 696 return ret;
713 697
714 return nv50_crtc_wait_complete(crtc); 698 return nv50_display_sync(crtc->dev);
715} 699}
716 700
717static const struct drm_crtc_helper_funcs nv50_crtc_helper_funcs = { 701static const struct drm_crtc_helper_funcs nv50_crtc_helper_funcs = {
@@ -737,12 +721,6 @@ nv50_crtc_create(struct drm_device *dev, int index)
737 if (!nv_crtc) 721 if (!nv_crtc)
738 return -ENOMEM; 722 return -ENOMEM;
739 723
740 nv_crtc->mode = kzalloc(sizeof(*nv_crtc->mode), GFP_KERNEL);
741 if (!nv_crtc->mode) {
742 kfree(nv_crtc);
743 return -ENOMEM;
744 }
745
746 /* Default CLUT parameters, will be activated on the hw upon 724 /* Default CLUT parameters, will be activated on the hw upon
747 * first mode set. 725 * first mode set.
748 */ 726 */
@@ -764,7 +742,6 @@ nv50_crtc_create(struct drm_device *dev, int index)
764 } 742 }
765 743
766 if (ret) { 744 if (ret) {
767 kfree(nv_crtc->mode);
768 kfree(nv_crtc); 745 kfree(nv_crtc);
769 return ret; 746 return ret;
770 } 747 }
diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c
index 808f3ec8f82..a0f2bebf49e 100644
--- a/drivers/gpu/drm/nouveau/nv50_dac.c
+++ b/drivers/gpu/drm/nouveau/nv50_dac.c
@@ -200,11 +200,6 @@ nv50_dac_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
200} 200}
201 201
202static void 202static void
203nv50_dac_prepare(struct drm_encoder *encoder)
204{
205}
206
207static void
208nv50_dac_commit(struct drm_encoder *encoder) 203nv50_dac_commit(struct drm_encoder *encoder)
209{ 204{
210} 205}
@@ -266,7 +261,7 @@ static const struct drm_encoder_helper_funcs nv50_dac_helper_funcs = {
266 .save = nv50_dac_save, 261 .save = nv50_dac_save,
267 .restore = nv50_dac_restore, 262 .restore = nv50_dac_restore,
268 .mode_fixup = nv50_dac_mode_fixup, 263 .mode_fixup = nv50_dac_mode_fixup,
269 .prepare = nv50_dac_prepare, 264 .prepare = nv50_dac_disconnect,
270 .commit = nv50_dac_commit, 265 .commit = nv50_dac_commit,
271 .mode_set = nv50_dac_mode_set, 266 .mode_set = nv50_dac_mode_set,
272 .get_crtc = nv50_dac_crtc_get, 267 .get_crtc = nv50_dac_crtc_get,
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 06de250fe61..7ba28e08ee3 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -50,9 +50,53 @@ nv50_sor_nr(struct drm_device *dev)
50 return 4; 50 return 4;
51} 51}
52 52
53static int
54evo_icmd(struct drm_device *dev, int ch, u32 mthd, u32 data)
55{
56 int ret = 0;
57 nv_mask(dev, 0x610300 + (ch * 0x08), 0x00000001, 0x00000001);
58 nv_wr32(dev, 0x610304 + (ch * 0x08), data);
59 nv_wr32(dev, 0x610300 + (ch * 0x08), 0x80000001 | mthd);
60 if (!nv_wait(dev, 0x610300 + (ch * 0x08), 0x80000000, 0x00000000))
61 ret = -EBUSY;
62 if (ret || (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO))
63 NV_INFO(dev, "EvoPIO: %d 0x%04x 0x%08x\n", ch, mthd, data);
64 nv_mask(dev, 0x610300 + (ch * 0x08), 0x00000001, 0x00000000);
65 return ret;
66}
67
53int 68int
54nv50_display_early_init(struct drm_device *dev) 69nv50_display_early_init(struct drm_device *dev)
55{ 70{
71 u32 ctrl = nv_rd32(dev, 0x610200);
72 int i;
73
 74 /* check if master evo channel is already active, as good a sign as any
75 * that the display engine is in a weird state (hibernate/kexec), if
76 * it is, do our best to reset the display engine...
77 */
78 if ((ctrl & 0x00000003) == 0x00000003) {
79 NV_INFO(dev, "PDISP: EVO(0) 0x%08x, resetting...\n", ctrl);
80
81 /* deactivate both heads first, PDISP will disappear forever
82 * (well, until you power cycle) on some boards as soon as
83 * PMC_ENABLE is hit unless they are..
84 */
85 for (i = 0; i < 2; i++) {
86 evo_icmd(dev, 0, 0x0880 + (i * 0x400), 0x05000000);
87 evo_icmd(dev, 0, 0x089c + (i * 0x400), 0);
88 evo_icmd(dev, 0, 0x0840 + (i * 0x400), 0);
89 evo_icmd(dev, 0, 0x0844 + (i * 0x400), 0);
90 evo_icmd(dev, 0, 0x085c + (i * 0x400), 0);
91 evo_icmd(dev, 0, 0x0874 + (i * 0x400), 0);
92 }
93 evo_icmd(dev, 0, 0x0080, 0);
94
95 /* reset PDISP */
96 nv_mask(dev, 0x000200, 0x40000000, 0x00000000);
97 nv_mask(dev, 0x000200, 0x40000000, 0x40000000);
98 }
99
56 return 0; 100 return 0;
57} 101}
58 102
@@ -62,11 +106,40 @@ nv50_display_late_takedown(struct drm_device *dev)
62} 106}
63 107
64int 108int
65nv50_display_init(struct drm_device *dev) 109nv50_display_sync(struct drm_device *dev)
66{ 110{
67 struct drm_nouveau_private *dev_priv = dev->dev_private; 111 struct drm_nouveau_private *dev_priv = dev->dev_private;
68 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio; 112 struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
69 struct drm_connector *connector; 113 struct nv50_display *disp = nv50_display(dev);
114 struct nouveau_channel *evo = disp->master;
115 u64 start;
116 int ret;
117
118 ret = RING_SPACE(evo, 6);
119 if (ret == 0) {
120 BEGIN_RING(evo, 0, 0x0084, 1);
121 OUT_RING (evo, 0x80000000);
122 BEGIN_RING(evo, 0, 0x0080, 1);
123 OUT_RING (evo, 0);
124 BEGIN_RING(evo, 0, 0x0084, 1);
125 OUT_RING (evo, 0x00000000);
126
127 nv_wo32(disp->ntfy, 0x000, 0x00000000);
128 FIRE_RING (evo);
129
130 start = ptimer->read(dev);
131 do {
132 if (nv_ro32(disp->ntfy, 0x000))
133 return 0;
134 } while (ptimer->read(dev) - start < 2000000000ULL);
135 }
136
137 return -EBUSY;
138}
139
140int
141nv50_display_init(struct drm_device *dev)
142{
70 struct nouveau_channel *evo; 143 struct nouveau_channel *evo;
71 int ret, i; 144 int ret, i;
72 u32 val; 145 u32 val;
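nv50_display_sync() replaces the old per-CRTC wait: it pushes a notifier write through the core EVO channel, then busy-waits on the notifier word with a two-second ceiling read from PTIMER. The same bounded-poll shape in standalone form, with CLOCK_MONOTONIC standing in for ptimer->read() and a plain variable for the notifier:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t read_ns(void)	/* stands in for ptimer->read() */
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

static volatile uint32_t notifier;	/* stands in for nv_ro32(disp->ntfy, 0) */

int main(void)
{
	uint64_t start;

	notifier = 1;	/* pretend the GPU has already written the notifier */

	start = read_ns();
	do {
		if (notifier) {
			printf("synced\n");
			return 0;
		}
	} while (read_ns() - start < 2000000000ULL);

	printf("timeout\n");	/* the driver returns -EBUSY here */
	return 1;
}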
@@ -161,16 +234,6 @@ nv50_display_init(struct drm_device *dev)
161 NV50_PDISPLAY_INTR_EN_1_CLK_UNK20 | 234 NV50_PDISPLAY_INTR_EN_1_CLK_UNK20 |
162 NV50_PDISPLAY_INTR_EN_1_CLK_UNK40); 235 NV50_PDISPLAY_INTR_EN_1_CLK_UNK40);
163 236
164 /* enable hotplug interrupts */
165 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
166 struct nouveau_connector *conn = nouveau_connector(connector);
167
168 if (conn->dcb->gpio_tag == 0xff)
169 continue;
170
171 pgpio->irq_enable(dev, conn->dcb->gpio_tag, true);
172 }
173
174 ret = nv50_evo_init(dev); 237 ret = nv50_evo_init(dev);
175 if (ret) 238 if (ret)
176 return ret; 239 return ret;
@@ -178,36 +241,19 @@ nv50_display_init(struct drm_device *dev)
178 241
179 nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->vinst >> 8) | 9); 242 nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->vinst >> 8) | 9);
180 243
181 ret = RING_SPACE(evo, 15); 244 ret = RING_SPACE(evo, 3);
182 if (ret) 245 if (ret)
183 return ret; 246 return ret;
184 BEGIN_RING(evo, 0, NV50_EVO_UNK84, 2); 247 BEGIN_RING(evo, 0, NV50_EVO_UNK84, 2);
185 OUT_RING(evo, NV50_EVO_UNK84_NOTIFY_DISABLED); 248 OUT_RING (evo, NV50_EVO_UNK84_NOTIFY_DISABLED);
186 OUT_RING(evo, NvEvoSync); 249 OUT_RING (evo, NvEvoSync);
187 BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, FB_DMA), 1);
188 OUT_RING(evo, NV50_EVO_CRTC_FB_DMA_HANDLE_NONE);
189 BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK0800), 1);
190 OUT_RING(evo, 0);
191 BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, DISPLAY_START), 1);
192 OUT_RING(evo, 0);
193 BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK082C), 1);
194 OUT_RING(evo, 0);
195 /* required to make display sync channels not hate life */
196 BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK900), 1);
197 OUT_RING (evo, 0x00000311);
198 BEGIN_RING(evo, 0, NV50_EVO_CRTC(1, UNK900), 1);
199 OUT_RING (evo, 0x00000311);
200 FIRE_RING(evo);
201 if (!nv_wait(dev, 0x640004, 0xffffffff, evo->dma.put << 2))
202 NV_ERROR(dev, "evo pushbuf stalled\n");
203
204 250
205 return 0; 251 return nv50_display_sync(dev);
206} 252}
207 253
208static int nv50_display_disable(struct drm_device *dev) 254void
255nv50_display_fini(struct drm_device *dev)
209{ 256{
210 struct drm_nouveau_private *dev_priv = dev->dev_private;
211 struct nv50_display *disp = nv50_display(dev); 257 struct nv50_display *disp = nv50_display(dev);
212 struct nouveau_channel *evo = disp->master; 258 struct nouveau_channel *evo = disp->master;
213 struct drm_crtc *drm_crtc; 259 struct drm_crtc *drm_crtc;
@@ -270,18 +316,10 @@ static int nv50_display_disable(struct drm_device *dev)
270 316
271 /* disable interrupts. */ 317 /* disable interrupts. */
272 nv_wr32(dev, NV50_PDISPLAY_INTR_EN_1, 0x00000000); 318 nv_wr32(dev, NV50_PDISPLAY_INTR_EN_1, 0x00000000);
273
274 /* disable hotplug interrupts */
275 nv_wr32(dev, 0xe054, 0xffffffff);
276 nv_wr32(dev, 0xe050, 0x00000000);
277 if (dev_priv->chipset >= 0x90) {
278 nv_wr32(dev, 0xe074, 0xffffffff);
279 nv_wr32(dev, 0xe070, 0x00000000);
280 }
281 return 0;
282} 319}
283 320
284int nv50_display_create(struct drm_device *dev) 321int
322nv50_display_create(struct drm_device *dev)
285{ 323{
286 struct drm_nouveau_private *dev_priv = dev->dev_private; 324 struct drm_nouveau_private *dev_priv = dev->dev_private;
287 struct dcb_table *dcb = &dev_priv->vbios.dcb; 325 struct dcb_table *dcb = &dev_priv->vbios.dcb;
@@ -341,7 +379,7 @@ int nv50_display_create(struct drm_device *dev)
341 tasklet_init(&priv->tasklet, nv50_display_bh, (unsigned long)dev); 379 tasklet_init(&priv->tasklet, nv50_display_bh, (unsigned long)dev);
342 nouveau_irq_register(dev, 26, nv50_display_isr); 380 nouveau_irq_register(dev, 26, nv50_display_isr);
343 381
344 ret = nv50_display_init(dev); 382 ret = nv50_evo_create(dev);
345 if (ret) { 383 if (ret) {
346 nv50_display_destroy(dev); 384 nv50_display_destroy(dev);
347 return ret; 385 return ret;
@@ -357,7 +395,7 @@ nv50_display_destroy(struct drm_device *dev)
357 395
358 NV_DEBUG_KMS(dev, "\n"); 396 NV_DEBUG_KMS(dev, "\n");
359 397
360 nv50_display_disable(dev); 398 nv50_evo_destroy(dev);
361 nouveau_irq_unregister(dev, 26); 399 nouveau_irq_unregister(dev, 26);
362 kfree(disp); 400 kfree(disp);
363} 401}
@@ -521,7 +559,7 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcb,
521 } else { 559 } else {
522 /* determine number of lvds links */ 560 /* determine number of lvds links */
523 if (nv_connector && nv_connector->edid && 561 if (nv_connector && nv_connector->edid &&
524 nv_connector->dcb->type == DCB_CONNECTOR_LVDS_SPWG) { 562 nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
525 /* http://www.spwg.org */ 563 /* http://www.spwg.org */
526 if (((u8 *)nv_connector->edid)[121] == 2) 564 if (((u8 *)nv_connector->edid)[121] == 2)
527 script |= 0x0100; 565 script |= 0x0100;
@@ -722,8 +760,8 @@ nv50_display_unk20_handler(struct drm_device *dev)
722 if (crtc >= 0) { 760 if (crtc >= 0) {
723 pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(crtc, CLOCK)); 761 pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(crtc, CLOCK));
724 pclk &= 0x003fffff; 762 pclk &= 0x003fffff;
725 763 if (pclk)
726 nv50_crtc_set_clock(dev, crtc, pclk); 764 nv50_crtc_set_clock(dev, crtc, pclk);
727 765
728 tmp = nv_rd32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(crtc)); 766 tmp = nv_rd32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(crtc));
729 tmp &= ~0x000000f; 767 tmp &= ~0x000000f;
diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h
index c2da503a22a..95874f7c043 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.h
+++ b/drivers/gpu/drm/nouveau/nv50_display.h
@@ -69,14 +69,18 @@ int nv50_display_early_init(struct drm_device *dev);
69void nv50_display_late_takedown(struct drm_device *dev); 69void nv50_display_late_takedown(struct drm_device *dev);
70int nv50_display_create(struct drm_device *dev); 70int nv50_display_create(struct drm_device *dev);
71int nv50_display_init(struct drm_device *dev); 71int nv50_display_init(struct drm_device *dev);
72void nv50_display_fini(struct drm_device *dev);
72void nv50_display_destroy(struct drm_device *dev); 73void nv50_display_destroy(struct drm_device *dev);
73int nv50_crtc_blank(struct nouveau_crtc *, bool blank); 74int nv50_crtc_blank(struct nouveau_crtc *, bool blank);
74int nv50_crtc_set_clock(struct drm_device *, int head, int pclk); 75int nv50_crtc_set_clock(struct drm_device *, int head, int pclk);
75 76
77int nv50_display_sync(struct drm_device *);
76int nv50_display_flip_next(struct drm_crtc *, struct drm_framebuffer *, 78int nv50_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
77 struct nouveau_channel *chan); 79 struct nouveau_channel *chan);
78void nv50_display_flip_stop(struct drm_crtc *); 80void nv50_display_flip_stop(struct drm_crtc *);
79 81
82int nv50_evo_create(struct drm_device *dev);
83void nv50_evo_destroy(struct drm_device *dev);
80int nv50_evo_init(struct drm_device *dev); 84int nv50_evo_init(struct drm_device *dev);
81void nv50_evo_fini(struct drm_device *dev); 85void nv50_evo_fini(struct drm_device *dev);
82void nv50_evo_dmaobj_init(struct nouveau_gpuobj *, u32 memtype, u64 base, 86void nv50_evo_dmaobj_init(struct nouveau_gpuobj *, u32 memtype, u64 base,
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c
index c99d9751880..9b962e989d7 100644
--- a/drivers/gpu/drm/nouveau/nv50_evo.c
+++ b/drivers/gpu/drm/nouveau/nv50_evo.c
@@ -218,7 +218,7 @@ nv50_evo_channel_fini(struct nouveau_channel *evo)
218 } 218 }
219} 219}
220 220
221static void 221void
222nv50_evo_destroy(struct drm_device *dev) 222nv50_evo_destroy(struct drm_device *dev)
223{ 223{
224 struct nv50_display *disp = nv50_display(dev); 224 struct nv50_display *disp = nv50_display(dev);
@@ -235,7 +235,7 @@ nv50_evo_destroy(struct drm_device *dev)
235 nv50_evo_channel_del(&disp->master); 235 nv50_evo_channel_del(&disp->master);
236} 236}
237 237
238static int 238int
239nv50_evo_create(struct drm_device *dev) 239nv50_evo_create(struct drm_device *dev)
240{ 240{
241 struct drm_nouveau_private *dev_priv = dev->dev_private; 241 struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -388,12 +388,6 @@ nv50_evo_init(struct drm_device *dev)
388 struct nv50_display *disp = nv50_display(dev); 388 struct nv50_display *disp = nv50_display(dev);
389 int ret, i; 389 int ret, i;
390 390
391 if (!disp->master) {
392 ret = nv50_evo_create(dev);
393 if (ret)
394 return ret;
395 }
396
397 ret = nv50_evo_channel_init(disp->master); 391 ret = nv50_evo_channel_init(disp->master);
398 if (ret) 392 if (ret)
399 return ret; 393 return ret;
@@ -420,6 +414,4 @@ nv50_evo_fini(struct drm_device *dev)
420 414
421 if (disp->master) 415 if (disp->master)
422 nv50_evo_channel_fini(disp->master); 416 nv50_evo_channel_fini(disp->master);
423
424 nv50_evo_destroy(dev);
425} 417}
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index c34a074f7ea..3bc2a565c20 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -230,6 +230,7 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
230 struct drm_device *dev = chan->dev; 230 struct drm_device *dev = chan->dev;
231 struct drm_nouveau_private *dev_priv = dev->dev_private; 231 struct drm_nouveau_private *dev_priv = dev->dev_private;
232 struct nouveau_gpuobj *ramfc = NULL; 232 struct nouveau_gpuobj *ramfc = NULL;
233 uint64_t ib_offset = chan->pushbuf_base + chan->dma.ib_base * 4;
233 unsigned long flags; 234 unsigned long flags;
234 int ret; 235 int ret;
235 236
@@ -280,8 +281,9 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
280 nv_wo32(ramfc, 0x7c, 0x30000001); 281 nv_wo32(ramfc, 0x7c, 0x30000001);
281 nv_wo32(ramfc, 0x78, 0x00000000); 282 nv_wo32(ramfc, 0x78, 0x00000000);
282 nv_wo32(ramfc, 0x3c, 0x403f6078); 283 nv_wo32(ramfc, 0x3c, 0x403f6078);
283 nv_wo32(ramfc, 0x50, chan->pushbuf_base + chan->dma.ib_base * 4); 284 nv_wo32(ramfc, 0x50, lower_32_bits(ib_offset));
284 nv_wo32(ramfc, 0x54, drm_order(chan->dma.ib_max + 1) << 16); 285 nv_wo32(ramfc, 0x54, upper_32_bits(ib_offset) |
286 drm_order(chan->dma.ib_max + 1) << 16);
285 287
286 if (dev_priv->chipset != 0x50) { 288 if (dev_priv->chipset != 0x50) {
287 nv_wo32(chan->ramin, 0, chan->id); 289 nv_wo32(chan->ramin, 0, chan->id);
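The RAMFC change widens the indirect-buffer address to 64 bits: word 0x50 takes the low half, and the high bits now share word 0x54 with the ring-size order. A quick sketch of the packing (the address and order here are arbitrary example values):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t ib_offset = 0x123456789ULL;	/* hypothetical pushbuf address */
	uint32_t order = 9;			/* log2 of the IB entry count */

	uint32_t lo = (uint32_t)ib_offset;
	uint32_t hi = (uint32_t)(ib_offset >> 32) | (order << 16);

	printf("ramfc 0x50 <- 0x%08x\n", lo);
	printf("ramfc 0x54 <- 0x%08x\n", hi);
	return 0;
}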
diff --git a/drivers/gpu/drm/nouveau/nv50_gpio.c b/drivers/gpu/drm/nouveau/nv50_gpio.c
index 793a5ccca12..f429e6a8ca7 100644
--- a/drivers/gpu/drm/nouveau/nv50_gpio.c
+++ b/drivers/gpu/drm/nouveau/nv50_gpio.c
@@ -25,229 +25,95 @@
25#include "drmP.h" 25#include "drmP.h"
26#include "nouveau_drv.h" 26#include "nouveau_drv.h"
27#include "nouveau_hw.h" 27#include "nouveau_hw.h"
28#include "nouveau_gpio.h"
28 29
29#include "nv50_display.h" 30#include "nv50_display.h"
30 31
31static void nv50_gpio_isr(struct drm_device *dev);
32static void nv50_gpio_isr_bh(struct work_struct *work);
33
34struct nv50_gpio_priv {
35 struct list_head handlers;
36 spinlock_t lock;
37};
38
39struct nv50_gpio_handler {
40 struct drm_device *dev;
41 struct list_head head;
42 struct work_struct work;
43 bool inhibit;
44
45 struct dcb_gpio_entry *gpio;
46
47 void (*handler)(void *data, int state);
48 void *data;
49};
50
51static int 32static int
52nv50_gpio_location(struct dcb_gpio_entry *gpio, uint32_t *reg, uint32_t *shift) 33nv50_gpio_location(int line, u32 *reg, u32 *shift)
53{ 34{
54 const uint32_t nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 }; 35 const uint32_t nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
55 36
56 if (gpio->line >= 32) 37 if (line >= 32)
57 return -EINVAL; 38 return -EINVAL;
58 39
59 *reg = nv50_gpio_reg[gpio->line >> 3]; 40 *reg = nv50_gpio_reg[line >> 3];
60 *shift = (gpio->line & 7) << 2; 41 *shift = (line & 7) << 2;
61 return 0; 42 return 0;
62} 43}
63 44
64int 45int
65nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag) 46nv50_gpio_drive(struct drm_device *dev, int line, int dir, int out)
66{ 47{
67 struct dcb_gpio_entry *gpio; 48 u32 reg, shift;
68 uint32_t r, s, v;
69
70 gpio = nouveau_bios_gpio_entry(dev, tag);
71 if (!gpio)
72 return -ENOENT;
73 49
74 if (nv50_gpio_location(gpio, &r, &s)) 50 if (nv50_gpio_location(line, &reg, &shift))
75 return -EINVAL; 51 return -EINVAL;
76 52
77 v = nv_rd32(dev, r) >> (s + 2); 53 nv_mask(dev, reg, 7 << shift, (((dir ^ 1) << 1) | out) << shift);
78 return ((v & 1) == (gpio->state[1] & 1)); 54 return 0;
79} 55}
80 56
81int 57int
82nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state) 58nv50_gpio_sense(struct drm_device *dev, int line)
83{ 59{
84 struct dcb_gpio_entry *gpio; 60 u32 reg, shift;
85 uint32_t r, s, v;
86
87 gpio = nouveau_bios_gpio_entry(dev, tag);
88 if (!gpio)
89 return -ENOENT;
90 61
91 if (nv50_gpio_location(gpio, &r, &s)) 62 if (nv50_gpio_location(line, &reg, &shift))
92 return -EINVAL; 63 return -EINVAL;
93 64
94 v = nv_rd32(dev, r) & ~(0x3 << s); 65 return !!(nv_rd32(dev, reg) & (4 << shift));
95 v |= (gpio->state[state] ^ 2) << s;
96 nv_wr32(dev, r, v);
97 return 0;
98} 66}
99 67
100int 68void
101nvd0_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag) 69nv50_gpio_irq_enable(struct drm_device *dev, int line, bool on)
102{ 70{
103 struct dcb_gpio_entry *gpio; 71 u32 reg = line < 16 ? 0xe050 : 0xe070;
104 u32 v; 72 u32 mask = 0x00010001 << (line & 0xf);
105
106 gpio = nouveau_bios_gpio_entry(dev, tag);
107 if (!gpio)
108 return -ENOENT;
109 73
110 v = nv_rd32(dev, 0x00d610 + (gpio->line * 4)); 74 nv_wr32(dev, reg + 4, mask);
111 v &= 0x00004000; 75 nv_mask(dev, reg + 0, mask, on ? mask : 0);
112 return (!!v == (gpio->state[1] & 1));
113} 76}
114 77
115int 78int
116nvd0_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state) 79nvd0_gpio_drive(struct drm_device *dev, int line, int dir, int out)
117{ 80{
118 struct dcb_gpio_entry *gpio; 81 u32 data = ((dir ^ 1) << 13) | (out << 12);
119 u32 v; 82 nv_mask(dev, 0x00d610 + (line * 4), 0x00003000, data);
120 83 nv_mask(dev, 0x00d604, 0x00000001, 0x00000001); /* update? */
121 gpio = nouveau_bios_gpio_entry(dev, tag);
122 if (!gpio)
123 return -ENOENT;
124
125 v = gpio->state[state] ^ 2;
126
127 nv_mask(dev, 0x00d610 + (gpio->line * 4), 0x00003000, v << 12);
128 return 0; 84 return 0;
129} 85}
130 86
131int 87int
132nv50_gpio_irq_register(struct drm_device *dev, enum dcb_gpio_tag tag, 88nvd0_gpio_sense(struct drm_device *dev, int line)
133 void (*handler)(void *, int), void *data)
134{ 89{
135 struct drm_nouveau_private *dev_priv = dev->dev_private; 90 return !!(nv_rd32(dev, 0x00d610 + (line * 4)) & 0x00004000);
136 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
137 struct nv50_gpio_priv *priv = pgpio->priv;
138 struct nv50_gpio_handler *gpioh;
139 struct dcb_gpio_entry *gpio;
140 unsigned long flags;
141
142 gpio = nouveau_bios_gpio_entry(dev, tag);
143 if (!gpio)
144 return -ENOENT;
145
146 gpioh = kzalloc(sizeof(*gpioh), GFP_KERNEL);
147 if (!gpioh)
148 return -ENOMEM;
149
150 INIT_WORK(&gpioh->work, nv50_gpio_isr_bh);
151 gpioh->dev = dev;
152 gpioh->gpio = gpio;
153 gpioh->handler = handler;
154 gpioh->data = data;
155
156 spin_lock_irqsave(&priv->lock, flags);
157 list_add(&gpioh->head, &priv->handlers);
158 spin_unlock_irqrestore(&priv->lock, flags);
159 return 0;
160} 91}
161 92
162void 93static void
163nv50_gpio_irq_unregister(struct drm_device *dev, enum dcb_gpio_tag tag, 94nv50_gpio_isr(struct drm_device *dev)
164 void (*handler)(void *, int), void *data)
165{
166 struct drm_nouveau_private *dev_priv = dev->dev_private;
167 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
168 struct nv50_gpio_priv *priv = pgpio->priv;
169 struct nv50_gpio_handler *gpioh, *tmp;
170 struct dcb_gpio_entry *gpio;
171 LIST_HEAD(tofree);
172 unsigned long flags;
173
174 gpio = nouveau_bios_gpio_entry(dev, tag);
175 if (!gpio)
176 return;
177
178 spin_lock_irqsave(&priv->lock, flags);
179 list_for_each_entry_safe(gpioh, tmp, &priv->handlers, head) {
180 if (gpioh->gpio != gpio ||
181 gpioh->handler != handler ||
182 gpioh->data != data)
183 continue;
184 list_move(&gpioh->head, &tofree);
185 }
186 spin_unlock_irqrestore(&priv->lock, flags);
187
188 list_for_each_entry_safe(gpioh, tmp, &tofree, head) {
189 flush_work_sync(&gpioh->work);
190 kfree(gpioh);
191 }
192}
193
194bool
195nv50_gpio_irq_enable(struct drm_device *dev, enum dcb_gpio_tag tag, bool on)
196{
197 struct dcb_gpio_entry *gpio;
198 u32 reg, mask;
199
200 gpio = nouveau_bios_gpio_entry(dev, tag);
201 if (!gpio)
202 return false;
203
204 reg = gpio->line < 16 ? 0xe050 : 0xe070;
205 mask = 0x00010001 << (gpio->line & 0xf);
206
207 nv_wr32(dev, reg + 4, mask);
208 reg = nv_mask(dev, reg + 0, mask, on ? mask : 0);
209 return (reg & mask) == mask;
210}
211
212static int
213nv50_gpio_create(struct drm_device *dev)
214{ 95{
215 struct drm_nouveau_private *dev_priv = dev->dev_private; 96 struct drm_nouveau_private *dev_priv = dev->dev_private;
216 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio; 97 u32 intr0, intr1 = 0;
217 struct nv50_gpio_priv *priv; 98 u32 hi, lo;
218
219 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
220 if (!priv)
221 return -ENOMEM;
222 99
223 INIT_LIST_HEAD(&priv->handlers); 100 intr0 = nv_rd32(dev, 0xe054) & nv_rd32(dev, 0xe050);
224 spin_lock_init(&priv->lock); 101 if (dev_priv->chipset >= 0x90)
225 pgpio->priv = priv; 102 intr1 = nv_rd32(dev, 0xe074) & nv_rd32(dev, 0xe070);
226 return 0;
227}
228 103
229static void 104 hi = (intr0 & 0x0000ffff) | (intr1 << 16);
230nv50_gpio_destroy(struct drm_device *dev) 105 lo = (intr0 >> 16) | (intr1 & 0xffff0000);
231{ 106 nouveau_gpio_isr(dev, 0, hi | lo);
232 struct drm_nouveau_private *dev_priv = dev->dev_private;
233 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
234 107
235 kfree(pgpio->priv); 108 nv_wr32(dev, 0xe054, intr0);
236 pgpio->priv = NULL; 109 if (dev_priv->chipset >= 0x90)
110 nv_wr32(dev, 0xe074, intr1);
237} 111}
238 112
239int 113int
240nv50_gpio_init(struct drm_device *dev) 114nv50_gpio_init(struct drm_device *dev)
241{ 115{
242 struct drm_nouveau_private *dev_priv = dev->dev_private; 116 struct drm_nouveau_private *dev_priv = dev->dev_private;
243 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
244 int ret;
245
246 if (!pgpio->priv) {
247 ret = nv50_gpio_create(dev);
248 if (ret)
249 return ret;
250 }
251 117
252 /* disable, and ack any pending gpio interrupts */ 118 /* disable, and ack any pending gpio interrupts */
253 nv_wr32(dev, 0xe050, 0x00000000); 119 nv_wr32(dev, 0xe050, 0x00000000);
@@ -270,64 +136,4 @@ nv50_gpio_fini(struct drm_device *dev)
270 if (dev_priv->chipset >= 0x90) 136 if (dev_priv->chipset >= 0x90)
271 nv_wr32(dev, 0xe070, 0x00000000); 137 nv_wr32(dev, 0xe070, 0x00000000);
272 nouveau_irq_unregister(dev, 21); 138 nouveau_irq_unregister(dev, 21);
273
274 nv50_gpio_destroy(dev);
275}
276
277static void
278nv50_gpio_isr_bh(struct work_struct *work)
279{
280 struct nv50_gpio_handler *gpioh =
281 container_of(work, struct nv50_gpio_handler, work);
282 struct drm_nouveau_private *dev_priv = gpioh->dev->dev_private;
283 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
284 struct nv50_gpio_priv *priv = pgpio->priv;
285 unsigned long flags;
286 int state;
287
288 state = pgpio->get(gpioh->dev, gpioh->gpio->tag);
289 if (state < 0)
290 return;
291
292 gpioh->handler(gpioh->data, state);
293
294 spin_lock_irqsave(&priv->lock, flags);
295 gpioh->inhibit = false;
296 spin_unlock_irqrestore(&priv->lock, flags);
297}
298
299static void
300nv50_gpio_isr(struct drm_device *dev)
301{
302 struct drm_nouveau_private *dev_priv = dev->dev_private;
303 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
304 struct nv50_gpio_priv *priv = pgpio->priv;
305 struct nv50_gpio_handler *gpioh;
306 u32 intr0, intr1 = 0;
307 u32 hi, lo, ch;
308
309 intr0 = nv_rd32(dev, 0xe054) & nv_rd32(dev, 0xe050);
310 if (dev_priv->chipset >= 0x90)
311 intr1 = nv_rd32(dev, 0xe074) & nv_rd32(dev, 0xe070);
312
313 hi = (intr0 & 0x0000ffff) | (intr1 << 16);
314 lo = (intr0 >> 16) | (intr1 & 0xffff0000);
315 ch = hi | lo;
316
317 nv_wr32(dev, 0xe054, intr0);
318 if (dev_priv->chipset >= 0x90)
319 nv_wr32(dev, 0xe074, intr1);
320
321 spin_lock(&priv->lock);
322 list_for_each_entry(gpioh, &priv->handlers, head) {
323 if (!(ch & (1 << gpioh->gpio->line)))
324 continue;
325
326 if (gpioh->inhibit)
327 continue;
328 gpioh->inhibit = true;
329
330 schedule_work(&gpioh->work);
331 }
332 spin_unlock(&priv->lock);
333} 139}
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index ac601f7c4e1..33d5711a918 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -616,9 +616,9 @@ nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
 			}
 			break;
 		case 7: /* MP error */
-			if (ustatus & 0x00010000) {
+			if (ustatus & 0x04030000) {
 				nv50_pgraph_mp_trap(dev, i, display);
-				ustatus &= ~0x00010000;
+				ustatus &= ~0x04030000;
 			}
 			break;
 		case 8: /* TPDMA error */
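
The only functional change to nv50_graph.c widens the MP-error test from bit 16 alone to 0x04030000 (bits 16, 17 and 26), so nv50_pgraph_mp_trap() now runs, and the status is acked, for two more error sources. The test-and-clear shape, restated standalone:

#include <stdint.h>
#include <stdio.h>

#define MP_ERROR_BITS 0x04030000u	/* was 0x00010000 */

static uint32_t ack_mp_errors(uint32_t ustatus, int *trapped)
{
	*trapped = !!(ustatus & MP_ERROR_BITS);
	return ustatus & ~MP_ERROR_BITS;	/* these bits are handled */
}

int main(void)
{
	int trapped;
	uint32_t rest = ack_mp_errors(0x00020001, &trapped);
	printf("trapped=%d remaining=0x%08x\n", trapped, rest);
	return 0;
}
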
diff --git a/drivers/gpu/drm/nouveau/nv50_pm.c b/drivers/gpu/drm/nouveau/nv50_pm.c
index 3d5a86b9828..03937212e9d 100644
--- a/drivers/gpu/drm/nouveau/nv50_pm.c
+++ b/drivers/gpu/drm/nouveau/nv50_pm.c
@@ -25,122 +25,745 @@
25#include "drmP.h" 25#include "drmP.h"
26#include "nouveau_drv.h" 26#include "nouveau_drv.h"
27#include "nouveau_bios.h" 27#include "nouveau_bios.h"
28#include "nouveau_hw.h"
28#include "nouveau_pm.h" 29#include "nouveau_pm.h"
30#include "nouveau_hwsq.h"
29 31
30struct nv50_pm_state { 32enum clk_src {
31 struct nouveau_pm_level *perflvl; 33 clk_src_crystal,
32 struct pll_lims pll; 34 clk_src_href,
33 enum pll_types type; 35 clk_src_hclk,
34 int N, M, P; 36 clk_src_hclkm3,
37 clk_src_hclkm3d2,
38 clk_src_host,
39 clk_src_nvclk,
40 clk_src_sclk,
41 clk_src_mclk,
42 clk_src_vdec,
43 clk_src_dom6
35}; 44};
36 45
46static u32 read_clk(struct drm_device *, enum clk_src);
47
48static u32
49read_div(struct drm_device *dev)
50{
51 struct drm_nouveau_private *dev_priv = dev->dev_private;
52
53 switch (dev_priv->chipset) {
54 case 0x50: /* it exists, but only has bit 31, not the dividers.. */
55 case 0x84:
56 case 0x86:
57 case 0x98:
58 case 0xa0:
59 return nv_rd32(dev, 0x004700);
60 case 0x92:
61 case 0x94:
62 case 0x96:
63 return nv_rd32(dev, 0x004800);
64 default:
65 return 0x00000000;
66 }
67}
68
69static u32
70read_pll_src(struct drm_device *dev, u32 base)
71{
72 struct drm_nouveau_private *dev_priv = dev->dev_private;
73 u32 coef, ref = read_clk(dev, clk_src_crystal);
74 u32 rsel = nv_rd32(dev, 0x00e18c);
75 int P, N, M, id;
76
77 switch (dev_priv->chipset) {
78 case 0x50:
79 case 0xa0:
80 switch (base) {
81 case 0x4020:
82 case 0x4028: id = !!(rsel & 0x00000004); break;
83 case 0x4008: id = !!(rsel & 0x00000008); break;
84 case 0x4030: id = 0; break;
85 default:
86 NV_ERROR(dev, "ref: bad pll 0x%06x\n", base);
87 return 0;
88 }
89
90 coef = nv_rd32(dev, 0x00e81c + (id * 0x0c));
91 ref *= (coef & 0x01000000) ? 2 : 4;
92 P = (coef & 0x00070000) >> 16;
93 N = ((coef & 0x0000ff00) >> 8) + 1;
94 M = ((coef & 0x000000ff) >> 0) + 1;
95 break;
96 case 0x84:
97 case 0x86:
98 case 0x92:
99 coef = nv_rd32(dev, 0x00e81c);
100 P = (coef & 0x00070000) >> 16;
101 N = (coef & 0x0000ff00) >> 8;
102 M = (coef & 0x000000ff) >> 0;
103 break;
104 case 0x94:
105 case 0x96:
106 case 0x98:
107 rsel = nv_rd32(dev, 0x00c050);
108 switch (base) {
109 case 0x4020: rsel = (rsel & 0x00000003) >> 0; break;
110 case 0x4008: rsel = (rsel & 0x0000000c) >> 2; break;
111 case 0x4028: rsel = (rsel & 0x00001800) >> 11; break;
112 case 0x4030: rsel = 3; break;
113 default:
114 NV_ERROR(dev, "ref: bad pll 0x%06x\n", base);
115 return 0;
116 }
117
118 switch (rsel) {
119 case 0: id = 1; break;
120 case 1: return read_clk(dev, clk_src_crystal);
121 case 2: return read_clk(dev, clk_src_href);
122 case 3: id = 0; break;
123 }
124
125 coef = nv_rd32(dev, 0x00e81c + (id * 0x28));
126 P = (nv_rd32(dev, 0x00e824 + (id * 0x28)) >> 16) & 7;
127 P += (coef & 0x00070000) >> 16;
128 N = (coef & 0x0000ff00) >> 8;
129 M = (coef & 0x000000ff) >> 0;
130 break;
131 default:
132 BUG_ON(1);
133 }
134
135 if (M)
136 return (ref * N / M) >> P;
137 return 0;
138}
139
140static u32
141read_pll_ref(struct drm_device *dev, u32 base)
142{
143 u32 src, mast = nv_rd32(dev, 0x00c040);
144
145 switch (base) {
146 case 0x004028:
147 src = !!(mast & 0x00200000);
148 break;
149 case 0x004020:
150 src = !!(mast & 0x00400000);
151 break;
152 case 0x004008:
153 src = !!(mast & 0x00010000);
154 break;
155 case 0x004030:
156 src = !!(mast & 0x02000000);
157 break;
158 case 0x00e810:
159 return read_clk(dev, clk_src_crystal);
160 default:
161 NV_ERROR(dev, "bad pll 0x%06x\n", base);
162 return 0;
163 }
164
165 if (src)
166 return read_clk(dev, clk_src_href);
167 return read_pll_src(dev, base);
168}
169
170static u32
171read_pll(struct drm_device *dev, u32 base)
172{
173 struct drm_nouveau_private *dev_priv = dev->dev_private;
174 u32 mast = nv_rd32(dev, 0x00c040);
175 u32 ctrl = nv_rd32(dev, base + 0);
176 u32 coef = nv_rd32(dev, base + 4);
177 u32 ref = read_pll_ref(dev, base);
178 u32 clk = 0;
179 int N1, N2, M1, M2;
180
181 if (base == 0x004028 && (mast & 0x00100000)) {
182 /* wtf, appears to only disable post-divider on nva0 */
183 if (dev_priv->chipset != 0xa0)
184 return read_clk(dev, clk_src_dom6);
185 }
186
187 N2 = (coef & 0xff000000) >> 24;
188 M2 = (coef & 0x00ff0000) >> 16;
189 N1 = (coef & 0x0000ff00) >> 8;
190 M1 = (coef & 0x000000ff);
191 if ((ctrl & 0x80000000) && M1) {
192 clk = ref * N1 / M1;
193 if ((ctrl & 0x40000100) == 0x40000000) {
194 if (M2)
195 clk = clk * N2 / M2;
196 else
197 clk = 0;
198 }
199 }
200
201 return clk;
202}
203
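
read_pll() above decodes a two-stage PLL: reference times N1/M1, then times N2/M2 as well when the second stage is routed through ((ctrl & 0x40000100) == 0x40000000); post-dividers are applied by the callers or in read_pll_src(). A worked example of the same arithmetic with invented register values:

#include <stdint.h>
#include <stdio.h>

static uint32_t pll_freq(uint32_t ref_khz, uint32_t ctrl, uint32_t coef)
{
	int N2 = (coef & 0xff000000) >> 24;
	int M2 = (coef & 0x00ff0000) >> 16;
	int N1 = (coef & 0x0000ff00) >> 8;
	int M1 = (coef & 0x000000ff);
	uint32_t clk = 0;

	if ((ctrl & 0x80000000) && M1) {
		clk = ref_khz * N1 / M1;
		if ((ctrl & 0x40000100) == 0x40000000)
			clk = M2 ? clk * N2 / M2 : 0;
	}
	return clk;
}

int main(void)
{
	/* 27 MHz crystal, N1=100, M1=4, second stage bypassed:
	 * 27000 * 100 / 4 = 675000 kHz (675 MHz) */
	printf("%u kHz\n", pll_freq(27000, 0x80000000, (100 << 8) | 4));
	return 0;
}
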
204static u32
205read_clk(struct drm_device *dev, enum clk_src src)
206{
207 struct drm_nouveau_private *dev_priv = dev->dev_private;
208 u32 mast = nv_rd32(dev, 0x00c040);
209 u32 P = 0;
210
211 switch (src) {
212 case clk_src_crystal:
213 return dev_priv->crystal;
214 case clk_src_href:
215 return 100000; /* PCIE reference clock */
216 case clk_src_hclk:
217 return read_clk(dev, clk_src_href) * 27778 / 10000;
218 case clk_src_hclkm3:
219 return read_clk(dev, clk_src_hclk) * 3;
220 case clk_src_hclkm3d2:
221 return read_clk(dev, clk_src_hclk) * 3 / 2;
222 case clk_src_host:
223 switch (mast & 0x30000000) {
224 case 0x00000000: return read_clk(dev, clk_src_href);
225 case 0x10000000: break;
226 case 0x20000000: /* !0x50 */
227 case 0x30000000: return read_clk(dev, clk_src_hclk);
228 }
229 break;
230 case clk_src_nvclk:
231 if (!(mast & 0x00100000))
232 P = (nv_rd32(dev, 0x004028) & 0x00070000) >> 16;
233 switch (mast & 0x00000003) {
234 case 0x00000000: return read_clk(dev, clk_src_crystal) >> P;
235 case 0x00000001: return read_clk(dev, clk_src_dom6);
236 case 0x00000002: return read_pll(dev, 0x004020) >> P;
237 case 0x00000003: return read_pll(dev, 0x004028) >> P;
238 }
239 break;
240 case clk_src_sclk:
241 P = (nv_rd32(dev, 0x004020) & 0x00070000) >> 16;
242 switch (mast & 0x00000030) {
243 case 0x00000000:
244 if (mast & 0x00000080)
245 return read_clk(dev, clk_src_host) >> P;
246 return read_clk(dev, clk_src_crystal) >> P;
247 case 0x00000010: break;
248 case 0x00000020: return read_pll(dev, 0x004028) >> P;
249 case 0x00000030: return read_pll(dev, 0x004020) >> P;
250 }
251 break;
252 case clk_src_mclk:
253 P = (nv_rd32(dev, 0x004008) & 0x00070000) >> 16;
254 if (nv_rd32(dev, 0x004008) & 0x00000200) {
255 switch (mast & 0x0000c000) {
256 case 0x00000000:
257 return read_clk(dev, clk_src_crystal) >> P;
258 case 0x00008000:
259 case 0x0000c000:
260 return read_clk(dev, clk_src_href) >> P;
261 }
262 } else {
263 return read_pll(dev, 0x004008) >> P;
264 }
265 break;
266 case clk_src_vdec:
267 P = (read_div(dev) & 0x00000700) >> 8;
268 switch (dev_priv->chipset) {
269 case 0x84:
270 case 0x86:
271 case 0x92:
272 case 0x94:
273 case 0x96:
274 case 0xa0:
275 switch (mast & 0x00000c00) {
276 case 0x00000000:
277 if (dev_priv->chipset == 0xa0) /* wtf?? */
278 return read_clk(dev, clk_src_nvclk) >> P;
279 return read_clk(dev, clk_src_crystal) >> P;
280 case 0x00000400:
281 return 0;
282 case 0x00000800:
283 if (mast & 0x01000000)
284 return read_pll(dev, 0x004028) >> P;
285 return read_pll(dev, 0x004030) >> P;
286 case 0x00000c00:
287 return read_clk(dev, clk_src_nvclk) >> P;
288 }
289 break;
290 case 0x98:
291 switch (mast & 0x00000c00) {
292 case 0x00000000:
293 return read_clk(dev, clk_src_nvclk) >> P;
294 case 0x00000400:
295 return 0;
296 case 0x00000800:
297 return read_clk(dev, clk_src_hclkm3d2) >> P;
298 case 0x00000c00:
299 return read_clk(dev, clk_src_mclk) >> P;
300 }
301 break;
302 }
303 break;
304 case clk_src_dom6:
305 switch (dev_priv->chipset) {
306 case 0x50:
307 case 0xa0:
308 return read_pll(dev, 0x00e810) >> 2;
309 case 0x84:
310 case 0x86:
311 case 0x92:
312 case 0x94:
313 case 0x96:
314 case 0x98:
315 P = (read_div(dev) & 0x00000007) >> 0;
316 switch (mast & 0x0c000000) {
317 case 0x00000000: return read_clk(dev, clk_src_href);
318 case 0x04000000: break;
319 case 0x08000000: return read_clk(dev, clk_src_hclk);
320 case 0x0c000000:
321 return read_clk(dev, clk_src_hclkm3) >> P;
322 }
323 break;
324 default:
325 break;
326 }
327 default:
328 break;
329 }
330
331 NV_DEBUG(dev, "unknown clock source %d 0x%08x\n", src, mast);
332 return 0;
333}
334
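
Two of the fixed ratios in read_clk() are worth spelling out: href is hardwired to the 100 MHz PCIE reference, hclk is href * 27778 / 10000 (about 277.78 MHz), and hclkm3/hclkm3d2 are its x3 and x1.5 taps used by vdec and dom6. In numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t href = 100000;			/* kHz, PCIE reference */
	uint32_t hclk = href * 27778 / 10000;	/* 277780 kHz */

	printf("hclk     = %u kHz\n", hclk);
	printf("hclkm3   = %u kHz\n", hclk * 3);
	printf("hclkm3d2 = %u kHz\n", hclk * 3 / 2);
	return 0;
}
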
37int 335int
38nv50_pm_clock_get(struct drm_device *dev, u32 id) 336nv50_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
39{ 337{
40 struct pll_lims pll; 338 struct drm_nouveau_private *dev_priv = dev->dev_private;
41 int P, N, M, ret; 339 if (dev_priv->chipset == 0xaa ||
42 u32 reg0, reg1; 340 dev_priv->chipset == 0xac)
341 return 0;
43 342
44 ret = get_pll_limits(dev, id, &pll); 343 perflvl->core = read_clk(dev, clk_src_nvclk);
344 perflvl->shader = read_clk(dev, clk_src_sclk);
345 perflvl->memory = read_clk(dev, clk_src_mclk);
346 if (dev_priv->chipset != 0x50) {
347 perflvl->vdec = read_clk(dev, clk_src_vdec);
348 perflvl->dom6 = read_clk(dev, clk_src_dom6);
349 }
350
351 return 0;
352}
353
354struct nv50_pm_state {
355 struct hwsq_ucode mclk_hwsq;
356 u32 mscript;
357
358 u32 emast;
359 u32 nctrl;
360 u32 ncoef;
361 u32 sctrl;
362 u32 scoef;
363
364 u32 amast;
365 u32 pdivs;
366};
367
368static u32
369calc_pll(struct drm_device *dev, u32 reg, struct pll_lims *pll,
370 u32 clk, int *N1, int *M1, int *log2P)
371{
372 struct nouveau_pll_vals coef;
373 int ret;
374
375 ret = get_pll_limits(dev, reg, pll);
45 if (ret) 376 if (ret)
46 return ret; 377 return 0;
378
379 pll->vco2.maxfreq = 0;
380 pll->refclk = read_pll_ref(dev, reg);
381 if (!pll->refclk)
382 return 0;
47 383
48 reg0 = nv_rd32(dev, pll.reg + 0); 384 ret = nouveau_calc_pll_mnp(dev, pll, clk, &coef);
49 reg1 = nv_rd32(dev, pll.reg + 4); 385 if (ret == 0)
386 return 0;
50 387
51 if ((reg0 & 0x80000000) == 0) { 388 *N1 = coef.N1;
52 if (id == PLL_SHADER) { 389 *M1 = coef.M1;
53 NV_DEBUG(dev, "Shader PLL is disabled. " 390 *log2P = coef.log2P;
54 "Shader clock is twice the core\n"); 391 return ret;
55 ret = nv50_pm_clock_get(dev, PLL_CORE); 392}
56 if (ret > 0) 393
57 return ret << 1; 394static inline u32
58 } else if (id == PLL_MEMORY) { 395calc_div(u32 src, u32 target, int *div)
59 NV_DEBUG(dev, "Memory PLL is disabled. " 396{
60 "Memory clock is equal to the ref_clk\n"); 397 u32 clk0 = src, clk1 = src;
61 return pll.refclk; 398 for (*div = 0; *div <= 7; (*div)++) {
399 if (clk0 <= target) {
400 clk1 = clk0 << (*div ? 1 : 0);
401 break;
62 } 402 }
403 clk0 >>= 1;
404 }
405
406 if (target - clk0 <= clk1 - target)
407 return clk0;
408 (*div)--;
409 return clk1;
410}
411
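
calc_div() picks the nearest power-of-two divider: halve src up to seven times until it drops to or below target, then return whichever of the two bracketing frequencies is closer, leaving the matching log2 divider in *div. Reproduced standalone with a worked call:

#include <stdint.h>
#include <stdio.h>

static uint32_t calc_div(uint32_t src, uint32_t target, int *div)
{
	uint32_t clk0 = src, clk1 = src;
	for (*div = 0; *div <= 7; (*div)++) {
		if (clk0 <= target) {
			clk1 = clk0 << (*div ? 1 : 0);
			break;
		}
		clk0 >>= 1;
	}

	if (target - clk0 <= clk1 - target)
		return clk0;
	(*div)--;
	return clk1;
}

int main(void)
{
	int div;
	/* 750 MHz source, 450 MHz wanted: 375 MHz (div=1) beats 750 MHz */
	uint32_t clk = calc_div(750000, 450000, &div);
	printf("got %u kHz with divider 1<<%d\n", clk, div);
	return 0;
}
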
412static inline u32
413clk_same(u32 a, u32 b)
414{
415 return ((a / 1000) == (b / 1000));
416}
417
418static int
419calc_mclk(struct drm_device *dev, u32 freq, struct hwsq_ucode *hwsq)
420{
421 struct drm_nouveau_private *dev_priv = dev->dev_private;
422 struct pll_lims pll;
423 u32 mast = nv_rd32(dev, 0x00c040);
424 u32 ctrl = nv_rd32(dev, 0x004008);
425 u32 coef = nv_rd32(dev, 0x00400c);
426 u32 orig = ctrl;
427 u32 crtc_mask = 0;
428 int N, M, P;
429 int ret, i;
430
431 /* use pcie refclock if possible, otherwise use mpll */
432 ctrl &= ~0x81ff0200;
433 if (clk_same(freq, read_clk(dev, clk_src_href))) {
434 ctrl |= 0x00000200 | (pll.log2p_bias << 19);
435 } else {
436 ret = calc_pll(dev, 0x4008, &pll, freq, &N, &M, &P);
437 if (ret == 0)
438 return -EINVAL;
439
440 ctrl |= 0x80000000 | (P << 22) | (P << 16);
441 ctrl |= pll.log2p_bias << 19;
442 coef = (N << 8) | M;
443 }
444
445 mast &= ~0xc0000000; /* get MCLK_2 from HREF */
446 mast |= 0x0000c000; /* use MCLK_2 as MPLL_BYPASS clock */
447
448 /* determine active crtcs */
449 for (i = 0; i < 2; i++) {
450 if (nv_rd32(dev, NV50_PDISPLAY_CRTC_C(i, CLOCK)))
451 crtc_mask |= (1 << i);
452 }
453
454 /* build the ucode which will reclock the memory for us */
455 hwsq_init(hwsq);
456 if (crtc_mask) {
457 hwsq_op5f(hwsq, crtc_mask, 0x00); /* wait for scanout */
458 hwsq_op5f(hwsq, crtc_mask, 0x01); /* wait for vblank */
63 } 459 }
460 if (dev_priv->chipset >= 0x92)
461 hwsq_wr32(hwsq, 0x611200, 0x00003300); /* disable scanout */
462 hwsq_setf(hwsq, 0x10, 0); /* disable bus access */
463 hwsq_op5f(hwsq, 0x00, 0x01); /* no idea :s */
464
465 /* prepare memory controller */
466 hwsq_wr32(hwsq, 0x1002d4, 0x00000001); /* precharge banks and idle */
467 hwsq_wr32(hwsq, 0x1002d0, 0x00000001); /* force refresh */
468 hwsq_wr32(hwsq, 0x100210, 0x00000000); /* stop the automatic refresh */
469 hwsq_wr32(hwsq, 0x1002dc, 0x00000001); /* start self refresh mode */
64 470
65 P = (reg0 & 0x00070000) >> 16; 471 /* reclock memory */
66 N = (reg1 & 0x0000ff00) >> 8; 472 hwsq_wr32(hwsq, 0xc040, mast);
67 M = (reg1 & 0x000000ff); 473 hwsq_wr32(hwsq, 0x4008, orig | 0x00000200); /* bypass MPLL */
474 hwsq_wr32(hwsq, 0x400c, coef);
475 hwsq_wr32(hwsq, 0x4008, ctrl);
68 476
69 return ((pll.refclk * N / M) >> P); 477 /* restart memory controller */
478 hwsq_wr32(hwsq, 0x1002d4, 0x00000001); /* precharge banks and idle */
479 hwsq_wr32(hwsq, 0x1002dc, 0x00000000); /* stop self refresh mode */
480 hwsq_wr32(hwsq, 0x100210, 0x80000000); /* restart automatic refresh */
481 hwsq_usec(hwsq, 12); /* wait for the PLL to stabilize */
482
483 hwsq_usec(hwsq, 48); /* may be unnecessary: causes flickering */
484 hwsq_setf(hwsq, 0x10, 1); /* enable bus access */
485 hwsq_op5f(hwsq, 0x00, 0x00); /* no idea, reverse of 0x00, 0x01? */
486 if (dev_priv->chipset >= 0x92)
487 hwsq_wr32(hwsq, 0x611200, 0x00003330); /* enable scanout */
488 hwsq_fini(hwsq);
489 return 0;
70} 490}
71 491
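
The ordering calc_mclk() bakes into the ucode is the delicate part: DRAM is parked in self-refresh with automatic refresh stopped before the PLL is touched, then unwound in reverse. The write sequence as data (illustrative only; the zero values stand for the computed mast/ctrl/coef words):

#include <stdio.h>

struct step { unsigned reg, val; const char *why; };

static const struct step mclk_seq[] = {
	{ 0x1002d4, 0x00000001, "precharge banks and idle" },
	{ 0x1002d0, 0x00000001, "force refresh" },
	{ 0x100210, 0x00000000, "stop automatic refresh" },
	{ 0x1002dc, 0x00000001, "enter self-refresh" },
	{ 0x00c040, 0,          "route MCLK_2 from HREF" },
	{ 0x004008, 0,          "bypass MPLL (orig | 0x200)" },
	{ 0x00400c, 0,          "write new N/M coefficients" },
	{ 0x004008, 0,          "switch back to the MPLL" },
	{ 0x1002d4, 0x00000001, "precharge banks and idle" },
	{ 0x1002dc, 0x00000000, "leave self-refresh" },
	{ 0x100210, 0x80000000, "restart automatic refresh" },
};

int main(void)
{
	unsigned i;
	for (i = 0; i < sizeof(mclk_seq) / sizeof(mclk_seq[0]); i++)
		printf("wr32(0x%06x, 0x%08x)\t/* %s */\n",
		       mclk_seq[i].reg, mclk_seq[i].val, mclk_seq[i].why);
	return 0;
}
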
72void * 492void *
73nv50_pm_clock_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl, 493nv50_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
74 u32 id, int khz)
75{ 494{
76 struct nv50_pm_state *state; 495 struct drm_nouveau_private *dev_priv = dev->dev_private;
77 int dummy, ret; 496 struct nv50_pm_state *info;
497 struct pll_lims pll;
498 int ret = -EINVAL;
499 int N, M, P1, P2;
500 u32 clk, out;
78 501
79 state = kzalloc(sizeof(*state), GFP_KERNEL); 502 if (dev_priv->chipset == 0xaa ||
80 if (!state) 503 dev_priv->chipset == 0xac)
504 return ERR_PTR(-ENODEV);
505
506 info = kmalloc(sizeof(*info), GFP_KERNEL);
507 if (!info)
81 return ERR_PTR(-ENOMEM); 508 return ERR_PTR(-ENOMEM);
82 state->type = id;
83 state->perflvl = perflvl;
84 509
85 ret = get_pll_limits(dev, id, &state->pll); 510 /* core: for the moment at least, always use nvpll */
86 if (ret < 0) { 511 clk = calc_pll(dev, 0x4028, &pll, perflvl->core, &N, &M, &P1);
87 kfree(state); 512 if (clk == 0)
88 return (ret == -ENOENT) ? NULL : ERR_PTR(ret); 513 goto error;
514
515 info->emast = 0x00000003;
516 info->nctrl = 0x80000000 | (P1 << 19) | (P1 << 16);
517 info->ncoef = (N << 8) | M;
518
519 /* shader: tie to nvclk if possible, otherwise use spll. have to be
520 * very careful that the shader clock is at least twice the core, or
521 * some chipsets will be very unhappy. i expect most or all of these
522 * cases will be handled by tying to nvclk, but it's possible there's
523 * corners
524 */
525 if (P1-- && perflvl->shader == (perflvl->core << 1)) {
526 info->emast |= 0x00000020;
527 info->sctrl = 0x00000000 | (P1 << 19) | (P1 << 16);
528 info->scoef = nv_rd32(dev, 0x004024);
529 } else {
530 clk = calc_pll(dev, 0x4020, &pll, perflvl->shader, &N, &M, &P1);
531 if (clk == 0)
532 goto error;
533
534 info->emast |= 0x00000030;
535 info->sctrl = 0x80000000 | (P1 << 19) | (P1 << 16);
536 info->scoef = (N << 8) | M;
537 }
538
539 /* memory: build hwsq ucode which we'll use to reclock memory */
540 info->mclk_hwsq.len = 0;
541 if (perflvl->memory) {
542 clk = calc_mclk(dev, perflvl->memory, &info->mclk_hwsq);
543 if (clk < 0) {
544 ret = clk;
545 goto error;
546 }
547
548 info->mscript = perflvl->memscript;
549 }
550
551 /* vdec: avoid modifying xpll until we know exactly how the other
552 * clock domains work, i suspect at least some of them can also be
553 * tied to xpll...
554 */
555 info->amast = nv_rd32(dev, 0x00c040);
556 info->pdivs = read_div(dev);
557 if (perflvl->vdec) {
558 /* see how close we can get using nvclk as a source */
559 clk = calc_div(perflvl->core, perflvl->vdec, &P1);
560
561 /* see how close we can get using xpll/hclk as a source */
562 if (dev_priv->chipset != 0x98)
563 out = read_pll(dev, 0x004030);
564 else
565 out = read_clk(dev, clk_src_hclkm3d2);
566 out = calc_div(out, perflvl->vdec, &P2);
567
568 /* select whichever gets us closest */
569 info->amast &= ~0x00000c00;
570 info->pdivs &= ~0x00000700;
571 if (abs((int)perflvl->vdec - clk) <=
572 abs((int)perflvl->vdec - out)) {
573 if (dev_priv->chipset != 0x98)
574 info->amast |= 0x00000c00;
575 info->pdivs |= P1 << 8;
576 } else {
577 info->amast |= 0x00000800;
578 info->pdivs |= P2 << 8;
579 }
580 }
581
582 /* dom6: nfi what this is, but we're limited to various combinations
583 * of the host clock frequency
584 */
585 if (perflvl->dom6) {
586 info->amast &= ~0x0c000000;
587 if (clk_same(perflvl->dom6, read_clk(dev, clk_src_href))) {
588 info->amast |= 0x00000000;
589 } else
590 if (clk_same(perflvl->dom6, read_clk(dev, clk_src_hclk))) {
591 info->amast |= 0x08000000;
592 } else {
593 clk = read_clk(dev, clk_src_hclk) * 3;
594 clk = calc_div(clk, perflvl->dom6, &P1);
595
596 info->amast |= 0x0c000000;
597 info->pdivs = (info->pdivs & ~0x00000007) | P1;
598 }
89 } 599 }
90 600
91 ret = nv50_calc_pll(dev, &state->pll, khz, &state->N, &state->M, 601 return info;
92 &dummy, &dummy, &state->P); 602error:
93 if (ret < 0) { 603 kfree(info);
94 kfree(state); 604 return ERR_PTR(ret);
95 return ERR_PTR(ret); 605}
606
607static int
608prog_mclk(struct drm_device *dev, struct hwsq_ucode *hwsq)
609{
610 struct drm_nouveau_private *dev_priv = dev->dev_private;
611 u32 hwsq_data, hwsq_kick;
612 int i;
613
614 if (dev_priv->chipset < 0x90) {
615 hwsq_data = 0x001400;
616 hwsq_kick = 0x00000003;
617 } else {
618 hwsq_data = 0x080000;
619 hwsq_kick = 0x00000001;
96 } 620 }
97 621
98 return state; 622 /* upload hwsq ucode */
623 nv_mask(dev, 0x001098, 0x00000008, 0x00000000);
624 nv_wr32(dev, 0x001304, 0x00000000);
625 for (i = 0; i < hwsq->len / 4; i++)
626 nv_wr32(dev, hwsq_data + (i * 4), hwsq->ptr.u32[i]);
627 nv_mask(dev, 0x001098, 0x00000018, 0x00000018);
628
629 /* launch, and wait for completion */
630 nv_wr32(dev, 0x00130c, hwsq_kick);
631 if (!nv_wait(dev, 0x001308, 0x00000100, 0x00000000)) {
632 NV_ERROR(dev, "hwsq ucode exec timed out\n");
633 NV_ERROR(dev, "0x001308: 0x%08x\n", nv_rd32(dev, 0x001308));
634 for (i = 0; i < hwsq->len / 4; i++) {
635 NV_ERROR(dev, "0x%06x: 0x%08x\n", 0x1400 + (i * 4),
636 nv_rd32(dev, 0x001400 + (i * 4)));
637 }
638
639 return -EIO;
640 }
641
642 return 0;
99} 643}
100 644
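
prog_mclk() is a plain upload-kick-poll: ucode words go to the per-chipset data window (0x1400 before 0x90, 0x80000 after), 0x130c launches, and 0x1308 bit 8 is polled until the sequencer idles. The shape with stubbed MMIO (reg[] stands in for the real BAR mapping, and the retry bound here is invented - the driver leans on nv_wait()'s own timeout):

#include <stdint.h>
#include <stdio.h>

static uint32_t reg[0x4000];	/* fake register file, addr < 0x10000 */
static void wr32(uint32_t addr, uint32_t val) { reg[addr / 4] = val; }
static uint32_t rd32(uint32_t addr) { return reg[addr / 4]; }

static int exec_ucode(const uint32_t *ucode, int words,
		      uint32_t data_base, uint32_t kick)
{
	int i, tries;

	for (i = 0; i < words; i++)		/* upload */
		wr32(data_base + i * 4, ucode[i]);
	wr32(0x00130c, kick);			/* launch */

	for (tries = 0; tries < 1000; tries++)	/* poll until idle */
		if (!(rd32(0x001308) & 0x00000100))
			return 0;
	return -1;				/* timed out */
}

int main(void)
{
	const uint32_t ucode[] = { 0, 0, 0 };	/* placeholder words */
	printf("%d\n", exec_ucode(ucode, 3, 0x001400, 0x00000003));
	return 0;
}
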
101void 645int
102nv50_pm_clock_set(struct drm_device *dev, void *pre_state) 646nv50_pm_clocks_set(struct drm_device *dev, void *data)
103{ 647{
104 struct nv50_pm_state *state = pre_state; 648 struct drm_nouveau_private *dev_priv = dev->dev_private;
105 struct nouveau_pm_level *perflvl = state->perflvl; 649 struct nv50_pm_state *info = data;
106 u32 reg = state->pll.reg, tmp; 650 struct bit_entry M;
107 struct bit_entry BIT_M; 651 int ret = 0;
108 u16 script;
109 int N = state->N;
110 int M = state->M;
111 int P = state->P;
112 652
113 if (state->type == PLL_MEMORY && perflvl->memscript && 653 /* halt and idle execution engines */
114 bit_table(dev, 'M', &BIT_M) == 0 && 654 nv_mask(dev, 0x002504, 0x00000001, 0x00000001);
115 BIT_M.version == 1 && BIT_M.length >= 0x0b) { 655 if (!nv_wait(dev, 0x002504, 0x00000010, 0x00000010))
116 script = ROM16(BIT_M.data[0x05]); 656 goto error;
117 if (script)
118 nouveau_bios_run_init_table(dev, script, NULL, -1);
119 script = ROM16(BIT_M.data[0x07]);
120 if (script)
121 nouveau_bios_run_init_table(dev, script, NULL, -1);
122 script = ROM16(BIT_M.data[0x09]);
123 if (script)
124 nouveau_bios_run_init_table(dev, script, NULL, -1);
125 657
126 nouveau_bios_run_init_table(dev, perflvl->memscript, NULL, -1); 658 /* memory: it is *very* important we change this first, the ucode
659 * we build in pre() now has hardcoded 0xc040 values, which can't
660 * change before we execute it or the engine clocks may end up
661 * messed up.
662 */
663 if (info->mclk_hwsq.len) {
664 /* execute some scripts that do ??? from the vbios.. */
665 if (!bit_table(dev, 'M', &M) && M.version == 1) {
666 if (M.length >= 6)
667 nouveau_bios_init_exec(dev, ROM16(M.data[5]));
668 if (M.length >= 8)
669 nouveau_bios_init_exec(dev, ROM16(M.data[7]));
670 if (M.length >= 10)
671 nouveau_bios_init_exec(dev, ROM16(M.data[9]));
672 nouveau_bios_init_exec(dev, info->mscript);
673 }
674
675 ret = prog_mclk(dev, &info->mclk_hwsq);
676 if (ret)
677 goto resume;
127 } 678 }
128 679
129 if (state->type == PLL_MEMORY) { 680 /* reclock vdec/dom6 */
130 nv_wr32(dev, 0x100210, 0); 681 nv_mask(dev, 0x00c040, 0x00000c00, 0x00000000);
131 nv_wr32(dev, 0x1002dc, 1); 682 switch (dev_priv->chipset) {
683 case 0x92:
684 case 0x94:
685 case 0x96:
686 nv_mask(dev, 0x004800, 0x00000707, info->pdivs);
687 break;
688 default:
689 nv_mask(dev, 0x004700, 0x00000707, info->pdivs);
690 break;
132 } 691 }
692 nv_mask(dev, 0x00c040, 0x0c000c00, info->amast);
133 693
134 tmp = nv_rd32(dev, reg + 0) & 0xfff8ffff; 694 /* core/shader: make sure sclk/nvclk are disconnected from their
135 tmp |= 0x80000000 | (P << 16); 695 * plls (nvclk to dom6, sclk to hclk), modify the plls, and
136 nv_wr32(dev, reg + 0, tmp); 696 * reconnect sclk/nvclk to their new clock source
137 nv_wr32(dev, reg + 4, (N << 8) | M); 697 */
698 if (dev_priv->chipset < 0x92)
699 nv_mask(dev, 0x00c040, 0x001000b0, 0x00100080); /* grrr! */
700 else
701 nv_mask(dev, 0x00c040, 0x000000b3, 0x00000081);
702 nv_mask(dev, 0x004020, 0xc03f0100, info->sctrl);
703 nv_wr32(dev, 0x004024, info->scoef);
704 nv_mask(dev, 0x004028, 0xc03f0100, info->nctrl);
705 nv_wr32(dev, 0x00402c, info->ncoef);
706 nv_mask(dev, 0x00c040, 0x00100033, info->emast);
707
708 goto resume;
709error:
710 ret = -EBUSY;
711resume:
712 nv_mask(dev, 0x002504, 0x00000001, 0x00000000);
713 kfree(info);
714 return ret;
715}
138 716
139 if (state->type == PLL_MEMORY) { 717static int
140 nv_wr32(dev, 0x1002dc, 0); 718pwm_info(struct drm_device *dev, int *line, int *ctrl, int *indx)
141 nv_wr32(dev, 0x100210, 0x80000000); 719{
720 if (*line == 0x04) {
721 *ctrl = 0x00e100;
722 *line = 4;
723 *indx = 0;
724 } else
725 if (*line == 0x09) {
726 *ctrl = 0x00e100;
727 *line = 9;
728 *indx = 1;
729 } else
730 if (*line == 0x10) {
731 *ctrl = 0x00e28c;
732 *line = 0;
733 *indx = 0;
734 } else {
735 NV_ERROR(dev, "unknown pwm ctrl for gpio %d\n", *line);
736 return -ENODEV;
737 }
738
739 return 0;
740}
741
742int
743nv50_pm_pwm_get(struct drm_device *dev, int line, u32 *divs, u32 *duty)
744{
745 int ctrl, id, ret = pwm_info(dev, &line, &ctrl, &id);
746 if (ret)
747 return ret;
748
749 if (nv_rd32(dev, ctrl) & (1 << line)) {
750 *divs = nv_rd32(dev, 0x00e114 + (id * 8));
751 *duty = nv_rd32(dev, 0x00e118 + (id * 8));
752 return 0;
142 } 753 }
143 754
144 kfree(state); 755 return -EINVAL;
145} 756}
146 757
758int
759nv50_pm_pwm_set(struct drm_device *dev, int line, u32 divs, u32 duty)
760{
761 int ctrl, id, ret = pwm_info(dev, &line, &ctrl, &id);
762 if (ret)
763 return ret;
764
765 nv_mask(dev, ctrl, 0x00010001 << line, 0x00000001 << line);
766 nv_wr32(dev, 0x00e114 + (id * 8), divs);
767 nv_wr32(dev, 0x00e118 + (id * 8), duty | 0x80000000);
768 return 0;
769}
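
The new PWM hooks hand back a raw divider/duty pair; a consumer turns that into a fan percentage as duty/divs and programs the inverse through nv50_pm_pwm_set() (note the 0x80000000 latch bit on the duty write). The arithmetic, with invented register values:

#include <stdint.h>
#include <stdio.h>

static int pwm_to_percent(uint32_t divs, uint32_t duty)
{
	if (!divs)
		return -1;
	return (int)(duty * 100 / divs);
}

static uint32_t percent_to_pwm(uint32_t divs, int percent)
{
	return divs * (uint32_t)percent / 100;
}

int main(void)
{
	uint32_t divs = 0x21c, duty = 0x10e;	/* invented readback */
	printf("fan at %d%%\n", pwm_to_percent(divs, duty));	/* 50 */
	printf("70%% -> duty 0x%03x\n", percent_to_pwm(divs, 70));
	return 0;
}
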
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index 2633aa8554e..c4423ba9c9b 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -60,6 +60,8 @@ nv50_sor_disconnect(struct drm_encoder *encoder)
 	BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
 	OUT_RING  (evo, 0);
 
+	nouveau_hdmi_mode_set(encoder, NULL);
+
 	nv_encoder->crtc = NULL;
 	nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
 }
@@ -172,6 +174,12 @@ nv50_sor_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
 static void
 nv50_sor_prepare(struct drm_encoder *encoder)
 {
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	nv50_sor_disconnect(encoder);
+	if (nv_encoder->dcb->type == OUTPUT_DP) {
+		/* avoid race between link training and supervisor intr */
+		nv50_display_sync(encoder->dev);
+	}
 }
 
 static void
@@ -180,8 +188,8 @@ nv50_sor_commit(struct drm_encoder *encoder)
 }
 
 static void
-nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
-		  struct drm_display_mode *adjusted_mode)
+nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
+		  struct drm_display_mode *mode)
 {
 	struct nouveau_channel *evo = nv50_display(encoder->dev)->master;
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
@@ -193,24 +201,27 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 
 	NV_DEBUG_KMS(dev, "or %d type %d -> crtc %d\n",
 		     nv_encoder->or, nv_encoder->dcb->type, crtc->index);
+	nv_encoder->crtc = encoder->crtc;
 
 	switch (nv_encoder->dcb->type) {
 	case OUTPUT_TMDS:
 		if (nv_encoder->dcb->sorconf.link & 1) {
-			if (adjusted_mode->clock < 165000)
+			if (mode->clock < 165000)
 				mode_ctl = 0x0100;
 			else
 				mode_ctl = 0x0500;
 		} else
 			mode_ctl = 0x0200;
+
+		nouveau_hdmi_mode_set(encoder, mode);
 		break;
 	case OUTPUT_DP:
 		nv_connector = nouveau_encoder_connector_get(nv_encoder);
 		if (nv_connector && nv_connector->base.display_info.bpc == 6) {
-			nv_encoder->dp.datarate = crtc->mode->clock * 18 / 8;
+			nv_encoder->dp.datarate = mode->clock * 18 / 8;
 			mode_ctl |= 0x00020000;
 		} else {
-			nv_encoder->dp.datarate = crtc->mode->clock * 24 / 8;
+			nv_encoder->dp.datarate = mode->clock * 24 / 8;
 			mode_ctl |= 0x00050000;
 		}
 
@@ -228,10 +239,10 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 	else
 		mode_ctl |= NV50_EVO_SOR_MODE_CTRL_CRTC0;
 
-	if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
+	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
 		mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NHSYNC;
 
-	if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
+	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
 		mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NVSYNC;
 
 	nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON);
@@ -239,12 +250,11 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 	ret = RING_SPACE(evo, 2);
 	if (ret) {
 		NV_ERROR(dev, "no space while connecting SOR\n");
+		nv_encoder->crtc = NULL;
 		return;
 	}
 	BEGIN_RING(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
 	OUT_RING(evo, mode_ctl);
-
-	nv_encoder->crtc = encoder->crtc;
 }
 
 static struct drm_crtc *
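
For the TMDS case, the rewritten nv50_sor_mode_set() packs everything into mode_ctl: sublink A takes single-link mode below the 165 MHz TMDS ceiling and dual-link above it, sublink B is 0x0200, and sync polarities come from the fixed-up mode. A sketch of that packing (the EVO NHSYNC/NVSYNC constants are assumed values from nv50_evo.h, not shown in this patch):

#include <stdio.h>

#define DRM_MODE_FLAG_NHSYNC (1 << 1)	/* per drm_mode.h */
#define DRM_MODE_FLAG_NVSYNC (1 << 3)
#define EVO_SOR_NHSYNC 0x00001000	/* assumed NV50_EVO_SOR_MODE_CTRL_* */
#define EVO_SOR_NVSYNC 0x00002000

static unsigned tmds_mode_ctl(int link, int clock_khz, unsigned flags)
{
	unsigned mode_ctl;

	if (link & 1)	/* sublink A: single vs dual link by pixel clock */
		mode_ctl = (clock_khz < 165000) ? 0x0100 : 0x0500;
	else
		mode_ctl = 0x0200;

	if (flags & DRM_MODE_FLAG_NHSYNC)
		mode_ctl |= EVO_SOR_NHSYNC;
	if (flags & DRM_MODE_FLAG_NVSYNC)
		mode_ctl |= EVO_SOR_NVSYNC;
	return mode_ctl;
}

int main(void)
{
	/* 148.5 MHz 1080p on sublink A: single link, positive syncs */
	printf("mode_ctl = 0x%04x\n", tmds_mode_ctl(1, 148500, 0));
	return 0;
}
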
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c
index 40b84f22d81..6f38ceae3aa 100644
--- a/drivers/gpu/drm/nouveau/nv50_vm.c
+++ b/drivers/gpu/drm/nouveau/nv50_vm.c
@@ -48,7 +48,7 @@ nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
 			phys |= 0x60;
 		else if (coverage <= 64 * 1024 * 1024)
 			phys |= 0x40;
-		else if (coverage < 128 * 1024 * 1024)
+		else if (coverage <= 128 * 1024 * 1024)
 			phys |= 0x20;
 	}
 
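
The one-character nv50_vm.c fix means a page table covering exactly 128 MiB now takes the 0x20 attribute instead of falling through to zero. The full ladder, restated (the 32 MiB branch sits above the hunk and is assumed here):

#include <stdint.h>
#include <stdio.h>

static uint32_t coverage_bits(uint64_t coverage)
{
	if (coverage <= 32ULL * 1024 * 1024)		/* assumed branch */
		return 0x60;
	else if (coverage <= 64ULL * 1024 * 1024)
		return 0x40;
	else if (coverage <= 128ULL * 1024 * 1024)	/* was '<' */
		return 0x20;
	return 0x00;
}

int main(void)
{
	/* exactly 128 MiB: 0x20 after the fix, 0x00 before it */
	printf("0x%02x\n", coverage_bits(128ULL * 1024 * 1024));
	return 0;
}
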
diff --git a/drivers/gpu/drm/nouveau/nv84_bsp.c b/drivers/gpu/drm/nouveau/nv84_bsp.c
new file mode 100644
index 00000000000..74875739bcc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv84_bsp.c
@@ -0,0 +1,83 @@
1/*
2 * Copyright 2011 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_util.h"
28#include "nouveau_vm.h"
29#include "nouveau_ramht.h"
30
31/*XXX: This stub is currently used on NV98+ also, as soon as this becomes
32 * more than just an enable/disable stub this needs to be split out to
33 * nv98_bsp.c...
34 */
35
36struct nv84_bsp_engine {
37 struct nouveau_exec_engine base;
38};
39
40static int
41nv84_bsp_fini(struct drm_device *dev, int engine, bool suspend)
42{
43 if (!(nv_rd32(dev, 0x000200) & 0x00008000))
44 return 0;
45
46 nv_mask(dev, 0x000200, 0x00008000, 0x00000000);
47 return 0;
48}
49
50static int
51nv84_bsp_init(struct drm_device *dev, int engine)
52{
53 nv_mask(dev, 0x000200, 0x00008000, 0x00000000);
54 nv_mask(dev, 0x000200, 0x00008000, 0x00008000);
55 return 0;
56}
57
58static void
59nv84_bsp_destroy(struct drm_device *dev, int engine)
60{
61 struct nv84_bsp_engine *pbsp = nv_engine(dev, engine);
62
63 NVOBJ_ENGINE_DEL(dev, BSP);
64
65 kfree(pbsp);
66}
67
68int
69nv84_bsp_create(struct drm_device *dev)
70{
71 struct nv84_bsp_engine *pbsp;
72
73 pbsp = kzalloc(sizeof(*pbsp), GFP_KERNEL);
74 if (!pbsp)
75 return -ENOMEM;
76
77 pbsp->base.destroy = nv84_bsp_destroy;
78 pbsp->base.init = nv84_bsp_init;
79 pbsp->base.fini = nv84_bsp_fini;
80
81 NVOBJ_ENGINE_ADD(dev, BSP, &pbsp->base);
82 return 0;
83}
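
nv84_bsp.c and the three files that follow are the same stub with different PMC enable bits (BSP 0x00008000, VP 0x00020000, CRYPT 0x00004000, PPP 0x00000002): fini() parks the engine in reset, init() pulses the bit off and on to bring it up clean. The pattern against a simulated 0x000200:

#include <stdint.h>
#include <stdio.h>

static uint32_t pmc_enable = 0xffffffff;	/* simulated 0x000200 */

static void nv_mask_sim(uint32_t mask, uint32_t val)
{
	pmc_enable = (pmc_enable & ~mask) | val;
}

static void engine_reset_pulse(uint32_t bit)
{
	nv_mask_sim(bit, 0x00000000);	/* assert reset */
	nv_mask_sim(bit, bit);		/* release reset */
}

int main(void)
{
	engine_reset_pulse(0x00008000);	/* BSP's bit, per nv84_bsp_init() */
	printf("0x000200 = 0x%08x\n", pmc_enable);
	return 0;
}
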
diff --git a/drivers/gpu/drm/nouveau/nv84_vp.c b/drivers/gpu/drm/nouveau/nv84_vp.c
new file mode 100644
index 00000000000..6570d300ab8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv84_vp.c
@@ -0,0 +1,83 @@
1/*
2 * Copyright 2011 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_util.h"
28#include "nouveau_vm.h"
29#include "nouveau_ramht.h"
30
31/*XXX: This stub is currently used on NV98+ also, as soon as this becomes
32 * more than just an enable/disable stub this needs to be split out to
33 * nv98_vp.c...
34 */
35
36struct nv84_vp_engine {
37 struct nouveau_exec_engine base;
38};
39
40static int
41nv84_vp_fini(struct drm_device *dev, int engine, bool suspend)
42{
43 if (!(nv_rd32(dev, 0x000200) & 0x00020000))
44 return 0;
45
46 nv_mask(dev, 0x000200, 0x00020000, 0x00000000);
47 return 0;
48}
49
50static int
51nv84_vp_init(struct drm_device *dev, int engine)
52{
53 nv_mask(dev, 0x000200, 0x00020000, 0x00000000);
54 nv_mask(dev, 0x000200, 0x00020000, 0x00020000);
55 return 0;
56}
57
58static void
59nv84_vp_destroy(struct drm_device *dev, int engine)
60{
61 struct nv84_vp_engine *pvp = nv_engine(dev, engine);
62
63 NVOBJ_ENGINE_DEL(dev, VP);
64
65 kfree(pvp);
66}
67
68int
69nv84_vp_create(struct drm_device *dev)
70{
71 struct nv84_vp_engine *pvp;
72
73 pvp = kzalloc(sizeof(*pvp), GFP_KERNEL);
74 if (!pvp)
75 return -ENOMEM;
76
77 pvp->base.destroy = nv84_vp_destroy;
78 pvp->base.init = nv84_vp_init;
79 pvp->base.fini = nv84_vp_fini;
80
81 NVOBJ_ENGINE_ADD(dev, VP, &pvp->base);
82 return 0;
83}
diff --git a/drivers/gpu/drm/nouveau/nv98_crypt.c b/drivers/gpu/drm/nouveau/nv98_crypt.c
new file mode 100644
index 00000000000..db94ff0a9fa
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv98_crypt.c
@@ -0,0 +1,78 @@
1/*
2 * Copyright 2011 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_util.h"
28#include "nouveau_vm.h"
29#include "nouveau_ramht.h"
30
31struct nv98_crypt_engine {
32 struct nouveau_exec_engine base;
33};
34
35static int
36nv98_crypt_fini(struct drm_device *dev, int engine, bool suspend)
37{
38 if (!(nv_rd32(dev, 0x000200) & 0x00004000))
39 return 0;
40
41 nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
42 return 0;
43}
44
45static int
46nv98_crypt_init(struct drm_device *dev, int engine)
47{
48 nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
49 nv_mask(dev, 0x000200, 0x00004000, 0x00004000);
50 return 0;
51}
52
53static void
54nv98_crypt_destroy(struct drm_device *dev, int engine)
55{
56 struct nv98_crypt_engine *pcrypt = nv_engine(dev, engine);
57
58 NVOBJ_ENGINE_DEL(dev, CRYPT);
59
60 kfree(pcrypt);
61}
62
63int
64nv98_crypt_create(struct drm_device *dev)
65{
66 struct nv98_crypt_engine *pcrypt;
67
68 pcrypt = kzalloc(sizeof(*pcrypt), GFP_KERNEL);
69 if (!pcrypt)
70 return -ENOMEM;
71
72 pcrypt->base.destroy = nv98_crypt_destroy;
73 pcrypt->base.init = nv98_crypt_init;
74 pcrypt->base.fini = nv98_crypt_fini;
75
76 NVOBJ_ENGINE_ADD(dev, CRYPT, &pcrypt->base);
77 return 0;
78}
diff --git a/drivers/gpu/drm/nouveau/nv98_ppp.c b/drivers/gpu/drm/nouveau/nv98_ppp.c
new file mode 100644
index 00000000000..a987dd6e003
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv98_ppp.c
@@ -0,0 +1,78 @@
1/*
2 * Copyright 2011 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_util.h"
28#include "nouveau_vm.h"
29#include "nouveau_ramht.h"
30
31struct nv98_ppp_engine {
32 struct nouveau_exec_engine base;
33};
34
35static int
36nv98_ppp_fini(struct drm_device *dev, int engine, bool suspend)
37{
38 if (!(nv_rd32(dev, 0x000200) & 0x00000002))
39 return 0;
40
41 nv_mask(dev, 0x000200, 0x00000002, 0x00000000);
42 return 0;
43}
44
45static int
46nv98_ppp_init(struct drm_device *dev, int engine)
47{
48 nv_mask(dev, 0x000200, 0x00000002, 0x00000000);
49 nv_mask(dev, 0x000200, 0x00000002, 0x00000002);
50 return 0;
51}
52
53static void
54nv98_ppp_destroy(struct drm_device *dev, int engine)
55{
56 struct nv98_ppp_engine *pppp = nv_engine(dev, engine);
57
58 NVOBJ_ENGINE_DEL(dev, PPP);
59
60 kfree(pppp);
61}
62
63int
64nv98_ppp_create(struct drm_device *dev)
65{
66 struct nv98_ppp_engine *pppp;
67
68 pppp = kzalloc(sizeof(*pppp), GFP_KERNEL);
69 if (!pppp)
70 return -ENOMEM;
71
72 pppp->base.destroy = nv98_ppp_destroy;
73 pppp->base.init = nv98_ppp_init;
74 pppp->base.fini = nv98_ppp_fini;
75
76 NVOBJ_ENGINE_ADD(dev, PPP, &pppp->base);
77 return 0;
78}
diff --git a/drivers/gpu/drm/nouveau/nva3_copy.fuc b/drivers/gpu/drm/nouveau/nva3_copy.fuc
index eaf35f8321e..abc36626fef 100644
--- a/drivers/gpu/drm/nouveau/nva3_copy.fuc
+++ b/drivers/gpu/drm/nouveau/nva3_copy.fuc
@@ -31,8 +31,9 @@
31 */ 31 */
32 32
33ifdef(`NVA3', 33ifdef(`NVA3',
34.section nva3_pcopy_data, 34.section #nva3_pcopy_data
35.section nvc0_pcopy_data 35,
36.section #nvc0_pcopy_data
36) 37)
37 38
38ctx_object: .b32 0 39ctx_object: .b32 0
@@ -42,7 +43,7 @@ ctx_dma_query: .b32 0
42ctx_dma_src: .b32 0 43ctx_dma_src: .b32 0
43ctx_dma_dst: .b32 0 44ctx_dma_dst: .b32 0
44,) 45,)
45.equ ctx_dma_count 3 46.equ #ctx_dma_count 3
46ctx_query_address_high: .b32 0 47ctx_query_address_high: .b32 0
47ctx_query_address_low: .b32 0 48ctx_query_address_low: .b32 0
48ctx_query_counter: .b32 0 49ctx_query_counter: .b32 0
@@ -78,64 +79,65 @@ ctx_ycnt: .b32 0
78dispatch_table: 79dispatch_table:
79// mthd 0x0000, NAME 80// mthd 0x0000, NAME
80.b16 0x000 1 81.b16 0x000 1
81.b32 ctx_object ~0xffffffff 82.b32 #ctx_object ~0xffffffff
82// mthd 0x0100, NOP 83// mthd 0x0100, NOP
83.b16 0x040 1 84.b16 0x040 1
84.b32 0x00010000 + cmd_nop ~0xffffffff 85.b32 0x00010000 + #cmd_nop ~0xffffffff
85// mthd 0x0140, PM_TRIGGER 86// mthd 0x0140, PM_TRIGGER
86.b16 0x050 1 87.b16 0x050 1
87.b32 0x00010000 + cmd_pm_trigger ~0xffffffff 88.b32 0x00010000 + #cmd_pm_trigger ~0xffffffff
88ifdef(`NVA3', ` 89ifdef(`NVA3', `
89// mthd 0x0180-0x018c, DMA_ 90// mthd 0x0180-0x018c, DMA_
90.b16 0x060 ctx_dma_count 91.b16 0x060 #ctx_dma_count
91dispatch_dma: 92dispatch_dma:
92.b32 0x00010000 + cmd_dma ~0xffffffff 93.b32 0x00010000 + #cmd_dma ~0xffffffff
93.b32 0x00010000 + cmd_dma ~0xffffffff 94.b32 0x00010000 + #cmd_dma ~0xffffffff
94.b32 0x00010000 + cmd_dma ~0xffffffff 95.b32 0x00010000 + #cmd_dma ~0xffffffff
95',) 96',)
96// mthd 0x0200-0x0218, SRC_TILE 97// mthd 0x0200-0x0218, SRC_TILE
97.b16 0x80 7 98.b16 0x80 7
98.b32 ctx_src_tile_mode ~0x00000fff 99.b32 #ctx_src_tile_mode ~0x00000fff
99.b32 ctx_src_xsize ~0x0007ffff 100.b32 #ctx_src_xsize ~0x0007ffff
100.b32 ctx_src_ysize ~0x00001fff 101.b32 #ctx_src_ysize ~0x00001fff
101.b32 ctx_src_zsize ~0x000007ff 102.b32 #ctx_src_zsize ~0x000007ff
102.b32 ctx_src_zoff ~0x00000fff 103.b32 #ctx_src_zoff ~0x00000fff
103.b32 ctx_src_xoff ~0x0007ffff 104.b32 #ctx_src_xoff ~0x0007ffff
104.b32 ctx_src_yoff ~0x00001fff 105.b32 #ctx_src_yoff ~0x00001fff
105// mthd 0x0220-0x0238, DST_TILE 106// mthd 0x0220-0x0238, DST_TILE
106.b16 0x88 7 107.b16 0x88 7
107.b32 ctx_dst_tile_mode ~0x00000fff 108.b32 #ctx_dst_tile_mode ~0x00000fff
108.b32 ctx_dst_xsize ~0x0007ffff 109.b32 #ctx_dst_xsize ~0x0007ffff
109.b32 ctx_dst_ysize ~0x00001fff 110.b32 #ctx_dst_ysize ~0x00001fff
110.b32 ctx_dst_zsize ~0x000007ff 111.b32 #ctx_dst_zsize ~0x000007ff
111.b32 ctx_dst_zoff ~0x00000fff 112.b32 #ctx_dst_zoff ~0x00000fff
112.b32 ctx_dst_xoff ~0x0007ffff 113.b32 #ctx_dst_xoff ~0x0007ffff
113.b32 ctx_dst_yoff ~0x00001fff 114.b32 #ctx_dst_yoff ~0x00001fff
114// mthd 0x0300-0x0304, EXEC, WRCACHE_FLUSH 115// mthd 0x0300-0x0304, EXEC, WRCACHE_FLUSH
115.b16 0xc0 2 116.b16 0xc0 2
116.b32 0x00010000 + cmd_exec ~0xffffffff 117.b32 0x00010000 + #cmd_exec ~0xffffffff
117.b32 0x00010000 + cmd_wrcache_flush ~0xffffffff 118.b32 0x00010000 + #cmd_wrcache_flush ~0xffffffff
118// mthd 0x030c-0x0340, various stuff 119// mthd 0x030c-0x0340, various stuff
119.b16 0xc3 14 120.b16 0xc3 14
120.b32 ctx_src_address_high ~0x000000ff 121.b32 #ctx_src_address_high ~0x000000ff
121.b32 ctx_src_address_low ~0xfffffff0 122.b32 #ctx_src_address_low ~0xfffffff0
122.b32 ctx_dst_address_high ~0x000000ff 123.b32 #ctx_dst_address_high ~0x000000ff
123.b32 ctx_dst_address_low ~0xfffffff0 124.b32 #ctx_dst_address_low ~0xfffffff0
124.b32 ctx_src_pitch ~0x0007ffff 125.b32 #ctx_src_pitch ~0x0007ffff
125.b32 ctx_dst_pitch ~0x0007ffff 126.b32 #ctx_dst_pitch ~0x0007ffff
126.b32 ctx_xcnt ~0x0000ffff 127.b32 #ctx_xcnt ~0x0000ffff
127.b32 ctx_ycnt ~0x00001fff 128.b32 #ctx_ycnt ~0x00001fff
128.b32 ctx_format ~0x0333ffff 129.b32 #ctx_format ~0x0333ffff
129.b32 ctx_swz_const0 ~0xffffffff 130.b32 #ctx_swz_const0 ~0xffffffff
130.b32 ctx_swz_const1 ~0xffffffff 131.b32 #ctx_swz_const1 ~0xffffffff
131.b32 ctx_query_address_high ~0x000000ff 132.b32 #ctx_query_address_high ~0x000000ff
132.b32 ctx_query_address_low ~0xffffffff 133.b32 #ctx_query_address_low ~0xffffffff
133.b32 ctx_query_counter ~0xffffffff 134.b32 #ctx_query_counter ~0xffffffff
134.b16 0x800 0 135.b16 0x800 0
135 136
136ifdef(`NVA3', 137ifdef(`NVA3',
137.section nva3_pcopy_code, 138.section #nva3_pcopy_code
138.section nvc0_pcopy_code 139,
140.section #nvc0_pcopy_code
139) 141)
140 142
141main: 143main:
@@ -143,12 +145,12 @@ main:
143 mov $sp $r0 145 mov $sp $r0
144 146
145 // setup i0 handler and route fifo and ctxswitch to it 147 // setup i0 handler and route fifo and ctxswitch to it
146 mov $r1 ih 148 mov $r1 #ih
147 mov $iv0 $r1 149 mov $iv0 $r1
148 mov $r1 0x400 150 mov $r1 0x400
149 movw $r2 0xfff3 151 movw $r2 0xfff3
150 sethi $r2 0 152 sethi $r2 0
151 iowr I[$r2 + 0x300] $r2 153 iowr I[$r1 + 0x300] $r2
152 154
153 // enable interrupts 155 // enable interrupts
154 or $r2 0xc 156 or $r2 0xc
@@ -164,19 +166,19 @@ main:
164 bset $flags $p0 166 bset $flags $p0
165 spin: 167 spin:
166 sleep $p0 168 sleep $p0
167 bra spin 169 bra #spin
168 170
169// i0 handler 171// i0 handler
170ih: 172ih:
171 iord $r1 I[$r0 + 0x200] 173 iord $r1 I[$r0 + 0x200]
172 174
173 and $r2 $r1 0x00000008 175 and $r2 $r1 0x00000008
174 bra e ih_no_chsw 176 bra e #ih_no_chsw
175 call chsw 177 call #chsw
176 ih_no_chsw: 178 ih_no_chsw:
177 and $r2 $r1 0x00000004 179 and $r2 $r1 0x00000004
178 bra e ih_no_cmd 180 bra e #ih_no_cmd
179 call dispatch 181 call #dispatch
180 182
181 ih_no_cmd: 183 ih_no_cmd:
182 and $r1 $r1 0x0000000c 184 and $r1 $r1 0x0000000c
@@ -235,9 +237,9 @@ ifdef(`NVA3', `
235 sethi $r4 0x60000 237 sethi $r4 0x60000
236 238
237 // swap! 239 // swap!
238 bra $p1 swctx_load 240 bra $p1 #swctx_load
239 xdst $r0 $r4 241 xdst $r0 $r4
240 bra swctx_done 242 bra #swctx_done
241 swctx_load: 243 swctx_load:
242 xdld $r0 $r4 244 xdld $r0 $r4
243 swctx_done: 245 swctx_done:
@@ -251,9 +253,9 @@ chsw:
251 253
252 // if it's active, unload it and return 254 // if it's active, unload it and return
253 xbit $r15 $r3 0x1e 255 xbit $r15 $r3 0x1e
254 bra e chsw_no_unload 256 bra e #chsw_no_unload
255 bclr $flags $p1 257 bclr $flags $p1
256 call swctx 258 call #swctx
257 bclr $r3 0x1e 259 bclr $r3 0x1e
258 iowr I[$r2] $r3 260 iowr I[$r2] $r3
259 mov $r4 1 261 mov $r4 1
@@ -266,20 +268,20 @@ chsw:
266 268
267 // is there a channel waiting to be loaded? 269 // is there a channel waiting to be loaded?
268 xbit $r13 $r3 0x1e 270 xbit $r13 $r3 0x1e
269 bra e chsw_finish_load 271 bra e #chsw_finish_load
270 bset $flags $p1 272 bset $flags $p1
271 call swctx 273 call #swctx
272ifdef(`NVA3', 274ifdef(`NVA3',
273 // load dma objects back into TARGET regs 275 // load dma objects back into TARGET regs
274 mov $r5 ctx_dma 276 mov $r5 #ctx_dma
275 mov $r6 ctx_dma_count 277 mov $r6 #ctx_dma_count
276 chsw_load_ctx_dma: 278 chsw_load_ctx_dma:
277 ld b32 $r7 D[$r5 + $r6 * 4] 279 ld b32 $r7 D[$r5 + $r6 * 4]
278 add b32 $r8 $r6 0x180 280 add b32 $r8 $r6 0x180
279 shl b32 $r8 8 281 shl b32 $r8 8
280 iowr I[$r8] $r7 282 iowr I[$r8] $r7
281 sub b32 $r6 1 283 sub b32 $r6 1
282 bra nc chsw_load_ctx_dma 284 bra nc #chsw_load_ctx_dma
283,) 285,)
284 286
285 chsw_finish_load: 287 chsw_finish_load:
@@ -297,7 +299,7 @@ dispatch:
297 shl b32 $r2 0x10 299 shl b32 $r2 0x10
298 300
299 // lookup method in the dispatch table, ILLEGAL_MTHD if not found 301 // lookup method in the dispatch table, ILLEGAL_MTHD if not found
300 mov $r5 dispatch_table 302 mov $r5 #dispatch_table
301 clear b32 $r6 303 clear b32 $r6
302 clear b32 $r7 304 clear b32 $r7
303 dispatch_loop: 305 dispatch_loop:
@@ -305,14 +307,14 @@ dispatch:
305 ld b16 $r7 D[$r5 + 2] 307 ld b16 $r7 D[$r5 + 2]
306 add b32 $r5 4 308 add b32 $r5 4
307 cmpu b32 $r4 $r6 309 cmpu b32 $r4 $r6
308 bra c dispatch_illegal_mthd 310 bra c #dispatch_illegal_mthd
309 add b32 $r7 $r6 311 add b32 $r7 $r6
310 cmpu b32 $r4 $r7 312 cmpu b32 $r4 $r7
311 bra c dispatch_valid_mthd 313 bra c #dispatch_valid_mthd
312 sub b32 $r7 $r6 314 sub b32 $r7 $r6
313 shl b32 $r7 3 315 shl b32 $r7 3
314 add b32 $r5 $r7 316 add b32 $r5 $r7
315 bra dispatch_loop 317 bra #dispatch_loop
316 318
317 // ensure no bits set in reserved fields, INVALID_BITFIELD 319 // ensure no bits set in reserved fields, INVALID_BITFIELD
318 dispatch_valid_mthd: 320 dispatch_valid_mthd:
@@ -322,20 +324,20 @@ dispatch:
322 ld b32 $r5 D[$r4 + 4] 324 ld b32 $r5 D[$r4 + 4]
323 and $r5 $r3 325 and $r5 $r3
324 cmpu b32 $r5 0 326 cmpu b32 $r5 0
325 bra ne dispatch_invalid_bitfield 327 bra ne #dispatch_invalid_bitfield
326 328
327 // depending on dispatch flags: execute method, or save data as state 329 // depending on dispatch flags: execute method, or save data as state
328 ld b16 $r5 D[$r4 + 0] 330 ld b16 $r5 D[$r4 + 0]
329 ld b16 $r6 D[$r4 + 2] 331 ld b16 $r6 D[$r4 + 2]
330 cmpu b32 $r6 0 332 cmpu b32 $r6 0
331 bra ne dispatch_cmd 333 bra ne #dispatch_cmd
332 st b32 D[$r5] $r3 334 st b32 D[$r5] $r3
333 bra dispatch_done 335 bra #dispatch_done
334 dispatch_cmd: 336 dispatch_cmd:
335 bclr $flags $p1 337 bclr $flags $p1
336 call $r5 338 call $r5
337 bra $p1 dispatch_error 339 bra $p1 #dispatch_error
338 bra dispatch_done 340 bra #dispatch_done
339 341
340 dispatch_invalid_bitfield: 342 dispatch_invalid_bitfield:
341 or $r2 2 343 or $r2 2
@@ -353,7 +355,7 @@ dispatch:
353 iord $r2 I[$r0 + 0x200] 355 iord $r2 I[$r0 + 0x200]
354 and $r2 0x40 356 and $r2 0x40
355 cmpu b32 $r2 0 357 cmpu b32 $r2 0
356 bra ne hostirq_wait 358 bra ne #hostirq_wait
357 359
358 dispatch_done: 360 dispatch_done:
359 mov $r2 0x1d00 361 mov $r2 0x1d00
@@ -409,10 +411,10 @@ ifdef(`NVA3',
409// $r2: hostirq state 411// $r2: hostirq state
410// $r3: data 412// $r3: data
411cmd_dma: 413cmd_dma:
412 sub b32 $r4 dispatch_dma 414 sub b32 $r4 #dispatch_dma
413 shr b32 $r4 1 415 shr b32 $r4 1
414 bset $r3 0x1e 416 bset $r3 0x1e
415 st b32 D[$r4 + ctx_dma] $r3 417 st b32 D[$r4 + #ctx_dma] $r3
416 add b32 $r4 0x600 418 add b32 $r4 0x600
417 shl b32 $r4 6 419 shl b32 $r4 6
418 iowr I[$r4] $r3 420 iowr I[$r4] $r3
@@ -430,7 +432,7 @@ cmd_exec_set_format:
430 st b32 D[$sp + 0x0c] $r0 432 st b32 D[$sp + 0x0c] $r0
431 433
432 // extract cpp, src_ncomp and dst_ncomp from FORMAT 434 // extract cpp, src_ncomp and dst_ncomp from FORMAT
433 ld b32 $r4 D[$r0 + ctx_format] 435 ld b32 $r4 D[$r0 + #ctx_format]
434 extr $r5 $r4 16:17 436 extr $r5 $r4 16:17
435 add b32 $r5 1 437 add b32 $r5 1
436 extr $r6 $r4 20:21 438 extr $r6 $r4 20:21
@@ -448,22 +450,22 @@ cmd_exec_set_format:
448 clear b32 $r11 450 clear b32 $r11
449 bpc_loop: 451 bpc_loop:
450 cmpu b8 $r10 4 452 cmpu b8 $r10 4
451 bra nc cmp_c0 453 bra nc #cmp_c0
452 mulu $r12 $r10 $r5 454 mulu $r12 $r10 $r5
453 add b32 $r12 $r11 455 add b32 $r12 $r11
454 bset $flags $p2 456 bset $flags $p2
455 bra bpc_next 457 bra #bpc_next
456 cmp_c0: 458 cmp_c0:
457 bra ne cmp_c1 459 bra ne #cmp_c1
458 mov $r12 0x10 460 mov $r12 0x10
459 add b32 $r12 $r11 461 add b32 $r12 $r11
460 bra bpc_next 462 bra #bpc_next
461 cmp_c1: 463 cmp_c1:
462 cmpu b8 $r10 6 464 cmpu b8 $r10 6
463 bra nc cmp_zero 465 bra nc #cmp_zero
464 mov $r12 0x14 466 mov $r12 0x14
465 add b32 $r12 $r11 467 add b32 $r12 $r11
466 bra bpc_next 468 bra #bpc_next
467 cmp_zero: 469 cmp_zero:
468 mov $r12 0x80 470 mov $r12 0x80
469 bpc_next: 471 bpc_next:
@@ -471,22 +473,22 @@ cmd_exec_set_format:
471 add b32 $r8 1 473 add b32 $r8 1
472 add b32 $r11 1 474 add b32 $r11 1
473 cmpu b32 $r11 $r5 475 cmpu b32 $r11 $r5
474 bra c bpc_loop 476 bra c #bpc_loop
475 add b32 $r9 1 477 add b32 $r9 1
476 cmpu b32 $r9 $r7 478 cmpu b32 $r9 $r7
477 bra c ncomp_loop 479 bra c #ncomp_loop
478 480
479 // SRC_XCNT = (xcnt * src_cpp), or 0 if no src ref in swz (hw will hang) 481 // SRC_XCNT = (xcnt * src_cpp), or 0 if no src ref in swz (hw will hang)
480 mulu $r6 $r5 482 mulu $r6 $r5
481 st b32 D[$r0 + ctx_src_cpp] $r6 483 st b32 D[$r0 + #ctx_src_cpp] $r6
482 ld b32 $r8 D[$r0 + ctx_xcnt] 484 ld b32 $r8 D[$r0 + #ctx_xcnt]
483 mulu $r6 $r8 485 mulu $r6 $r8
484 bra $p2 dst_xcnt 486 bra $p2 #dst_xcnt
485 clear b32 $r6 487 clear b32 $r6
486 488
487 dst_xcnt: 489 dst_xcnt:
488 mulu $r7 $r5 490 mulu $r7 $r5
489 st b32 D[$r0 + ctx_dst_cpp] $r7 491 st b32 D[$r0 + #ctx_dst_cpp] $r7
490 mulu $r7 $r8 492 mulu $r7 $r8
491 493
492 mov $r5 0x810 494 mov $r5 0x810
@@ -494,10 +496,10 @@ cmd_exec_set_format:
494 iowr I[$r5 + 0x000] $r6 496 iowr I[$r5 + 0x000] $r6
495 iowr I[$r5 + 0x100] $r7 497 iowr I[$r5 + 0x100] $r7
496 add b32 $r5 0x800 498 add b32 $r5 0x800
497 ld b32 $r6 D[$r0 + ctx_dst_cpp] 499 ld b32 $r6 D[$r0 + #ctx_dst_cpp]
498 sub b32 $r6 1 500 sub b32 $r6 1
499 shl b32 $r6 8 501 shl b32 $r6 8
500 ld b32 $r7 D[$r0 + ctx_src_cpp] 502 ld b32 $r7 D[$r0 + #ctx_src_cpp]
501 sub b32 $r7 1 503 sub b32 $r7 1
502 or $r6 $r7 504 or $r6 $r7
503 iowr I[$r5 + 0x000] $r6 505 iowr I[$r5 + 0x000] $r6
@@ -511,9 +513,9 @@ cmd_exec_set_format:
511 ld b32 $r6 D[$sp + 0x0c] 513 ld b32 $r6 D[$sp + 0x0c]
512 iowr I[$r5 + 0x300] $r6 514 iowr I[$r5 + 0x300] $r6
513 add b32 $r5 0x400 515 add b32 $r5 0x400
514 ld b32 $r6 D[$r0 + ctx_swz_const0] 516 ld b32 $r6 D[$r0 + #ctx_swz_const0]
515 iowr I[$r5 + 0x000] $r6 517 iowr I[$r5 + 0x000] $r6
516 ld b32 $r6 D[$r0 + ctx_swz_const1] 518 ld b32 $r6 D[$r0 + #ctx_swz_const1]
517 iowr I[$r5 + 0x100] $r6 519 iowr I[$r5 + 0x100] $r6
518 add $sp 0x10 520 add $sp 0x10
519 ret 521 ret
@@ -543,7 +545,7 @@ cmd_exec_set_format:
543// 545//
544cmd_exec_set_surface_tiled: 546cmd_exec_set_surface_tiled:
545 // translate TILE_MODE into Tp, Th, Td shift values 547 // translate TILE_MODE into Tp, Th, Td shift values
546 ld b32 $r7 D[$r5 + ctx_src_tile_mode] 548 ld b32 $r7 D[$r5 + #ctx_src_tile_mode]
547 extr $r9 $r7 8:11 549 extr $r9 $r7 8:11
548 extr $r8 $r7 4:7 550 extr $r8 $r7 4:7
549ifdef(`NVA3', 551ifdef(`NVA3',
@@ -553,9 +555,9 @@ ifdef(`NVA3',
553) 555)
554 extr $r7 $r7 0:3 556 extr $r7 $r7 0:3
555 cmp b32 $r7 0xe 557 cmp b32 $r7 0xe
556 bra ne xtile64 558 bra ne #xtile64
557 mov $r7 4 559 mov $r7 4
558 bra xtileok 560 bra #xtileok
559 xtile64: 561 xtile64:
560 xbit $r7 $flags $p2 562 xbit $r7 $flags $p2
561 add b32 $r7 17 563 add b32 $r7 17
@@ -565,8 +567,8 @@ ifdef(`NVA3',
565 567
566 // Op = (x * cpp) & ((1 << Tp) - 1) 568 // Op = (x * cpp) & ((1 << Tp) - 1)
567 // Tx = (x * cpp) >> Tp 569 // Tx = (x * cpp) >> Tp
568 ld b32 $r10 D[$r5 + ctx_src_xoff] 570 ld b32 $r10 D[$r5 + #ctx_src_xoff]
569 ld b32 $r11 D[$r5 + ctx_src_cpp] 571 ld b32 $r11 D[$r5 + #ctx_src_cpp]
570 mulu $r10 $r11 572 mulu $r10 $r11
571 mov $r11 1 573 mov $r11 1
572 shl b32 $r11 $r7 574 shl b32 $r11 $r7
@@ -576,7 +578,7 @@ ifdef(`NVA3',
576 578
577 // Tyo = y & ((1 << Th) - 1) 579 // Tyo = y & ((1 << Th) - 1)
578 // Ty = y >> Th 580 // Ty = y >> Th
579 ld b32 $r13 D[$r5 + ctx_src_yoff] 581 ld b32 $r13 D[$r5 + #ctx_src_yoff]
580 mov $r14 1 582 mov $r14 1
581 shl b32 $r14 $r8 583 shl b32 $r14 $r8
582 sub b32 $r14 1 584 sub b32 $r14 1
@@ -598,8 +600,8 @@ ifdef(`NVA3',
598 add b32 $r12 $r11 600 add b32 $r12 $r11
599 601
600 // nTx = ((w * cpp) + ((1 << Tp) - 1) >> Tp) 602 // nTx = ((w * cpp) + ((1 << Tp) - 1) >> Tp)
601 ld b32 $r15 D[$r5 + ctx_src_xsize] 603 ld b32 $r15 D[$r5 + #ctx_src_xsize]
602 ld b32 $r11 D[$r5 + ctx_src_cpp] 604 ld b32 $r11 D[$r5 + #ctx_src_cpp]
603 mulu $r15 $r11 605 mulu $r15 $r11
604 mov $r11 1 606 mov $r11 1
605 shl b32 $r11 $r7 607 shl b32 $r11 $r7
@@ -609,7 +611,7 @@ ifdef(`NVA3',
609 push $r15 611 push $r15
610 612
611 // nTy = (h + ((1 << Th) - 1)) >> Th 613 // nTy = (h + ((1 << Th) - 1)) >> Th
612 ld b32 $r15 D[$r5 + ctx_src_ysize] 614 ld b32 $r15 D[$r5 + #ctx_src_ysize]
613 mov $r11 1 615 mov $r11 1
614 shl b32 $r11 $r8 616 shl b32 $r11 $r8
615 sub b32 $r11 1 617 sub b32 $r11 1
@@ -629,7 +631,7 @@ ifdef(`NVA3',
629 // Tz = z >> Td 631 // Tz = z >> Td
630 // Op += Tzo << Tys 632 // Op += Tzo << Tys
631 // Ts = Tys + Td 633 // Ts = Tys + Td
632 ld b32 $r8 D[$r5 + ctx_src_zoff] 634 ld b32 $r8 D[$r5 + #ctx_src_zoff]
633 mov $r14 1 635 mov $r14 1
634 shl b32 $r14 $r9 636 shl b32 $r14 $r9
635 sub b32 $r14 1 637 sub b32 $r14 1
@@ -656,8 +658,8 @@ ifdef(`NVA3',
656 658
657 // SRC_ADDRESS_LOW = (Ot + Op) & 0xffffffff 659 // SRC_ADDRESS_LOW = (Ot + Op) & 0xffffffff
658 // CFG_ADDRESS_HIGH |= ((Ot + Op) >> 32) << 16 660 // CFG_ADDRESS_HIGH |= ((Ot + Op) >> 32) << 16
659 ld b32 $r7 D[$r5 + ctx_src_address_low] 661 ld b32 $r7 D[$r5 + #ctx_src_address_low]
660 ld b32 $r8 D[$r5 + ctx_src_address_high] 662 ld b32 $r8 D[$r5 + #ctx_src_address_high]
661 add b32 $r10 $r12 663 add b32 $r10 $r12
662 add b32 $r7 $r10 664 add b32 $r7 $r10
663 adc b32 $r8 0 665 adc b32 $r8 0
@@ -677,14 +679,14 @@ cmd_exec_set_surface_linear:
677 xbit $r6 $flags $p2 679 xbit $r6 $flags $p2
678 add b32 $r6 0x202 680 add b32 $r6 0x202
679 shl b32 $r6 8 681 shl b32 $r6 8
680 ld b32 $r7 D[$r5 + ctx_src_address_low] 682 ld b32 $r7 D[$r5 + #ctx_src_address_low]
681 iowr I[$r6 + 0x000] $r7 683 iowr I[$r6 + 0x000] $r7
682 add b32 $r6 0x400 684 add b32 $r6 0x400
683 ld b32 $r7 D[$r5 + ctx_src_address_high] 685 ld b32 $r7 D[$r5 + #ctx_src_address_high]
684 shl b32 $r7 16 686 shl b32 $r7 16
685 iowr I[$r6 + 0x000] $r7 687 iowr I[$r6 + 0x000] $r7
686 add b32 $r6 0x400 688 add b32 $r6 0x400
687 ld b32 $r7 D[$r5 + ctx_src_pitch] 689 ld b32 $r7 D[$r5 + #ctx_src_pitch]
688 iowr I[$r6 + 0x000] $r7 690 iowr I[$r6 + 0x000] $r7
689 ret 691 ret
690 692
@@ -697,7 +699,7 @@ cmd_exec_wait:
697 loop: 699 loop:
698 iord $r1 I[$r0] 700 iord $r1 I[$r0]
699 and $r1 1 701 and $r1 1
700 bra ne loop 702 bra ne #loop
701 pop $r1 703 pop $r1
702 pop $r0 704 pop $r0
703 ret 705 ret
@@ -705,18 +707,18 @@ cmd_exec_wait:
705cmd_exec_query: 707cmd_exec_query:
706 // if QUERY_SHORT not set, write out { -, 0, TIME_LO, TIME_HI } 708 // if QUERY_SHORT not set, write out { -, 0, TIME_LO, TIME_HI }
707 xbit $r4 $r3 13 709 xbit $r4 $r3 13
708 bra ne query_counter 710 bra ne #query_counter
709 call cmd_exec_wait 711 call #cmd_exec_wait
710 mov $r4 0x80c 712 mov $r4 0x80c
711 shl b32 $r4 6 713 shl b32 $r4 6
712 ld b32 $r5 D[$r0 + ctx_query_address_low] 714 ld b32 $r5 D[$r0 + #ctx_query_address_low]
713 add b32 $r5 4 715 add b32 $r5 4
714 iowr I[$r4 + 0x000] $r5 716 iowr I[$r4 + 0x000] $r5
715 iowr I[$r4 + 0x100] $r0 717 iowr I[$r4 + 0x100] $r0
716 mov $r5 0xc 718 mov $r5 0xc
717 iowr I[$r4 + 0x200] $r5 719 iowr I[$r4 + 0x200] $r5
718 add b32 $r4 0x400 720 add b32 $r4 0x400
719 ld b32 $r5 D[$r0 + ctx_query_address_high] 721 ld b32 $r5 D[$r0 + #ctx_query_address_high]
720 shl b32 $r5 16 722 shl b32 $r5 16
721 iowr I[$r4 + 0x000] $r5 723 iowr I[$r4 + 0x000] $r5
722 add b32 $r4 0x500 724 add b32 $r4 0x500
@@ -741,16 +743,16 @@ cmd_exec_query:
741 743
742 // write COUNTER 744 // write COUNTER
743 query_counter: 745 query_counter:
744 call cmd_exec_wait 746 call #cmd_exec_wait
745 mov $r4 0x80c 747 mov $r4 0x80c
746 shl b32 $r4 6 748 shl b32 $r4 6
747 ld b32 $r5 D[$r0 + ctx_query_address_low] 749 ld b32 $r5 D[$r0 + #ctx_query_address_low]
748 iowr I[$r4 + 0x000] $r5 750 iowr I[$r4 + 0x000] $r5
749 iowr I[$r4 + 0x100] $r0 751 iowr I[$r4 + 0x100] $r0
750 mov $r5 0x4 752 mov $r5 0x4
751 iowr I[$r4 + 0x200] $r5 753 iowr I[$r4 + 0x200] $r5
752 add b32 $r4 0x400 754 add b32 $r4 0x400
753 ld b32 $r5 D[$r0 + ctx_query_address_high] 755 ld b32 $r5 D[$r0 + #ctx_query_address_high]
754 shl b32 $r5 16 756 shl b32 $r5 16
755 iowr I[$r4 + 0x000] $r5 757 iowr I[$r4 + 0x000] $r5
756 add b32 $r4 0x500 758 add b32 $r4 0x500
@@ -759,7 +761,7 @@ cmd_exec_query:
759 mov $r5 0x00001110 761 mov $r5 0x00001110
760 sethi $r5 0x13120000 762 sethi $r5 0x13120000
761 iowr I[$r4 + 0x100] $r5 763 iowr I[$r4 + 0x100] $r5
762 ld b32 $r5 D[$r0 + ctx_query_counter] 764 ld b32 $r5 D[$r0 + #ctx_query_counter]
763 add b32 $r4 0x500 765 add b32 $r4 0x500
764 iowr I[$r4 + 0x000] $r5 766 iowr I[$r4 + 0x000] $r5
765 mov $r5 0x00002601 767 mov $r5 0x00002601
@@ -787,22 +789,22 @@ cmd_exec_query:
787// $r2: hostirq state 789// $r2: hostirq state
788// $r3: data 790// $r3: data
789cmd_exec: 791cmd_exec:
790 call cmd_exec_wait 792 call #cmd_exec_wait
791 793
792 // if format requested, call function to calculate it, otherwise 794 // if format requested, call function to calculate it, otherwise
793 // fill in cpp/xcnt for both surfaces as if (cpp == 1) 795 // fill in cpp/xcnt for both surfaces as if (cpp == 1)
794 xbit $r15 $r3 0 796 xbit $r15 $r3 0
795 bra e cmd_exec_no_format 797 bra e #cmd_exec_no_format
796 call cmd_exec_set_format 798 call #cmd_exec_set_format
797 mov $r4 0x200 799 mov $r4 0x200
798 bra cmd_exec_init_src_surface 800 bra #cmd_exec_init_src_surface
799 cmd_exec_no_format: 801 cmd_exec_no_format:
800 mov $r6 0x810 802 mov $r6 0x810
801 shl b32 $r6 6 803 shl b32 $r6 6
802 mov $r7 1 804 mov $r7 1
803 st b32 D[$r0 + ctx_src_cpp] $r7 805 st b32 D[$r0 + #ctx_src_cpp] $r7
804 st b32 D[$r0 + ctx_dst_cpp] $r7 806 st b32 D[$r0 + #ctx_dst_cpp] $r7
805 ld b32 $r7 D[$r0 + ctx_xcnt] 807 ld b32 $r7 D[$r0 + #ctx_xcnt]
806 iowr I[$r6 + 0x000] $r7 808 iowr I[$r6 + 0x000] $r7
807 iowr I[$r6 + 0x100] $r7 809 iowr I[$r6 + 0x100] $r7
808 clear b32 $r4 810 clear b32 $r4
@@ -811,28 +813,28 @@ cmd_exec:
811 bclr $flags $p2 813 bclr $flags $p2
812 clear b32 $r5 814 clear b32 $r5
813 xbit $r15 $r3 4 815 xbit $r15 $r3 4
814 bra e src_tiled 816 bra e #src_tiled
815 call cmd_exec_set_surface_linear 817 call #cmd_exec_set_surface_linear
816 bra cmd_exec_init_dst_surface 818 bra #cmd_exec_init_dst_surface
817 src_tiled: 819 src_tiled:
818 call cmd_exec_set_surface_tiled 820 call #cmd_exec_set_surface_tiled
819 bset $r4 7 821 bset $r4 7
820 822
821 cmd_exec_init_dst_surface: 823 cmd_exec_init_dst_surface:
822 bset $flags $p2 824 bset $flags $p2
823 mov $r5 ctx_dst_address_high - ctx_src_address_high 825 mov $r5 #ctx_dst_address_high - #ctx_src_address_high
824 xbit $r15 $r3 8 826 xbit $r15 $r3 8
825 bra e dst_tiled 827 bra e #dst_tiled
826 call cmd_exec_set_surface_linear 828 call #cmd_exec_set_surface_linear
827 bra cmd_exec_kick 829 bra #cmd_exec_kick
828 dst_tiled: 830 dst_tiled:
829 call cmd_exec_set_surface_tiled 831 call #cmd_exec_set_surface_tiled
830 bset $r4 8 832 bset $r4 8
831 833
832 cmd_exec_kick: 834 cmd_exec_kick:
833 mov $r5 0x800 835 mov $r5 0x800
834 shl b32 $r5 6 836 shl b32 $r5 6
835 ld b32 $r6 D[$r0 + ctx_ycnt] 837 ld b32 $r6 D[$r0 + #ctx_ycnt]
836 iowr I[$r5 + 0x100] $r6 838 iowr I[$r5 + 0x100] $r6
837 mov $r6 0x0041 839 mov $r6 0x0041
838 // SRC_TARGET = 1, DST_TARGET = 2 840 // SRC_TARGET = 1, DST_TARGET = 2
@@ -842,8 +844,8 @@ cmd_exec:
842 844
843 // if requested, queue up a QUERY write after the copy has completed 845 // if requested, queue up a QUERY write after the copy has completed
844 xbit $r15 $r3 12 846 xbit $r15 $r3 12
845 bra e cmd_exec_done 847 bra e #cmd_exec_done
846 call cmd_exec_query 848 call #cmd_exec_query
847 849
848 cmd_exec_done: 850 cmd_exec_done:
849 ret 851 ret
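
[annotation] For reference, the two query paths above appear to target the same 16-byte record: the long form bumps the address by 4 before writing ("add b32 $r5 4"), leaving word 0 to the query_counter path, which writes at offset 0. A guess at the layout, with field names that are mine, not the driver's:

#include <stdint.h>

/* assumed layout, inferred from "{ -, 0, TIME_LO, TIME_HI }" and the +4
 * address skip in the long form */
struct pcopy_query {
    uint32_t counter;  /* written by the query_counter (QUERY_SHORT) path */
    uint32_t zero;     /* long form writes 0 here */
    uint32_t time_lo;  /* long form: timestamp, low 32 bits */
    uint32_t time_hi;  /* long form: timestamp, high 32 bits */
};
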
diff --git a/drivers/gpu/drm/nouveau/nva3_copy.fuc.h b/drivers/gpu/drm/nouveau/nva3_copy.fuc.h
index 2731de22ebe..1f33fbdc00b 100644
--- a/drivers/gpu/drm/nouveau/nva3_copy.fuc.h
+++ b/drivers/gpu/drm/nouveau/nva3_copy.fuc.h
@@ -152,7 +152,7 @@ uint32_t nva3_pcopy_code[] = {
152 0xf10010fe, 152 0xf10010fe,
153 0xf1040017, 153 0xf1040017,
154 0xf0fff327, 154 0xf0fff327,
155 0x22d00023, 155 0x12d00023,
156 0x0c25f0c0, 156 0x0c25f0c0,
157 0xf40012d0, 157 0xf40012d0,
158 0x17f11031, 158 0x17f11031,
diff --git a/drivers/gpu/drm/nouveau/nva3_pm.c b/drivers/gpu/drm/nouveau/nva3_pm.c
index 618c144b7a3..9e636e6ef6d 100644
--- a/drivers/gpu/drm/nouveau/nva3_pm.c
+++ b/drivers/gpu/drm/nouveau/nva3_pm.c
@@ -287,12 +287,13 @@ nva3_pm_grcp_idle(void *data)
287 return false; 287 return false;
288} 288}
289 289
290void 290int
291nva3_pm_clocks_set(struct drm_device *dev, void *pre_state) 291nva3_pm_clocks_set(struct drm_device *dev, void *pre_state)
292{ 292{
293 struct drm_nouveau_private *dev_priv = dev->dev_private; 293 struct drm_nouveau_private *dev_priv = dev->dev_private;
294 struct nva3_pm_state *info = pre_state; 294 struct nva3_pm_state *info = pre_state;
295 unsigned long flags; 295 unsigned long flags;
296 int ret = -EAGAIN;
296 297
297 /* prevent any new grctx switches from starting */ 298 /* prevent any new grctx switches from starting */
298 spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 299 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
@@ -328,6 +329,8 @@ nva3_pm_clocks_set(struct drm_device *dev, void *pre_state)
328 nv_wr32(dev, 0x100210, 0x80000000); 329 nv_wr32(dev, 0x100210, 0x80000000);
329 } 330 }
330 331
332 ret = 0;
333
331cleanup: 334cleanup:
332 /* unfreeze PFIFO */ 335 /* unfreeze PFIFO */
333 nv_mask(dev, 0x002504, 0x00000001, 0x00000000); 336 nv_mask(dev, 0x002504, 0x00000001, 0x00000000);
@@ -339,4 +342,5 @@ cleanup:
339 nv_mask(dev, 0x400824, 0x10000000, 0x10000000); 342 nv_mask(dev, 0x400824, 0x10000000, 0x10000000);
340 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); 343 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
341 kfree(info); 344 kfree(info);
345 return ret;
342} 346}
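
[annotation] Stripped of the register pokes, the hunks above convert the function from void to int using the usual pessimistic-default pattern: the result starts at -EAGAIN and is only cleared once the reclock sequence has run to completion, so every early "goto cleanup" reports failure for free. A minimal sketch of that shape, with the body elided and helper usage assumed from the surrounding file:

int nva3_pm_clocks_set(struct drm_device *dev, void *pre_state)
{
    int ret = -EAGAIN;                 /* assume failure until proven otherwise */

    /* take locks, freeze PFIFO, wait for PGRAPH to idle; any bail-out
     * jumps straight to cleanup and reports -EAGAIN (call assumed) */
    if (!nv_wait_cb(dev, nva3_pm_grcp_idle, dev))
        goto cleanup;

    /* ... program the new clocks ... */

    ret = 0;                           /* only reached once reclocking is done */
cleanup:
    /* unwind runs on both paths: unfreeze PFIFO, unlock, kfree(info) */
    return ret;
}
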
diff --git a/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h b/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h
index 419903880e9..a8d17458ced 100644
--- a/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h
+++ b/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h
@@ -145,7 +145,7 @@ uint32_t nvc0_pcopy_code[] = {
145 0xf10010fe, 145 0xf10010fe,
146 0xf1040017, 146 0xf1040017,
147 0xf0fff327, 147 0xf0fff327,
148 0x22d00023, 148 0x12d00023,
149 0x0c25f0c0, 149 0x0c25f0c0,
150 0xf40012d0, 150 0xf40012d0,
151 0x17f11031, 151 0x17f11031,
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c
index ecfafd70cf0..8ee3963f903 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.c
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.c
@@ -875,14 +875,16 @@ nvc0_graph_create(struct drm_device *dev)
875 case 0xcf: /* 4/0/0/0, 3 */ 875 case 0xcf: /* 4/0/0/0, 3 */
876 priv->magic_not_rop_nr = 0x03; 876 priv->magic_not_rop_nr = 0x03;
877 break; 877 break;
878 case 0xd9: /* 1/0/0/0, 1 */
879 priv->magic_not_rop_nr = 0x01;
880 break;
878 } 881 }
879 882
880 if (!priv->magic_not_rop_nr) { 883 if (!priv->magic_not_rop_nr) {
881 NV_ERROR(dev, "PGRAPH: unknown config: %d/%d/%d/%d, %d\n", 884 NV_ERROR(dev, "PGRAPH: unknown config: %d/%d/%d/%d, %d\n",
882 priv->tp_nr[0], priv->tp_nr[1], priv->tp_nr[2], 885 priv->tp_nr[0], priv->tp_nr[1], priv->tp_nr[2],
883 priv->tp_nr[3], priv->rop_nr); 886 priv->tp_nr[3], priv->rop_nr);
884 /* use 0xc3's values... */ 887 priv->magic_not_rop_nr = 0x00;
885 priv->magic_not_rop_nr = 0x03;
886 } 888 }
887 889
888 NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */ 890 NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */
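
[annotation] The behavioural change in the hunk above: an unrecognised TP/ROP configuration used to silently inherit 0xc3's magic value; now it keeps the zero sentinel after logging the error. That makes it possible for later code to refuse to load rather than guess — something like the following hypothetical guard, which is not part of this patch:

/* hypothetical follow-up check enabled by the zero sentinel */
if (!priv->magic_not_rop_nr) {
    ret = -ENODEV;      /* unsupported chipset: fail instead of guessing */
    goto error;
}
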
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.fuc b/drivers/gpu/drm/nouveau/nvc0_graph.fuc
index 2a4b6dc8f9d..e6b228844a3 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.fuc
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.fuc
@@ -71,9 +71,9 @@ queue_put:
71 ld b32 $r9 D[$r13 + 0x4] // PUT 71 ld b32 $r9 D[$r13 + 0x4] // PUT
72 xor $r8 8 72 xor $r8 8
73 cmpu b32 $r8 $r9 73 cmpu b32 $r8 $r9
74 bra ne queue_put_next 74 bra ne #queue_put_next
75 mov $r15 E_CMD_OVERFLOW 75 mov $r15 E_CMD_OVERFLOW
76 call error 76 call #error
77 ret 77 ret
78 78
79 // store cmd/data on queue 79 // store cmd/data on queue
@@ -104,7 +104,7 @@ queue_get:
104 ld b32 $r8 D[$r13 + 0x0] // GET 104 ld b32 $r8 D[$r13 + 0x0] // GET
105 ld b32 $r9 D[$r13 + 0x4] // PUT 105 ld b32 $r9 D[$r13 + 0x4] // PUT
106 cmpu b32 $r8 $r9 106 cmpu b32 $r8 $r9
107 bra e queue_get_done 107 bra e #queue_get_done
108 // fetch first cmd/data pair 108 // fetch first cmd/data pair
109 and $r9 $r8 7 109 and $r9 $r8 7
110 shl b32 $r9 3 110 shl b32 $r9 3
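
[annotation] The GET/PUT handshake these hunks branch around is a classic power-of-two ring. Roughly, in C — struct layout inferred from the D[$r13 + 0x0/0x4] accesses and the (get & 7) * 8 entry addressing, wrap width from the xor-with-8 fullness test:

#include <stdint.h>
#include <stdbool.h>

struct fuc_queue {
    uint32_t get, put;                  /* D[q + 0x0], D[q + 0x4] */
    struct { uint32_t cmd, data; } ent[8];
};

static bool fuc_queue_put(struct fuc_queue *q, uint32_t cmd, uint32_t data)
{
    if ((q->get ^ 8) == q->put)         /* full: PUT a whole lap ahead of GET */
        return false;                   /* the fuc raises E_CMD_OVERFLOW */
    q->ent[q->put & 7].cmd  = cmd;      /* "and $r9 $r8 7; shl b32 $r9 3" */
    q->ent[q->put & 7].data = data;
    q->put = (q->put + 1) & 0xf;        /* counters run over two laps of 8 */
    return true;
}
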
@@ -135,9 +135,9 @@ nv_rd32:
135 nv_rd32_wait: 135 nv_rd32_wait:
136 iord $r12 I[$r11 + 0x000] 136 iord $r12 I[$r11 + 0x000]
137 xbit $r12 $r12 31 137 xbit $r12 $r12 31
138 bra ne nv_rd32_wait 138 bra ne #nv_rd32_wait
139 mov $r10 6 // DONE_MMIO_RD 139 mov $r10 6 // DONE_MMIO_RD
140 call wait_doneo 140 call #wait_doneo
141 iord $r15 I[$r11 + 0x100] // MMIO_RDVAL 141 iord $r15 I[$r11 + 0x100] // MMIO_RDVAL
142 ret 142 ret
143 143
@@ -157,7 +157,7 @@ nv_wr32:
157 nv_wr32_wait: 157 nv_wr32_wait:
158 iord $r12 I[$r11 + 0x000] 158 iord $r12 I[$r11 + 0x000]
159 xbit $r12 $r12 31 159 xbit $r12 $r12 31
160 bra ne nv_wr32_wait 160 bra ne #nv_wr32_wait
161 ret 161 ret
162 162
163// (re)set watchdog timer 163// (re)set watchdog timer
@@ -193,7 +193,7 @@ $1:
193 shl b32 $r8 6 193 shl b32 $r8 6
194 iord $r8 I[$r8 + 0x000] // DONE 194 iord $r8 I[$r8 + 0x000] // DONE
195 xbit $r8 $r8 $r10 195 xbit $r8 $r8 $r10
196 bra $2 wait_done_$1 196 bra $2 #wait_done_$1
197 trace_clr(T_WAIT) 197 trace_clr(T_WAIT)
198 ret 198 ret
199') 199')
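
[annotation] For readers not fluent in m4: the macro above stamps out one poll loop per sense — wait_donez spins while the DONE bit is still set, wait_doneo while it is still clear. The equivalent C, with falcon_iord() standing in for the iord I[...] access:

extern unsigned int falcon_iord(unsigned int reg);  /* stand-in for iord I[...] */

/* wait_donez polls until bit 'bit' reads 0; wait_doneo until it reads 1 */
static void wait_done(unsigned int done_reg, unsigned int bit, unsigned int want)
{
    while (((falcon_iord(done_reg) >> bit) & 1) != want)
        ;   /* corresponds to: bra $2 #wait_done_$1 */
}
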
@@ -216,7 +216,7 @@ mmctx_size:
216 add b32 $r9 $r8 216 add b32 $r9 $r8
217 add b32 $r14 4 217 add b32 $r14 4
218 cmpu b32 $r14 $r15 218 cmpu b32 $r14 $r15
219 bra ne nv_mmctx_size_loop 219 bra ne #nv_mmctx_size_loop
220 mov b32 $r15 $r9 220 mov b32 $r15 $r9
221 ret 221 ret
222 222
@@ -238,12 +238,12 @@ mmctx_xfer:
238 shl b32 $r8 6 238 shl b32 $r8 6
239 clear b32 $r9 239 clear b32 $r9
240 or $r11 $r11 240 or $r11 $r11
241 bra e mmctx_base_disabled 241 bra e #mmctx_base_disabled
242 iowr I[$r8 + 0x000] $r11 // MMCTX_BASE 242 iowr I[$r8 + 0x000] $r11 // MMCTX_BASE
243 bset $r9 0 // BASE_EN 243 bset $r9 0 // BASE_EN
244 mmctx_base_disabled: 244 mmctx_base_disabled:
245 or $r14 $r14 245 or $r14 $r14
246 bra e mmctx_multi_disabled 246 bra e #mmctx_multi_disabled
247 iowr I[$r8 + 0x200] $r14 // MMCTX_MULTI_STRIDE 247 iowr I[$r8 + 0x200] $r14 // MMCTX_MULTI_STRIDE
248 iowr I[$r8 + 0x300] $r15 // MMCTX_MULTI_MASK 248 iowr I[$r8 + 0x300] $r15 // MMCTX_MULTI_MASK
249 bset $r9 1 // MULTI_EN 249 bset $r9 1 // MULTI_EN
@@ -264,7 +264,7 @@ mmctx_xfer:
264 mmctx_wait_free: 264 mmctx_wait_free:
265 iord $r14 I[$r8 + 0x000] // MMCTX_CTRL 265 iord $r14 I[$r8 + 0x000] // MMCTX_CTRL
266 and $r14 0x1f 266 and $r14 0x1f
267 bra e mmctx_wait_free 267 bra e #mmctx_wait_free
268 268
269 // queue up an entry 269 // queue up an entry
270 ld b32 $r14 D[$r12] 270 ld b32 $r14 D[$r12]
@@ -272,19 +272,19 @@ mmctx_xfer:
272 iowr I[$r8 + 0x300] $r14 272 iowr I[$r8 + 0x300] $r14
273 add b32 $r12 4 273 add b32 $r12 4
274 cmpu b32 $r12 $r13 274 cmpu b32 $r12 $r13
275 bra ne mmctx_exec_loop 275 bra ne #mmctx_exec_loop
276 276
277 xbit $r11 $r10 2 277 xbit $r11 $r10 2
278 bra ne mmctx_stop 278 bra ne #mmctx_stop
279 // wait for queue to empty 279 // wait for queue to empty
280 mmctx_fini_wait: 280 mmctx_fini_wait:
281 iord $r11 I[$r8 + 0x000] // MMCTX_CTRL 281 iord $r11 I[$r8 + 0x000] // MMCTX_CTRL
282 and $r11 0x1f 282 and $r11 0x1f
283 cmpu b32 $r11 0x10 283 cmpu b32 $r11 0x10
284 bra ne mmctx_fini_wait 284 bra ne #mmctx_fini_wait
285 mov $r10 2 // DONE_MMCTX 285 mov $r10 2 // DONE_MMCTX
286 call wait_donez 286 call #wait_donez
287 bra mmctx_done 287 bra #mmctx_done
288 mmctx_stop: 288 mmctx_stop:
289 xbit $r11 $r10 0 289 xbit $r11 $r10 0
290 shl b32 $r11 16 // DIR 290 shl b32 $r11 16 // DIR
@@ -295,7 +295,7 @@ mmctx_xfer:
295 // wait for STOP_TRIGGER to clear 295 // wait for STOP_TRIGGER to clear
296 iord $r11 I[$r8 + 0x000] // MMCTX_CTRL 296 iord $r11 I[$r8 + 0x000] // MMCTX_CTRL
297 xbit $r11 $r11 18 297 xbit $r11 $r11 18
298 bra ne mmctx_stop_wait 298 bra ne #mmctx_stop_wait
299 mmctx_done: 299 mmctx_done:
300 trace_clr(T_MMCTX) 300 trace_clr(T_MMCTX)
301 ret 301 ret
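
[annotation] The save/restore streaming in these hunks reduces to: throttle on the free-slot count in MMCTX_CTRL, then feed each list word to the queue register at +0x300. A pseudo-C rendering — falcon_iord()/falcon_iowr() stand in for the iord/iowr I[...] accesses, and any per-entry flag OR-ing hidden between the quoted lines is elided:

extern unsigned int falcon_iord(unsigned int reg);
extern void falcon_iowr(unsigned int reg, unsigned int val);

static void mmctx_exec(unsigned int ctrl_reg, const unsigned int *list,
                       const unsigned int *list_tail)
{
    while (list < list_tail) {
        while ((falcon_iord(ctrl_reg) & 0x1f) == 0)
            ;                                    /* mmctx_wait_free */
        falcon_iowr(ctrl_reg + 0x300, *list++);  /* queue up an entry */
    }
}
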
@@ -305,7 +305,7 @@ mmctx_xfer:
305strand_wait: 305strand_wait:
306 push $r10 306 push $r10
307 mov $r10 2 307 mov $r10 2
308 call wait_donez 308 call #wait_donez
309 pop $r10 309 pop $r10
310 ret 310 ret
311 311
@@ -316,7 +316,7 @@ strand_pre:
316 sethi $r8 0x20000 316 sethi $r8 0x20000
317 mov $r9 0xc 317 mov $r9 0xc
318 iowr I[$r8] $r9 318 iowr I[$r8] $r9
319 call strand_wait 319 call #strand_wait
320 ret 320 ret
321 321
322// unknown - call after issuing strand commands 322// unknown - call after issuing strand commands
@@ -326,7 +326,7 @@ strand_post:
326 sethi $r8 0x20000 326 sethi $r8 0x20000
327 mov $r9 0xd 327 mov $r9 0xd
328 iowr I[$r8] $r9 328 iowr I[$r8] $r9
329 call strand_wait 329 call #strand_wait
330 ret 330 ret
331 331
332// Selects strand set?! 332// Selects strand set?!
@@ -341,11 +341,11 @@ strand_set:
341 iowr I[$r10 + 0x000] $r12 // 0x93c = 0xf 341 iowr I[$r10 + 0x000] $r12 // 0x93c = 0xf
342 mov $r12 0xb 342 mov $r12 0xb
343 iowr I[$r11 + 0x000] $r12 // 0x928 = 0xb 343 iowr I[$r11 + 0x000] $r12 // 0x928 = 0xb
344 call strand_wait 344 call #strand_wait
345 iowr I[$r10 + 0x000] $r14 // 0x93c = <id> 345 iowr I[$r10 + 0x000] $r14 // 0x93c = <id>
346 mov $r12 0xa 346 mov $r12 0xa
347 iowr I[$r11 + 0x000] $r12 // 0x928 = 0xa 347 iowr I[$r11 + 0x000] $r12 // 0x928 = 0xa
348 call strand_wait 348 call #strand_wait
349 ret 349 ret
350 350
351// Initialise strand context data 351// Initialise strand context data
@@ -357,22 +357,22 @@ strand_set:
357// 357//
358strand_ctx_init: 358strand_ctx_init:
359 trace_set(T_STRINIT) 359 trace_set(T_STRINIT)
360 call strand_pre 360 call #strand_pre
361 mov $r14 3 361 mov $r14 3
362 call strand_set 362 call #strand_set
363 mov $r10 0x46fc 363 mov $r10 0x46fc
364 sethi $r10 0x20000 364 sethi $r10 0x20000
365 add b32 $r11 $r10 0x400 365 add b32 $r11 $r10 0x400
366 iowr I[$r10 + 0x100] $r0 // STRAND_FIRST_GENE = 0 366 iowr I[$r10 + 0x100] $r0 // STRAND_FIRST_GENE = 0
367 mov $r12 1 367 mov $r12 1
368 iowr I[$r11 + 0x000] $r12 // STRAND_CMD = LATCH_FIRST_GENE 368 iowr I[$r11 + 0x000] $r12 // STRAND_CMD = LATCH_FIRST_GENE
369 call strand_wait 369 call #strand_wait
370 sub b32 $r12 $r0 1 370 sub b32 $r12 $r0 1
371 iowr I[$r10 + 0x000] $r12 // STRAND_GENE_CNT = 0xffffffff 371 iowr I[$r10 + 0x000] $r12 // STRAND_GENE_CNT = 0xffffffff
372 mov $r12 2 372 mov $r12 2
373 iowr I[$r11 + 0x000] $r12 // STRAND_CMD = LATCH_GENE_CNT 373 iowr I[$r11 + 0x000] $r12 // STRAND_CMD = LATCH_GENE_CNT
374 call strand_wait 374 call #strand_wait
375 call strand_post 375 call #strand_post
376 376
377 // read the size of each strand, poke the context offset of 377 // read the size of each strand, poke the context offset of
378 // each into STRAND_{SAVE,LOAD}_SWBASE now, no need to worry 378 // each into STRAND_{SAVE,LOAD}_SWBASE now, no need to worry
@@ -391,7 +391,7 @@ strand_ctx_init:
391 add b32 $r14 $r10 391 add b32 $r14 $r10
392 add b32 $r8 4 392 add b32 $r8 4
393 sub b32 $r9 1 393 sub b32 $r9 1
394 bra ne ctx_init_strand_loop 394 bra ne #ctx_init_strand_loop
395 395
396 shl b32 $r14 8 396 shl b32 $r14 8
397 sub b32 $r15 $r14 $r15 397 sub b32 $r15 $r14 $r15
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.h b/drivers/gpu/drm/nouveau/nvc0_graph.h
index 636fe9812f7..91d44ea662d 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.h
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.h
@@ -87,6 +87,7 @@ nvc0_graph_class(struct drm_device *dev)
87 case 0xc1: 87 case 0xc1:
88 return 0x9197; 88 return 0x9197;
89 case 0xc8: 89 case 0xc8:
90 case 0xd9:
90 return 0x9297; 91 return 0x9297;
91 default: 92 default:
92 return 0; 93 return 0;
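
[annotation] nvc0_graph_class() maps a chipset to its 3D class object (0x9197 for 0xc1, 0x9297 for 0xc8 and now 0xd9, 0 when unknown). An illustrative sketch of how a caller might consume it — the 2D registration is quoted from the nvc0_graph.c hunk earlier, the rest is assumption:

u32 gr3d = nvc0_graph_class(dev);

NVOBJ_CLASS(dev, 0x902d, GR);   /* 2D */
if (gr3d)
    NVOBJ_CLASS(dev, gr3d, GR); /* chipset's 3D class, if recognised */
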
diff --git a/drivers/gpu/drm/nouveau/nvc0_grctx.c b/drivers/gpu/drm/nouveau/nvc0_grctx.c
index 96b0b93d94c..de77842b31c 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grctx.c
+++ b/drivers/gpu/drm/nouveau/nvc0_grctx.c
@@ -1268,6 +1268,17 @@ nvc0_grctx_generate_9039(struct drm_device *dev)
1268static void 1268static void
1269nvc0_grctx_generate_90c0(struct drm_device *dev) 1269nvc0_grctx_generate_90c0(struct drm_device *dev)
1270{ 1270{
1271 struct drm_nouveau_private *dev_priv = dev->dev_private;
1272 int i;
1273
1274 for (i = 0; dev_priv->chipset == 0xd9 && i < 4; i++) {
1275 nv_mthd(dev, 0x90c0, 0x2700 + (i * 0x40), 0x00000000);
1276 nv_mthd(dev, 0x90c0, 0x2720 + (i * 0x40), 0x00000000);
1277 nv_mthd(dev, 0x90c0, 0x2704 + (i * 0x40), 0x00000000);
1278 nv_mthd(dev, 0x90c0, 0x2724 + (i * 0x40), 0x00000000);
1279 nv_mthd(dev, 0x90c0, 0x2708 + (i * 0x40), 0x00000000);
1280 nv_mthd(dev, 0x90c0, 0x2728 + (i * 0x40), 0x00000000);
1281 }
1271 nv_mthd(dev, 0x90c0, 0x270c, 0x00000000); 1282 nv_mthd(dev, 0x90c0, 0x270c, 0x00000000);
1272 nv_mthd(dev, 0x90c0, 0x272c, 0x00000000); 1283 nv_mthd(dev, 0x90c0, 0x272c, 0x00000000);
1273 nv_mthd(dev, 0x90c0, 0x274c, 0x00000000); 1284 nv_mthd(dev, 0x90c0, 0x274c, 0x00000000);
@@ -1276,6 +1287,12 @@ nvc0_grctx_generate_90c0(struct drm_device *dev)
1276 nv_mthd(dev, 0x90c0, 0x27ac, 0x00000000); 1287 nv_mthd(dev, 0x90c0, 0x27ac, 0x00000000);
1277 nv_mthd(dev, 0x90c0, 0x27cc, 0x00000000); 1288 nv_mthd(dev, 0x90c0, 0x27cc, 0x00000000);
1278 nv_mthd(dev, 0x90c0, 0x27ec, 0x00000000); 1289 nv_mthd(dev, 0x90c0, 0x27ec, 0x00000000);
1290 for (i = 0; dev_priv->chipset == 0xd9 && i < 4; i++) {
1291 nv_mthd(dev, 0x90c0, 0x2710 + (i * 0x40), 0x00014000);
1292 nv_mthd(dev, 0x90c0, 0x2730 + (i * 0x40), 0x00014000);
1293 nv_mthd(dev, 0x90c0, 0x2714 + (i * 0x40), 0x00000040);
1294 nv_mthd(dev, 0x90c0, 0x2734 + (i * 0x40), 0x00000040);
1295 }
1279 nv_mthd(dev, 0x90c0, 0x030c, 0x00000001); 1296 nv_mthd(dev, 0x90c0, 0x030c, 0x00000001);
1280 nv_mthd(dev, 0x90c0, 0x1944, 0x00000000); 1297 nv_mthd(dev, 0x90c0, 0x1944, 0x00000000);
1281 nv_mthd(dev, 0x90c0, 0x0758, 0x00000100); 1298 nv_mthd(dev, 0x90c0, 0x0758, 0x00000100);
@@ -1471,14 +1488,20 @@ nvc0_grctx_generate_shaders(struct drm_device *dev)
1471{ 1488{
1472 struct drm_nouveau_private *dev_priv = dev->dev_private; 1489 struct drm_nouveau_private *dev_priv = dev->dev_private;
1473 1490
1474 if (dev_priv->chipset != 0xc1) { 1491 if (dev_priv->chipset == 0xd9) {
1475 nv_wr32(dev, 0x405800, 0x078000bf);
1476 nv_wr32(dev, 0x405830, 0x02180000);
1477 } else {
1478 nv_wr32(dev, 0x405800, 0x0f8000bf); 1492 nv_wr32(dev, 0x405800, 0x0f8000bf);
1479 nv_wr32(dev, 0x405830, 0x02180218); 1493 nv_wr32(dev, 0x405830, 0x02180218);
1494 nv_wr32(dev, 0x405834, 0x08000000);
1495 } else
1496 if (dev_priv->chipset == 0xc1) {
1497 nv_wr32(dev, 0x405800, 0x0f8000bf);
1498 nv_wr32(dev, 0x405830, 0x02180218);
1499 nv_wr32(dev, 0x405834, 0x00000000);
1500 } else {
1501 nv_wr32(dev, 0x405800, 0x078000bf);
1502 nv_wr32(dev, 0x405830, 0x02180000);
1503 nv_wr32(dev, 0x405834, 0x00000000);
1480 } 1504 }
1481 nv_wr32(dev, 0x405834, 0x00000000);
1482 nv_wr32(dev, 0x405838, 0x00000000); 1505 nv_wr32(dev, 0x405838, 0x00000000);
1483 nv_wr32(dev, 0x405854, 0x00000000); 1506 nv_wr32(dev, 0x405854, 0x00000000);
1484 nv_wr32(dev, 0x405870, 0x00000001); 1507 nv_wr32(dev, 0x405870, 0x00000001);
@@ -1509,7 +1532,10 @@ nvc0_grctx_generate_unk64xx(struct drm_device *dev)
1509 nv_wr32(dev, 0x4064ac, 0x00003fff); 1532 nv_wr32(dev, 0x4064ac, 0x00003fff);
1510 nv_wr32(dev, 0x4064b4, 0x00000000); 1533 nv_wr32(dev, 0x4064b4, 0x00000000);
1511 nv_wr32(dev, 0x4064b8, 0x00000000); 1534 nv_wr32(dev, 0x4064b8, 0x00000000);
1512 if (dev_priv->chipset == 0xc1) { 1535 if (dev_priv->chipset == 0xd9)
1536 nv_wr32(dev, 0x4064bc, 0x00000000);
1537 if (dev_priv->chipset == 0xc1 ||
1538 dev_priv->chipset == 0xd9) {
1513 nv_wr32(dev, 0x4064c0, 0x80140078); 1539 nv_wr32(dev, 0x4064c0, 0x80140078);
1514 nv_wr32(dev, 0x4064c4, 0x0086ffff); 1540 nv_wr32(dev, 0x4064c4, 0x0086ffff);
1515 } 1541 }
@@ -1550,10 +1576,23 @@ nvc0_grctx_generate_rop(struct drm_device *dev)
1550 /* ROPC_BROADCAST */ 1576 /* ROPC_BROADCAST */
1551 nv_wr32(dev, 0x408800, 0x02802a3c); 1577 nv_wr32(dev, 0x408800, 0x02802a3c);
1552 nv_wr32(dev, 0x408804, 0x00000040); 1578 nv_wr32(dev, 0x408804, 0x00000040);
1553 nv_wr32(dev, 0x408808, chipset != 0xc1 ? 0x0003e00d : 0x1003e005); 1579 if (chipset == 0xd9) {
1554 nv_wr32(dev, 0x408900, 0x3080b801); 1580 nv_wr32(dev, 0x408808, 0x1043e005);
1555 nv_wr32(dev, 0x408904, chipset != 0xc1 ? 0x02000001 : 0x62000001); 1581 nv_wr32(dev, 0x408900, 0x3080b801);
1556 nv_wr32(dev, 0x408908, 0x00c80929); 1582 nv_wr32(dev, 0x408904, 0x1043e005);
1583 nv_wr32(dev, 0x408908, 0x00c8102f);
1584 } else
1585 if (chipset == 0xc1) {
1586 nv_wr32(dev, 0x408808, 0x1003e005);
1587 nv_wr32(dev, 0x408900, 0x3080b801);
1588 nv_wr32(dev, 0x408904, 0x62000001);
1589 nv_wr32(dev, 0x408908, 0x00c80929);
1590 } else {
1591 nv_wr32(dev, 0x408808, 0x0003e00d);
1592 nv_wr32(dev, 0x408900, 0x3080b801);
1593 nv_wr32(dev, 0x408904, 0x02000001);
1594 nv_wr32(dev, 0x408908, 0x00c80929);
1595 }
1557 nv_wr32(dev, 0x40890c, 0x00000000); 1596 nv_wr32(dev, 0x40890c, 0x00000000);
1558 nv_wr32(dev, 0x408980, 0x0000011d); 1597 nv_wr32(dev, 0x408980, 0x0000011d);
1559} 1598}
@@ -1572,7 +1611,7 @@ nvc0_grctx_generate_gpc(struct drm_device *dev)
1572 nv_wr32(dev, 0x418408, 0x00000000); 1611 nv_wr32(dev, 0x418408, 0x00000000);
1573 nv_wr32(dev, 0x41840c, 0x00001008); 1612 nv_wr32(dev, 0x41840c, 0x00001008);
1574 nv_wr32(dev, 0x418410, 0x0fff0fff); 1613 nv_wr32(dev, 0x418410, 0x0fff0fff);
1575 nv_wr32(dev, 0x418414, 0x00200fff); 1614 nv_wr32(dev, 0x418414, chipset != 0xd9 ? 0x00200fff : 0x02200fff);
1576 nv_wr32(dev, 0x418450, 0x00000000); 1615 nv_wr32(dev, 0x418450, 0x00000000);
1577 nv_wr32(dev, 0x418454, 0x00000000); 1616 nv_wr32(dev, 0x418454, 0x00000000);
1578 nv_wr32(dev, 0x418458, 0x00000000); 1617 nv_wr32(dev, 0x418458, 0x00000000);
@@ -1587,14 +1626,17 @@ nvc0_grctx_generate_gpc(struct drm_device *dev)
1587 nv_wr32(dev, 0x418700, 0x00000002); 1626 nv_wr32(dev, 0x418700, 0x00000002);
1588 nv_wr32(dev, 0x418704, 0x00000080); 1627 nv_wr32(dev, 0x418704, 0x00000080);
1589 nv_wr32(dev, 0x418708, 0x00000000); 1628 nv_wr32(dev, 0x418708, 0x00000000);
1590 nv_wr32(dev, 0x41870c, 0x07c80000); 1629 nv_wr32(dev, 0x41870c, chipset != 0xd9 ? 0x07c80000 : 0x00000000);
1591 nv_wr32(dev, 0x418710, 0x00000000); 1630 nv_wr32(dev, 0x418710, 0x00000000);
1592 nv_wr32(dev, 0x418800, 0x0006860a); 1631 nv_wr32(dev, 0x418800, chipset != 0xd9 ? 0x0006860a : 0x7006860a);
1593 nv_wr32(dev, 0x418808, 0x00000000); 1632 nv_wr32(dev, 0x418808, 0x00000000);
1594 nv_wr32(dev, 0x41880c, 0x00000000); 1633 nv_wr32(dev, 0x41880c, 0x00000000);
1595 nv_wr32(dev, 0x418810, 0x00000000); 1634 nv_wr32(dev, 0x418810, 0x00000000);
1596 nv_wr32(dev, 0x418828, 0x00008442); 1635 nv_wr32(dev, 0x418828, 0x00008442);
1597 nv_wr32(dev, 0x418830, chipset != 0xc1 ? 0x00000001 : 0x10000001); 1636 if (chipset == 0xc1 || chipset == 0xd9)
1637 nv_wr32(dev, 0x418830, 0x10000001);
1638 else
1639 nv_wr32(dev, 0x418830, 0x00000001);
1598 nv_wr32(dev, 0x4188d8, 0x00000008); 1640 nv_wr32(dev, 0x4188d8, 0x00000008);
1599 nv_wr32(dev, 0x4188e0, 0x01000000); 1641 nv_wr32(dev, 0x4188e0, 0x01000000);
1600 nv_wr32(dev, 0x4188e8, 0x00000000); 1642 nv_wr32(dev, 0x4188e8, 0x00000000);
@@ -1602,7 +1644,12 @@ nvc0_grctx_generate_gpc(struct drm_device *dev)
1602 nv_wr32(dev, 0x4188f0, 0x00000000); 1644 nv_wr32(dev, 0x4188f0, 0x00000000);
1603 nv_wr32(dev, 0x4188f4, 0x00000000); 1645 nv_wr32(dev, 0x4188f4, 0x00000000);
1604 nv_wr32(dev, 0x4188f8, 0x00000000); 1646 nv_wr32(dev, 0x4188f8, 0x00000000);
1605 nv_wr32(dev, 0x4188fc, chipset != 0xc1 ? 0x00100000 : 0x00100018); 1647 if (chipset == 0xd9)
1648 nv_wr32(dev, 0x4188fc, 0x20100008);
1649 else if (chipset == 0xc1)
1650 nv_wr32(dev, 0x4188fc, 0x00100018);
1651 else
1652 nv_wr32(dev, 0x4188fc, 0x00100000);
1606 nv_wr32(dev, 0x41891c, 0x00ff00ff); 1653 nv_wr32(dev, 0x41891c, 0x00ff00ff);
1607 nv_wr32(dev, 0x418924, 0x00000000); 1654 nv_wr32(dev, 0x418924, 0x00000000);
1608 nv_wr32(dev, 0x418928, 0x00ffff00); 1655 nv_wr32(dev, 0x418928, 0x00ffff00);
@@ -1616,7 +1663,7 @@ nvc0_grctx_generate_gpc(struct drm_device *dev)
1616 nv_wr32(dev, 0x418a14 + (i * 0x20), 0x00000000); 1663 nv_wr32(dev, 0x418a14 + (i * 0x20), 0x00000000);
1617 nv_wr32(dev, 0x418a18 + (i * 0x20), 0x00000000); 1664 nv_wr32(dev, 0x418a18 + (i * 0x20), 0x00000000);
1618 } 1665 }
1619 nv_wr32(dev, 0x418b00, 0x00000000); 1666 nv_wr32(dev, 0x418b00, chipset != 0xd9 ? 0x00000000 : 0x00000006);
1620 nv_wr32(dev, 0x418b08, 0x0a418820); 1667 nv_wr32(dev, 0x418b08, 0x0a418820);
1621 nv_wr32(dev, 0x418b0c, 0x062080e6); 1668 nv_wr32(dev, 0x418b0c, 0x062080e6);
1622 nv_wr32(dev, 0x418b10, 0x020398a4); 1669 nv_wr32(dev, 0x418b10, 0x020398a4);
@@ -1633,7 +1680,7 @@ nvc0_grctx_generate_gpc(struct drm_device *dev)
1633 nv_wr32(dev, 0x418c24, 0x00000000); 1680 nv_wr32(dev, 0x418c24, 0x00000000);
1634 nv_wr32(dev, 0x418c28, 0x00000000); 1681 nv_wr32(dev, 0x418c28, 0x00000000);
1635 nv_wr32(dev, 0x418c2c, 0x00000000); 1682 nv_wr32(dev, 0x418c2c, 0x00000000);
1636 if (chipset == 0xc1) 1683 if (chipset == 0xc1 || chipset == 0xd9)
1637 nv_wr32(dev, 0x418c6c, 0x00000001); 1684 nv_wr32(dev, 0x418c6c, 0x00000001);
1638 nv_wr32(dev, 0x418c80, 0x20200004); 1685 nv_wr32(dev, 0x418c80, 0x20200004);
1639 nv_wr32(dev, 0x418c8c, 0x00000001); 1686 nv_wr32(dev, 0x418c8c, 0x00000001);
@@ -1653,7 +1700,10 @@ nvc0_grctx_generate_tp(struct drm_device *dev)
1653 nv_wr32(dev, 0x419818, 0x00000000); 1700 nv_wr32(dev, 0x419818, 0x00000000);
1654 nv_wr32(dev, 0x41983c, 0x00038bc7); 1701 nv_wr32(dev, 0x41983c, 0x00038bc7);
1655 nv_wr32(dev, 0x419848, 0x00000000); 1702 nv_wr32(dev, 0x419848, 0x00000000);
1656 nv_wr32(dev, 0x419864, chipset != 0xc1 ? 0x0000012a : 0x00000129); 1703 if (chipset == 0xc1 || chipset == 0xd9)
1704 nv_wr32(dev, 0x419864, 0x00000129);
1705 else
1706 nv_wr32(dev, 0x419864, 0x0000012a);
1657 nv_wr32(dev, 0x419888, 0x00000000); 1707 nv_wr32(dev, 0x419888, 0x00000000);
1658 nv_wr32(dev, 0x419a00, 0x000001f0); 1708 nv_wr32(dev, 0x419a00, 0x000001f0);
1659 nv_wr32(dev, 0x419a04, 0x00000001); 1709 nv_wr32(dev, 0x419a04, 0x00000001);
@@ -1663,7 +1713,9 @@ nvc0_grctx_generate_tp(struct drm_device *dev)
1663 nv_wr32(dev, 0x419a14, 0x00000200); 1713 nv_wr32(dev, 0x419a14, 0x00000200);
1664 nv_wr32(dev, 0x419a1c, 0x00000000); 1714 nv_wr32(dev, 0x419a1c, 0x00000000);
1665 nv_wr32(dev, 0x419a20, 0x00000800); 1715 nv_wr32(dev, 0x419a20, 0x00000800);
1666 if (chipset != 0xc0 && chipset != 0xc8) 1716 if (chipset == 0xd9)
1717 nv_wr32(dev, 0x00419ac4, 0x0017f440);
1718 else if (chipset != 0xc0 && chipset != 0xc8)
1667 nv_wr32(dev, 0x00419ac4, 0x0007f440); 1719 nv_wr32(dev, 0x00419ac4, 0x0007f440);
1668 nv_wr32(dev, 0x419b00, 0x0a418820); 1720 nv_wr32(dev, 0x419b00, 0x0a418820);
1669 nv_wr32(dev, 0x419b04, 0x062080e6); 1721 nv_wr32(dev, 0x419b04, 0x062080e6);
@@ -1672,21 +1724,33 @@ nvc0_grctx_generate_tp(struct drm_device *dev)
1672 nv_wr32(dev, 0x419b10, 0x0a418820); 1724 nv_wr32(dev, 0x419b10, 0x0a418820);
1673 nv_wr32(dev, 0x419b14, 0x000000e6); 1725 nv_wr32(dev, 0x419b14, 0x000000e6);
1674 nv_wr32(dev, 0x419bd0, 0x00900103); 1726 nv_wr32(dev, 0x419bd0, 0x00900103);
1675 nv_wr32(dev, 0x419be0, chipset != 0xc1 ? 0x00000001 : 0x00400001); 1727 if (chipset == 0xc1 || chipset == 0xd9)
1728 nv_wr32(dev, 0x419be0, 0x00400001);
1729 else
1730 nv_wr32(dev, 0x419be0, 0x00000001);
1676 nv_wr32(dev, 0x419be4, 0x00000000); 1731 nv_wr32(dev, 0x419be4, 0x00000000);
1677 nv_wr32(dev, 0x419c00, 0x00000002); 1732 nv_wr32(dev, 0x419c00, chipset != 0xd9 ? 0x00000002 : 0x0000000a);
1678 nv_wr32(dev, 0x419c04, 0x00000006); 1733 nv_wr32(dev, 0x419c04, 0x00000006);
1679 nv_wr32(dev, 0x419c08, 0x00000002); 1734 nv_wr32(dev, 0x419c08, 0x00000002);
1680 nv_wr32(dev, 0x419c20, 0x00000000); 1735 nv_wr32(dev, 0x419c20, 0x00000000);
1681 if (chipset == 0xce || chipset == 0xcf) 1736 if (dev_priv->chipset == 0xd9) {
1737 nv_wr32(dev, 0x419c24, 0x00084210);
1738 nv_wr32(dev, 0x419c28, 0x3cf3cf3c);
1682 nv_wr32(dev, 0x419cb0, 0x00020048); 1739 nv_wr32(dev, 0x419cb0, 0x00020048);
1683 else 1740 } else
1741 if (chipset == 0xce || chipset == 0xcf) {
1742 nv_wr32(dev, 0x419cb0, 0x00020048);
1743 } else {
1684 nv_wr32(dev, 0x419cb0, 0x00060048); 1744 nv_wr32(dev, 0x419cb0, 0x00060048);
1745 }
1685 nv_wr32(dev, 0x419ce8, 0x00000000); 1746 nv_wr32(dev, 0x419ce8, 0x00000000);
1686 nv_wr32(dev, 0x419cf4, 0x00000183); 1747 nv_wr32(dev, 0x419cf4, 0x00000183);
1687 nv_wr32(dev, 0x419d20, chipset != 0xc1 ? 0x02180000 : 0x12180000); 1748 if (chipset == 0xc1 || chipset == 0xd9)
1749 nv_wr32(dev, 0x419d20, 0x12180000);
1750 else
1751 nv_wr32(dev, 0x419d20, 0x02180000);
1688 nv_wr32(dev, 0x419d24, 0x00001fff); 1752 nv_wr32(dev, 0x419d24, 0x00001fff);
1689 if (chipset == 0xc1) 1753 if (chipset == 0xc1 || chipset == 0xd9)
1690 nv_wr32(dev, 0x419d44, 0x02180218); 1754 nv_wr32(dev, 0x419d44, 0x02180218);
1691 nv_wr32(dev, 0x419e04, 0x00000000); 1755 nv_wr32(dev, 0x419e04, 0x00000000);
1692 nv_wr32(dev, 0x419e08, 0x00000000); 1756 nv_wr32(dev, 0x419e08, 0x00000000);
@@ -1986,6 +2050,10 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
1986 nv_icmd(dev, 0x00000215, 0x00000040); 2050 nv_icmd(dev, 0x00000215, 0x00000040);
1987 nv_icmd(dev, 0x00000216, 0x00000040); 2051 nv_icmd(dev, 0x00000216, 0x00000040);
1988 nv_icmd(dev, 0x00000217, 0x00000040); 2052 nv_icmd(dev, 0x00000217, 0x00000040);
2053 if (dev_priv->chipset == 0xd9) {
2054 for (i = 0x0400; i <= 0x0417; i++)
2055 nv_icmd(dev, i, 0x00000040);
2056 }
1989 nv_icmd(dev, 0x00000218, 0x0000c080); 2057 nv_icmd(dev, 0x00000218, 0x0000c080);
1990 nv_icmd(dev, 0x00000219, 0x0000c080); 2058 nv_icmd(dev, 0x00000219, 0x0000c080);
1991 nv_icmd(dev, 0x0000021a, 0x0000c080); 2059 nv_icmd(dev, 0x0000021a, 0x0000c080);
@@ -1994,6 +2062,10 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
1994 nv_icmd(dev, 0x0000021d, 0x0000c080); 2062 nv_icmd(dev, 0x0000021d, 0x0000c080);
1995 nv_icmd(dev, 0x0000021e, 0x0000c080); 2063 nv_icmd(dev, 0x0000021e, 0x0000c080);
1996 nv_icmd(dev, 0x0000021f, 0x0000c080); 2064 nv_icmd(dev, 0x0000021f, 0x0000c080);
2065 if (dev_priv->chipset == 0xd9) {
2066 for (i = 0x0440; i <= 0x0457; i++)
2067 nv_icmd(dev, i, 0x0000c080);
2068 }
1997 nv_icmd(dev, 0x000000ad, 0x0000013e); 2069 nv_icmd(dev, 0x000000ad, 0x0000013e);
1998 nv_icmd(dev, 0x000000e1, 0x00000010); 2070 nv_icmd(dev, 0x000000e1, 0x00000010);
1999 nv_icmd(dev, 0x00000290, 0x00000000); 2071 nv_icmd(dev, 0x00000290, 0x00000000);
@@ -2556,7 +2628,8 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
2556 nv_icmd(dev, 0x0000053f, 0xffff0000); 2628 nv_icmd(dev, 0x0000053f, 0xffff0000);
2557 nv_icmd(dev, 0x00000585, 0x0000003f); 2629 nv_icmd(dev, 0x00000585, 0x0000003f);
2558 nv_icmd(dev, 0x00000576, 0x00000003); 2630 nv_icmd(dev, 0x00000576, 0x00000003);
2559 if (dev_priv->chipset == 0xc1) 2631 if (dev_priv->chipset == 0xc1 ||
2632 dev_priv->chipset == 0xd9)
2560 nv_icmd(dev, 0x0000057b, 0x00000059); 2633 nv_icmd(dev, 0x0000057b, 0x00000059);
2561 nv_icmd(dev, 0x00000586, 0x00000040); 2634 nv_icmd(dev, 0x00000586, 0x00000040);
2562 nv_icmd(dev, 0x00000582, 0x00000080); 2635 nv_icmd(dev, 0x00000582, 0x00000080);
@@ -2658,6 +2731,8 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
2658 nv_icmd(dev, 0x00000957, 0x00000003); 2731 nv_icmd(dev, 0x00000957, 0x00000003);
2659 nv_icmd(dev, 0x0000095e, 0x20164010); 2732 nv_icmd(dev, 0x0000095e, 0x20164010);
2660 nv_icmd(dev, 0x0000095f, 0x00000020); 2733 nv_icmd(dev, 0x0000095f, 0x00000020);
2734 if (dev_priv->chipset == 0xd9)
2735 nv_icmd(dev, 0x0000097d, 0x00000020);
2661 nv_icmd(dev, 0x00000683, 0x00000006); 2736 nv_icmd(dev, 0x00000683, 0x00000006);
2662 nv_icmd(dev, 0x00000685, 0x003fffff); 2737 nv_icmd(dev, 0x00000685, 0x003fffff);
2663 nv_icmd(dev, 0x00000687, 0x00000c48); 2738 nv_icmd(dev, 0x00000687, 0x00000c48);
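
[annotation] A readability note on the new hunks in this file: folding the chipset test into the loop condition, as in "for (i = 0; dev_priv->chipset == 0xd9 && i < 4; i++)", makes the loop run four times on 0xd9 and zero times on everything else. It is exactly equivalent to the more conventional form:

if (dev_priv->chipset == 0xd9) {
    for (i = 0; i < 4; i++) {
        nv_mthd(dev, 0x90c0, 0x2700 + (i * 0x40), 0x00000000);
        /* ... remaining 0x90c0 methods for this block ... */
    }
}
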
diff --git a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc
index 06f5e26d1e0..15272be33b6 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc
+++ b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc
@@ -32,7 +32,7 @@
32 * - watchdog timer around ctx operations 32 * - watchdog timer around ctx operations
33 */ 33 */
34 34
35.section nvc0_grgpc_data 35.section #nvc0_grgpc_data
36include(`nvc0_graph.fuc') 36include(`nvc0_graph.fuc')
37gpc_id: .b32 0 37gpc_id: .b32 0
38gpc_mmio_list_head: .b32 0 38gpc_mmio_list_head: .b32 0
@@ -48,40 +48,45 @@ cmd_queue: queue_init
48// chipset descriptions 48// chipset descriptions
49chipsets: 49chipsets:
50.b8 0xc0 0 0 0 50.b8 0xc0 0 0 0
51.b16 nvc0_gpc_mmio_head 51.b16 #nvc0_gpc_mmio_head
52.b16 nvc0_gpc_mmio_tail 52.b16 #nvc0_gpc_mmio_tail
53.b16 nvc0_tpc_mmio_head 53.b16 #nvc0_tpc_mmio_head
54.b16 nvc0_tpc_mmio_tail 54.b16 #nvc0_tpc_mmio_tail
55.b8 0xc1 0 0 0 55.b8 0xc1 0 0 0
56.b16 nvc0_gpc_mmio_head 56.b16 #nvc0_gpc_mmio_head
57.b16 nvc1_gpc_mmio_tail 57.b16 #nvc1_gpc_mmio_tail
58.b16 nvc0_tpc_mmio_head 58.b16 #nvc0_tpc_mmio_head
59.b16 nvc1_tpc_mmio_tail 59.b16 #nvc1_tpc_mmio_tail
60.b8 0xc3 0 0 0 60.b8 0xc3 0 0 0
61.b16 nvc0_gpc_mmio_head 61.b16 #nvc0_gpc_mmio_head
62.b16 nvc0_gpc_mmio_tail 62.b16 #nvc0_gpc_mmio_tail
63.b16 nvc0_tpc_mmio_head 63.b16 #nvc0_tpc_mmio_head
64.b16 nvc3_tpc_mmio_tail 64.b16 #nvc3_tpc_mmio_tail
65.b8 0xc4 0 0 0 65.b8 0xc4 0 0 0
66.b16 nvc0_gpc_mmio_head 66.b16 #nvc0_gpc_mmio_head
67.b16 nvc0_gpc_mmio_tail 67.b16 #nvc0_gpc_mmio_tail
68.b16 nvc0_tpc_mmio_head 68.b16 #nvc0_tpc_mmio_head
69.b16 nvc3_tpc_mmio_tail 69.b16 #nvc3_tpc_mmio_tail
70.b8 0xc8 0 0 0 70.b8 0xc8 0 0 0
71.b16 nvc0_gpc_mmio_head 71.b16 #nvc0_gpc_mmio_head
72.b16 nvc0_gpc_mmio_tail 72.b16 #nvc0_gpc_mmio_tail
73.b16 nvc0_tpc_mmio_head 73.b16 #nvc0_tpc_mmio_head
74.b16 nvc0_tpc_mmio_tail 74.b16 #nvc0_tpc_mmio_tail
75.b8 0xce 0 0 0 75.b8 0xce 0 0 0
76.b16 nvc0_gpc_mmio_head 76.b16 #nvc0_gpc_mmio_head
77.b16 nvc0_gpc_mmio_tail 77.b16 #nvc0_gpc_mmio_tail
78.b16 nvc0_tpc_mmio_head 78.b16 #nvc0_tpc_mmio_head
79.b16 nvc3_tpc_mmio_tail 79.b16 #nvc3_tpc_mmio_tail
80.b8 0xcf 0 0 0 80.b8 0xcf 0 0 0
81.b16 nvc0_gpc_mmio_head 81.b16 #nvc0_gpc_mmio_head
82.b16 nvc0_gpc_mmio_tail 82.b16 #nvc0_gpc_mmio_tail
83.b16 nvc0_tpc_mmio_head 83.b16 #nvc0_tpc_mmio_head
84.b16 nvcf_tpc_mmio_tail 84.b16 #nvcf_tpc_mmio_tail
85.b8 0xd9 0 0 0
86.b16 #nvd9_gpc_mmio_head
87.b16 #nvd9_gpc_mmio_tail
88.b16 #nvd9_tpc_mmio_head
89.b16 #nvd9_tpc_mmio_tail
85.b8 0 0 0 0 90.b8 0 0 0 0
86 91
87// GPC mmio lists 92// GPC mmio lists
@@ -114,6 +119,35 @@ nvc0_gpc_mmio_tail:
114mmctx_data(0x000c6c, 1); 119mmctx_data(0x000c6c, 1);
115nvc1_gpc_mmio_tail: 120nvc1_gpc_mmio_tail:
116 121
122nvd9_gpc_mmio_head:
123mmctx_data(0x000380, 1)
124mmctx_data(0x000400, 2)
125mmctx_data(0x00040c, 3)
126mmctx_data(0x000450, 9)
127mmctx_data(0x000600, 1)
128mmctx_data(0x000684, 1)
129mmctx_data(0x000700, 5)
130mmctx_data(0x000800, 1)
131mmctx_data(0x000808, 3)
132mmctx_data(0x000828, 1)
133mmctx_data(0x000830, 1)
134mmctx_data(0x0008d8, 1)
135mmctx_data(0x0008e0, 1)
136mmctx_data(0x0008e8, 6)
137mmctx_data(0x00091c, 1)
138mmctx_data(0x000924, 3)
139mmctx_data(0x000b00, 1)
140mmctx_data(0x000b08, 6)
141mmctx_data(0x000bb8, 1)
142mmctx_data(0x000c08, 1)
143mmctx_data(0x000c10, 8)
144mmctx_data(0x000c6c, 1)
145mmctx_data(0x000c80, 1)
146mmctx_data(0x000c8c, 1)
147mmctx_data(0x001000, 3)
148mmctx_data(0x001014, 1)
149nvd9_gpc_mmio_tail:
150
117// TPC mmio lists 151// TPC mmio lists
118nvc0_tpc_mmio_head: 152nvc0_tpc_mmio_head:
119mmctx_data(0x000018, 1) 153mmctx_data(0x000018, 1)
@@ -146,9 +180,34 @@ nvc3_tpc_mmio_tail:
146mmctx_data(0x000544, 1) 180mmctx_data(0x000544, 1)
147nvc1_tpc_mmio_tail: 181nvc1_tpc_mmio_tail:
148 182
183nvd9_tpc_mmio_head:
184mmctx_data(0x000018, 1)
185mmctx_data(0x00003c, 1)
186mmctx_data(0x000048, 1)
187mmctx_data(0x000064, 1)
188mmctx_data(0x000088, 1)
189mmctx_data(0x000200, 6)
190mmctx_data(0x00021c, 2)
191mmctx_data(0x0002c4, 1)
192mmctx_data(0x000300, 6)
193mmctx_data(0x0003d0, 1)
194mmctx_data(0x0003e0, 2)
195mmctx_data(0x000400, 3)
196mmctx_data(0x000420, 3)
197mmctx_data(0x0004b0, 1)
198mmctx_data(0x0004e8, 1)
199mmctx_data(0x0004f4, 1)
200mmctx_data(0x000520, 2)
201mmctx_data(0x000544, 1)
202mmctx_data(0x000604, 4)
203mmctx_data(0x000644, 20)
204mmctx_data(0x000698, 1)
205mmctx_data(0x0006e0, 1)
206mmctx_data(0x000750, 3)
207nvd9_tpc_mmio_tail:
149 208
150.section nvc0_grgpc_code 209.section #nvc0_grgpc_code
151bra init 210bra #init
152define(`include_code') 211define(`include_code')
153include(`nvc0_graph.fuc') 212include(`nvc0_graph.fuc')
154 213
@@ -160,10 +219,10 @@ error:
160 push $r14 219 push $r14
161 mov $r14 -0x67ec // 0x9814 220 mov $r14 -0x67ec // 0x9814
162 sethi $r14 0x400000 221 sethi $r14 0x400000
163 call nv_wr32 // HUB_CTXCTL_CC_SCRATCH[5] = error code 222 call #nv_wr32 // HUB_CTXCTL_CC_SCRATCH[5] = error code
164 add b32 $r14 0x41c 223 add b32 $r14 0x41c
165 mov $r15 1 224 mov $r15 1
166 call nv_wr32 // HUB_CTXCTL_INTR_UP_SET 225 call #nv_wr32 // HUB_CTXCTL_INTR_UP_SET
167 pop $r14 226 pop $r14
168 ret 227 ret
169 228
@@ -190,7 +249,7 @@ init:
190 iowr I[$r1 + 0x000] $r2 // FIFO_ENABLE 249 iowr I[$r1 + 0x000] $r2 // FIFO_ENABLE
191 250
192 // setup i0 handler, and route all interrupts to it 251 // setup i0 handler, and route all interrupts to it
193 mov $r1 ih 252 mov $r1 #ih
194 mov $iv0 $r1 253 mov $iv0 $r1
195 mov $r1 0x400 254 mov $r1 0x400
196 iowr I[$r1 + 0x300] $r0 // INTR_DISPATCH 255 iowr I[$r1 + 0x300] $r0 // INTR_DISPATCH
@@ -210,24 +269,24 @@ init:
210 and $r2 0x1f 269 and $r2 0x1f
211 shl b32 $r3 $r2 270 shl b32 $r3 $r2
212 sub b32 $r3 1 271 sub b32 $r3 1
213 st b32 D[$r0 + tpc_count] $r2 272 st b32 D[$r0 + #tpc_count] $r2
214 st b32 D[$r0 + tpc_mask] $r3 273 st b32 D[$r0 + #tpc_mask] $r3
215 add b32 $r1 0x400 274 add b32 $r1 0x400
216 iord $r2 I[$r1 + 0x000] // MYINDEX 275 iord $r2 I[$r1 + 0x000] // MYINDEX
217 st b32 D[$r0 + gpc_id] $r2 276 st b32 D[$r0 + #gpc_id] $r2
218 277
219 // find context data for this chipset 278 // find context data for this chipset
220 mov $r2 0x800 279 mov $r2 0x800
221 shl b32 $r2 6 280 shl b32 $r2 6
222 iord $r2 I[$r2 + 0x000] // CC_SCRATCH[0] 281 iord $r2 I[$r2 + 0x000] // CC_SCRATCH[0]
223 mov $r1 chipsets - 12 282 mov $r1 #chipsets - 12
224 init_find_chipset: 283 init_find_chipset:
225 add b32 $r1 12 284 add b32 $r1 12
226 ld b32 $r3 D[$r1 + 0x00] 285 ld b32 $r3 D[$r1 + 0x00]
227 cmpu b32 $r3 $r2 286 cmpu b32 $r3 $r2
228 bra e init_context 287 bra e #init_context
229 cmpu b32 $r3 0 288 cmpu b32 $r3 0
230 bra ne init_find_chipset 289 bra ne #init_find_chipset
231 // unknown chipset 290 // unknown chipset
232 ret 291 ret
233 292
@@ -253,19 +312,19 @@ init:
253 clear b32 $r15 312 clear b32 $r15
254 ld b16 $r14 D[$r1 + 4] 313 ld b16 $r14 D[$r1 + 4]
255 ld b16 $r15 D[$r1 + 6] 314 ld b16 $r15 D[$r1 + 6]
256 st b16 D[$r0 + gpc_mmio_list_head] $r14 315 st b16 D[$r0 + #gpc_mmio_list_head] $r14
257 st b16 D[$r0 + gpc_mmio_list_tail] $r15 316 st b16 D[$r0 + #gpc_mmio_list_tail] $r15
258 call mmctx_size 317 call #mmctx_size
259 add b32 $r2 $r15 318 add b32 $r2 $r15
260 add b32 $r3 $r15 319 add b32 $r3 $r15
261 320
262 // calculate per-TPC mmio context size, store the list pointers 321 // calculate per-TPC mmio context size, store the list pointers
263 ld b16 $r14 D[$r1 + 8] 322 ld b16 $r14 D[$r1 + 8]
264 ld b16 $r15 D[$r1 + 10] 323 ld b16 $r15 D[$r1 + 10]
265 st b16 D[$r0 + tpc_mmio_list_head] $r14 324 st b16 D[$r0 + #tpc_mmio_list_head] $r14
266 st b16 D[$r0 + tpc_mmio_list_tail] $r15 325 st b16 D[$r0 + #tpc_mmio_list_tail] $r15
267 call mmctx_size 326 call #mmctx_size
268 ld b32 $r14 D[$r0 + tpc_count] 327 ld b32 $r14 D[$r0 + #tpc_count]
269 mulu $r14 $r15 328 mulu $r14 $r15
270 add b32 $r2 $r14 329 add b32 $r2 $r14
271 add b32 $r3 $r14 330 add b32 $r3 $r14
@@ -283,7 +342,7 @@ init:
283 342
284 // calculate size of strand context data 343 // calculate size of strand context data
285 mov b32 $r15 $r2 344 mov b32 $r15 $r2
286 call strand_ctx_init 345 call #strand_ctx_init
287 add b32 $r3 $r15 346 add b32 $r3 $r15
288 347
289 // save context size, and tell HUB we're done 348 // save context size, and tell HUB we're done
@@ -301,13 +360,13 @@ init:
301main: 360main:
302 bset $flags $p0 361 bset $flags $p0
303 sleep $p0 362 sleep $p0
304 mov $r13 cmd_queue 363 mov $r13 #cmd_queue
305 call queue_get 364 call #queue_get
306 bra $p1 main 365 bra $p1 #main
307 366
308 // 0x0000-0x0003 are all context transfers 367 // 0x0000-0x0003 are all context transfers
309 cmpu b32 $r14 0x04 368 cmpu b32 $r14 0x04
310 bra nc main_not_ctx_xfer 369 bra nc #main_not_ctx_xfer
311 // fetch $flags and mask off $p1/$p2 370 // fetch $flags and mask off $p1/$p2
312 mov $r1 $flags 371 mov $r1 $flags
313 mov $r2 0x0006 372 mov $r2 0x0006
@@ -318,14 +377,14 @@ main:
318 or $r1 $r14 377 or $r1 $r14
319 mov $flags $r1 378 mov $flags $r1
320 // transfer context data 379 // transfer context data
321 call ctx_xfer 380 call #ctx_xfer
322 bra main 381 bra #main
323 382
324 main_not_ctx_xfer: 383 main_not_ctx_xfer:
325 shl b32 $r15 $r14 16 384 shl b32 $r15 $r14 16
326 or $r15 E_BAD_COMMAND 385 or $r15 E_BAD_COMMAND
327 call error 386 call #error
328 bra main 387 bra #main
329 388
330// interrupt handler 389// interrupt handler
331ih: 390ih:
@@ -342,13 +401,13 @@ ih:
342 // incoming fifo command? 401 // incoming fifo command?
343 iord $r10 I[$r0 + 0x200] // INTR 402 iord $r10 I[$r0 + 0x200] // INTR
344 and $r11 $r10 0x00000004 403 and $r11 $r10 0x00000004
345 bra e ih_no_fifo 404 bra e #ih_no_fifo
346 // queue incoming fifo command for later processing 405 // queue incoming fifo command for later processing
347 mov $r11 0x1900 406 mov $r11 0x1900
348 mov $r13 cmd_queue 407 mov $r13 #cmd_queue
349 iord $r14 I[$r11 + 0x100] // FIFO_CMD 408 iord $r14 I[$r11 + 0x100] // FIFO_CMD
350 iord $r15 I[$r11 + 0x000] // FIFO_DATA 409 iord $r15 I[$r11 + 0x000] // FIFO_DATA
351 call queue_put 410 call #queue_put
352 add b32 $r11 0x400 411 add b32 $r11 0x400
353 mov $r14 1 412 mov $r14 1
354 iowr I[$r11 + 0x000] $r14 // FIFO_ACK 413 iowr I[$r11 + 0x000] $r14 // FIFO_ACK
@@ -374,11 +433,11 @@ ih:
374// 433//
375hub_barrier_done: 434hub_barrier_done:
376 mov $r15 1 435 mov $r15 1
377 ld b32 $r14 D[$r0 + gpc_id] 436 ld b32 $r14 D[$r0 + #gpc_id]
378 shl b32 $r15 $r14 437 shl b32 $r15 $r14
379 mov $r14 -0x6be8 // 0x409418 - HUB_BAR_SET 438 mov $r14 -0x6be8 // 0x409418 - HUB_BAR_SET
380 sethi $r14 0x400000 439 sethi $r14 0x400000
381 call nv_wr32 440 call #nv_wr32
382 ret 441 ret
383 442
384// Disables various things, waits a bit, and re-enables them.. 443// Disables various things, waits a bit, and re-enables them..
@@ -395,7 +454,7 @@ ctx_redswitch:
395 mov $r15 8 454 mov $r15 8
396 ctx_redswitch_delay: 455 ctx_redswitch_delay:
397 sub b32 $r15 1 456 sub b32 $r15 1
398 bra ne ctx_redswitch_delay 457 bra ne #ctx_redswitch_delay
399 mov $r15 0xa20 458 mov $r15 0xa20
400 iowr I[$r14] $r15 // GPC_RED_SWITCH = UNK11, ENABLE, POWER 459 iowr I[$r14] $r15 // GPC_RED_SWITCH = UNK11, ENABLE, POWER
401 ret 460 ret
@@ -413,8 +472,8 @@ ctx_xfer:
413 mov $r1 0xa04 472 mov $r1 0xa04
414 shl b32 $r1 6 473 shl b32 $r1 6
415 iowr I[$r1 + 0x000] $r15// MEM_BASE 474 iowr I[$r1 + 0x000] $r15// MEM_BASE
416 bra not $p1 ctx_xfer_not_load 475 bra not $p1 #ctx_xfer_not_load
417 call ctx_redswitch 476 call #ctx_redswitch
418 ctx_xfer_not_load: 477 ctx_xfer_not_load:
419 478
420 // strands 479 // strands
@@ -422,7 +481,7 @@ ctx_xfer:
422 sethi $r1 0x20000 481 sethi $r1 0x20000
423 mov $r2 0xc 482 mov $r2 0xc
424 iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0c 483 iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0c
425 call strand_wait 484 call #strand_wait
426 mov $r2 0x47fc 485 mov $r2 0x47fc
427 sethi $r2 0x20000 486 sethi $r2 0x20000
428 iowr I[$r2] $r0 // STRAND_FIRST_GENE(0x3f) = 0x00 487 iowr I[$r2] $r0 // STRAND_FIRST_GENE(0x3f) = 0x00
@@ -435,46 +494,46 @@ ctx_xfer:
435 or $r10 2 // first 494 or $r10 2 // first
436 mov $r11 0x0000 495 mov $r11 0x0000
437 sethi $r11 0x500000 496 sethi $r11 0x500000
438 ld b32 $r12 D[$r0 + gpc_id] 497 ld b32 $r12 D[$r0 + #gpc_id]
439 shl b32 $r12 15 498 shl b32 $r12 15
440 add b32 $r11 $r12 // base = NV_PGRAPH_GPCn 499 add b32 $r11 $r12 // base = NV_PGRAPH_GPCn
441 ld b32 $r12 D[$r0 + gpc_mmio_list_head] 500 ld b32 $r12 D[$r0 + #gpc_mmio_list_head]
442 ld b32 $r13 D[$r0 + gpc_mmio_list_tail] 501 ld b32 $r13 D[$r0 + #gpc_mmio_list_tail]
443 mov $r14 0 // not multi 502 mov $r14 0 // not multi
444 call mmctx_xfer 503 call #mmctx_xfer
445 504
446 // per-TPC mmio context 505 // per-TPC mmio context
447 xbit $r10 $flags $p1 // direction 506 xbit $r10 $flags $p1 // direction
448 or $r10 4 // last 507 or $r10 4 // last
449 mov $r11 0x4000 508 mov $r11 0x4000
450 sethi $r11 0x500000 // base = NV_PGRAPH_GPC0_TPC0 509 sethi $r11 0x500000 // base = NV_PGRAPH_GPC0_TPC0
451 ld b32 $r12 D[$r0 + gpc_id] 510 ld b32 $r12 D[$r0 + #gpc_id]
452 shl b32 $r12 15 511 shl b32 $r12 15
453 add b32 $r11 $r12 // base = NV_PGRAPH_GPCn_TPC0 512 add b32 $r11 $r12 // base = NV_PGRAPH_GPCn_TPC0
454 ld b32 $r12 D[$r0 + tpc_mmio_list_head] 513 ld b32 $r12 D[$r0 + #tpc_mmio_list_head]
455 ld b32 $r13 D[$r0 + tpc_mmio_list_tail] 514 ld b32 $r13 D[$r0 + #tpc_mmio_list_tail]
456 ld b32 $r15 D[$r0 + tpc_mask] 515 ld b32 $r15 D[$r0 + #tpc_mask]
457 mov $r14 0x800 // stride = 0x800 516 mov $r14 0x800 // stride = 0x800
458 call mmctx_xfer 517 call #mmctx_xfer
459 518
460 // wait for strands to finish 519 // wait for strands to finish
461 call strand_wait 520 call #strand_wait
462 521
463 // if load, or a save without a load following, do some 522 // if load, or a save without a load following, do some
464 // unknown stuff that's done after finishing a block of 523 // unknown stuff that's done after finishing a block of
465 // strand commands 524 // strand commands
466 bra $p1 ctx_xfer_post 525 bra $p1 #ctx_xfer_post
467 bra not $p2 ctx_xfer_done 526 bra not $p2 #ctx_xfer_done
468 ctx_xfer_post: 527 ctx_xfer_post:
469 mov $r1 0x4afc 528 mov $r1 0x4afc
470 sethi $r1 0x20000 529 sethi $r1 0x20000
471 mov $r2 0xd 530 mov $r2 0xd
472 iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0d 531 iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0d
473 call strand_wait 532 call #strand_wait
474 533
475 // mark completion in HUB's barrier 534 // mark completion in HUB's barrier
476 ctx_xfer_done: 535 ctx_xfer_done:
477 call hub_barrier_done 536 call #hub_barrier_done
478 ret 537 ret
479 538
480.align 256 539.align 256
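
[annotation] The chipsets: table that init_find_chipset walks is a packed array of fixed-size records — 12 bytes here (an id word plus four 16-bit list pointers), 8 bytes in the hub variant below (id word plus two pointers) — which is why the two search loops step by "add b32 $r1 12" and "add b32 $r15 8" respectively. In C terms, with names of my choosing:

#include <stdint.h>

/* one 12-byte entry of the GPC chipsets: table; the hub table drops the
 * tpc pair and is 8 bytes per entry */
struct gpc_chipset_desc {
    uint32_t chipset;            /* .b8 0xXX 0 0 0; 0 terminates the table */
    uint16_t gpc_mmio_head, gpc_mmio_tail;
    uint16_t tpc_mmio_head, tpc_mmio_tail;
};
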
diff --git a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h
index 6f820324480..a988b8ad00a 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h
+++ b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h
@@ -25,26 +25,29 @@ uint32_t nvc0_grgpc_data[] = {
25 0x00000000, 25 0x00000000,
26 0x00000000, 26 0x00000000,
27 0x000000c0, 27 0x000000c0,
28 0x011c00bc, 28 0x012800c8,
29 0x01700120, 29 0x01e40194,
30 0x000000c1, 30 0x000000c1,
31 0x012000bc, 31 0x012c00c8,
32 0x01840120, 32 0x01f80194,
33 0x000000c3, 33 0x000000c3,
34 0x011c00bc, 34 0x012800c8,
35 0x01800120, 35 0x01f40194,
36 0x000000c4, 36 0x000000c4,
37 0x011c00bc, 37 0x012800c8,
38 0x01800120, 38 0x01f40194,
39 0x000000c8, 39 0x000000c8,
40 0x011c00bc, 40 0x012800c8,
41 0x01700120, 41 0x01e40194,
42 0x000000ce, 42 0x000000ce,
43 0x011c00bc, 43 0x012800c8,
44 0x01800120, 44 0x01f40194,
45 0x000000cf, 45 0x000000cf,
46 0x011c00bc, 46 0x012800c8,
47 0x017c0120, 47 0x01f00194,
48 0x000000d9,
49 0x0194012c,
50 0x025401f8,
48 0x00000000, 51 0x00000000,
49 0x00000380, 52 0x00000380,
50 0x14000400, 53 0x14000400,
@@ -71,6 +74,32 @@ uint32_t nvc0_grgpc_data[] = {
71 0x08001000, 74 0x08001000,
72 0x00001014, 75 0x00001014,
73 0x00000c6c, 76 0x00000c6c,
77 0x00000380,
78 0x04000400,
79 0x0800040c,
80 0x20000450,
81 0x00000600,
82 0x00000684,
83 0x10000700,
84 0x00000800,
85 0x08000808,
86 0x00000828,
87 0x00000830,
88 0x000008d8,
89 0x000008e0,
90 0x140008e8,
91 0x0000091c,
92 0x08000924,
93 0x00000b00,
94 0x14000b08,
95 0x00000bb8,
96 0x00000c08,
97 0x1c000c10,
98 0x00000c6c,
99 0x00000c80,
100 0x00000c8c,
101 0x08001000,
102 0x00001014,
74 0x00000018, 103 0x00000018,
75 0x0000003c, 104 0x0000003c,
76 0x00000048, 105 0x00000048,
@@ -96,6 +125,29 @@ uint32_t nvc0_grgpc_data[] = {
96 0x000006e0, 125 0x000006e0,
97 0x000004bc, 126 0x000004bc,
98 0x00000544, 127 0x00000544,
128 0x00000018,
129 0x0000003c,
130 0x00000048,
131 0x00000064,
132 0x00000088,
133 0x14000200,
134 0x0400021c,
135 0x000002c4,
136 0x14000300,
137 0x000003d0,
138 0x040003e0,
139 0x08000400,
140 0x08000420,
141 0x000004b0,
142 0x000004e8,
143 0x000004f4,
144 0x04000520,
145 0x00000544,
146 0x0c000604,
147 0x4c000644,
148 0x00000698,
149 0x000006e0,
150 0x08000750,
99}; 151};
100 152
101uint32_t nvc0_grgpc_code[] = { 153uint32_t nvc0_grgpc_code[] = {
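
[annotation] Cross-checking this generated array against the mmctx_data(addr, count) lines in nvc0_grgpc.fuc reveals the word encoding: count minus one in the top bits, address in the low bits — 0x04000400 is two words at 0x400, 0x20000450 nine words at 0x450, 0x4c000644 twenty words at 0x644. A macro reproducing it, derived purely from that comparison:

/* inferred: mmctx_data(addr, count) -> ((count - 1) << 26) | addr */
#define MMCTX_DATA(addr, count) ((((count) - 1) << 26) | (addr))
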
diff --git a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc
index e4f8c7e89dd..98acddb2c5b 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc
+++ b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc
@@ -27,7 +27,7 @@
27 * m4 nvc0_grhub.fuc | envyas -a -w -m fuc -V nva3 -o nvc0_grhub.fuc.h 27 * m4 nvc0_grhub.fuc | envyas -a -w -m fuc -V nva3 -o nvc0_grhub.fuc.h
28 */ 28 */
29 29
30.section nvc0_grhub_data 30.section #nvc0_grhub_data
31include(`nvc0_graph.fuc') 31include(`nvc0_graph.fuc')
32gpc_count: .b32 0 32gpc_count: .b32 0
33rop_count: .b32 0 33rop_count: .b32 0
@@ -39,26 +39,29 @@ ctx_current: .b32 0
39 39
40chipsets: 40chipsets:
41.b8 0xc0 0 0 0 41.b8 0xc0 0 0 0
42.b16 nvc0_hub_mmio_head 42.b16 #nvc0_hub_mmio_head
43.b16 nvc0_hub_mmio_tail 43.b16 #nvc0_hub_mmio_tail
44.b8 0xc1 0 0 0 44.b8 0xc1 0 0 0
45.b16 nvc0_hub_mmio_head 45.b16 #nvc0_hub_mmio_head
46.b16 nvc1_hub_mmio_tail 46.b16 #nvc1_hub_mmio_tail
47.b8 0xc3 0 0 0 47.b8 0xc3 0 0 0
48.b16 nvc0_hub_mmio_head 48.b16 #nvc0_hub_mmio_head
49.b16 nvc0_hub_mmio_tail 49.b16 #nvc0_hub_mmio_tail
50.b8 0xc4 0 0 0 50.b8 0xc4 0 0 0
51.b16 nvc0_hub_mmio_head 51.b16 #nvc0_hub_mmio_head
52.b16 nvc0_hub_mmio_tail 52.b16 #nvc0_hub_mmio_tail
53.b8 0xc8 0 0 0 53.b8 0xc8 0 0 0
54.b16 nvc0_hub_mmio_head 54.b16 #nvc0_hub_mmio_head
55.b16 nvc0_hub_mmio_tail 55.b16 #nvc0_hub_mmio_tail
56.b8 0xce 0 0 0 56.b8 0xce 0 0 0
57.b16 nvc0_hub_mmio_head 57.b16 #nvc0_hub_mmio_head
58.b16 nvc0_hub_mmio_tail 58.b16 #nvc0_hub_mmio_tail
59.b8 0xcf 0 0 0 59.b8 0xcf 0 0 0
60.b16 nvc0_hub_mmio_head 60.b16 #nvc0_hub_mmio_head
61.b16 nvc0_hub_mmio_tail 61.b16 #nvc0_hub_mmio_tail
62.b8 0xd9 0 0 0
63.b16 #nvd9_hub_mmio_head
64.b16 #nvd9_hub_mmio_tail
62.b8 0 0 0 0 65.b8 0 0 0 0
63 66
64nvc0_hub_mmio_head: 67nvc0_hub_mmio_head:
@@ -105,6 +108,48 @@ nvc0_hub_mmio_tail:
105mmctx_data(0x4064c0, 2) 108mmctx_data(0x4064c0, 2)
106nvc1_hub_mmio_tail: 109nvc1_hub_mmio_tail:
107 110
111nvd9_hub_mmio_head:
112mmctx_data(0x17e91c, 2)
113mmctx_data(0x400204, 2)
114mmctx_data(0x404004, 10)
115mmctx_data(0x404044, 1)
116mmctx_data(0x404094, 14)
117mmctx_data(0x4040d0, 7)
118mmctx_data(0x4040f8, 1)
119mmctx_data(0x404130, 3)
120mmctx_data(0x404150, 3)
121mmctx_data(0x404164, 2)
122mmctx_data(0x404178, 2)
123mmctx_data(0x404200, 8)
124mmctx_data(0x404404, 14)
125mmctx_data(0x404460, 4)
126mmctx_data(0x404480, 1)
127mmctx_data(0x404498, 1)
128mmctx_data(0x404604, 4)
129mmctx_data(0x404618, 32)
130mmctx_data(0x404698, 21)
131mmctx_data(0x4046f0, 2)
132mmctx_data(0x404700, 22)
133mmctx_data(0x405800, 1)
134mmctx_data(0x405830, 3)
135mmctx_data(0x405854, 1)
136mmctx_data(0x405870, 4)
137mmctx_data(0x405a00, 2)
138mmctx_data(0x405a18, 1)
139mmctx_data(0x406020, 1)
140mmctx_data(0x406028, 4)
141mmctx_data(0x4064a8, 2)
142mmctx_data(0x4064b4, 5)
143mmctx_data(0x407804, 1)
144mmctx_data(0x40780c, 6)
145mmctx_data(0x4078bc, 1)
146mmctx_data(0x408000, 7)
147mmctx_data(0x408064, 1)
148mmctx_data(0x408800, 3)
149mmctx_data(0x408900, 4)
150mmctx_data(0x408980, 1)
151nvd9_hub_mmio_tail:
152
108.align 256 153.align 256
109chan_data: 154chan_data:
110chan_mmio_count: .b32 0 155chan_mmio_count: .b32 0
@@ -113,8 +158,8 @@ chan_mmio_address: .b32 0
113.align 256 158.align 256
114xfer_data: .b32 0 159xfer_data: .b32 0
115 160
116.section nvc0_grhub_code 161.section #nvc0_grhub_code
117bra init 162bra #init
118define(`include_code') 163define(`include_code')
119include(`nvc0_graph.fuc') 164include(`nvc0_graph.fuc')
120 165
@@ -157,7 +202,7 @@ init:
157 iowr I[$r1 + 0x000] $r2 // FIFO_ENABLE 202 iowr I[$r1 + 0x000] $r2 // FIFO_ENABLE
158 203
159 // setup i0 handler, and route all interrupts to it 204 // setup i0 handler, and route all interrupts to it
160 mov $r1 ih 205 mov $r1 #ih
161 mov $iv0 $r1 206 mov $iv0 $r1
162 mov $r1 0x400 207 mov $r1 0x400
163 iowr I[$r1 + 0x300] $r0 // INTR_DISPATCH 208 iowr I[$r1 + 0x300] $r0 // INTR_DISPATCH
@@ -201,11 +246,11 @@ init:
201 // fetch enabled GPC/ROP counts 246 // fetch enabled GPC/ROP counts
202 mov $r14 -0x69fc // 0x409604 247 mov $r14 -0x69fc // 0x409604
203 sethi $r14 0x400000 248 sethi $r14 0x400000
204 call nv_rd32 249 call #nv_rd32
205 extr $r1 $r15 16:20 250 extr $r1 $r15 16:20
206 st b32 D[$r0 + rop_count] $r1 251 st b32 D[$r0 + #rop_count] $r1
207 and $r15 0x1f 252 and $r15 0x1f
208 st b32 D[$r0 + gpc_count] $r15 253 st b32 D[$r0 + #gpc_count] $r15
209 254
210 // set BAR_REQMASK to GPC mask 255 // set BAR_REQMASK to GPC mask
211 mov $r1 1 256 mov $r1 1
@@ -220,14 +265,14 @@ init:
220 mov $r2 0x800 265 mov $r2 0x800
221 shl b32 $r2 6 266 shl b32 $r2 6
222 iord $r2 I[$r2 + 0x000] // CC_SCRATCH[0] 267 iord $r2 I[$r2 + 0x000] // CC_SCRATCH[0]
223 mov $r15 chipsets - 8 268 mov $r15 #chipsets - 8
224 init_find_chipset: 269 init_find_chipset:
225 add b32 $r15 8 270 add b32 $r15 8
226 ld b32 $r3 D[$r15 + 0x00] 271 ld b32 $r3 D[$r15 + 0x00]
227 cmpu b32 $r3 $r2 272 cmpu b32 $r3 $r2
228 bra e init_context 273 bra e #init_context
229 cmpu b32 $r3 0 274 cmpu b32 $r3 0
230 bra ne init_find_chipset 275 bra ne #init_find_chipset
231 // unknown chipset 276 // unknown chipset
232 ret 277 ret
233 278
@@ -239,9 +284,9 @@ init:
239 ld b16 $r14 D[$r15 + 4] 284 ld b16 $r14 D[$r15 + 4]
240 ld b16 $r15 D[$r15 + 6] 285 ld b16 $r15 D[$r15 + 6]
241 sethi $r14 0 286 sethi $r14 0
242 st b32 D[$r0 + hub_mmio_list_head] $r14 287 st b32 D[$r0 + #hub_mmio_list_head] $r14
243 st b32 D[$r0 + hub_mmio_list_tail] $r15 288 st b32 D[$r0 + #hub_mmio_list_tail] $r15
244 call mmctx_size 289 call #mmctx_size
245 290
246 // set mmctx base addresses now so we don't have to do it later, 291 // set mmctx base addresses now so we don't have to do it later,
247 // they don't (currently) ever change 292 // they don't (currently) ever change
@@ -260,7 +305,7 @@ init:
260 add b32 $r1 1 305 add b32 $r1 1
261 shl b32 $r1 8 306 shl b32 $r1 8
262 mov b32 $r15 $r1 307 mov b32 $r15 $r1
263 call strand_ctx_init 308 call #strand_ctx_init
264 add b32 $r1 $r15 309 add b32 $r1 $r15
265 310
266 // initialise each GPC in sequence by passing in the offset of its 311 // initialise each GPC in sequence by passing in the offset of its
@@ -271,40 +316,40 @@ init:
271 // when it has completed, and return the size of its context data 316 // when it has completed, and return the size of its context data
272 // in GPCn_CC_SCRATCH[1] 317 // in GPCn_CC_SCRATCH[1]
273 // 318 //
274 ld b32 $r3 D[$r0 + gpc_count] 319 ld b32 $r3 D[$r0 + #gpc_count]
275 mov $r4 0x2000 320 mov $r4 0x2000
276 sethi $r4 0x500000 321 sethi $r4 0x500000
277 init_gpc: 322 init_gpc:
278 // setup, and start GPC ucode running 323 // setup, and start GPC ucode running
279 add b32 $r14 $r4 0x804 324 add b32 $r14 $r4 0x804
280 mov b32 $r15 $r1 325 mov b32 $r15 $r1
281 call nv_wr32 // CC_SCRATCH[1] = ctx offset 326 call #nv_wr32 // CC_SCRATCH[1] = ctx offset
282 add b32 $r14 $r4 0x800 327 add b32 $r14 $r4 0x800
283 mov b32 $r15 $r2 328 mov b32 $r15 $r2
284 call nv_wr32 // CC_SCRATCH[0] = chipset 329 call #nv_wr32 // CC_SCRATCH[0] = chipset
285 add b32 $r14 $r4 0x10c 330 add b32 $r14 $r4 0x10c
286 clear b32 $r15 331 clear b32 $r15
287 call nv_wr32 332 call #nv_wr32
288 add b32 $r14 $r4 0x104 333 add b32 $r14 $r4 0x104
289 call nv_wr32 // ENTRY 334 call #nv_wr32 // ENTRY
290 add b32 $r14 $r4 0x100 335 add b32 $r14 $r4 0x100
291 mov $r15 2 // CTRL_START_TRIGGER 336 mov $r15 2 // CTRL_START_TRIGGER
292 call nv_wr32 // CTRL 337 call #nv_wr32 // CTRL
293 338
294 // wait for it to complete, and adjust context size 339 // wait for it to complete, and adjust context size
295 add b32 $r14 $r4 0x800 340 add b32 $r14 $r4 0x800
296 init_gpc_wait: 341 init_gpc_wait:
297 call nv_rd32 342 call #nv_rd32
298 xbit $r15 $r15 31 343 xbit $r15 $r15 31
299 bra e init_gpc_wait 344 bra e #init_gpc_wait
300 add b32 $r14 $r4 0x804 345 add b32 $r14 $r4 0x804
301 call nv_rd32 346 call #nv_rd32
302 add b32 $r1 $r15 347 add b32 $r1 $r15
303 348
304 // next! 349 // next!
305 add b32 $r4 0x8000 350 add b32 $r4 0x8000
306 sub b32 $r3 1 351 sub b32 $r3 1
307 bra ne init_gpc 352 bra ne #init_gpc
308 353
309 // save context size, and tell host we're ready 354 // save context size, and tell host we're ready
310 mov $r2 0x800 355 mov $r2 0x800
@@ -322,13 +367,13 @@ main:
322 // sleep until we have something to do 367 // sleep until we have something to do
323 bset $flags $p0 368 bset $flags $p0
324 sleep $p0 369 sleep $p0
325 mov $r13 cmd_queue 370 mov $r13 #cmd_queue
326 call queue_get 371 call #queue_get
327 bra $p1 main 372 bra $p1 #main
328 373
329 // context switch, requested by GPU? 374 // context switch, requested by GPU?
330 cmpu b32 $r14 0x4001 375 cmpu b32 $r14 0x4001
331 bra ne main_not_ctx_switch 376 bra ne #main_not_ctx_switch
332 trace_set(T_AUTO) 377 trace_set(T_AUTO)
333 mov $r1 0xb00 378 mov $r1 0xb00
334 shl b32 $r1 6 379 shl b32 $r1 6
@@ -336,39 +381,39 @@ main:
336 iord $r1 I[$r1 + 0x000] // CHAN_CUR 381 iord $r1 I[$r1 + 0x000] // CHAN_CUR
337 382
338 xbit $r3 $r1 31 383 xbit $r3 $r1 31
339 bra e chsw_no_prev 384 bra e #chsw_no_prev
340 xbit $r3 $r2 31 385 xbit $r3 $r2 31
341 bra e chsw_prev_no_next 386 bra e #chsw_prev_no_next
342 push $r2 387 push $r2
343 mov b32 $r2 $r1 388 mov b32 $r2 $r1
344 trace_set(T_SAVE) 389 trace_set(T_SAVE)
345 bclr $flags $p1 390 bclr $flags $p1
346 bset $flags $p2 391 bset $flags $p2
347 call ctx_xfer 392 call #ctx_xfer
348 trace_clr(T_SAVE); 393 trace_clr(T_SAVE);
349 pop $r2 394 pop $r2
350 trace_set(T_LOAD); 395 trace_set(T_LOAD);
351 bset $flags $p1 396 bset $flags $p1
352 call ctx_xfer 397 call #ctx_xfer
353 trace_clr(T_LOAD); 398 trace_clr(T_LOAD);
354 bra chsw_done 399 bra #chsw_done
355 chsw_prev_no_next: 400 chsw_prev_no_next:
356 push $r2 401 push $r2
357 mov b32 $r2 $r1 402 mov b32 $r2 $r1
358 bclr $flags $p1 403 bclr $flags $p1
359 bclr $flags $p2 404 bclr $flags $p2
360 call ctx_xfer 405 call #ctx_xfer
361 pop $r2 406 pop $r2
362 mov $r1 0xb00 407 mov $r1 0xb00
363 shl b32 $r1 6 408 shl b32 $r1 6
364 iowr I[$r1] $r2 409 iowr I[$r1] $r2
365 bra chsw_done 410 bra #chsw_done
366 chsw_no_prev: 411 chsw_no_prev:
367 xbit $r3 $r2 31 412 xbit $r3 $r2 31
368 bra e chsw_done 413 bra e #chsw_done
369 bset $flags $p1 414 bset $flags $p1
370 bclr $flags $p2 415 bclr $flags $p2
371 call ctx_xfer 416 call #ctx_xfer
372 417
373 // ack the context switch request 418 // ack the context switch request
374 chsw_done: 419 chsw_done:
@@ -377,32 +422,32 @@ main:
377 mov $r2 1 422 mov $r2 1
378 iowr I[$r1 + 0x000] $r2 // 0x409b0c 423 iowr I[$r1 + 0x000] $r2 // 0x409b0c
379 trace_clr(T_AUTO) 424 trace_clr(T_AUTO)
380 bra main 425 bra #main
381 426
382 // request to set current channel? (*not* a context switch) 427 // request to set current channel? (*not* a context switch)
383 main_not_ctx_switch: 428 main_not_ctx_switch:
384 cmpu b32 $r14 0x0001 429 cmpu b32 $r14 0x0001
385 bra ne main_not_ctx_chan 430 bra ne #main_not_ctx_chan
386 mov b32 $r2 $r15 431 mov b32 $r2 $r15
387 call ctx_chan 432 call #ctx_chan
388 bra main_done 433 bra #main_done
389 434
390 // request to store current channel context? 435 // request to store current channel context?
391 main_not_ctx_chan: 436 main_not_ctx_chan:
392 cmpu b32 $r14 0x0002 437 cmpu b32 $r14 0x0002
393 bra ne main_not_ctx_save 438 bra ne #main_not_ctx_save
394 trace_set(T_SAVE) 439 trace_set(T_SAVE)
395 bclr $flags $p1 440 bclr $flags $p1
396 bclr $flags $p2 441 bclr $flags $p2
397 call ctx_xfer 442 call #ctx_xfer
398 trace_clr(T_SAVE) 443 trace_clr(T_SAVE)
399 bra main_done 444 bra #main_done
400 445
401 main_not_ctx_save: 446 main_not_ctx_save:
402 shl b32 $r15 $r14 16 447 shl b32 $r15 $r14 16
403 or $r15 E_BAD_COMMAND 448 or $r15 E_BAD_COMMAND
404 call error 449 call #error
405 bra main 450 bra #main
406 451
407 main_done: 452 main_done:
408 mov $r1 0x820 453 mov $r1 0x820
@@ -410,7 +455,7 @@ main:
410 clear b32 $r2 455 clear b32 $r2
411 bset $r2 31 456 bset $r2 31
412 iowr I[$r1 + 0x000] $r2 // CC_SCRATCH[0] |= 0x80000000 457 iowr I[$r1 + 0x000] $r2 // CC_SCRATCH[0] |= 0x80000000
413 bra main 458 bra #main
414 459
415// interrupt handler 460// interrupt handler
416ih: 461ih:
@@ -427,13 +472,13 @@ ih:
427 // incoming fifo command? 472 // incoming fifo command?
428 iord $r10 I[$r0 + 0x200] // INTR 473 iord $r10 I[$r0 + 0x200] // INTR
429 and $r11 $r10 0x00000004 474 and $r11 $r10 0x00000004
430 bra e ih_no_fifo 475 bra e #ih_no_fifo
431 // queue incoming fifo command for later processing 476 // queue incoming fifo command for later processing
432 mov $r11 0x1900 477 mov $r11 0x1900
433 mov $r13 cmd_queue 478 mov $r13 #cmd_queue
434 iord $r14 I[$r11 + 0x100] // FIFO_CMD 479 iord $r14 I[$r11 + 0x100] // FIFO_CMD
435 iord $r15 I[$r11 + 0x000] // FIFO_DATA 480 iord $r15 I[$r11 + 0x000] // FIFO_DATA
436 call queue_put 481 call #queue_put
437 add b32 $r11 0x400 482 add b32 $r11 0x400
438 mov $r14 1 483 mov $r14 1
439 iowr I[$r11 + 0x000] $r14 // FIFO_ACK 484 iowr I[$r11 + 0x000] $r14 // FIFO_ACK
@@ -441,18 +486,18 @@ ih:
441 // context switch request? 486 // context switch request?
442 ih_no_fifo: 487 ih_no_fifo:
443 and $r11 $r10 0x00000100 488 and $r11 $r10 0x00000100
444 bra e ih_no_ctxsw 489 bra e #ih_no_ctxsw
445 // enqueue a context switch for later processing 490 // enqueue a context switch for later processing
446 mov $r13 cmd_queue 491 mov $r13 #cmd_queue
447 mov $r14 0x4001 492 mov $r14 0x4001
448 call queue_put 493 call #queue_put
449 494
450 // anything we didn't handle, bring it to the host's attention 495 // anything we didn't handle, bring it to the host's attention
451 ih_no_ctxsw: 496 ih_no_ctxsw:
452 mov $r11 0x104 497 mov $r11 0x104
453 not b32 $r11 498 not b32 $r11
454 and $r11 $r10 $r11 499 and $r11 $r10 $r11
455 bra e ih_no_other 500 bra e #ih_no_other
456 mov $r10 0xc1c 501 mov $r10 0xc1c
457 shl b32 $r10 6 502 shl b32 $r10 6
458 iowr I[$r10] $r11 // INTR_UP_SET 503 iowr I[$r10] $r11 // INTR_UP_SET
@@ -478,11 +523,11 @@ ctx_4160s:
478 mov $r14 0x4160 523 mov $r14 0x4160
479 sethi $r14 0x400000 524 sethi $r14 0x400000
480 mov $r15 1 525 mov $r15 1
481 call nv_wr32 526 call #nv_wr32
482 ctx_4160s_wait: 527 ctx_4160s_wait:
483 call nv_rd32 528 call #nv_rd32
484 xbit $r15 $r15 4 529 xbit $r15 $r15 4
485 bra e ctx_4160s_wait 530 bra e #ctx_4160s_wait
486 ret 531 ret
487 532
488// Without clearing again at end of xfer, some things cause PGRAPH 533// Without clearing again at end of xfer, some things cause PGRAPH
@@ -492,7 +537,7 @@ ctx_4160c:
492 mov $r14 0x4160 537 mov $r14 0x4160
493 sethi $r14 0x400000 538 sethi $r14 0x400000
494 clear b32 $r15 539 clear b32 $r15
495 call nv_wr32 540 call #nv_wr32
496 ret 541 ret
497 542
498// Again, not really sure 543// Again, not really sure
@@ -503,7 +548,7 @@ ctx_4170s:
503 mov $r14 0x4170 548 mov $r14 0x4170
504 sethi $r14 0x400000 549 sethi $r14 0x400000
505 or $r15 0x10 550 or $r15 0x10
506 call nv_wr32 551 call #nv_wr32
507 ret 552 ret
508 553
509// Waits for a ctx_4170s() call to complete 554// Waits for a ctx_4170s() call to complete
@@ -511,9 +556,9 @@ ctx_4170s:
511ctx_4170w: 556ctx_4170w:
512 mov $r14 0x4170 557 mov $r14 0x4170
513 sethi $r14 0x400000 558 sethi $r14 0x400000
514 call nv_rd32 559 call #nv_rd32
515 and $r15 0x10 560 and $r15 0x10
516 bra ne ctx_4170w 561 bra ne #ctx_4170w
517 ret 562 ret
518 563
519// Disables various things, waits a bit, and re-enables them.. 564// Disables various things, waits a bit, and re-enables them..
@@ -530,7 +575,7 @@ ctx_redswitch:
530 mov $r15 8 575 mov $r15 8
531 ctx_redswitch_delay: 576 ctx_redswitch_delay:
532 sub b32 $r15 1 577 sub b32 $r15 1
533 bra ne ctx_redswitch_delay 578 bra ne #ctx_redswitch_delay
534 mov $r15 0x770 579 mov $r15 0x770
535 iowr I[$r14] $r15 // HUB_RED_SWITCH = ENABLE_ALL, POWER_ALL 580 iowr I[$r14] $r15 // HUB_RED_SWITCH = ENABLE_ALL, POWER_ALL
536 ret 581 ret
@@ -546,10 +591,10 @@ ctx_86c:
546 iowr I[$r14] $r15 // HUB(0x86c) = val 591 iowr I[$r14] $r15 // HUB(0x86c) = val
547 mov $r14 -0x75ec 592 mov $r14 -0x75ec
548 sethi $r14 0x400000 593 sethi $r14 0x400000
549 call nv_wr32 // ROP(0xa14) = val 594 call #nv_wr32 // ROP(0xa14) = val
550 mov $r14 -0x5794 595 mov $r14 -0x5794
551 sethi $r14 0x410000 596 sethi $r14 0x410000
552 call nv_wr32 // GPC(0x86c) = val 597 call #nv_wr32 // GPC(0x86c) = val
553 ret 598 ret
554 599
555// ctx_load - loads a channel's ctxctl data, and selects its vm 600// ctx_load - loads a channel's ctxctl data, and selects its vm
@@ -561,7 +606,7 @@ ctx_load:
561 606
562 // switch to channel, somewhat magic in parts.. 607 // switch to channel, somewhat magic in parts..
563 mov $r10 12 // DONE_UNK12 608 mov $r10 12 // DONE_UNK12
564 call wait_donez 609 call #wait_donez
565 mov $r1 0xa24 610 mov $r1 0xa24
566 shl b32 $r1 6 611 shl b32 $r1 6
567 iowr I[$r1 + 0x000] $r0 // 0x409a24 612 iowr I[$r1 + 0x000] $r0 // 0x409a24
@@ -576,7 +621,7 @@ ctx_load:
576 ctx_chan_wait_0: 621 ctx_chan_wait_0:
577 iord $r4 I[$r1 + 0x100] 622 iord $r4 I[$r1 + 0x100]
578 and $r4 0x1f 623 and $r4 0x1f
579 bra ne ctx_chan_wait_0 624 bra ne #ctx_chan_wait_0
580 iowr I[$r3 + 0x000] $r2 // CHAN_CUR 625 iowr I[$r3 + 0x000] $r2 // CHAN_CUR
581 626
582 // load channel header, fetch PGRAPH context pointer 627 // load channel header, fetch PGRAPH context pointer
@@ -595,19 +640,19 @@ ctx_load:
595 sethi $r2 0x80000000 640 sethi $r2 0x80000000
596 iowr I[$r1 + 0x000] $r2 // MEM_TARGET = vram 641 iowr I[$r1 + 0x000] $r2 // MEM_TARGET = vram
597 mov $r1 0x10 // chan + 0x0210 642 mov $r1 0x10 // chan + 0x0210
598 mov $r2 xfer_data 643 mov $r2 #xfer_data
599 sethi $r2 0x00020000 // 16 bytes 644 sethi $r2 0x00020000 // 16 bytes
600 xdld $r1 $r2 645 xdld $r1 $r2
601 xdwait 646 xdwait
602 trace_clr(T_LCHAN) 647 trace_clr(T_LCHAN)
603 648
604 // update current context 649 // update current context
605 ld b32 $r1 D[$r0 + xfer_data + 4] 650 ld b32 $r1 D[$r0 + #xfer_data + 4]
606 shl b32 $r1 24 651 shl b32 $r1 24
607 ld b32 $r2 D[$r0 + xfer_data + 0] 652 ld b32 $r2 D[$r0 + #xfer_data + 0]
608 shr b32 $r2 8 653 shr b32 $r2 8
609 or $r1 $r2 654 or $r1 $r2
610 st b32 D[$r0 + ctx_current] $r1 655 st b32 D[$r0 + #ctx_current] $r1
611 656
612 // set transfer base to start of context, and fetch context header 657 // set transfer base to start of context, and fetch context header
613 trace_set(T_LCTXH) 658 trace_set(T_LCTXH)
@@ -618,7 +663,7 @@ ctx_load:
618 mov $r1 0xa20 663 mov $r1 0xa20
619 shl b32 $r1 6 664 shl b32 $r1 6
620 iowr I[$r1 + 0x000] $r2 // MEM_TARGET = vm 665 iowr I[$r1 + 0x000] $r2 // MEM_TARGET = vm
621 mov $r1 chan_data 666 mov $r1 #chan_data
622 sethi $r1 0x00060000 // 256 bytes 667 sethi $r1 0x00060000 // 256 bytes
623 xdld $r0 $r1 668 xdld $r0 $r1
624 xdwait 669 xdwait
@@ -635,10 +680,10 @@ ctx_load:
635// In: $r2 channel address 680// In: $r2 channel address
636// 681//
637ctx_chan: 682ctx_chan:
638 call ctx_4160s 683 call #ctx_4160s
639 call ctx_load 684 call #ctx_load
640 mov $r10 12 // DONE_UNK12 685 mov $r10 12 // DONE_UNK12
641 call wait_donez 686 call #wait_donez
642 mov $r1 0xa10 687 mov $r1 0xa10
643 shl b32 $r1 6 688 shl b32 $r1 6
644 mov $r2 5 689 mov $r2 5
@@ -646,8 +691,8 @@ ctx_chan:
646 ctx_chan_wait: 691 ctx_chan_wait:
647 iord $r2 I[$r1 + 0x000] 692 iord $r2 I[$r1 + 0x000]
648 or $r2 $r2 693 or $r2 $r2
649 bra ne ctx_chan_wait 694 bra ne #ctx_chan_wait
650 call ctx_4160c 695 call #ctx_4160c
651 ret 696 ret
652 697
653// Execute per-context state overrides list 698// Execute per-context state overrides list
@@ -661,7 +706,7 @@ ctx_chan:
661// 706//
662ctx_mmio_exec: 707ctx_mmio_exec:
663 // set transfer base to be the mmio list 708 // set transfer base to be the mmio list
664 ld b32 $r3 D[$r0 + chan_mmio_address] 709 ld b32 $r3 D[$r0 + #chan_mmio_address]
665 mov $r2 0xa04 710 mov $r2 0xa04
666 shl b32 $r2 6 711 shl b32 $r2 6
667 iowr I[$r2 + 0x000] $r3 // MEM_BASE 712 iowr I[$r2 + 0x000] $r3 // MEM_BASE
@@ -670,31 +715,31 @@ ctx_mmio_exec:
670 ctx_mmio_loop: 715 ctx_mmio_loop:
671 // fetch next 256 bytes of mmio list if necessary 716 // fetch next 256 bytes of mmio list if necessary
672 and $r4 $r3 0xff 717 and $r4 $r3 0xff
673 bra ne ctx_mmio_pull 718 bra ne #ctx_mmio_pull
674 mov $r5 xfer_data 719 mov $r5 #xfer_data
675 sethi $r5 0x00060000 // 256 bytes 720 sethi $r5 0x00060000 // 256 bytes
676 xdld $r3 $r5 721 xdld $r3 $r5
677 xdwait 722 xdwait
678 723
679 // execute a single list entry 724 // execute a single list entry
680 ctx_mmio_pull: 725 ctx_mmio_pull:
681 ld b32 $r14 D[$r4 + xfer_data + 0x00] 726 ld b32 $r14 D[$r4 + #xfer_data + 0x00]
682 ld b32 $r15 D[$r4 + xfer_data + 0x04] 727 ld b32 $r15 D[$r4 + #xfer_data + 0x04]
683 call nv_wr32 728 call #nv_wr32
684 729
685 // next! 730 // next!
686 add b32 $r3 8 731 add b32 $r3 8
687 sub b32 $r1 1 732 sub b32 $r1 1
688 bra ne ctx_mmio_loop 733 bra ne #ctx_mmio_loop
689 734
690 // set transfer base back to the current context 735 // set transfer base back to the current context
691 ctx_mmio_done: 736 ctx_mmio_done:
692 ld b32 $r3 D[$r0 + ctx_current] 737 ld b32 $r3 D[$r0 + #ctx_current]
693 iowr I[$r2 + 0x000] $r3 // MEM_BASE 738 iowr I[$r2 + 0x000] $r3 // MEM_BASE
694 739
695 // disable the mmio list now, we don't need/want to execute it again 740 // disable the mmio list now, we don't need/want to execute it again
696 st b32 D[$r0 + chan_mmio_count] $r0 741 st b32 D[$r0 + #chan_mmio_count] $r0
697 mov $r1 chan_data 742 mov $r1 #chan_data
698 sethi $r1 0x00060000 // 256 bytes 743 sethi $r1 0x00060000 // 256 bytes
699 xdst $r0 $r1 744 xdst $r0 $r1
700 xdwait 745 xdwait
@@ -709,46 +754,46 @@ ctx_mmio_exec:
709// on load it means: "a save preceded this load" 754// on load it means: "a save preceded this load"
710// 755//
711ctx_xfer: 756ctx_xfer:
712 bra not $p1 ctx_xfer_pre 757 bra not $p1 #ctx_xfer_pre
713 bra $p2 ctx_xfer_pre_load 758 bra $p2 #ctx_xfer_pre_load
714 ctx_xfer_pre: 759 ctx_xfer_pre:
715 mov $r15 0x10 760 mov $r15 0x10
716 call ctx_86c 761 call #ctx_86c
717 call ctx_4160s 762 call #ctx_4160s
718 bra not $p1 ctx_xfer_exec 763 bra not $p1 #ctx_xfer_exec
719 764
720 ctx_xfer_pre_load: 765 ctx_xfer_pre_load:
721 mov $r15 2 766 mov $r15 2
722 call ctx_4170s 767 call #ctx_4170s
723 call ctx_4170w 768 call #ctx_4170w
724 call ctx_redswitch 769 call #ctx_redswitch
725 clear b32 $r15 770 clear b32 $r15
726 call ctx_4170s 771 call #ctx_4170s
727 call ctx_load 772 call #ctx_load
728 773
729 // fetch context pointer, and initiate xfer on all GPCs 774 // fetch context pointer, and initiate xfer on all GPCs
730 ctx_xfer_exec: 775 ctx_xfer_exec:
731 ld b32 $r1 D[$r0 + ctx_current] 776 ld b32 $r1 D[$r0 + #ctx_current]
732 mov $r2 0x414 777 mov $r2 0x414
733 shl b32 $r2 6 778 shl b32 $r2 6
734 iowr I[$r2 + 0x000] $r0 // BAR_STATUS = reset 779 iowr I[$r2 + 0x000] $r0 // BAR_STATUS = reset
735 mov $r14 -0x5b00 780 mov $r14 -0x5b00
736 sethi $r14 0x410000 781 sethi $r14 0x410000
737 mov b32 $r15 $r1 782 mov b32 $r15 $r1
738 call nv_wr32 // GPC_BCAST_WRCMD_DATA = ctx pointer 783 call #nv_wr32 // GPC_BCAST_WRCMD_DATA = ctx pointer
739 add b32 $r14 4 784 add b32 $r14 4
740 xbit $r15 $flags $p1 785 xbit $r15 $flags $p1
741 xbit $r2 $flags $p2 786 xbit $r2 $flags $p2
742 shl b32 $r2 1 787 shl b32 $r2 1
743 or $r15 $r2 788 or $r15 $r2
744 call nv_wr32 // GPC_BCAST_WRCMD_CMD = GPC_XFER(type) 789 call #nv_wr32 // GPC_BCAST_WRCMD_CMD = GPC_XFER(type)
745 790
746 // strands 791 // strands
747 mov $r1 0x4afc 792 mov $r1 0x4afc
748 sethi $r1 0x20000 793 sethi $r1 0x20000
749 mov $r2 0xc 794 mov $r2 0xc
750 iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0c 795 iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0c
751 call strand_wait 796 call #strand_wait
752 mov $r2 0x47fc 797 mov $r2 0x47fc
753 sethi $r2 0x20000 798 sethi $r2 0x20000
754 iowr I[$r2] $r0 // STRAND_FIRST_GENE(0x3f) = 0x00 799 iowr I[$r2] $r0 // STRAND_FIRST_GENE(0x3f) = 0x00
@@ -760,22 +805,22 @@ ctx_xfer:
760 xbit $r10 $flags $p1 // direction 805 xbit $r10 $flags $p1 // direction
761 or $r10 6 // first, last 806 or $r10 6 // first, last
762 mov $r11 0 // base = 0 807 mov $r11 0 // base = 0
763 ld b32 $r12 D[$r0 + hub_mmio_list_head] 808 ld b32 $r12 D[$r0 + #hub_mmio_list_head]
764 ld b32 $r13 D[$r0 + hub_mmio_list_tail] 809 ld b32 $r13 D[$r0 + #hub_mmio_list_tail]
765 mov $r14 0 // not multi 810 mov $r14 0 // not multi
766 call mmctx_xfer 811 call #mmctx_xfer
767 812
768 // wait for GPCs to all complete 813 // wait for GPCs to all complete
769 mov $r10 8 // DONE_BAR 814 mov $r10 8 // DONE_BAR
770 call wait_doneo 815 call #wait_doneo
771 816
772 // wait for strand xfer to complete 817 // wait for strand xfer to complete
773 call strand_wait 818 call #strand_wait
774 819
775 // post-op 820 // post-op
776 bra $p1 ctx_xfer_post 821 bra $p1 #ctx_xfer_post
777 mov $r10 12 // DONE_UNK12 822 mov $r10 12 // DONE_UNK12
778 call wait_donez 823 call #wait_donez
779 mov $r1 0xa10 824 mov $r1 0xa10
780 shl b32 $r1 6 825 shl b32 $r1 6
781 mov $r2 5 826 mov $r2 5
@@ -783,27 +828,27 @@ ctx_xfer:
783 ctx_xfer_post_save_wait: 828 ctx_xfer_post_save_wait:
784 iord $r2 I[$r1] 829 iord $r2 I[$r1]
785 or $r2 $r2 830 or $r2 $r2
786 bra ne ctx_xfer_post_save_wait 831 bra ne #ctx_xfer_post_save_wait
787 832
788 bra $p2 ctx_xfer_done 833 bra $p2 #ctx_xfer_done
789 ctx_xfer_post: 834 ctx_xfer_post:
790 mov $r15 2 835 mov $r15 2
791 call ctx_4170s 836 call #ctx_4170s
792 clear b32 $r15 837 clear b32 $r15
793 call ctx_86c 838 call #ctx_86c
794 call strand_post 839 call #strand_post
795 call ctx_4170w 840 call #ctx_4170w
796 clear b32 $r15 841 clear b32 $r15
797 call ctx_4170s 842 call #ctx_4170s
798 843
799 bra not $p1 ctx_xfer_no_post_mmio 844 bra not $p1 #ctx_xfer_no_post_mmio
800 ld b32 $r1 D[$r0 + chan_mmio_count] 845 ld b32 $r1 D[$r0 + #chan_mmio_count]
801 or $r1 $r1 846 or $r1 $r1
802 bra e ctx_xfer_no_post_mmio 847 bra e #ctx_xfer_no_post_mmio
803 call ctx_mmio_exec 848 call #ctx_mmio_exec
804 849
805 ctx_xfer_no_post_mmio: 850 ctx_xfer_no_post_mmio:
806 call ctx_4160c 851 call #ctx_4160c
807 852
808 ctx_xfer_done: 853 ctx_xfer_done:
809 ret 854 ret
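
The assembled microcode ships as the uint32_t arrays in the header diffed next (nvc0_grhub_data and nvc0_grhub_code) and is written into the HUB falcon's data and code memories at init time. A rough sketch of that upload, modeled on the driver's nvc0_graph_init_fuc() path; the port offsets below are quoted from memory and should be treated as illustrative, not authoritative:

/* Sketch: stream the data segment through the falcon's DMEM port, then
 * the code segment through the IMEM port, tagging each 256-byte page.
 * Offsets are relative to the falcon base (0x409000 for HUB). */
static void
grhub_load_fuc_sketch(struct drm_device *dev, u32 base,
		      const u32 *code, u32 code_words,
		      const u32 *data, u32 data_words)
{
	u32 i;

	nv_wr32(dev, base + 0x01c0, 0x01000000);	/* DMEM index, auto-increment */
	for (i = 0; i < data_words; i++)
		nv_wr32(dev, base + 0x01c4, data[i]);

	nv_wr32(dev, base + 0x0180, 0x01000000);	/* IMEM index, auto-increment */
	for (i = 0; i < code_words; i++) {
		if ((i & 0x3f) == 0)			/* new 256-byte page */
			nv_wr32(dev, base + 0x0188, i >> 6);
		nv_wr32(dev, base + 0x0184, code[i]);
	}
}
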
diff --git a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h
index 241d3263f1e..c5ed307abeb 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h
+++ b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h
@@ -23,19 +23,21 @@ uint32_t nvc0_grhub_data[] = {
23 0x00000000, 23 0x00000000,
24 0x00000000, 24 0x00000000,
25 0x000000c0, 25 0x000000c0,
26 0x01340098, 26 0x013c00a0,
27 0x000000c1, 27 0x000000c1,
28 0x01380098, 28 0x014000a0,
29 0x000000c3, 29 0x000000c3,
30 0x01340098, 30 0x013c00a0,
31 0x000000c4, 31 0x000000c4,
32 0x01340098, 32 0x013c00a0,
33 0x000000c8, 33 0x000000c8,
34 0x01340098, 34 0x013c00a0,
35 0x000000ce, 35 0x000000ce,
36 0x01340098, 36 0x013c00a0,
37 0x000000cf, 37 0x000000cf,
38 0x01340098, 38 0x013c00a0,
39 0x000000d9,
40 0x01dc0140,
39 0x00000000, 41 0x00000000,
40 0x0417e91c, 42 0x0417e91c,
41 0x04400204, 43 0x04400204,
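
Each row of the chipsets table is an ID word followed by a word packing the 16-bit head and tail offsets of that chipset's hub mmio list (the init code above loads them with ld b16 from +4 and +6). Decoding the new nvd9 row as a sanity check; the struct is an assumption made for illustration, not a type from the driver:

#include <stdint.h>
#include <stdio.h>

/* Assumed layout of one 'chipsets' entry in nvc0_grhub_data[]. */
struct chipset_entry {
	uint32_t id;	/* e.g. 0x000000d9 for nvd9 */
	uint32_t mmio;	/* low 16 bits: list head offset, high 16: tail */
};

int main(void)
{
	struct chipset_entry nvd9 = { 0x000000d9, 0x01dc0140 };

	printf("nv%02x mmio list 0x%04x..0x%04x\n", nvd9.id,
	       nvd9.mmio & 0xffff, nvd9.mmio >> 16);
	/* prints: nvd9 mmio list 0x0140..0x01dc, i.e. the larger
	 * nvd9_hub_mmio_head..nvd9_hub_mmio_tail range added above */
	return 0;
}
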
@@ -77,47 +79,45 @@ uint32_t nvc0_grhub_data[] = {
77 0x0c408900, 79 0x0c408900,
78 0x00408980, 80 0x00408980,
79 0x044064c0, 81 0x044064c0,
80 0x00000000, 82 0x0417e91c,
81 0x00000000, 83 0x04400204,
82 0x00000000, 84 0x24404004,
83 0x00000000, 85 0x00404044,
84 0x00000000, 86 0x34404094,
85 0x00000000, 87 0x184040d0,
86 0x00000000, 88 0x004040f8,
87 0x00000000, 89 0x08404130,
88 0x00000000, 90 0x08404150,
89 0x00000000, 91 0x04404164,
90 0x00000000, 92 0x04404178,
91 0x00000000, 93 0x1c404200,
92 0x00000000, 94 0x34404404,
93 0x00000000, 95 0x0c404460,
94 0x00000000, 96 0x00404480,
95 0x00000000, 97 0x00404498,
96 0x00000000, 98 0x0c404604,
97 0x00000000, 99 0x7c404618,
98 0x00000000, 100 0x50404698,
99 0x00000000, 101 0x044046f0,
100 0x00000000, 102 0x54404700,
101 0x00000000, 103 0x00405800,
102 0x00000000, 104 0x08405830,
103 0x00000000, 105 0x00405854,
104 0x00000000, 106 0x0c405870,
105 0x00000000, 107 0x04405a00,
106 0x00000000, 108 0x00405a18,
107 0x00000000, 109 0x00406020,
108 0x00000000, 110 0x0c406028,
109 0x00000000, 111 0x044064a8,
110 0x00000000, 112 0x104064b4,
111 0x00000000, 113 0x00407804,
112 0x00000000, 114 0x1440780c,
113 0x00000000, 115 0x004078bc,
114 0x00000000, 116 0x18408000,
115 0x00000000, 117 0x00408064,
116 0x00000000, 118 0x08408800,
117 0x00000000, 119 0x0c408900,
118 0x00000000, 120 0x00408980,
119 0x00000000,
120 0x00000000,
121 0x00000000, 121 0x00000000,
122 0x00000000, 122 0x00000000,
123 0x00000000, 123 0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvc0_pm.c b/drivers/gpu/drm/nouveau/nvc0_pm.c
index 929aded35cb..e9992f62c1c 100644
--- a/drivers/gpu/drm/nouveau/nvc0_pm.c
+++ b/drivers/gpu/drm/nouveau/nvc0_pm.c
@@ -153,3 +153,240 @@ nvc0_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
153 perflvl->vdec = read_clk(dev, 0x0e); 153 perflvl->vdec = read_clk(dev, 0x0e);
154 return 0; 154 return 0;
155} 155}
156
157struct nvc0_pm_clock {
158 u32 freq;
159 u32 ssel;
160 u32 mdiv;
161 u32 dsrc;
162 u32 ddiv;
163 u32 coef;
164};
165
166struct nvc0_pm_state {
167 struct nvc0_pm_clock eng[16];
168};
169
170static u32
171calc_div(struct drm_device *dev, int clk, u32 ref, u32 freq, u32 *ddiv)
172{
173 u32 div = min((ref * 2) / freq, (u32)65);
174 if (div < 2)
175 div = 2;
176
177 *ddiv = div - 2;
178 return (ref * 2) / div;
179}
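
calc_div() picks the nearest half-step divider between 2 and 65 off the doubled reference and reports the hardware field as div - 2. A standalone worked example, assuming a hypothetical 1620 MHz VCO (all figures in kHz, matching the driver's units):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ref = 1620000, freq = 800000;
	uint32_t div = (ref * 2) / freq;	/* 4, by integer truncation */

	if (div > 65) div = 65;
	if (div < 2)  div = 2;

	/* hardware divider field is div - 2; achieved rate is ref*2/div */
	printf("ddiv=%u clk=%u kHz\n", div - 2, (ref * 2) / div);
	/* prints: ddiv=2 clk=810000 kHz */
	return 0;
}
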
180
181static u32
182calc_src(struct drm_device *dev, int clk, u32 freq, u32 *dsrc, u32 *ddiv)
183{
184 u32 sclk;
185
186 /* use one of the fixed frequencies if possible */
187 *ddiv = 0x00000000;
188 switch (freq) {
189 case 27000:
190 case 108000:
191 *dsrc = 0x00000000;
192 if (freq == 108000)
193 *dsrc |= 0x00030000;
194 return freq;
195 case 100000:
196 *dsrc = 0x00000002;
197 return freq;
198 default:
199 *dsrc = 0x00000003;
200 break;
201 }
202
203 /* otherwise, calculate the closest divider */
204 sclk = read_vco(dev, clk);
205 if (clk < 7)
206 sclk = calc_div(dev, clk, sclk, freq, ddiv);
207 return sclk;
208}
209
210static u32
211calc_pll(struct drm_device *dev, int clk, u32 freq, u32 *coef)
212{
213 struct pll_lims limits;
214 int N, M, P, ret;
215
216 ret = get_pll_limits(dev, 0x137000 + (clk * 0x20), &limits);
217 if (ret)
218 return 0;
219
220 limits.refclk = read_div(dev, clk, 0x137120, 0x137140);
221 if (!limits.refclk)
222 return 0;
223
224 ret = nva3_calc_pll(dev, &limits, freq, &N, NULL, &M, &P);
225 if (ret <= 0)
226 return 0;
227
228 *coef = (P << 16) | (N << 8) | M;
229 return ret;
230}
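
calc_pll() returns the achieved frequency and packs the coefficients as (P << 16) | (N << 8) | M. Assuming the usual relationship for these PLLs (out = refclk * N / (M * P), which is how nva3_calc_pll() results are normally interpreted, though that is an inference here), a coefficient word unpacks like this:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical values: 27 MHz refclk, P=2 N=74 M=1 */
	uint32_t refclk = 27000;	/* kHz */
	uint32_t coef = (2 << 16) | (74 << 8) | 1;
	uint32_t M = coef & 0xff, N = (coef >> 8) & 0xff, P = coef >> 16;

	printf("out = %u kHz\n", refclk * N / (M * P));
	/* prints: out = 999000 kHz */
	return 0;
}
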
231
232/* A (likely rather simplified and incomplete) view of the clock tree
233 *
234 * Key:
235 *
236 * S: source select
237 * D: divider
238 * P: pll
239 * F: switch
240 *
241 * Engine clocks:
242 *
243 * 137250(D) ---- 137100(F0) ---- 137160(S)/1371d0(D) ------------------- ref
244 * (F1) ---- 1370X0(P) ---- 137120(S)/137140(D) ---- ref
245 *
246 * Not all registers exist for all clocks. For example: clocks >= 8 don't
247 * have their own PLL (all tied to clock 7's PLL when in PLL mode), nor do
248 * they have the divider at 1371d0, though the source selection at 137160
249 * still exists. You must use the divider at 137250 for these instead.
250 *
251 * Memory clock:
252 *
253 * TBD, read_mem() above is likely very wrong...
254 *
255 */
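
Reading the comment against calc_pll() and prog_clk() below, the per-clock register strides used by this file are: PLL control/coefficients at 0x137000 + clk*0x20 (clocks 0-6 only), source select at 0x137160 + clk*4, the extra divider at 0x1371d0 + clk*4 (again clocks 0-6 only), the final divider at 0x137250 + clk*4, and bit 'clk' of 0x137100 as the PLL/bypass switch. A small helper collecting those addresses, intended purely as a reading aid:

/* Reading aid only: per-clock register addresses as used elsewhere in
 * this file (see calc_pll() and prog_clk()). */
struct clk_regs {
	u32 pll;	/* PLL ctrl/coef, clocks 0-6 only */
	u32 dsrc;	/* source select */
	u32 ddiv;	/* pre-divider, clocks 0-6 only */
	u32 mdiv;	/* final divider */
};

static struct clk_regs clk_regs(int clk)
{
	struct clk_regs r = {
		.pll  = 0x137000 + (clk * 0x20),
		.dsrc = 0x137160 + (clk * 0x04),
		.ddiv = 0x1371d0 + (clk * 0x04),
		.mdiv = 0x137250 + (clk * 0x04),
	};
	return r;
}
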
256
257static int
258calc_clk(struct drm_device *dev, int clk, struct nvc0_pm_clock *info, u32 freq)
259{
260 u32 src0, div0, div1D, div1P = 0;
261 u32 clk0, clk1 = 0;
262
263 /* invalid clock domain */
264 if (!freq)
265 return 0;
266
267 /* first possible path, using only dividers */
268 clk0 = calc_src(dev, clk, freq, &src0, &div0);
269 clk0 = calc_div(dev, clk, clk0, freq, &div1D);
270
271 /* see if we can get any closer using PLLs */
272 if (clk0 != freq) {
273 if (clk < 7)
274 clk1 = calc_pll(dev, clk, freq, &info->coef);
275 else
276 clk1 = read_pll(dev, 0x1370e0);
277 clk1 = calc_div(dev, clk, clk1, freq, &div1P);
278 }
279
280 /* select the method which gets closest to target freq */
281 if (abs((int)freq - clk0) <= abs((int)freq - clk1)) {
282 info->dsrc = src0;
283 if (div0) {
284 info->ddiv |= 0x80000000;
285 info->ddiv |= div0 << 8;
286 info->ddiv |= div0;
287 }
288 if (div1D) {
289 info->mdiv |= 0x80000000;
290 info->mdiv |= div1D;
291 }
292 info->ssel = 0;
293 info->freq = clk0;
294 } else {
295 if (div1P) {
296 info->mdiv |= 0x80000000;
297 info->mdiv |= div1P << 8;
298 }
299 info->ssel = (1 << clk);
300 info->freq = clk1;
301 }
302
303 return 0;
304}
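
The tail of calc_clk() is a plain closest-match between the divider-only rate (clk0) and the PLL rate (clk1). A standalone worked example, assuming a 1620 MHz VCO and a PLL that can hit the target exactly:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	int freq = 750000;	/* target, kHz */
	int clk0 = 810000;	/* best divider-only rate: (1620000*2)/4 */
	int clk1 = 750000;	/* best PLL rate (hypothetical exact hit) */

	if (abs(freq - clk0) <= abs(freq - clk1))
		printf("divider path, ssel = 0\n");
	else
		printf("PLL path, ssel = (1 << clk)\n");	/* taken: 60000 > 0 */
	return 0;
}
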
305
306void *
307nvc0_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
308{
309 struct drm_nouveau_private *dev_priv = dev->dev_private;
310 struct nvc0_pm_state *info;
311 int ret;
312
313 info = kzalloc(sizeof(*info), GFP_KERNEL);
314 if (!info)
315 return ERR_PTR(-ENOMEM);
316
317 /* NFI why this is still in the performance table; the ROPCs appear
318 * to get their clock from clock 2 ("hub07", actually hub05 on this
319 * chip, but, anyway...) as well. nvatiming confirms hub05 and ROP
320 * are always the same freq with the binary driver even when the
321 * performance table says they should differ.
322 */
323 if (dev_priv->chipset == 0xd9)
324 perflvl->rop = 0;
325
326 if ((ret = calc_clk(dev, 0x00, &info->eng[0x00], perflvl->shader)) ||
327 (ret = calc_clk(dev, 0x01, &info->eng[0x01], perflvl->rop)) ||
328 (ret = calc_clk(dev, 0x02, &info->eng[0x02], perflvl->hub07)) ||
329 (ret = calc_clk(dev, 0x07, &info->eng[0x07], perflvl->hub06)) ||
330 (ret = calc_clk(dev, 0x08, &info->eng[0x08], perflvl->hub01)) ||
331 (ret = calc_clk(dev, 0x09, &info->eng[0x09], perflvl->copy)) ||
332 (ret = calc_clk(dev, 0x0c, &info->eng[0x0c], perflvl->daemon)) ||
333 (ret = calc_clk(dev, 0x0e, &info->eng[0x0e], perflvl->vdec))) {
334 kfree(info);
335 return ERR_PTR(ret);
336 }
337
338 return info;
339}
340
341static void
342prog_clk(struct drm_device *dev, int clk, struct nvc0_pm_clock *info)
343{
344 /* program dividers at 137160/1371d0 first */
345 if (clk < 7 && !info->ssel) {
346 nv_mask(dev, 0x1371d0 + (clk * 0x04), 0x80003f3f, info->ddiv);
347 nv_wr32(dev, 0x137160 + (clk * 0x04), info->dsrc);
348 }
349
350 /* switch clock to non-pll mode */
351 nv_mask(dev, 0x137100, (1 << clk), 0x00000000);
352 nv_wait(dev, 0x137100, (1 << clk), 0x00000000);
353
354 /* reprogram pll */
355 if (clk < 7) {
356 /* make sure it's disabled first... */
357 u32 base = 0x137000 + (clk * 0x20);
358 u32 ctrl = nv_rd32(dev, base + 0x00);
359 if (ctrl & 0x00000001) {
360 nv_mask(dev, base + 0x00, 0x00000004, 0x00000000);
361 nv_mask(dev, base + 0x00, 0x00000001, 0x00000000);
362 }
363 /* program it to new values, if necessary */
364 if (info->ssel) {
365 nv_wr32(dev, base + 0x04, info->coef);
366 nv_mask(dev, base + 0x00, 0x00000001, 0x00000001);
367 nv_wait(dev, base + 0x00, 0x00020000, 0x00020000);
368 nv_mask(dev, base + 0x00, 0x00020004, 0x00000004);
369 }
370 }
371
372 /* select pll/non-pll mode, and program final clock divider */
373 nv_mask(dev, 0x137100, (1 << clk), info->ssel);
374 nv_wait(dev, 0x137100, (1 << clk), info->ssel);
375 nv_mask(dev, 0x137250 + (clk * 0x04), 0x00003f3f, info->mdiv);
376}
377
378int
379nvc0_pm_clocks_set(struct drm_device *dev, void *data)
380{
381 struct nvc0_pm_state *info = data;
382 int i;
383
384 for (i = 0; i < 16; i++) {
385 if (!info->eng[i].freq)
386 continue;
387 prog_clk(dev, i, &info->eng[i]);
388 }
389
390 kfree(info);
391 return 0;
392}
diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
index cb006a718e7..d2ba2f07400 100644
--- a/drivers/gpu/drm/nouveau/nvd0_display.c
+++ b/drivers/gpu/drm/nouveau/nvd0_display.c
@@ -35,12 +35,34 @@
35#include "nouveau_fb.h" 35#include "nouveau_fb.h"
36#include "nv50_display.h" 36#include "nv50_display.h"
37 37
38#define EVO_DMA_NR 9
39
40#define EVO_MASTER (0x00)
41#define EVO_FLIP(c) (0x01 + (c))
42#define EVO_OVLY(c) (0x05 + (c))
43#define EVO_OIMM(c) (0x09 + (c))
44#define EVO_CURS(c) (0x0d + (c))
45
46/* offsets in shared sync bo of various structures */
47#define EVO_SYNC(c, o) ((c) * 0x0100 + (o))
48#define EVO_MAST_NTFY EVO_SYNC( 0, 0x00)
49#define EVO_FLIP_SEM0(c) EVO_SYNC((c), 0x00)
50#define EVO_FLIP_SEM1(c) EVO_SYNC((c), 0x10)
51
52struct evo {
53 int idx;
54 dma_addr_t handle;
55 u32 *ptr;
56 struct {
57 u32 offset;
58 u16 value;
59 } sem;
60};
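
The defines split the EVO channels into one master (core) channel plus per-CRTC flip, overlay, overlay-immediate and cursor channels; the first nine (master, flips, overlays) are the DMA channels counted by EVO_DMA_NR, and the shared sync buffer is carved into 0x100-byte per-channel slots. Expanding the macros for a two-head board; the semaphore-slot indexing by flip channel is an assumption for illustration:

#include <stdio.h>

#define EVO_MASTER  (0x00)
#define EVO_FLIP(c) (0x01 + (c))
#define EVO_CURS(c) (0x0d + (c))
#define EVO_SYNC(c, o) ((c) * 0x0100 + (o))

int main(void)
{
	int head;

	for (head = 0; head < 2; head++)
		printf("head %d: flip ch%d curs ch%d sem slot 0x%03x\n",
		       head, EVO_FLIP(head), EVO_CURS(head),
		       EVO_SYNC(EVO_FLIP(head), 0x00));
	/* head 0: flip ch1 curs ch13 sem slot 0x100
	 * head 1: flip ch2 curs ch14 sem slot 0x200 */
	return 0;
}
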
61
38struct nvd0_display { 62struct nvd0_display {
39 struct nouveau_gpuobj *mem; 63 struct nouveau_gpuobj *mem;
40 struct { 64 struct nouveau_bo *sync;
41 dma_addr_t handle; 65 struct evo evo[9];
42 u32 *ptr;
43 } evo[1];
44 66
45 struct tasklet_struct tasklet; 67 struct tasklet_struct tasklet;
46 u32 modeset; 68 u32 modeset;
@@ -53,6 +75,15 @@ nvd0_display(struct drm_device *dev)
53 return dev_priv->engine.display.priv; 75 return dev_priv->engine.display.priv;
54} 76}
55 77
78static struct drm_crtc *
79nvd0_display_crtc_get(struct drm_encoder *encoder)
80{
81 return nouveau_encoder(encoder)->crtc;
82}
83
84/******************************************************************************
85 * EVO channel helpers
86 *****************************************************************************/
56static inline int 87static inline int
57evo_icmd(struct drm_device *dev, int id, u32 mthd, u32 data) 88evo_icmd(struct drm_device *dev, int id, u32 mthd, u32 data)
58{ 89{
@@ -84,6 +115,9 @@ evo_wait(struct drm_device *dev, int id, int nr)
84 put = 0; 115 put = 0;
85 } 116 }
86 117
118 if (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
119 NV_INFO(dev, "Evo%d: %p START\n", id, disp->evo[id].ptr + put);
120
87 return disp->evo[id].ptr + put; 121 return disp->evo[id].ptr + put;
88} 122}
89 123
@@ -91,40 +125,264 @@ static void
91evo_kick(u32 *push, struct drm_device *dev, int id) 125evo_kick(u32 *push, struct drm_device *dev, int id)
92{ 126{
93 struct nvd0_display *disp = nvd0_display(dev); 127 struct nvd0_display *disp = nvd0_display(dev);
128
129 if (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO) {
130 u32 curp = nv_rd32(dev, 0x640000 + (id * 0x1000)) >> 2;
131 u32 *cur = disp->evo[id].ptr + curp;
132
133 while (cur < push)
134 NV_INFO(dev, "Evo%d: 0x%08x\n", id, *cur++);
135 NV_INFO(dev, "Evo%d: %p KICK!\n", id, push);
136 }
137
94 nv_wr32(dev, 0x640000 + (id * 0x1000), (push - disp->evo[id].ptr) << 2); 138 nv_wr32(dev, 0x640000 + (id * 0x1000), (push - disp->evo[id].ptr) << 2);
95} 139}
96 140
97#define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m)) 141#define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m))
98#define evo_data(p,d) *((p)++) = (d) 142#define evo_data(p,d) *((p)++) = (d)
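
evo_mthd() emits the push-buffer method header, with the payload size in dwords shifted into bits 18 and up over the method offset, and evo_data() appends the payload. Assembled by hand for the UPDATE method (0x0080) used throughout this file:

#include <assert.h>
#include <stdint.h>

#define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m))
#define evo_data(p,d)   *((p)++) = (d)

int main(void)
{
	uint32_t buf[2], *push = buf;

	evo_mthd(push, 0x0080, 1);	/* UPDATE, one data word */
	evo_data(push, 0x00000000);

	assert(buf[0] == 0x00040080);	/* (1 << 18) | 0x0080 */
	assert(buf[1] == 0x00000000);
	return 0;
}
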
99 143
100static struct drm_crtc * 144static int
101nvd0_display_crtc_get(struct drm_encoder *encoder) 145evo_init_dma(struct drm_device *dev, int ch)
102{ 146{
103 return nouveau_encoder(encoder)->crtc; 147 struct nvd0_display *disp = nvd0_display(dev);
148 u32 flags;
149
150 flags = 0x00000000;
151 if (ch == EVO_MASTER)
152 flags |= 0x01000000;
153
154 nv_wr32(dev, 0x610494 + (ch * 0x0010), (disp->evo[ch].handle >> 8) | 3);
155 nv_wr32(dev, 0x610498 + (ch * 0x0010), 0x00010000);
156 nv_wr32(dev, 0x61049c + (ch * 0x0010), 0x00000001);
157 nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000010);
158 nv_wr32(dev, 0x640000 + (ch * 0x1000), 0x00000000);
159 nv_wr32(dev, 0x610490 + (ch * 0x0010), 0x00000013 | flags);
160 if (!nv_wait(dev, 0x610490 + (ch * 0x0010), 0x80000000, 0x00000000)) {
161 NV_ERROR(dev, "PDISP: ch%d 0x%08x\n", ch,
162 nv_rd32(dev, 0x610490 + (ch * 0x0010)));
163 return -EBUSY;
164 }
165
166 nv_mask(dev, 0x610090, (1 << ch), (1 << ch));
167 nv_mask(dev, 0x6100a0, (1 << ch), (1 << ch));
168 return 0;
169}
170
171static void
172evo_fini_dma(struct drm_device *dev, int ch)
173{
174 if (!(nv_rd32(dev, 0x610490 + (ch * 0x0010)) & 0x00000010))
175 return;
176
177 nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000000);
178 nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000003, 0x00000000);
179 nv_wait(dev, 0x610490 + (ch * 0x0010), 0x80000000, 0x00000000);
180 nv_mask(dev, 0x610090, (1 << ch), 0x00000000);
181 nv_mask(dev, 0x6100a0, (1 << ch), 0x00000000);
182}
183
184static inline void
185evo_piow(struct drm_device *dev, int ch, u16 mthd, u32 data)
186{
187 nv_wr32(dev, 0x640000 + (ch * 0x1000) + mthd, data);
188}
189
190static int
191evo_init_pio(struct drm_device *dev, int ch)
192{
193 nv_wr32(dev, 0x610490 + (ch * 0x0010), 0x00000001);
194 if (!nv_wait(dev, 0x610490 + (ch * 0x0010), 0x00010000, 0x00010000)) {
195 NV_ERROR(dev, "PDISP: ch%d 0x%08x\n", ch,
196 nv_rd32(dev, 0x610490 + (ch * 0x0010)));
197 return -EBUSY;
198 }
199
200 nv_mask(dev, 0x610090, (1 << ch), (1 << ch));
201 nv_mask(dev, 0x6100a0, (1 << ch), (1 << ch));
202 return 0;
203}
204
205static void
206evo_fini_pio(struct drm_device *dev, int ch)
207{
208 if (!(nv_rd32(dev, 0x610490 + (ch * 0x0010)) & 0x00000001))
209 return;
210
211 nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000010);
212 nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000001, 0x00000000);
213 nv_wait(dev, 0x610490 + (ch * 0x0010), 0x00010000, 0x00000000);
214 nv_mask(dev, 0x610090, (1 << ch), 0x00000000);
215 nv_mask(dev, 0x6100a0, (1 << ch), 0x00000000);
216}
217
218static bool
219evo_sync_wait(void *data)
220{
221 return nouveau_bo_rd32(data, EVO_MAST_NTFY) != 0x00000000;
222}
223
224static int
225evo_sync(struct drm_device *dev, int ch)
226{
227 struct nvd0_display *disp = nvd0_display(dev);
228 u32 *push = evo_wait(dev, ch, 8);
229 if (push) {
230 nouveau_bo_wr32(disp->sync, EVO_MAST_NTFY, 0x00000000);
231 evo_mthd(push, 0x0084, 1);
232 evo_data(push, 0x80000000 | EVO_MAST_NTFY);
233 evo_mthd(push, 0x0080, 2);
234 evo_data(push, 0x00000000);
235 evo_data(push, 0x00000000);
236 evo_kick(push, dev, ch);
237 if (nv_wait_cb(dev, evo_sync_wait, disp->sync))
238 return 0;
239 }
240
241 return -EBUSY;
242}
243
244/******************************************************************************
245 * Page flipping channel
246 *****************************************************************************/
247struct nouveau_bo *
248nvd0_display_crtc_sema(struct drm_device *dev, int crtc)
249{
250 return nvd0_display(dev)->sync;
251}
252
253void
254nvd0_display_flip_stop(struct drm_crtc *crtc)
255{
256 struct nvd0_display *disp = nvd0_display(crtc->dev);
257 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
258 struct evo *evo = &disp->evo[EVO_FLIP(nv_crtc->index)];
259 u32 *push;
260
261 push = evo_wait(crtc->dev, evo->idx, 8);
262 if (push) {
263 evo_mthd(push, 0x0084, 1);
264 evo_data(push, 0x00000000);
265 evo_mthd(push, 0x0094, 1);
266 evo_data(push, 0x00000000);
267 evo_mthd(push, 0x00c0, 1);
268 evo_data(push, 0x00000000);
269 evo_mthd(push, 0x0080, 1);
270 evo_data(push, 0x00000000);
271 evo_kick(push, crtc->dev, evo->idx);
272 }
273}
274
275int
276nvd0_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
277 struct nouveau_channel *chan, u32 swap_interval)
278{
279 struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
280 struct nvd0_display *disp = nvd0_display(crtc->dev);
281 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
282 struct evo *evo = &disp->evo[EVO_FLIP(nv_crtc->index)];
283 u64 offset;
284 u32 *push;
285 int ret;
286
287 swap_interval <<= 4;
288 if (swap_interval == 0)
289 swap_interval |= 0x100;
290
291 push = evo_wait(crtc->dev, evo->idx, 128);
292 if (unlikely(push == NULL))
293 return -EBUSY;
294
295 /* synchronise with the rendering channel, if necessary */
296 if (likely(chan)) {
297 ret = RING_SPACE(chan, 10);
298 if (ret)
299 return ret;
300
301 offset = chan->dispc_vma[nv_crtc->index].offset;
302 offset += evo->sem.offset;
303
304 BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
305 OUT_RING (chan, upper_32_bits(offset));
306 OUT_RING (chan, lower_32_bits(offset));
307 OUT_RING (chan, 0xf00d0000 | evo->sem.value);
308 OUT_RING (chan, 0x1002);
309 BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
310 OUT_RING (chan, upper_32_bits(offset));
311 OUT_RING (chan, lower_32_bits(offset ^ 0x10));
312 OUT_RING (chan, 0x74b1e000);
313 OUT_RING (chan, 0x1001);
314 FIRE_RING (chan);
315 } else {
316 nouveau_bo_wr32(disp->sync, evo->sem.offset / 4,
317 0xf00d0000 | evo->sem.value);
318 evo_sync(crtc->dev, EVO_MASTER);
319 }
320
321 /* queue the flip */
322 evo_mthd(push, 0x0100, 1);
323 evo_data(push, 0xfffe0000);
324 evo_mthd(push, 0x0084, 1);
325 evo_data(push, swap_interval);
326 if (!(swap_interval & 0x00000100)) {
327 evo_mthd(push, 0x00e0, 1);
328 evo_data(push, 0x40000000);
329 }
330 evo_mthd(push, 0x0088, 4);
331 evo_data(push, evo->sem.offset);
332 evo_data(push, 0xf00d0000 | evo->sem.value);
333 evo_data(push, 0x74b1e000);
334 evo_data(push, NvEvoSync);
335 evo_mthd(push, 0x00a0, 2);
336 evo_data(push, 0x00000000);
337 evo_data(push, 0x00000000);
338 evo_mthd(push, 0x00c0, 1);
339 evo_data(push, nv_fb->r_dma);
340 evo_mthd(push, 0x0110, 2);
341 evo_data(push, 0x00000000);
342 evo_data(push, 0x00000000);
343 evo_mthd(push, 0x0400, 5);
344 evo_data(push, nv_fb->nvbo->bo.offset >> 8);
345 evo_data(push, 0);
346 evo_data(push, (fb->height << 16) | fb->width);
347 evo_data(push, nv_fb->r_pitch);
348 evo_data(push, nv_fb->r_format);
349 evo_mthd(push, 0x0080, 1);
350 evo_data(push, 0x00000000);
351 evo_kick(push, crtc->dev, evo->idx);
352
353 evo->sem.offset ^= 0x10;
354 evo->sem.value++;
355 return 0;
104} 356}
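
Each flip channel alternates between two 16-byte semaphore slots in the sync buffer: the rendering channel (or the CPU fallback via nouveau_bo_wr32()) releases 0xf00d0000 | value into the slot the queued flip waits on, and pre-arms the other slot with 0x74b1e000 for the flip's own release. The xor and increment at the end of flip_next are the entire state machine; modeled in isolation, with hypothetical names:

#include <stdint.h>

/* Standalone model of the flip semaphore ping-pong. */
struct flip_sem {
	uint32_t offset;	/* current 16-byte slot in the sync bo */
	uint16_t value;		/* expected sequence number */
};

static void flip_queued(struct flip_sem *sem)
{
	/* the wait used 'offset' and the release landed at offset ^ 0x10;
	 * swap slots and bump the sequence for the next flip */
	sem->offset ^= 0x10;
	sem->value++;
}
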
105 357
106/****************************************************************************** 358/******************************************************************************
107 * CRTC 359 * CRTC
108 *****************************************************************************/ 360 *****************************************************************************/
109static int 361static int
110nvd0_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool on, bool update) 362nvd0_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
111{ 363{
112 struct drm_device *dev = nv_crtc->base.dev; 364 struct drm_device *dev = nv_crtc->base.dev;
113 u32 *push, mode; 365 struct nouveau_connector *nv_connector;
366 struct drm_connector *connector;
367 u32 *push, mode = 0x00;
114 368
115 mode = 0x00000000; 369 nv_connector = nouveau_crtc_connector_get(nv_crtc);
116 if (on) { 370 connector = &nv_connector->base;
117 /* 0x11: 6bpc dynamic 2x2 371 if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) {
118 * 0x13: 8bpc dynamic 2x2 372 if (nv_crtc->base.fb->depth > connector->display_info.bpc * 3)
119 * 0x19: 6bpc static 2x2 373 mode = DITHERING_MODE_DYNAMIC2X2;
120 * 0x1b: 8bpc static 2x2 374 } else {
121 * 0x21: 6bpc temporal 375 mode = nv_connector->dithering_mode;
122 * 0x23: 8bpc temporal 376 }
123 */ 377
124 mode = 0x00000011; 378 if (nv_connector->dithering_depth == DITHERING_DEPTH_AUTO) {
379 if (connector->display_info.bpc >= 8)
380 mode |= DITHERING_DEPTH_8BPC;
381 } else {
382 mode |= nv_connector->dithering_depth;
125 } 383 }
126 384
127 push = evo_wait(dev, 0, 4); 385 push = evo_wait(dev, EVO_MASTER, 4);
128 if (push) { 386 if (push) {
129 evo_mthd(push, 0x0490 + (nv_crtc->index * 0x300), 1); 387 evo_mthd(push, 0x0490 + (nv_crtc->index * 0x300), 1);
130 evo_data(push, mode); 388 evo_data(push, mode);
@@ -132,63 +390,98 @@ nvd0_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool on, bool update)
132 evo_mthd(push, 0x0080, 1); 390 evo_mthd(push, 0x0080, 1);
133 evo_data(push, 0x00000000); 391 evo_data(push, 0x00000000);
134 } 392 }
135 evo_kick(push, dev, 0); 393 evo_kick(push, dev, EVO_MASTER);
136 } 394 }
137 395
138 return 0; 396 return 0;
139} 397}
140 398
141static int 399static int
142nvd0_crtc_set_scale(struct nouveau_crtc *nv_crtc, int type, bool update) 400nvd0_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
143{ 401{
144 struct drm_display_mode *mode = &nv_crtc->base.mode; 402 struct drm_display_mode *omode, *umode = &nv_crtc->base.mode;
145 struct drm_device *dev = nv_crtc->base.dev; 403 struct drm_device *dev = nv_crtc->base.dev;
404 struct drm_crtc *crtc = &nv_crtc->base;
146 struct nouveau_connector *nv_connector; 405 struct nouveau_connector *nv_connector;
147 u32 *push, outX, outY; 406 int mode = DRM_MODE_SCALE_NONE;
148 407 u32 oX, oY, *push;
149 outX = mode->hdisplay;
150 outY = mode->vdisplay;
151 408
409 /* start off at the resolution we programmed the crtc for; this
410 * effectively handles NONE/FULL scaling
411 */
152 nv_connector = nouveau_crtc_connector_get(nv_crtc); 412 nv_connector = nouveau_crtc_connector_get(nv_crtc);
153 if (nv_connector && nv_connector->native_mode) { 413 if (nv_connector && nv_connector->native_mode)
154 struct drm_display_mode *native = nv_connector->native_mode; 414 mode = nv_connector->scaling_mode;
155 u32 xratio = (native->hdisplay << 19) / mode->hdisplay; 415
156 u32 yratio = (native->vdisplay << 19) / mode->vdisplay; 416 if (mode != DRM_MODE_SCALE_NONE)
157 417 omode = nv_connector->native_mode;
158 switch (type) { 418 else
159 case DRM_MODE_SCALE_ASPECT: 419 omode = umode;
160 if (xratio > yratio) { 420
161 outX = (mode->hdisplay * yratio) >> 19; 421 oX = omode->hdisplay;
162 outY = (mode->vdisplay * yratio) >> 19; 422 oY = omode->vdisplay;
163 } else { 423 if (omode->flags & DRM_MODE_FLAG_DBLSCAN)
164 outX = (mode->hdisplay * xratio) >> 19; 424 oY *= 2;
165 outY = (mode->vdisplay * xratio) >> 19; 425
166 } 426 /* add overscan compensation if necessary; this will keep the aspect
167 break; 427 * ratio the same as the backend mode unless overridden by the
168 case DRM_MODE_SCALE_FULLSCREEN: 428 * user setting both hborder and vborder properties.
169 outX = native->hdisplay; 429 */
170 outY = native->vdisplay; 430 if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON ||
171 break; 431 (nv_connector->underscan == UNDERSCAN_AUTO &&
172 default: 432 nv_connector->edid &&
173 break; 433 drm_detect_hdmi_monitor(nv_connector->edid)))) {
434 u32 bX = nv_connector->underscan_hborder;
435 u32 bY = nv_connector->underscan_vborder;
436 u32 aspect = (oY << 19) / oX;
437
438 if (bX) {
439 oX -= (bX * 2);
440 if (bY) oY -= (bY * 2);
441 else oY = ((oX * aspect) + (aspect / 2)) >> 19;
442 } else {
443 oX -= (oX >> 4) + 32;
444 if (bY) oY -= (bY * 2);
445 else oY = ((oX * aspect) + (aspect / 2)) >> 19;
446 }
447 }
448
449 /* handle CENTER/ASPECT scaling, taking into account the areas
450 * removed already for overscan compensation
451 */
452 switch (mode) {
453 case DRM_MODE_SCALE_CENTER:
454 oX = min((u32)umode->hdisplay, oX);
455 oY = min((u32)umode->vdisplay, oY);
456 /* fall-through */
457 case DRM_MODE_SCALE_ASPECT:
458 if (oY < oX) {
459 u32 aspect = (umode->hdisplay << 19) / umode->vdisplay;
460 oX = ((oY * aspect) + (aspect / 2)) >> 19;
461 } else {
462 u32 aspect = (umode->vdisplay << 19) / umode->hdisplay;
463 oY = ((oX * aspect) + (aspect / 2)) >> 19;
174 } 464 }
465 break;
466 default:
467 break;
175 } 468 }
176 469
177 push = evo_wait(dev, 0, 16); 470 push = evo_wait(dev, EVO_MASTER, 8);
178 if (push) { 471 if (push) {
179 evo_mthd(push, 0x04c0 + (nv_crtc->index * 0x300), 3); 472 evo_mthd(push, 0x04c0 + (nv_crtc->index * 0x300), 3);
180 evo_data(push, (outY << 16) | outX); 473 evo_data(push, (oY << 16) | oX);
181 evo_data(push, (outY << 16) | outX); 474 evo_data(push, (oY << 16) | oX);
182 evo_data(push, (outY << 16) | outX); 475 evo_data(push, (oY << 16) | oX);
183 evo_mthd(push, 0x0494 + (nv_crtc->index * 0x300), 1); 476 evo_mthd(push, 0x0494 + (nv_crtc->index * 0x300), 1);
184 evo_data(push, 0x00000000); 477 evo_data(push, 0x00000000);
185 evo_mthd(push, 0x04b8 + (nv_crtc->index * 0x300), 1); 478 evo_mthd(push, 0x04b8 + (nv_crtc->index * 0x300), 1);
186 evo_data(push, (mode->vdisplay << 16) | mode->hdisplay); 479 evo_data(push, (umode->vdisplay << 16) | umode->hdisplay);
480 evo_kick(push, dev, EVO_MASTER);
187 if (update) { 481 if (update) {
188 evo_mthd(push, 0x0080, 1); 482 nvd0_display_flip_stop(crtc);
189 evo_data(push, 0x00000000); 483 nvd0_display_flip_next(crtc, crtc->fb, NULL, 1);
190 } 484 }
191 evo_kick(push, dev, 0);
192 } 485 }
193 486
194 return 0; 487 return 0;
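
The underscan arithmetic above works in 19-bit fixed point: aspect = (oY << 19) / oX, and the rounded multiply ((oX * aspect) + (aspect / 2)) >> 19 recovers the scaled height. A standalone check of the auto-border case on a 1920x1080 output, where the horizontal shave defaults to (oX >> 4) + 32:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t oX = 1920, oY = 1080;
	uint32_t aspect = (oY << 19) / oX;		/* 294912 */

	oX -= (oX >> 4) + 32;				/* 1920 - 152 = 1768 */
	oY = ((oX * aspect) + (aspect / 2)) >> 19;	/* rounds to 994 */

	printf("%ux%u\n", oX, oY);	/* prints: 1768x994 */
	return 0;
}
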
@@ -201,7 +494,7 @@ nvd0_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb,
201 struct nouveau_framebuffer *nvfb = nouveau_framebuffer(fb); 494 struct nouveau_framebuffer *nvfb = nouveau_framebuffer(fb);
202 u32 *push; 495 u32 *push;
203 496
204 push = evo_wait(fb->dev, 0, 16); 497 push = evo_wait(fb->dev, EVO_MASTER, 16);
205 if (push) { 498 if (push) {
206 evo_mthd(push, 0x0460 + (nv_crtc->index * 0x300), 1); 499 evo_mthd(push, 0x0460 + (nv_crtc->index * 0x300), 1);
207 evo_data(push, nvfb->nvbo->bo.offset >> 8); 500 evo_data(push, nvfb->nvbo->bo.offset >> 8);
@@ -216,7 +509,7 @@ nvd0_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb,
216 evo_mthd(push, 0x0080, 1); 509 evo_mthd(push, 0x0080, 1);
217 evo_data(push, 0x00000000); 510 evo_data(push, 0x00000000);
218 } 511 }
219 evo_kick(push, fb->dev, 0); 512 evo_kick(push, fb->dev, EVO_MASTER);
220 } 513 }
221 514
222 nv_crtc->fb.tile_flags = nvfb->r_dma; 515 nv_crtc->fb.tile_flags = nvfb->r_dma;
@@ -227,7 +520,7 @@ static void
227nvd0_crtc_cursor_show(struct nouveau_crtc *nv_crtc, bool show, bool update) 520nvd0_crtc_cursor_show(struct nouveau_crtc *nv_crtc, bool show, bool update)
228{ 521{
229 struct drm_device *dev = nv_crtc->base.dev; 522 struct drm_device *dev = nv_crtc->base.dev;
230 u32 *push = evo_wait(dev, 0, 16); 523 u32 *push = evo_wait(dev, EVO_MASTER, 16);
231 if (push) { 524 if (push) {
232 if (show) { 525 if (show) {
233 evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 2); 526 evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 2);
@@ -247,7 +540,7 @@ nvd0_crtc_cursor_show(struct nouveau_crtc *nv_crtc, bool show, bool update)
247 evo_data(push, 0x00000000); 540 evo_data(push, 0x00000000);
248 } 541 }
249 542
250 evo_kick(push, dev, 0); 543 evo_kick(push, dev, EVO_MASTER);
251 } 544 }
252} 545}
253 546
@@ -262,7 +555,9 @@ nvd0_crtc_prepare(struct drm_crtc *crtc)
262 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 555 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
263 u32 *push; 556 u32 *push;
264 557
265 push = evo_wait(crtc->dev, 0, 2); 558 nvd0_display_flip_stop(crtc);
559
560 push = evo_wait(crtc->dev, EVO_MASTER, 2);
266 if (push) { 561 if (push) {
267 evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1); 562 evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
268 evo_data(push, 0x00000000); 563 evo_data(push, 0x00000000);
@@ -270,7 +565,7 @@ nvd0_crtc_prepare(struct drm_crtc *crtc)
270 evo_data(push, 0x03000000); 565 evo_data(push, 0x03000000);
271 evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1); 566 evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
272 evo_data(push, 0x00000000); 567 evo_data(push, 0x00000000);
273 evo_kick(push, crtc->dev, 0); 568 evo_kick(push, crtc->dev, EVO_MASTER);
274 } 569 }
275 570
276 nvd0_crtc_cursor_show(nv_crtc, false, false); 571 nvd0_crtc_cursor_show(nv_crtc, false, false);
@@ -282,7 +577,7 @@ nvd0_crtc_commit(struct drm_crtc *crtc)
282 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 577 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
283 u32 *push; 578 u32 *push;
284 579
285 push = evo_wait(crtc->dev, 0, 32); 580 push = evo_wait(crtc->dev, EVO_MASTER, 32);
286 if (push) { 581 if (push) {
287 evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1); 582 evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
288 evo_data(push, nv_crtc->fb.tile_flags); 583 evo_data(push, nv_crtc->fb.tile_flags);
@@ -295,10 +590,11 @@ nvd0_crtc_commit(struct drm_crtc *crtc)
295 evo_data(push, NvEvoVRAM); 590 evo_data(push, NvEvoVRAM);
296 evo_mthd(push, 0x0430 + (nv_crtc->index * 0x300), 1); 591 evo_mthd(push, 0x0430 + (nv_crtc->index * 0x300), 1);
297 evo_data(push, 0xffffff00); 592 evo_data(push, 0xffffff00);
298 evo_kick(push, crtc->dev, 0); 593 evo_kick(push, crtc->dev, EVO_MASTER);
299 } 594 }
300 595
301 nvd0_crtc_cursor_show(nv_crtc, nv_crtc->cursor.visible, true); 596 nvd0_crtc_cursor_show(nv_crtc, nv_crtc->cursor.visible, false);
597 nvd0_display_flip_next(crtc, crtc->fb, NULL, 1);
302} 598}
303 599
304static bool 600static bool
@@ -333,21 +629,35 @@ nvd0_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
333{ 629{
334 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 630 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
335 struct nouveau_connector *nv_connector; 631 struct nouveau_connector *nv_connector;
336 u32 htotal = mode->htotal; 632 u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1;
337 u32 vtotal = mode->vtotal; 633 u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1;
338 u32 hsyncw = mode->hsync_end - mode->hsync_start - 1; 634 u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks;
339 u32 vsyncw = mode->vsync_end - mode->vsync_start - 1; 635 u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks;
340 u32 hfrntp = mode->hsync_start - mode->hdisplay; 636 u32 vblan2e = 0, vblan2s = 1;
341 u32 vfrntp = mode->vsync_start - mode->vdisplay; 637 u32 magic = 0x31ec6000;
342 u32 hbackp = mode->htotal - mode->hsync_end;
343 u32 vbackp = mode->vtotal - mode->vsync_end;
344 u32 hss2be = hsyncw + hbackp;
345 u32 vss2be = vsyncw + vbackp;
346 u32 hss2de = htotal - hfrntp;
347 u32 vss2de = vtotal - vfrntp;
348 u32 syncs, *push; 638 u32 syncs, *push;
349 int ret; 639 int ret;
350 640
641 hactive = mode->htotal;
642 hsynce = mode->hsync_end - mode->hsync_start - 1;
643 hbackp = mode->htotal - mode->hsync_end;
644 hblanke = hsynce + hbackp;
645 hfrontp = mode->hsync_start - mode->hdisplay;
646 hblanks = mode->htotal - hfrontp - 1;
647
648 vactive = mode->vtotal * vscan / ilace;
649 vsynce = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1;
650 vbackp = (mode->vtotal - mode->vsync_end) * vscan / ilace;
651 vblanke = vsynce + vbackp;
652 vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
653 vblanks = vactive - vfrontp - 1;
654 if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
655 vblan2e = vactive + vsynce + vbackp;
656 vblan2s = vblan2e + (mode->vdisplay * vscan / ilace);
657 vactive = (vactive * 2) + 1;
658 magic |= 0x00000001;
659 }
660
351 syncs = 0x00000001; 661 syncs = 0x00000001;
352 if (mode->flags & DRM_MODE_FLAG_NHSYNC) 662 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
353 syncs |= 0x00000008; 663 syncs |= 0x00000008;
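
The rewritten timing block computes blanking-edge positions rather than the old sync-to-display distances, folding in interlace (ilace = 2) and doublescan (vscan = 2) scaling. A standalone re-derivation for the plain progressive CEA 1920x1080@60 mode, where both scale factors are 1; this checks the arithmetic only and is not driver code:

#include <stdio.h>

int main(void)
{
	/* CEA-861 1920x1080@60: htotal 2200, hsync 2008..2052,
	 * vtotal 1125, vsync 1084..1089 */
	int hdisplay = 1920, hsync_start = 2008, hsync_end = 2052, htotal = 2200;
	int vdisplay = 1080, vsync_start = 1084, vsync_end = 1089, vtotal = 1125;

	int hsynce  = hsync_end - hsync_start - 1;		/* 43   */
	int hblanke = hsynce + (htotal - hsync_end);		/* 191  */
	int hblanks = htotal - (hsync_start - hdisplay) - 1;	/* 2111 */

	int vsynce  = vsync_end - vsync_start - 1;		/* 4    */
	int vblanke = vsynce + (vtotal - vsync_end);		/* 40   */
	int vblanks = vtotal - (vsync_start - vdisplay) - 1;	/* 1120 */

	printf("h: sync %d, blank ends %d, blank starts %d\n", hsynce, hblanke, hblanks);
	printf("v: sync %d, blank ends %d, blank starts %d\n", vsynce, vblanke, vblanks);
	return 0;
}
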
@@ -358,28 +668,33 @@ nvd0_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
358 if (ret) 668 if (ret)
359 return ret; 669 return ret;
360 670
361 push = evo_wait(crtc->dev, 0, 64); 671 push = evo_wait(crtc->dev, EVO_MASTER, 64);
362 if (push) { 672 if (push) {
363 evo_mthd(push, 0x0410 + (nv_crtc->index * 0x300), 5); 673 evo_mthd(push, 0x0410 + (nv_crtc->index * 0x300), 6);
364 evo_data(push, 0x00000000); 674 evo_data(push, 0x00000000);
365 evo_data(push, (vtotal << 16) | htotal); 675 evo_data(push, (vactive << 16) | hactive);
366 evo_data(push, (vsyncw << 16) | hsyncw); 676 evo_data(push, ( vsynce << 16) | hsynce);
367 evo_data(push, (vss2be << 16) | hss2be); 677 evo_data(push, (vblanke << 16) | hblanke);
368 evo_data(push, (vss2de << 16) | hss2de); 678 evo_data(push, (vblanks << 16) | hblanks);
679 evo_data(push, (vblan2e << 16) | vblan2s);
369 evo_mthd(push, 0x042c + (nv_crtc->index * 0x300), 1); 680 evo_mthd(push, 0x042c + (nv_crtc->index * 0x300), 1);
370 evo_data(push, 0x00000000); /* ??? */ 681 evo_data(push, 0x00000000); /* ??? */
371 evo_mthd(push, 0x0450 + (nv_crtc->index * 0x300), 3); 682 evo_mthd(push, 0x0450 + (nv_crtc->index * 0x300), 3);
372 evo_data(push, mode->clock * 1000); 683 evo_data(push, mode->clock * 1000);
373 evo_data(push, 0x00200000); /* ??? */ 684 evo_data(push, 0x00200000); /* ??? */
374 evo_data(push, mode->clock * 1000); 685 evo_data(push, mode->clock * 1000);
375 evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 1); 686 evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
376 evo_data(push, syncs); 687 evo_data(push, syncs);
377 evo_kick(push, crtc->dev, 0); 688 evo_data(push, magic);
689 evo_mthd(push, 0x04d0 + (nv_crtc->index * 0x300), 2);
690 evo_data(push, 0x00000311);
691 evo_data(push, 0x00000100);
692 evo_kick(push, crtc->dev, EVO_MASTER);
378 } 693 }
379 694
380 nv_connector = nouveau_crtc_connector_get(nv_crtc); 695 nv_connector = nouveau_crtc_connector_get(nv_crtc);
381 nvd0_crtc_set_dither(nv_crtc, nv_connector->use_dithering, false); 696 nvd0_crtc_set_dither(nv_crtc, false);
382 nvd0_crtc_set_scale(nv_crtc, nv_connector->scaling_mode, false); 697 nvd0_crtc_set_scale(nv_crtc, false);
383 nvd0_crtc_set_image(nv_crtc, crtc->fb, x, y, false); 698 nvd0_crtc_set_image(nv_crtc, crtc->fb, x, y, false);
384 return 0; 699 return 0;
385} 700}
@@ -400,7 +715,9 @@ nvd0_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
400 if (ret) 715 if (ret)
401 return ret; 716 return ret;
402 717
718 nvd0_display_flip_stop(crtc);
403 nvd0_crtc_set_image(nv_crtc, crtc->fb, x, y, true); 719 nvd0_crtc_set_image(nv_crtc, crtc->fb, x, y, true);
720 nvd0_display_flip_next(crtc, crtc->fb, NULL, 1);
404 return 0; 721 return 0;
405} 722}
406 723
@@ -410,6 +727,7 @@ nvd0_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
410 enum mode_set_atomic state) 727 enum mode_set_atomic state)
411{ 728{
412 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 729 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
730 nvd0_display_flip_stop(crtc);
413 nvd0_crtc_set_image(nv_crtc, fb, x, y, true); 731 nvd0_crtc_set_image(nv_crtc, fb, x, y, true);
414 return 0; 732 return 0;
415} 733}
@@ -472,10 +790,10 @@ static int
472nvd0_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) 790nvd0_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
473{ 791{
474 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 792 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
475 const u32 data = (y << 16) | x; 793 int ch = EVO_CURS(nv_crtc->index);
476 794
477 nv_wr32(crtc->dev, 0x64d084 + (nv_crtc->index * 0x1000), data); 795 evo_piow(crtc->dev, ch, 0x0084, (y << 16) | x);
478 nv_wr32(crtc->dev, 0x64d080 + (nv_crtc->index * 0x1000), 0x00000000); 796 evo_piow(crtc->dev, ch, 0x0080, 0x00000000);
479 return 0; 797 return 0;
480} 798}
481 799
@@ -525,6 +843,7 @@ static const struct drm_crtc_funcs nvd0_crtc_func = {
525 .gamma_set = nvd0_crtc_gamma_set, 843 .gamma_set = nvd0_crtc_gamma_set,
526 .set_config = drm_crtc_helper_set_config, 844 .set_config = drm_crtc_helper_set_config,
527 .destroy = nvd0_crtc_destroy, 845 .destroy = nvd0_crtc_destroy,
846 .page_flip = nouveau_crtc_page_flip,
528}; 847};
529 848
530static void 849static void
@@ -659,12 +978,12 @@ nvd0_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
659 978
660 nvd0_dac_dpms(encoder, DRM_MODE_DPMS_ON); 979 nvd0_dac_dpms(encoder, DRM_MODE_DPMS_ON);
661 980
662 push = evo_wait(encoder->dev, 0, 4); 981 push = evo_wait(encoder->dev, EVO_MASTER, 4);
663 if (push) { 982 if (push) {
664 evo_mthd(push, 0x0180 + (nv_encoder->or * 0x20), 2); 983 evo_mthd(push, 0x0180 + (nv_encoder->or * 0x20), 2);
665 evo_data(push, 1 << nv_crtc->index); 984 evo_data(push, 1 << nv_crtc->index);
666 evo_data(push, 0x00ff); 985 evo_data(push, 0x00ff);
667 evo_kick(push, encoder->dev, 0); 986 evo_kick(push, encoder->dev, EVO_MASTER);
668 } 987 }
669 988
670 nv_encoder->crtc = encoder->crtc; 989 nv_encoder->crtc = encoder->crtc;
@@ -680,13 +999,13 @@ nvd0_dac_disconnect(struct drm_encoder *encoder)
680 if (nv_encoder->crtc) { 999 if (nv_encoder->crtc) {
681 nvd0_crtc_prepare(nv_encoder->crtc); 1000 nvd0_crtc_prepare(nv_encoder->crtc);
682 1001
683 push = evo_wait(dev, 0, 4); 1002 push = evo_wait(dev, EVO_MASTER, 4);
684 if (push) { 1003 if (push) {
685 evo_mthd(push, 0x0180 + (nv_encoder->or * 0x20), 1); 1004 evo_mthd(push, 0x0180 + (nv_encoder->or * 0x20), 1);
686 evo_data(push, 0x00000000); 1005 evo_data(push, 0x00000000);
687 evo_mthd(push, 0x0080, 1); 1006 evo_mthd(push, 0x0080, 1);
688 evo_data(push, 0x00000000); 1007 evo_data(push, 0x00000000);
689 evo_kick(push, dev, 0); 1008 evo_kick(push, dev, EVO_MASTER);
690 } 1009 }
691 1010
692 nv_encoder->crtc = NULL; 1011 nv_encoder->crtc = NULL;
@@ -760,6 +1079,108 @@ nvd0_dac_create(struct drm_connector *connector, struct dcb_entry *dcbe)
760} 1079}
761 1080
762/****************************************************************************** 1081/******************************************************************************
1082 * Audio
1083 *****************************************************************************/
1084static void
1085nvd0_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
1086{
1087 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1088 struct nouveau_connector *nv_connector;
1089 struct drm_device *dev = encoder->dev;
1090 int i, or = nv_encoder->or * 0x30;
1091
1092 nv_connector = nouveau_encoder_connector_get(nv_encoder);
1093 if (!drm_detect_monitor_audio(nv_connector->edid))
1094 return;
1095
1096 nv_mask(dev, 0x10ec10 + or, 0x80000003, 0x80000001);
1097
1098 drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
1099 if (nv_connector->base.eld[0]) {
1100 u8 *eld = nv_connector->base.eld;
1101
1102 for (i = 0; i < eld[2] * 4; i++)
1103 nv_wr32(dev, 0x10ec00 + or, (i << 8) | eld[i]);
1104 for (i = eld[2] * 4; i < 0x60; i++)
1105 nv_wr32(dev, 0x10ec00 + or, (i << 8) | 0x00);
1106
1107 nv_mask(dev, 0x10ec10 + or, 0x80000002, 0x80000002);
1108 }
1109}
1110
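Worked example of the ELD upload above, with hypothetical values: eld[2] holds the baseline ELD block length in 4-byte units, so a sink reporting eld[2] == 3 has 12 payload bytes; the loop writes those and zero-fills offsets 12 through 0x5f. Each nv_wr32 packs the byte index into bits 8 and up of the data port and the payload byte into bits 7:0:

	nv_wr32(dev, 0x10ec00 + or, (11 << 8) | eld[11]);	/* last payload byte */
	nv_wr32(dev, 0x10ec00 + or, (12 << 8) | 0x00);		/* first pad byte */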
1111static void
1112nvd0_audio_disconnect(struct drm_encoder *encoder)
1113{
1114 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1115 struct drm_device *dev = encoder->dev;
1116 int or = nv_encoder->or * 0x30;
1117
1118 nv_mask(dev, 0x10ec10 + or, 0x80000003, 0x80000000);
1119}
1120
1121/******************************************************************************
1122 * HDMI
1123 *****************************************************************************/
1124static void
1125nvd0_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
1126{
1127 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1128 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
1129 struct nouveau_connector *nv_connector;
1130 struct drm_device *dev = encoder->dev;
1131 int head = nv_crtc->index * 0x800;
1132 u32 rekey = 56; /* binary driver, and tegra constant */
1133 u32 max_ac_packet;
1134
1135 nv_connector = nouveau_encoder_connector_get(nv_encoder);
1136 if (!drm_detect_hdmi_monitor(nv_connector->edid))
1137 return;
1138
1139 max_ac_packet = mode->htotal - mode->hdisplay;
1140 max_ac_packet -= rekey;
1141 max_ac_packet -= 18; /* constant from tegra */
1142 max_ac_packet /= 32;
1143
1144 /* AVI InfoFrame */
1145 nv_mask(dev, 0x616714 + head, 0x00000001, 0x00000000);
1146 nv_wr32(dev, 0x61671c + head, 0x000d0282);
1147 nv_wr32(dev, 0x616720 + head, 0x0000006f);
1148 nv_wr32(dev, 0x616724 + head, 0x00000000);
1149 nv_wr32(dev, 0x616728 + head, 0x00000000);
1150 nv_wr32(dev, 0x61672c + head, 0x00000000);
1151 nv_mask(dev, 0x616714 + head, 0x00000001, 0x00000001);
1152
1153 /* ??? InfoFrame? */
1154 nv_mask(dev, 0x6167a4 + head, 0x00000001, 0x00000000);
1155 nv_wr32(dev, 0x6167ac + head, 0x00000010);
1156 nv_mask(dev, 0x6167a4 + head, 0x00000001, 0x00000001);
1157
1158 /* HDMI_CTRL */
1159 nv_mask(dev, 0x616798 + head, 0x401f007f, 0x40000000 | rekey |
1160 max_ac_packet << 16);
1161
1162 /* NFI, audio doesn't work without it though.. */
1163 nv_mask(dev, 0x616548 + head, 0x00000070, 0x00000000);
1164
1165 nvd0_audio_mode_set(encoder, mode);
1166}
1167
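Worked example of the max_ac_packet computation above, for a standard 1080p timing (hdisplay 1920, htotal 2200): (2200 - 1920 - 56 - 18) / 32 = 206 / 32 = 6 after integer division, i.e. roughly how many data-island packets fit in each horizontal blanking period.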
1168static void
1169nvd0_hdmi_disconnect(struct drm_encoder *encoder)
1170{
1171 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1172 struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
1173 struct drm_device *dev = encoder->dev;
1174 int head = nv_crtc->index * 0x800;
1175
1176 nvd0_audio_disconnect(encoder);
1177
1178 nv_mask(dev, 0x616798 + head, 0x40000000, 0x00000000);
1179 nv_mask(dev, 0x6167a4 + head, 0x00000001, 0x00000000);
1180 nv_mask(dev, 0x616714 + head, 0x00000001, 0x00000000);
1181}
1182
1183/******************************************************************************
763 * SOR 1184 * SOR
764 *****************************************************************************/ 1185 *****************************************************************************/
765static void 1186static void
@@ -829,7 +1250,8 @@ static void
829nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode, 1250nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
830 struct drm_display_mode *mode) 1251 struct drm_display_mode *mode)
831{ 1252{
832 struct drm_nouveau_private *dev_priv = encoder->dev->dev_private; 1253 struct drm_device *dev = encoder->dev;
1254 struct drm_nouveau_private *dev_priv = dev->dev_private;
833 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 1255 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
834 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); 1256 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
835 struct nouveau_connector *nv_connector; 1257 struct nouveau_connector *nv_connector;
@@ -852,6 +1274,8 @@ nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
852 or_config = (mode_ctrl & 0x00000f00) >> 8; 1274 or_config = (mode_ctrl & 0x00000f00) >> 8;
853 if (mode->clock >= 165000) 1275 if (mode->clock >= 165000)
854 or_config |= 0x0100; 1276 or_config |= 0x0100;
1277
1278 nvd0_hdmi_mode_set(encoder, mode);
855 break; 1279 break;
856 case OUTPUT_LVDS: 1280 case OUTPUT_LVDS:
857 or_config = (mode_ctrl & 0x00000f00) >> 8; 1281 or_config = (mode_ctrl & 0x00000f00) >> 8;
@@ -861,7 +1285,7 @@ nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
861 if (bios->fp.if_is_24bit) 1285 if (bios->fp.if_is_24bit)
862 or_config |= 0x0200; 1286 or_config |= 0x0200;
863 } else { 1287 } else {
864 if (nv_connector->dcb->type == DCB_CONNECTOR_LVDS_SPWG) { 1288 if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
865 if (((u8 *)nv_connector->edid)[121] == 2) 1289 if (((u8 *)nv_connector->edid)[121] == 2)
866 or_config |= 0x0100; 1290 or_config |= 0x0100;
867 } else 1291 } else
@@ -889,12 +1313,12 @@ nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
889 1313
890 nvd0_sor_dpms(encoder, DRM_MODE_DPMS_ON); 1314 nvd0_sor_dpms(encoder, DRM_MODE_DPMS_ON);
891 1315
892 push = evo_wait(encoder->dev, 0, 4); 1316 push = evo_wait(dev, EVO_MASTER, 4);
893 if (push) { 1317 if (push) {
894 evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 2); 1318 evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 2);
895 evo_data(push, mode_ctrl); 1319 evo_data(push, mode_ctrl);
896 evo_data(push, or_config); 1320 evo_data(push, or_config);
897 evo_kick(push, encoder->dev, 0); 1321 evo_kick(push, dev, EVO_MASTER);
898 } 1322 }
899 1323
900 nv_encoder->crtc = encoder->crtc; 1324 nv_encoder->crtc = encoder->crtc;
@@ -910,15 +1334,17 @@ nvd0_sor_disconnect(struct drm_encoder *encoder)
910 if (nv_encoder->crtc) { 1334 if (nv_encoder->crtc) {
911 nvd0_crtc_prepare(nv_encoder->crtc); 1335 nvd0_crtc_prepare(nv_encoder->crtc);
912 1336
913 push = evo_wait(dev, 0, 4); 1337 push = evo_wait(dev, EVO_MASTER, 4);
914 if (push) { 1338 if (push) {
915 evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 1); 1339 evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 1);
916 evo_data(push, 0x00000000); 1340 evo_data(push, 0x00000000);
917 evo_mthd(push, 0x0080, 1); 1341 evo_mthd(push, 0x0080, 1);
918 evo_data(push, 0x00000000); 1342 evo_data(push, 0x00000000);
919 evo_kick(push, dev, 0); 1343 evo_kick(push, dev, EVO_MASTER);
920 } 1344 }
921 1345
1346 nvd0_hdmi_disconnect(encoder);
1347
922 nv_encoder->crtc = NULL; 1348 nv_encoder->crtc = NULL;
923 nv_encoder->last_dpms = DRM_MODE_DPMS_OFF; 1349 nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
924 } 1350 }
@@ -1159,6 +1585,12 @@ nvd0_display_intr(struct drm_device *dev)
1159 struct nvd0_display *disp = nvd0_display(dev); 1585 struct nvd0_display *disp = nvd0_display(dev);
1160 u32 intr = nv_rd32(dev, 0x610088); 1586 u32 intr = nv_rd32(dev, 0x610088);
1161 1587
1588 if (intr & 0x00000001) {
1589 u32 stat = nv_rd32(dev, 0x61008c);
1590 nv_wr32(dev, 0x61008c, stat);
1591 intr &= ~0x00000001;
1592 }
1593
1162 if (intr & 0x00000002) { 1594 if (intr & 0x00000002) {
1163 u32 stat = nv_rd32(dev, 0x61009c); 1595 u32 stat = nv_rd32(dev, 0x61009c);
1164 int chid = ffs(stat) - 1; 1596 int chid = ffs(stat) - 1;
@@ -1215,38 +1647,29 @@ nvd0_display_intr(struct drm_device *dev)
1215/****************************************************************************** 1647/******************************************************************************
1216 * Init 1648 * Init
1217 *****************************************************************************/ 1649 *****************************************************************************/
1218static void 1650void
1219nvd0_display_fini(struct drm_device *dev) 1651nvd0_display_fini(struct drm_device *dev)
1220{ 1652{
1221 int i; 1653 int i;
1222 1654
1223 /* fini cursors */ 1655 /* fini cursors + overlays + flips */
1224 for (i = 14; i >= 13; i--) { 1656 for (i = 1; i >= 0; i--) {
1225 if (!(nv_rd32(dev, 0x610490 + (i * 0x10)) & 0x00000001)) 1657 evo_fini_pio(dev, EVO_CURS(i));
1226 continue; 1658 evo_fini_pio(dev, EVO_OIMM(i));
1227 1659 evo_fini_dma(dev, EVO_OVLY(i));
1228 nv_mask(dev, 0x610490 + (i * 0x10), 0x00000001, 0x00000000); 1660 evo_fini_dma(dev, EVO_FLIP(i));
1229 nv_wait(dev, 0x610490 + (i * 0x10), 0x00010000, 0x00000000);
1230 nv_mask(dev, 0x610090, 1 << i, 0x00000000);
1231 nv_mask(dev, 0x6100a0, 1 << i, 0x00000000);
1232 } 1661 }
1233 1662
1234 /* fini master */ 1663 /* fini master */
1235 if (nv_rd32(dev, 0x610490) & 0x00000010) { 1664 evo_fini_dma(dev, EVO_MASTER);
1236 nv_mask(dev, 0x610490, 0x00000010, 0x00000000);
1237 nv_mask(dev, 0x610490, 0x00000003, 0x00000000);
1238 nv_wait(dev, 0x610490, 0x80000000, 0x00000000);
1239 nv_mask(dev, 0x610090, 0x00000001, 0x00000000);
1240 nv_mask(dev, 0x6100a0, 0x00000001, 0x00000000);
1241 }
1242} 1665}
1243 1666
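evo_fini_pio()/evo_fini_dma() generalize the open-coded teardown that the left-hand column removes; the deleted cursor loop implies roughly this shape for the PIO variant (a sketch reconstructed from the removed code, not necessarily the patch's exact helper):

static void
evo_fini_pio(struct drm_device *dev, int ch)
{
	/* per-channel control register, 0x10 stride (from the removed loop) */
	if (!(nv_rd32(dev, 0x610490 + (ch * 0x10)) & 0x00000001))
		return;
	nv_mask(dev, 0x610490 + (ch * 0x10), 0x00000001, 0x00000000);
	nv_wait(dev, 0x610490 + (ch * 0x10), 0x00010000, 0x00000000);
	nv_mask(dev, 0x610090, 1 << ch, 0x00000000);
	nv_mask(dev, 0x6100a0, 1 << ch, 0x00000000);
}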
1244int 1667int
1245nvd0_display_init(struct drm_device *dev) 1668nvd0_display_init(struct drm_device *dev)
1246{ 1669{
1247 struct nvd0_display *disp = nvd0_display(dev); 1670 struct nvd0_display *disp = nvd0_display(dev);
1671 int ret, i;
1248 u32 *push; 1672 u32 *push;
1249 int i;
1250 1673
1251 if (nv_rd32(dev, 0x6100ac) & 0x00000100) { 1674 if (nv_rd32(dev, 0x6100ac) & 0x00000100) {
1252 nv_wr32(dev, 0x6100ac, 0x00000100); 1675 nv_wr32(dev, 0x6100ac, 0x00000100);
@@ -1271,7 +1694,7 @@ nvd0_display_init(struct drm_device *dev)
1271 nv_wr32(dev, 0x6301c4 + (i * 0x800), sor); 1694 nv_wr32(dev, 0x6301c4 + (i * 0x800), sor);
1272 } 1695 }
1273 1696
1274 for (i = 0; i < 2; i++) { 1697 for (i = 0; i < dev->mode_config.num_crtc; i++) {
1275 u32 crtc0 = nv_rd32(dev, 0x616104 + (i * 0x800)); 1698 u32 crtc0 = nv_rd32(dev, 0x616104 + (i * 0x800));
1276 u32 crtc1 = nv_rd32(dev, 0x616108 + (i * 0x800)); 1699 u32 crtc1 = nv_rd32(dev, 0x616108 + (i * 0x800));
1277 u32 crtc2 = nv_rd32(dev, 0x61610c + (i * 0x800)); 1700 u32 crtc2 = nv_rd32(dev, 0x61610c + (i * 0x800));
@@ -1285,36 +1708,24 @@ nvd0_display_init(struct drm_device *dev)
1285 nv_mask(dev, 0x6100b0, 0x00000307, 0x00000307); 1708 nv_mask(dev, 0x6100b0, 0x00000307, 0x00000307);
1286 1709
1287 /* init master */ 1710 /* init master */
1288 nv_wr32(dev, 0x610494, (disp->evo[0].handle >> 8) | 3); 1711 ret = evo_init_dma(dev, EVO_MASTER);
1289 nv_wr32(dev, 0x610498, 0x00010000); 1712 if (ret)
1290 nv_wr32(dev, 0x61049c, 0x00000001); 1713 goto error;
1291 nv_mask(dev, 0x610490, 0x00000010, 0x00000010); 1714
1292 nv_wr32(dev, 0x640000, 0x00000000); 1715 /* init flips + overlays + cursors */
1293 nv_wr32(dev, 0x610490, 0x01000013); 1716 for (i = 0; i < dev->mode_config.num_crtc; i++) {
1294 if (!nv_wait(dev, 0x610490, 0x80000000, 0x00000000)) { 1717 if ((ret = evo_init_dma(dev, EVO_FLIP(i))) ||
1295 NV_ERROR(dev, "PDISP: master 0x%08x\n", 1718 (ret = evo_init_dma(dev, EVO_OVLY(i))) ||
1296 nv_rd32(dev, 0x610490)); 1719 (ret = evo_init_pio(dev, EVO_OIMM(i))) ||
1297 return -EBUSY; 1720 (ret = evo_init_pio(dev, EVO_CURS(i))))
1721 goto error;
1298 } 1722 }
1299 nv_mask(dev, 0x610090, 0x00000001, 0x00000001);
1300 nv_mask(dev, 0x6100a0, 0x00000001, 0x00000001);
1301 1723
1302 /* init cursors */ 1724 push = evo_wait(dev, EVO_MASTER, 32);
1303 for (i = 13; i <= 14; i++) { 1725 if (!push) {
1304 nv_wr32(dev, 0x610490 + (i * 0x10), 0x00000001); 1726 ret = -EBUSY;
1305 if (!nv_wait(dev, 0x610490 + (i * 0x10), 0x00010000, 0x00010000)) { 1727 goto error;
1306 NV_ERROR(dev, "PDISP: curs%d 0x%08x\n", i,
1307 nv_rd32(dev, 0x610490 + (i * 0x10)));
1308 return -EBUSY;
1309 }
1310
1311 nv_mask(dev, 0x610090, 1 << i, 1 << i);
1312 nv_mask(dev, 0x6100a0, 1 << i, 1 << i);
1313 } 1728 }
1314
1315 push = evo_wait(dev, 0, 32);
1316 if (!push)
1317 return -EBUSY;
1318 evo_mthd(push, 0x0088, 1); 1729 evo_mthd(push, 0x0088, 1);
1319 evo_data(push, NvEvoSync); 1730 evo_data(push, NvEvoSync);
1320 evo_mthd(push, 0x0084, 1); 1731 evo_mthd(push, 0x0084, 1);
@@ -1323,9 +1734,12 @@ nvd0_display_init(struct drm_device *dev)
1323 evo_data(push, 0x80000000); 1734 evo_data(push, 0x80000000);
1324 evo_mthd(push, 0x008c, 1); 1735 evo_mthd(push, 0x008c, 1);
1325 evo_data(push, 0x00000000); 1736 evo_data(push, 0x00000000);
1326 evo_kick(push, dev, 0); 1737 evo_kick(push, dev, EVO_MASTER);
1327 1738
1328 return 0; 1739error:
1740 if (ret)
1741 nvd0_display_fini(dev);
1742 return ret;
1329} 1743}
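Similarly, evo_init_dma() folds the removed master-channel bring-up into a per-channel helper. Reconstructing from the deleted code (channel control at 0x610490 + ch * 0x10, put pointer at 0x640000 + ch * 0x1000), a hypothetical sketch:

static int
evo_init_dma(struct drm_device *dev, int ch)
{
	struct nvd0_display *disp = nvd0_display(dev);
	u32 ctrl = 0x610490 + (ch * 0x10);

	nv_wr32(dev, ctrl + 0x4, (disp->evo[ch].handle >> 8) | 3);
	nv_wr32(dev, ctrl + 0x8, 0x00010000);
	nv_wr32(dev, ctrl + 0xc, 0x00000001);
	nv_mask(dev, ctrl, 0x00000010, 0x00000010);
	nv_wr32(dev, 0x640000 + (ch * 0x1000), 0x00000000);
	nv_wr32(dev, ctrl, 0x01000013);
	if (!nv_wait(dev, ctrl, 0x80000000, 0x00000000)) {
		NV_ERROR(dev, "PDISP: ch%d 0x%08x\n", ch, nv_rd32(dev, ctrl));
		return -EBUSY;
	}
	nv_mask(dev, 0x610090, 1 << ch, 1 << ch);
	nv_mask(dev, 0x6100a0, 1 << ch, 1 << ch);
	return 0;
}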
1330 1744
1331void 1745void
@@ -1334,11 +1748,16 @@ nvd0_display_destroy(struct drm_device *dev)
1334 struct drm_nouveau_private *dev_priv = dev->dev_private; 1748 struct drm_nouveau_private *dev_priv = dev->dev_private;
1335 struct nvd0_display *disp = nvd0_display(dev); 1749 struct nvd0_display *disp = nvd0_display(dev);
1336 struct pci_dev *pdev = dev->pdev; 1750 struct pci_dev *pdev = dev->pdev;
1751 int i;
1337 1752
1338 nvd0_display_fini(dev); 1753 for (i = 0; i < EVO_DMA_NR; i++) {
1754 struct evo *evo = &disp->evo[i];
1755 pci_free_consistent(pdev, PAGE_SIZE, evo->ptr, evo->handle);
1756 }
1339 1757
1340 pci_free_consistent(pdev, PAGE_SIZE, disp->evo[0].ptr, disp->evo[0].handle);
1341 nouveau_gpuobj_ref(NULL, &disp->mem); 1758 nouveau_gpuobj_ref(NULL, &disp->mem);
1759 nouveau_bo_unmap(disp->sync);
1760 nouveau_bo_ref(NULL, &disp->sync);
1342 nouveau_irq_unregister(dev, 26); 1761 nouveau_irq_unregister(dev, 26);
1343 1762
1344 dev_priv->engine.display.priv = NULL; 1763 dev_priv->engine.display.priv = NULL;
@@ -1410,61 +1829,83 @@ nvd0_display_create(struct drm_device *dev)
1410 tasklet_init(&disp->tasklet, nvd0_display_bh, (unsigned long)dev); 1829 tasklet_init(&disp->tasklet, nvd0_display_bh, (unsigned long)dev);
1411 nouveau_irq_register(dev, 26, nvd0_display_intr); 1830 nouveau_irq_register(dev, 26, nvd0_display_intr);
1412 1831
1832 /* small shared memory area we use for notifiers and semaphores */
1833 ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
1834 0, 0x0000, &disp->sync);
1835 if (!ret) {
1836 ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM);
1837 if (!ret)
1838 ret = nouveau_bo_map(disp->sync);
1839 if (ret)
1840 nouveau_bo_ref(NULL, &disp->sync);
1841 }
1842
1843 if (ret)
1844 goto out;
1845
1413 /* hash table and dma objects for the memory areas we care about */ 1846 /* hash table and dma objects for the memory areas we care about */
1414 ret = nouveau_gpuobj_new(dev, NULL, 0x4000, 0x10000, 1847 ret = nouveau_gpuobj_new(dev, NULL, 0x4000, 0x10000,
1415 NVOBJ_FLAG_ZERO_ALLOC, &disp->mem); 1848 NVOBJ_FLAG_ZERO_ALLOC, &disp->mem);
1416 if (ret) 1849 if (ret)
1417 goto out; 1850 goto out;
1418 1851
1419 nv_wo32(disp->mem, 0x1000, 0x00000049); 1852 /* create evo dma channels */
1420 nv_wo32(disp->mem, 0x1004, (disp->mem->vinst + 0x2000) >> 8); 1853 for (i = 0; i < EVO_DMA_NR; i++) {
1421 nv_wo32(disp->mem, 0x1008, (disp->mem->vinst + 0x2fff) >> 8); 1854 struct evo *evo = &disp->evo[i];
1422 nv_wo32(disp->mem, 0x100c, 0x00000000); 1855 u64 offset = disp->sync->bo.offset;
1423 nv_wo32(disp->mem, 0x1010, 0x00000000); 1856 u32 dmao = 0x1000 + (i * 0x100);
1424 nv_wo32(disp->mem, 0x1014, 0x00000000); 1857 u32 hash = 0x0000 + (i * 0x040);
1425 nv_wo32(disp->mem, 0x0000, NvEvoSync); 1858
1426 nv_wo32(disp->mem, 0x0004, (0x1000 << 9) | 0x00000001); 1859 evo->idx = i;
1427 1860 evo->sem.offset = EVO_SYNC(evo->idx, 0x00);
1428 nv_wo32(disp->mem, 0x1020, 0x00000049); 1861 evo->ptr = pci_alloc_consistent(pdev, PAGE_SIZE, &evo->handle);
1429 nv_wo32(disp->mem, 0x1024, 0x00000000); 1862 if (!evo->ptr) {
1430 nv_wo32(disp->mem, 0x1028, (dev_priv->vram_size - 1) >> 8); 1863 ret = -ENOMEM;
1431 nv_wo32(disp->mem, 0x102c, 0x00000000); 1864 goto out;
1432 nv_wo32(disp->mem, 0x1030, 0x00000000); 1865 }
1433 nv_wo32(disp->mem, 0x1034, 0x00000000);
1434 nv_wo32(disp->mem, 0x0008, NvEvoVRAM);
1435 nv_wo32(disp->mem, 0x000c, (0x1020 << 9) | 0x00000001);
1436
1437 nv_wo32(disp->mem, 0x1040, 0x00000009);
1438 nv_wo32(disp->mem, 0x1044, 0x00000000);
1439 nv_wo32(disp->mem, 0x1048, (dev_priv->vram_size - 1) >> 8);
1440 nv_wo32(disp->mem, 0x104c, 0x00000000);
1441 nv_wo32(disp->mem, 0x1050, 0x00000000);
1442 nv_wo32(disp->mem, 0x1054, 0x00000000);
1443 nv_wo32(disp->mem, 0x0010, NvEvoVRAM_LP);
1444 nv_wo32(disp->mem, 0x0014, (0x1040 << 9) | 0x00000001);
1445
1446 nv_wo32(disp->mem, 0x1060, 0x0fe00009);
1447 nv_wo32(disp->mem, 0x1064, 0x00000000);
1448 nv_wo32(disp->mem, 0x1068, (dev_priv->vram_size - 1) >> 8);
1449 nv_wo32(disp->mem, 0x106c, 0x00000000);
1450 nv_wo32(disp->mem, 0x1070, 0x00000000);
1451 nv_wo32(disp->mem, 0x1074, 0x00000000);
1452 nv_wo32(disp->mem, 0x0018, NvEvoFB32);
1453 nv_wo32(disp->mem, 0x001c, (0x1060 << 9) | 0x00000001);
1454
1455 pinstmem->flush(dev);
1456 1866
1457 /* push buffers for evo channels */ 1867 nv_wo32(disp->mem, dmao + 0x00, 0x00000049);
1458 disp->evo[0].ptr = 1868 nv_wo32(disp->mem, dmao + 0x04, (offset + 0x0000) >> 8);
1459 pci_alloc_consistent(pdev, PAGE_SIZE, &disp->evo[0].handle); 1869 nv_wo32(disp->mem, dmao + 0x08, (offset + 0x0fff) >> 8);
1460 if (!disp->evo[0].ptr) { 1870 nv_wo32(disp->mem, dmao + 0x0c, 0x00000000);
1461 ret = -ENOMEM; 1871 nv_wo32(disp->mem, dmao + 0x10, 0x00000000);
1462 goto out; 1872 nv_wo32(disp->mem, dmao + 0x14, 0x00000000);
1873 nv_wo32(disp->mem, hash + 0x00, NvEvoSync);
1874 nv_wo32(disp->mem, hash + 0x04, 0x00000001 | (i << 27) |
1875 ((dmao + 0x00) << 9));
1876
1877 nv_wo32(disp->mem, dmao + 0x20, 0x00000049);
1878 nv_wo32(disp->mem, dmao + 0x24, 0x00000000);
1879 nv_wo32(disp->mem, dmao + 0x28, (dev_priv->vram_size - 1) >> 8);
1880 nv_wo32(disp->mem, dmao + 0x2c, 0x00000000);
1881 nv_wo32(disp->mem, dmao + 0x30, 0x00000000);
1882 nv_wo32(disp->mem, dmao + 0x34, 0x00000000);
1883 nv_wo32(disp->mem, hash + 0x08, NvEvoVRAM);
1884 nv_wo32(disp->mem, hash + 0x0c, 0x00000001 | (i << 27) |
1885 ((dmao + 0x20) << 9));
1886
1887 nv_wo32(disp->mem, dmao + 0x40, 0x00000009);
1888 nv_wo32(disp->mem, dmao + 0x44, 0x00000000);
1889 nv_wo32(disp->mem, dmao + 0x48, (dev_priv->vram_size - 1) >> 8);
1890 nv_wo32(disp->mem, dmao + 0x4c, 0x00000000);
1891 nv_wo32(disp->mem, dmao + 0x50, 0x00000000);
1892 nv_wo32(disp->mem, dmao + 0x54, 0x00000000);
1893 nv_wo32(disp->mem, hash + 0x10, NvEvoVRAM_LP);
1894 nv_wo32(disp->mem, hash + 0x14, 0x00000001 | (i << 27) |
1895 ((dmao + 0x40) << 9));
1896
1897 nv_wo32(disp->mem, dmao + 0x60, 0x0fe00009);
1898 nv_wo32(disp->mem, dmao + 0x64, 0x00000000);
1899 nv_wo32(disp->mem, dmao + 0x68, (dev_priv->vram_size - 1) >> 8);
1900 nv_wo32(disp->mem, dmao + 0x6c, 0x00000000);
1901 nv_wo32(disp->mem, dmao + 0x70, 0x00000000);
1902 nv_wo32(disp->mem, dmao + 0x74, 0x00000000);
1903 nv_wo32(disp->mem, hash + 0x18, NvEvoFB32);
1904 nv_wo32(disp->mem, hash + 0x1c, 0x00000001 | (i << 27) |
1905 ((dmao + 0x60) << 9));
1463 } 1906 }
1464 1907
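Worked example of the hash/dmao layout above, for channel i = 1: dmao = 0x1000 + 0x100 = 0x1100 and hash = 0x040, so the NvEvoVRAM entry lands at hash offsets 0x48/0x4c with a context word of 0x00000001 | (1 << 27) | (0x1120 << 9) -- presumably object-valid, bound to channel 1, with the DMA object instance at dmao + 0x20.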
1465 ret = nvd0_display_init(dev); 1908 pinstmem->flush(dev);
1466 if (ret)
1467 goto out;
1468 1909
1469out: 1910out:
1470 if (ret) 1911 if (ret)
diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c
index 4c8796ba6dd..6a5f4395838 100644
--- a/drivers/gpu/drm/r128/r128_drv.c
+++ b/drivers/gpu/drm/r128/r128_drv.c
@@ -42,6 +42,20 @@ static struct pci_device_id pciidlist[] = {
42 r128_PCI_IDS 42 r128_PCI_IDS
43}; 43};
44 44
45static const struct file_operations r128_driver_fops = {
46 .owner = THIS_MODULE,
47 .open = drm_open,
48 .release = drm_release,
49 .unlocked_ioctl = drm_ioctl,
50 .mmap = drm_mmap,
51 .poll = drm_poll,
52 .fasync = drm_fasync,
53#ifdef CONFIG_COMPAT
54 .compat_ioctl = r128_compat_ioctl,
55#endif
56 .llseek = noop_llseek,
57};
58
45static struct drm_driver driver = { 59static struct drm_driver driver = {
46 .driver_features = 60 .driver_features =
47 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | 61 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
@@ -60,21 +74,7 @@ static struct drm_driver driver = {
60 .reclaim_buffers = drm_core_reclaim_buffers, 74 .reclaim_buffers = drm_core_reclaim_buffers,
61 .ioctls = r128_ioctls, 75 .ioctls = r128_ioctls,
62 .dma_ioctl = r128_cce_buffers, 76 .dma_ioctl = r128_cce_buffers,
63 .fops = { 77 .fops = &r128_driver_fops,
64 .owner = THIS_MODULE,
65 .open = drm_open,
66 .release = drm_release,
67 .unlocked_ioctl = drm_ioctl,
68 .mmap = drm_mmap,
69 .poll = drm_poll,
70 .fasync = drm_fasync,
71#ifdef CONFIG_COMPAT
72 .compat_ioctl = r128_compat_ioctl,
73#endif
74 .llseek = noop_llseek,
75 },
76
77
78 .name = DRIVER_NAME, 78 .name = DRIVER_NAME,
79 .desc = DRIVER_DESC, 79 .desc = DRIVER_DESC,
80 .date = DRIVER_DATE, 80 .date = DRIVER_DATE,
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index cf8b4bc3e73..2139fe893ec 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -70,7 +70,8 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
70 r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \ 70 r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
71 r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \ 71 r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
72 evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \ 72 evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \
73 radeon_trace_points.o ni.o cayman_blit_shaders.o atombios_encoders.o 73 radeon_trace_points.o ni.o cayman_blit_shaders.o atombios_encoders.o \
74 radeon_semaphore.o radeon_sa.o
74 75
75radeon-$(CONFIG_COMPAT) += radeon_ioc32.o 76radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
76radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o 77radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
@@ -78,4 +79,4 @@ radeon-$(CONFIG_ACPI) += radeon_acpi.o
78 79
79obj-$(CONFIG_DRM_RADEON)+= radeon.o 80obj-$(CONFIG_DRM_RADEON)+= radeon.o
80 81
81CFLAGS_radeon_trace_points.o := -I$(src) \ No newline at end of file 82CFLAGS_radeon_trace_points.o := -I$(src)
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 14cc88aaf3a..d1bd239cd9e 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -665,6 +665,8 @@ static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
665 SDEBUG(" count: %d\n", count); 665 SDEBUG(" count: %d\n", count);
666 if (arg == ATOM_UNIT_MICROSEC) 666 if (arg == ATOM_UNIT_MICROSEC)
667 udelay(count); 667 udelay(count);
668 else if (!drm_can_sleep())
669 mdelay(count);
668 else 670 else
669 msleep(count); 671 msleep(count);
670} 672}
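For context, drm_can_sleep() is the helper this hunk relies on; around this time it was defined in drmP.h roughly as below (quoted from memory, so treat as an approximation):

#define drm_can_sleep()	(!in_atomic() && !in_dbg_master() && !irqs_disabled())

so the atom delay op busy-waits with mdelay() only when called from a context where msleep() would be unsafe.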
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 2b97262e3ab..0fda830ef80 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -554,7 +554,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
554 if (encoder->crtc == crtc) { 554 if (encoder->crtc == crtc) {
555 radeon_encoder = to_radeon_encoder(encoder); 555 radeon_encoder = to_radeon_encoder(encoder);
556 connector = radeon_get_connector_for_encoder(encoder); 556 connector = radeon_get_connector_for_encoder(encoder);
557 if (connector) 557 if (connector && connector->display_info.bpc)
558 bpc = connector->display_info.bpc; 558 bpc = connector->display_info.bpc;
559 encoder_mode = atombios_get_encoder_mode(encoder); 559 encoder_mode = atombios_get_encoder_mode(encoder);
560 if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) || 560 if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
@@ -1184,7 +1184,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
1184 WREG32(EVERGREEN_GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width); 1184 WREG32(EVERGREEN_GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width);
1185 WREG32(EVERGREEN_GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height); 1185 WREG32(EVERGREEN_GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height);
1186 1186
1187 fb_pitch_pixels = target_fb->pitch / (target_fb->bits_per_pixel / 8); 1187 fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
1188 WREG32(EVERGREEN_GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels); 1188 WREG32(EVERGREEN_GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
1189 WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 1); 1189 WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
1190 1190
@@ -1353,7 +1353,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
1353 WREG32(AVIVO_D1GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width); 1353 WREG32(AVIVO_D1GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width);
1354 WREG32(AVIVO_D1GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height); 1354 WREG32(AVIVO_D1GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height);
1355 1355
1356 fb_pitch_pixels = target_fb->pitch / (target_fb->bits_per_pixel / 8); 1356 fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
1357 WREG32(AVIVO_D1GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels); 1357 WREG32(AVIVO_D1GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
1358 WREG32(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 1); 1358 WREG32(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
1359 1359
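Worked example of the new per-plane pitch lookup above (assuming no pitch padding): a 1920-wide 32bpp framebuffer has pitches[0] = 1920 * 4 = 7680 bytes, so fb_pitch_pixels = 7680 / (32 / 8) = 1920.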
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 39c04c1b847..f1f06ca9f1f 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -409,8 +409,6 @@ int
409atombios_get_encoder_mode(struct drm_encoder *encoder) 409atombios_get_encoder_mode(struct drm_encoder *encoder)
410{ 410{
411 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 411 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
412 struct drm_device *dev = encoder->dev;
413 struct radeon_device *rdev = dev->dev_private;
414 struct drm_connector *connector; 412 struct drm_connector *connector;
415 struct radeon_connector *radeon_connector; 413 struct radeon_connector *radeon_connector;
416 struct radeon_connector_atom_dig *dig_connector; 414 struct radeon_connector_atom_dig *dig_connector;
@@ -434,13 +432,10 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
434 switch (connector->connector_type) { 432 switch (connector->connector_type) {
435 case DRM_MODE_CONNECTOR_DVII: 433 case DRM_MODE_CONNECTOR_DVII:
436 case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */ 434 case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
437 if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) { 435 if (drm_detect_monitor_audio(radeon_connector->edid) &&
438 /* fix me */ 436 radeon_audio)
439 if (ASIC_IS_DCE4(rdev)) 437 return ATOM_ENCODER_MODE_HDMI;
440 return ATOM_ENCODER_MODE_DVI; 438 else if (radeon_connector->use_digital)
441 else
442 return ATOM_ENCODER_MODE_HDMI;
443 } else if (radeon_connector->use_digital)
444 return ATOM_ENCODER_MODE_DVI; 439 return ATOM_ENCODER_MODE_DVI;
445 else 440 else
446 return ATOM_ENCODER_MODE_CRT; 441 return ATOM_ENCODER_MODE_CRT;
@@ -448,13 +443,10 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
448 case DRM_MODE_CONNECTOR_DVID: 443 case DRM_MODE_CONNECTOR_DVID:
449 case DRM_MODE_CONNECTOR_HDMIA: 444 case DRM_MODE_CONNECTOR_HDMIA:
450 default: 445 default:
451 if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) { 446 if (drm_detect_monitor_audio(radeon_connector->edid) &&
452 /* fix me */ 447 radeon_audio)
453 if (ASIC_IS_DCE4(rdev)) 448 return ATOM_ENCODER_MODE_HDMI;
454 return ATOM_ENCODER_MODE_DVI; 449 else
455 else
456 return ATOM_ENCODER_MODE_HDMI;
457 } else
458 return ATOM_ENCODER_MODE_DVI; 450 return ATOM_ENCODER_MODE_DVI;
459 break; 451 break;
460 case DRM_MODE_CONNECTOR_LVDS: 452 case DRM_MODE_CONNECTOR_LVDS:
@@ -465,13 +457,10 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
465 if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || 457 if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
466 (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) 458 (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
467 return ATOM_ENCODER_MODE_DP; 459 return ATOM_ENCODER_MODE_DP;
468 else if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) { 460 else if (drm_detect_monitor_audio(radeon_connector->edid) &&
469 /* fix me */ 461 radeon_audio)
470 if (ASIC_IS_DCE4(rdev)) 462 return ATOM_ENCODER_MODE_HDMI;
471 return ATOM_ENCODER_MODE_DVI; 463 else
472 else
473 return ATOM_ENCODER_MODE_HDMI;
474 } else
475 return ATOM_ENCODER_MODE_DVI; 464 return ATOM_ENCODER_MODE_DVI;
476 break; 465 break;
477 case DRM_MODE_CONNECTOR_eDP: 466 case DRM_MODE_CONNECTOR_eDP:
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 92c9628c572..636660fca8c 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -40,6 +40,8 @@
40static void evergreen_gpu_init(struct radeon_device *rdev); 40static void evergreen_gpu_init(struct radeon_device *rdev);
41void evergreen_fini(struct radeon_device *rdev); 41void evergreen_fini(struct radeon_device *rdev);
42void evergreen_pcie_gen2_enable(struct radeon_device *rdev); 42void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
43extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
44 int ring, u32 cp_int_cntl);
43 45
44void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev) 46void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
45{ 47{
@@ -1311,18 +1313,20 @@ void evergreen_mc_program(struct radeon_device *rdev)
1311 */ 1313 */
1312void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) 1314void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
1313{ 1315{
1316 struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
1317
1314 /* set to DX10/11 mode */ 1318 /* set to DX10/11 mode */
1315 radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0)); 1319 radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
1316 radeon_ring_write(rdev, 1); 1320 radeon_ring_write(ring, 1);
1317 /* FIXME: implement */ 1321 /* FIXME: implement */
1318 radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); 1322 radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
1319 radeon_ring_write(rdev, 1323 radeon_ring_write(ring,
1320#ifdef __BIG_ENDIAN 1324#ifdef __BIG_ENDIAN
1321 (2 << 0) | 1325 (2 << 0) |
1322#endif 1326#endif
1323 (ib->gpu_addr & 0xFFFFFFFC)); 1327 (ib->gpu_addr & 0xFFFFFFFC));
1324 radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF); 1328 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
1325 radeon_ring_write(rdev, ib->length_dw); 1329 radeon_ring_write(ring, ib->length_dw);
1326} 1330}
1327 1331
1328 1332
@@ -1360,71 +1364,73 @@ static int evergreen_cp_load_microcode(struct radeon_device *rdev)
1360 1364
1361static int evergreen_cp_start(struct radeon_device *rdev) 1365static int evergreen_cp_start(struct radeon_device *rdev)
1362{ 1366{
1367 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
1363 int r, i; 1368 int r, i;
1364 uint32_t cp_me; 1369 uint32_t cp_me;
1365 1370
1366 r = radeon_ring_lock(rdev, 7); 1371 r = radeon_ring_lock(rdev, ring, 7);
1367 if (r) { 1372 if (r) {
1368 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r); 1373 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
1369 return r; 1374 return r;
1370 } 1375 }
1371 radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5)); 1376 radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
1372 radeon_ring_write(rdev, 0x1); 1377 radeon_ring_write(ring, 0x1);
1373 radeon_ring_write(rdev, 0x0); 1378 radeon_ring_write(ring, 0x0);
1374 radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1); 1379 radeon_ring_write(ring, rdev->config.evergreen.max_hw_contexts - 1);
1375 radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1)); 1380 radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
1376 radeon_ring_write(rdev, 0); 1381 radeon_ring_write(ring, 0);
1377 radeon_ring_write(rdev, 0); 1382 radeon_ring_write(ring, 0);
1378 radeon_ring_unlock_commit(rdev); 1383 radeon_ring_unlock_commit(rdev, ring);
1379 1384
1380 cp_me = 0xff; 1385 cp_me = 0xff;
1381 WREG32(CP_ME_CNTL, cp_me); 1386 WREG32(CP_ME_CNTL, cp_me);
1382 1387
1383 r = radeon_ring_lock(rdev, evergreen_default_size + 19); 1388 r = radeon_ring_lock(rdev, ring, evergreen_default_size + 19);
1384 if (r) { 1389 if (r) {
1385 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r); 1390 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
1386 return r; 1391 return r;
1387 } 1392 }
1388 1393
1389 /* setup clear context state */ 1394 /* setup clear context state */
1390 radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); 1395 radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1391 radeon_ring_write(rdev, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE); 1396 radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
1392 1397
1393 for (i = 0; i < evergreen_default_size; i++) 1398 for (i = 0; i < evergreen_default_size; i++)
1394 radeon_ring_write(rdev, evergreen_default_state[i]); 1399 radeon_ring_write(ring, evergreen_default_state[i]);
1395 1400
1396 radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); 1401 radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1397 radeon_ring_write(rdev, PACKET3_PREAMBLE_END_CLEAR_STATE); 1402 radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
1398 1403
1399 /* set clear context state */ 1404 /* set clear context state */
1400 radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0)); 1405 radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
1401 radeon_ring_write(rdev, 0); 1406 radeon_ring_write(ring, 0);
1402 1407
1403 /* SQ_VTX_BASE_VTX_LOC */ 1408 /* SQ_VTX_BASE_VTX_LOC */
1404 radeon_ring_write(rdev, 0xc0026f00); 1409 radeon_ring_write(ring, 0xc0026f00);
1405 radeon_ring_write(rdev, 0x00000000); 1410 radeon_ring_write(ring, 0x00000000);
1406 radeon_ring_write(rdev, 0x00000000); 1411 radeon_ring_write(ring, 0x00000000);
1407 radeon_ring_write(rdev, 0x00000000); 1412 radeon_ring_write(ring, 0x00000000);
1408 1413
1409 /* Clear consts */ 1414 /* Clear consts */
1410 radeon_ring_write(rdev, 0xc0036f00); 1415 radeon_ring_write(ring, 0xc0036f00);
1411 radeon_ring_write(rdev, 0x00000bc4); 1416 radeon_ring_write(ring, 0x00000bc4);
1412 radeon_ring_write(rdev, 0xffffffff); 1417 radeon_ring_write(ring, 0xffffffff);
1413 radeon_ring_write(rdev, 0xffffffff); 1418 radeon_ring_write(ring, 0xffffffff);
1414 radeon_ring_write(rdev, 0xffffffff); 1419 radeon_ring_write(ring, 0xffffffff);
1415 1420
1416 radeon_ring_write(rdev, 0xc0026900); 1421 radeon_ring_write(ring, 0xc0026900);
1417 radeon_ring_write(rdev, 0x00000316); 1422 radeon_ring_write(ring, 0x00000316);
1418 radeon_ring_write(rdev, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */ 1423 radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
1419 radeon_ring_write(rdev, 0x00000010); /* */ 1424 radeon_ring_write(ring, 0x00000010); /* */
1420 1425
1421 radeon_ring_unlock_commit(rdev); 1426 radeon_ring_unlock_commit(rdev, ring);
1422 1427
1423 return 0; 1428 return 0;
1424} 1429}
1425 1430
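The hunks above and below apply one mechanical rule throughout evergreen.c: ring helpers that took rdev now take a struct radeon_ring fetched from the new rdev->ring[] array, and the lock/commit calls take both. In sketch form:

	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	r = radeon_ring_lock(rdev, ring, 2);	/* was radeon_ring_lock(rdev, 2) */
	radeon_ring_write(ring, 0x80000000);	/* PACKET2 NOP; was ...write(rdev, ...) */
	radeon_ring_unlock_commit(rdev, ring);	/* was ...unlock_commit(rdev) */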
1426int evergreen_cp_resume(struct radeon_device *rdev) 1431int evergreen_cp_resume(struct radeon_device *rdev)
1427{ 1432{
1433 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
1428 u32 tmp; 1434 u32 tmp;
1429 u32 rb_bufsz; 1435 u32 rb_bufsz;
1430 int r; 1436 int r;
@@ -1442,13 +1448,13 @@ int evergreen_cp_resume(struct radeon_device *rdev)
1442 RREG32(GRBM_SOFT_RESET); 1448 RREG32(GRBM_SOFT_RESET);
1443 1449
1444 /* Set ring buffer size */ 1450 /* Set ring buffer size */
1445 rb_bufsz = drm_order(rdev->cp.ring_size / 8); 1451 rb_bufsz = drm_order(ring->ring_size / 8);
1446 tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; 1452 tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
1447#ifdef __BIG_ENDIAN 1453#ifdef __BIG_ENDIAN
1448 tmp |= BUF_SWAP_32BIT; 1454 tmp |= BUF_SWAP_32BIT;
1449#endif 1455#endif
1450 WREG32(CP_RB_CNTL, tmp); 1456 WREG32(CP_RB_CNTL, tmp);
1451 WREG32(CP_SEM_WAIT_TIMER, 0x4); 1457 WREG32(CP_SEM_WAIT_TIMER, 0x0);
1452 1458
1453 /* Set the write pointer delay */ 1459 /* Set the write pointer delay */
1454 WREG32(CP_RB_WPTR_DELAY, 0); 1460 WREG32(CP_RB_WPTR_DELAY, 0);
@@ -1456,8 +1462,8 @@ int evergreen_cp_resume(struct radeon_device *rdev)
1456 /* Initialize the ring buffer's read and write pointers */ 1462 /* Initialize the ring buffer's read and write pointers */
1457 WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA); 1463 WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
1458 WREG32(CP_RB_RPTR_WR, 0); 1464 WREG32(CP_RB_RPTR_WR, 0);
1459 rdev->cp.wptr = 0; 1465 ring->wptr = 0;
1460 WREG32(CP_RB_WPTR, rdev->cp.wptr); 1466 WREG32(CP_RB_WPTR, ring->wptr);
1461 1467
1462 /* set the wb address whether it's enabled or not */ 1468
1463 WREG32(CP_RB_RPTR_ADDR, 1469 WREG32(CP_RB_RPTR_ADDR,
@@ -1475,16 +1481,16 @@ int evergreen_cp_resume(struct radeon_device *rdev)
1475 mdelay(1); 1481 mdelay(1);
1476 WREG32(CP_RB_CNTL, tmp); 1482 WREG32(CP_RB_CNTL, tmp);
1477 1483
1478 WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8); 1484 WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
1479 WREG32(CP_DEBUG, (1 << 27) | (1 << 28)); 1485 WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
1480 1486
1481 rdev->cp.rptr = RREG32(CP_RB_RPTR); 1487 ring->rptr = RREG32(CP_RB_RPTR);
1482 1488
1483 evergreen_cp_start(rdev); 1489 evergreen_cp_start(rdev);
1484 rdev->cp.ready = true; 1490 ring->ready = true;
1485 r = radeon_ring_test(rdev); 1491 r = radeon_ring_test(rdev, ring);
1486 if (r) { 1492 if (r) {
1487 rdev->cp.ready = false; 1493 ring->ready = false;
1488 return r; 1494 return r;
1489 } 1495 }
1490 return 0; 1496 return 0;
@@ -2353,7 +2359,7 @@ int evergreen_mc_init(struct radeon_device *rdev)
2353 return 0; 2359 return 0;
2354} 2360}
2355 2361
2356bool evergreen_gpu_is_lockup(struct radeon_device *rdev) 2362bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
2357{ 2363{
2358 u32 srbm_status; 2364 u32 srbm_status;
2359 u32 grbm_status; 2365 u32 grbm_status;
@@ -2366,19 +2372,19 @@ bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
2366 grbm_status_se0 = RREG32(GRBM_STATUS_SE0); 2372 grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
2367 grbm_status_se1 = RREG32(GRBM_STATUS_SE1); 2373 grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
2368 if (!(grbm_status & GUI_ACTIVE)) { 2374 if (!(grbm_status & GUI_ACTIVE)) {
2369 r100_gpu_lockup_update(lockup, &rdev->cp); 2375 r100_gpu_lockup_update(lockup, ring);
2370 return false; 2376 return false;
2371 } 2377 }
2372 /* force CP activities */ 2378 /* force CP activities */
2373 r = radeon_ring_lock(rdev, 2); 2379 r = radeon_ring_lock(rdev, ring, 2);
2374 if (!r) { 2380 if (!r) {
2375 /* PACKET2 NOP */ 2381 /* PACKET2 NOP */
2376 radeon_ring_write(rdev, 0x80000000); 2382 radeon_ring_write(ring, 0x80000000);
2377 radeon_ring_write(rdev, 0x80000000); 2383 radeon_ring_write(ring, 0x80000000);
2378 radeon_ring_unlock_commit(rdev); 2384 radeon_ring_unlock_commit(rdev, ring);
2379 } 2385 }
2380 rdev->cp.rptr = RREG32(CP_RB_RPTR); 2386 ring->rptr = RREG32(CP_RB_RPTR);
2381 return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp); 2387 return r100_gpu_cp_is_lockup(rdev, lockup, ring);
2382} 2388}
2383 2389
2384static int evergreen_gpu_soft_reset(struct radeon_device *rdev) 2390static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
@@ -2470,7 +2476,13 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
2470{ 2476{
2471 u32 tmp; 2477 u32 tmp;
2472 2478
2473 WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); 2479 if (rdev->family >= CHIP_CAYMAN) {
2480 cayman_cp_int_cntl_setup(rdev, 0,
2481 CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
2482 cayman_cp_int_cntl_setup(rdev, 1, 0);
2483 cayman_cp_int_cntl_setup(rdev, 2, 0);
2484 } else
2485 WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
2474 WREG32(GRBM_INT_CNTL, 0); 2486 WREG32(GRBM_INT_CNTL, 0);
2475 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); 2487 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
2476 WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); 2488 WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
@@ -2515,6 +2527,7 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
2515int evergreen_irq_set(struct radeon_device *rdev) 2527int evergreen_irq_set(struct radeon_device *rdev)
2516{ 2528{
2517 u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE; 2529 u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
2530 u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
2518 u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; 2531 u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
2519 u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6; 2532 u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
2520 u32 grbm_int_cntl = 0; 2533 u32 grbm_int_cntl = 0;
@@ -2539,11 +2552,28 @@ int evergreen_irq_set(struct radeon_device *rdev)
2539 hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN; 2552 hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
2540 hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN; 2553 hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
2541 2554
2542 if (rdev->irq.sw_int) { 2555 if (rdev->family >= CHIP_CAYMAN) {
2543 DRM_DEBUG("evergreen_irq_set: sw int\n"); 2556 /* enable CP interrupts on all rings */
2544 cp_int_cntl |= RB_INT_ENABLE; 2557 if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
2545 cp_int_cntl |= TIME_STAMP_INT_ENABLE; 2558 DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
2559 cp_int_cntl |= TIME_STAMP_INT_ENABLE;
2560 }
2561 if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP1_INDEX]) {
2562 DRM_DEBUG("evergreen_irq_set: sw int cp1\n");
2563 cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
2564 }
2565 if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP2_INDEX]) {
2566 DRM_DEBUG("evergreen_irq_set: sw int cp2\n");
2567 cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
2568 }
2569 } else {
2570 if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
2571 DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
2572 cp_int_cntl |= RB_INT_ENABLE;
2573 cp_int_cntl |= TIME_STAMP_INT_ENABLE;
2574 }
2546 } 2575 }
2576
2547 if (rdev->irq.crtc_vblank_int[0] || 2577 if (rdev->irq.crtc_vblank_int[0] ||
2548 rdev->irq.pflip[0]) { 2578 rdev->irq.pflip[0]) {
2549 DRM_DEBUG("evergreen_irq_set: vblank 0\n"); 2579 DRM_DEBUG("evergreen_irq_set: vblank 0\n");
@@ -2603,7 +2633,12 @@ int evergreen_irq_set(struct radeon_device *rdev)
2603 grbm_int_cntl |= GUI_IDLE_INT_ENABLE; 2633 grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
2604 } 2634 }
2605 2635
2606 WREG32(CP_INT_CNTL, cp_int_cntl); 2636 if (rdev->family >= CHIP_CAYMAN) {
2637 cayman_cp_int_cntl_setup(rdev, 0, cp_int_cntl);
2638 cayman_cp_int_cntl_setup(rdev, 1, cp_int_cntl1);
2639 cayman_cp_int_cntl_setup(rdev, 2, cp_int_cntl2);
2640 } else
2641 WREG32(CP_INT_CNTL, cp_int_cntl);
2607 WREG32(GRBM_INT_CNTL, grbm_int_cntl); 2642 WREG32(GRBM_INT_CNTL, grbm_int_cntl);
2608 2643
2609 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1); 2644 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
@@ -3018,11 +3053,24 @@ restart_ih:
3018 case 177: /* CP_INT in IB1 */ 3053 case 177: /* CP_INT in IB1 */
3019 case 178: /* CP_INT in IB2 */ 3054 case 178: /* CP_INT in IB2 */
3020 DRM_DEBUG("IH: CP int: 0x%08x\n", src_data); 3055 DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
3021 radeon_fence_process(rdev); 3056 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
3022 break; 3057 break;
3023 case 181: /* CP EOP event */ 3058 case 181: /* CP EOP event */
3024 DRM_DEBUG("IH: CP EOP\n"); 3059 DRM_DEBUG("IH: CP EOP\n");
3025 radeon_fence_process(rdev); 3060 if (rdev->family >= CHIP_CAYMAN) {
3061 switch (src_data) {
3062 case 0:
3063 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
3064 break;
3065 case 1:
3066 radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
3067 break;
3068 case 2:
3069 radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
3070 break;
3071 }
3072 } else
3073 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
3026 break; 3074 break;
3027 case 233: /* GUI IDLE */ 3075 case 233: /* GUI IDLE */
3028 DRM_DEBUG("IH: GUI idle\n"); 3076 DRM_DEBUG("IH: GUI idle\n");
@@ -3052,6 +3100,7 @@ restart_ih:
3052 3100
3053static int evergreen_startup(struct radeon_device *rdev) 3101static int evergreen_startup(struct radeon_device *rdev)
3054{ 3102{
3103 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3055 int r; 3104 int r;
3056 3105
3057 /* enable pcie gen2 link */ 3106 /* enable pcie gen2 link */
@@ -3106,6 +3155,12 @@ static int evergreen_startup(struct radeon_device *rdev)
3106 if (r) 3155 if (r)
3107 return r; 3156 return r;
3108 3157
3158 r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
3159 if (r) {
3160 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
3161 return r;
3162 }
3163
3109 /* Enable IRQ */ 3164 /* Enable IRQ */
3110 r = r600_irq_init(rdev); 3165 r = r600_irq_init(rdev);
3111 if (r) { 3166 if (r) {
@@ -3115,7 +3170,9 @@ static int evergreen_startup(struct radeon_device *rdev)
3115 } 3170 }
3116 evergreen_irq_set(rdev); 3171 evergreen_irq_set(rdev);
3117 3172
3118 r = radeon_ring_init(rdev, rdev->cp.ring_size); 3173 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
3174 R600_CP_RB_RPTR, R600_CP_RB_WPTR,
3175 0, 0xfffff, RADEON_CP_PACKET2);
3119 if (r) 3176 if (r)
3120 return r; 3177 return r;
3121 r = evergreen_cp_load_microcode(rdev); 3178 r = evergreen_cp_load_microcode(rdev);
@@ -3125,6 +3182,22 @@ static int evergreen_startup(struct radeon_device *rdev)
3125 if (r) 3182 if (r)
3126 return r; 3183 return r;
3127 3184
3185 r = radeon_ib_pool_start(rdev);
3186 if (r)
3187 return r;
3188
3189 r = r600_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX);
3190 if (r) {
3191 DRM_ERROR("radeon: failed testing IB (%d).\n", r);
3192 rdev->accel_working = false;
3193 }
3194
3195 r = r600_audio_init(rdev);
3196 if (r) {
3197 DRM_ERROR("radeon: audio init failed\n");
3198 return r;
3199 }
3200
3128 return 0; 3201 return 0;
3129} 3202}
3130 3203
@@ -3144,31 +3217,30 @@ int evergreen_resume(struct radeon_device *rdev)
3144 /* post card */ 3217 /* post card */
3145 atom_asic_init(rdev->mode_info.atom_context); 3218 atom_asic_init(rdev->mode_info.atom_context);
3146 3219
3220 rdev->accel_working = true;
3147 r = evergreen_startup(rdev); 3221 r = evergreen_startup(rdev);
3148 if (r) { 3222 if (r) {
3149 DRM_ERROR("evergreen startup failed on resume\n"); 3223 DRM_ERROR("evergreen startup failed on resume\n");
3150 return r; 3224 return r;
3151 } 3225 }
3152 3226
3153 r = r600_ib_test(rdev);
3154 if (r) {
3155 DRM_ERROR("radeon: failed testing IB (%d).\n", r);
3156 return r;
3157 }
3158
3159 return r; 3227 return r;
3160 3228
3161} 3229}
3162 3230
3163int evergreen_suspend(struct radeon_device *rdev) 3231int evergreen_suspend(struct radeon_device *rdev)
3164{ 3232{
3233 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3234
3235 r600_audio_fini(rdev);
3165 /* FIXME: we should wait for ring to be empty */ 3236 /* FIXME: we should wait for ring to be empty */
3237 radeon_ib_pool_suspend(rdev);
3238 r600_blit_suspend(rdev);
3166 r700_cp_stop(rdev); 3239 r700_cp_stop(rdev);
3167 rdev->cp.ready = false; 3240 ring->ready = false;
3168 evergreen_irq_suspend(rdev); 3241 evergreen_irq_suspend(rdev);
3169 radeon_wb_disable(rdev); 3242 radeon_wb_disable(rdev);
3170 evergreen_pcie_gart_disable(rdev); 3243 evergreen_pcie_gart_disable(rdev);
3171 r600_blit_suspend(rdev);
3172 3244
3173 return 0; 3245 return 0;
3174} 3246}
@@ -3243,8 +3315,8 @@ int evergreen_init(struct radeon_device *rdev)
3243 if (r) 3315 if (r)
3244 return r; 3316 return r;
3245 3317
3246 rdev->cp.ring_obj = NULL; 3318 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
3247 r600_ring_init(rdev, 1024 * 1024); 3319 r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
3248 3320
3249 rdev->ih.ring_obj = NULL; 3321 rdev->ih.ring_obj = NULL;
3250 r600_ih_ring_init(rdev, 64 * 1024); 3322 r600_ih_ring_init(rdev, 64 * 1024);
@@ -3253,29 +3325,24 @@ int evergreen_init(struct radeon_device *rdev)
3253 if (r) 3325 if (r)
3254 return r; 3326 return r;
3255 3327
3328 r = radeon_ib_pool_init(rdev);
3256 rdev->accel_working = true; 3329 rdev->accel_working = true;
3330 if (r) {
3331 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
3332 rdev->accel_working = false;
3333 }
3334
3257 r = evergreen_startup(rdev); 3335 r = evergreen_startup(rdev);
3258 if (r) { 3336 if (r) {
3259 dev_err(rdev->dev, "disabling GPU acceleration\n"); 3337 dev_err(rdev->dev, "disabling GPU acceleration\n");
3260 r700_cp_fini(rdev); 3338 r700_cp_fini(rdev);
3261 r600_irq_fini(rdev); 3339 r600_irq_fini(rdev);
3262 radeon_wb_fini(rdev); 3340 radeon_wb_fini(rdev);
3341 r100_ib_fini(rdev);
3263 radeon_irq_kms_fini(rdev); 3342 radeon_irq_kms_fini(rdev);
3264 evergreen_pcie_gart_fini(rdev); 3343 evergreen_pcie_gart_fini(rdev);
3265 rdev->accel_working = false; 3344 rdev->accel_working = false;
3266 } 3345 }
3267 if (rdev->accel_working) {
3268 r = radeon_ib_pool_init(rdev);
3269 if (r) {
3270 DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
3271 rdev->accel_working = false;
3272 }
3273 r = r600_ib_test(rdev);
3274 if (r) {
3275 DRM_ERROR("radeon: failed testing IB (%d).\n", r);
3276 rdev->accel_working = false;
3277 }
3278 }
3279 3346
3280 /* Don't start up if the MC ucode is missing on BTC parts. 3347 /* Don't start up if the MC ucode is missing on BTC parts.
3281 * The default clocks and voltages before the MC ucode 3348 * The default clocks and voltages before the MC ucode
@@ -3293,15 +3360,17 @@ int evergreen_init(struct radeon_device *rdev)
3293 3360
3294void evergreen_fini(struct radeon_device *rdev) 3361void evergreen_fini(struct radeon_device *rdev)
3295{ 3362{
3363 r600_audio_fini(rdev);
3296 r600_blit_fini(rdev); 3364 r600_blit_fini(rdev);
3297 r700_cp_fini(rdev); 3365 r700_cp_fini(rdev);
3298 r600_irq_fini(rdev); 3366 r600_irq_fini(rdev);
3299 radeon_wb_fini(rdev); 3367 radeon_wb_fini(rdev);
3300 radeon_ib_pool_fini(rdev); 3368 r100_ib_fini(rdev);
3301 radeon_irq_kms_fini(rdev); 3369 radeon_irq_kms_fini(rdev);
3302 evergreen_pcie_gart_fini(rdev); 3370 evergreen_pcie_gart_fini(rdev);
3303 r600_vram_scratch_fini(rdev); 3371 r600_vram_scratch_fini(rdev);
3304 radeon_gem_fini(rdev); 3372 radeon_gem_fini(rdev);
3373 radeon_semaphore_driver_fini(rdev);
3305 radeon_fence_driver_fini(rdev); 3374 radeon_fence_driver_fini(rdev);
3306 radeon_agp_fini(rdev); 3375 radeon_agp_fini(rdev);
3307 radeon_bo_fini(rdev); 3376 radeon_bo_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
index 914e5af8416..2379849515c 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -49,6 +49,7 @@ static void
49set_render_target(struct radeon_device *rdev, int format, 49set_render_target(struct radeon_device *rdev, int format,
50 int w, int h, u64 gpu_addr) 50 int w, int h, u64 gpu_addr)
51{ 51{
52 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
52 u32 cb_color_info; 53 u32 cb_color_info;
53 int pitch, slice; 54 int pitch, slice;
54 55
@@ -62,23 +63,23 @@ set_render_target(struct radeon_device *rdev, int format,
62 pitch = (w / 8) - 1; 63 pitch = (w / 8) - 1;
 	slice = ((w * h) / 64) - 1;
 
-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 15));
-	radeon_ring_write(rdev, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_START) >> 2);
-	radeon_ring_write(rdev, gpu_addr >> 8);
-	radeon_ring_write(rdev, pitch);
-	radeon_ring_write(rdev, slice);
-	radeon_ring_write(rdev, 0);
-	radeon_ring_write(rdev, cb_color_info);
-	radeon_ring_write(rdev, 0);
-	radeon_ring_write(rdev, (w - 1) | ((h - 1) << 16));
-	radeon_ring_write(rdev, 0);
-	radeon_ring_write(rdev, 0);
-	radeon_ring_write(rdev, 0);
-	radeon_ring_write(rdev, 0);
-	radeon_ring_write(rdev, 0);
-	radeon_ring_write(rdev, 0);
-	radeon_ring_write(rdev, 0);
-	radeon_ring_write(rdev, 0);
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 15));
+	radeon_ring_write(ring, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_START) >> 2);
+	radeon_ring_write(ring, gpu_addr >> 8);
+	radeon_ring_write(ring, pitch);
+	radeon_ring_write(ring, slice);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, cb_color_info);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, (w - 1) | ((h - 1) << 16));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
 }
 
 /* emits 5dw */
@@ -87,6 +88,7 @@ cp_set_surface_sync(struct radeon_device *rdev,
 		    u32 sync_type, u32 size,
 		    u64 mc_addr)
 {
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	u32 cp_coher_size;
 
 	if (size == 0xffffffff)
@@ -99,39 +101,40 @@ cp_set_surface_sync(struct radeon_device *rdev,
 	 * to the RB directly. For IBs, the CP programs this as part of the
 	 * surface_sync packet.
 	 */
-		radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
-		radeon_ring_write(rdev, (0x85e8 - PACKET3_SET_CONFIG_REG_START) >> 2);
-		radeon_ring_write(rdev, 0); /* CP_COHER_CNTL2 */
+		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+		radeon_ring_write(ring, (0x85e8 - PACKET3_SET_CONFIG_REG_START) >> 2);
+		radeon_ring_write(ring, 0); /* CP_COHER_CNTL2 */
 	}
-	radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
-	radeon_ring_write(rdev, sync_type);
-	radeon_ring_write(rdev, cp_coher_size);
-	radeon_ring_write(rdev, mc_addr >> 8);
-	radeon_ring_write(rdev, 10); /* poll interval */
+	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+	radeon_ring_write(ring, sync_type);
+	radeon_ring_write(ring, cp_coher_size);
+	radeon_ring_write(ring, mc_addr >> 8);
+	radeon_ring_write(ring, 10); /* poll interval */
 }
 
 /* emits 11dw + 1 surface sync = 16dw */
 static void
 set_shaders(struct radeon_device *rdev)
 {
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	u64 gpu_addr;
 
 	/* VS */
 	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 3));
-	radeon_ring_write(rdev, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_START) >> 2);
-	radeon_ring_write(rdev, gpu_addr >> 8);
-	radeon_ring_write(rdev, 2);
-	radeon_ring_write(rdev, 0);
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 3));
+	radeon_ring_write(ring, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_START) >> 2);
+	radeon_ring_write(ring, gpu_addr >> 8);
+	radeon_ring_write(ring, 2);
+	radeon_ring_write(ring, 0);
 
 	/* PS */
 	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset;
-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 4));
-	radeon_ring_write(rdev, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_START) >> 2);
-	radeon_ring_write(rdev, gpu_addr >> 8);
-	radeon_ring_write(rdev, 1);
-	radeon_ring_write(rdev, 0);
-	radeon_ring_write(rdev, 2);
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 4));
+	radeon_ring_write(ring, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_START) >> 2);
+	radeon_ring_write(ring, gpu_addr >> 8);
+	radeon_ring_write(ring, 1);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 2);
 
 	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
 	cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr);
@@ -141,6 +144,7 @@ set_shaders(struct radeon_device *rdev)
 static void
 set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
 {
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	u32 sq_vtx_constant_word2, sq_vtx_constant_word3;
 
 	/* high addr, stride */
@@ -155,16 +159,16 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
 		SQ_VTCX_SEL_Z(SQ_SEL_Z) |
 		SQ_VTCX_SEL_W(SQ_SEL_W);
 
-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 8));
-	radeon_ring_write(rdev, 0x580);
-	radeon_ring_write(rdev, gpu_addr & 0xffffffff);
-	radeon_ring_write(rdev, 48 - 1); /* size */
-	radeon_ring_write(rdev, sq_vtx_constant_word2);
-	radeon_ring_write(rdev, sq_vtx_constant_word3);
-	radeon_ring_write(rdev, 0);
-	radeon_ring_write(rdev, 0);
-	radeon_ring_write(rdev, 0);
-	radeon_ring_write(rdev, S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_BUFFER));
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 8));
+	radeon_ring_write(ring, 0x580);
+	radeon_ring_write(ring, gpu_addr & 0xffffffff);
+	radeon_ring_write(ring, 48 - 1); /* size */
+	radeon_ring_write(ring, sq_vtx_constant_word2);
+	radeon_ring_write(ring, sq_vtx_constant_word3);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_BUFFER));
 
 	if ((rdev->family == CHIP_CEDAR) ||
 	    (rdev->family == CHIP_PALM) ||
@@ -185,6 +189,7 @@ set_tex_resource(struct radeon_device *rdev,
 		 int format, int w, int h, int pitch,
 		 u64 gpu_addr, u32 size)
 {
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	u32 sq_tex_resource_word0, sq_tex_resource_word1;
 	u32 sq_tex_resource_word4, sq_tex_resource_word7;
 
@@ -208,16 +213,16 @@ set_tex_resource(struct radeon_device *rdev,
 	cp_set_surface_sync(rdev,
 			    PACKET3_TC_ACTION_ENA, size, gpu_addr);
 
-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 8));
-	radeon_ring_write(rdev, 0);
-	radeon_ring_write(rdev, sq_tex_resource_word0);
-	radeon_ring_write(rdev, sq_tex_resource_word1);
-	radeon_ring_write(rdev, gpu_addr >> 8);
-	radeon_ring_write(rdev, gpu_addr >> 8);
-	radeon_ring_write(rdev, sq_tex_resource_word4);
-	radeon_ring_write(rdev, 0);
-	radeon_ring_write(rdev, 0);
-	radeon_ring_write(rdev, sq_tex_resource_word7);
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 8));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, sq_tex_resource_word0);
+	radeon_ring_write(ring, sq_tex_resource_word1);
+	radeon_ring_write(ring, gpu_addr >> 8);
+	radeon_ring_write(ring, gpu_addr >> 8);
+	radeon_ring_write(ring, sq_tex_resource_word4);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, sq_tex_resource_word7);
 }
 
 /* emits 12 */
@@ -225,6 +230,7 @@ static void
 set_scissors(struct radeon_device *rdev, int x1, int y1,
 	     int x2, int y2)
 {
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	/* workaround some hw bugs */
 	if (x2 == 0)
 		x1 = 1;
@@ -235,43 +241,44 @@ set_scissors(struct radeon_device *rdev, int x1, int y1,
 		x2 = 2;
 	}
 
-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
-	radeon_ring_write(rdev, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
-	radeon_ring_write(rdev, (x1 << 0) | (y1 << 16));
-	radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+	radeon_ring_write(ring, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
+	radeon_ring_write(ring, (x1 << 0) | (y1 << 16));
+	radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
 
-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
-	radeon_ring_write(rdev, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
-	radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
-	radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+	radeon_ring_write(ring, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
+	radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
+	radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
 
-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
-	radeon_ring_write(rdev, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
-	radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
-	radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+	radeon_ring_write(ring, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
+	radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
+	radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
 }
 
 /* emits 10 */
 static void
 draw_auto(struct radeon_device *rdev)
 {
-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
-	radeon_ring_write(rdev, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_START) >> 2);
-	radeon_ring_write(rdev, DI_PT_RECTLIST);
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+	radeon_ring_write(ring, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_START) >> 2);
+	radeon_ring_write(ring, DI_PT_RECTLIST);
 
-	radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0));
-	radeon_ring_write(rdev,
+	radeon_ring_write(ring, PACKET3(PACKET3_INDEX_TYPE, 0));
+	radeon_ring_write(ring,
 #ifdef __BIG_ENDIAN
 			  (2 << 2) |
 #endif
 			  DI_INDEX_SIZE_16_BIT);
 
-	radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0));
-	radeon_ring_write(rdev, 1);
+	radeon_ring_write(ring, PACKET3(PACKET3_NUM_INSTANCES, 0));
+	radeon_ring_write(ring, 1);
 
-	radeon_ring_write(rdev, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
-	radeon_ring_write(rdev, 3);
-	radeon_ring_write(rdev, DI_SRC_SEL_AUTO_INDEX);
+	radeon_ring_write(ring, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
+	radeon_ring_write(ring, 3);
+	radeon_ring_write(ring, DI_SRC_SEL_AUTO_INDEX);
 
 }
 
@@ -279,6 +286,7 @@ draw_auto(struct radeon_device *rdev)
 static void
 set_default_state(struct radeon_device *rdev)
 {
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2, sq_gpr_resource_mgmt_3;
 	u32 sq_thread_resource_mgmt, sq_thread_resource_mgmt_2;
 	u32 sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2, sq_stack_resource_mgmt_3;
@@ -292,8 +300,8 @@ set_default_state(struct radeon_device *rdev)
 	int dwords;
 
 	/* set clear context state */
-	radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
-	radeon_ring_write(rdev, 0);
+	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
+	radeon_ring_write(ring, 0);
 
 	if (rdev->family < CHIP_CAYMAN) {
 		switch (rdev->family) {
@@ -550,60 +558,60 @@ set_default_state(struct radeon_device *rdev)
 					  NUM_LS_STACK_ENTRIES(num_ls_stack_entries));
 
 		/* disable dyn gprs */
-		radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
-		radeon_ring_write(rdev, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
-		radeon_ring_write(rdev, 0);
+		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+		radeon_ring_write(ring, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
+		radeon_ring_write(ring, 0);
 
 		/* setup LDS */
-		radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
-		radeon_ring_write(rdev, (SQ_LDS_RESOURCE_MGMT - PACKET3_SET_CONFIG_REG_START) >> 2);
-		radeon_ring_write(rdev, 0x10001000);
+		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+		radeon_ring_write(ring, (SQ_LDS_RESOURCE_MGMT - PACKET3_SET_CONFIG_REG_START) >> 2);
+		radeon_ring_write(ring, 0x10001000);
 
 		/* SQ config */
-		radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 11));
-		radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
-		radeon_ring_write(rdev, sq_config);
-		radeon_ring_write(rdev, sq_gpr_resource_mgmt_1);
-		radeon_ring_write(rdev, sq_gpr_resource_mgmt_2);
-		radeon_ring_write(rdev, sq_gpr_resource_mgmt_3);
-		radeon_ring_write(rdev, 0);
-		radeon_ring_write(rdev, 0);
-		radeon_ring_write(rdev, sq_thread_resource_mgmt);
-		radeon_ring_write(rdev, sq_thread_resource_mgmt_2);
-		radeon_ring_write(rdev, sq_stack_resource_mgmt_1);
-		radeon_ring_write(rdev, sq_stack_resource_mgmt_2);
-		radeon_ring_write(rdev, sq_stack_resource_mgmt_3);
+		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 11));
+		radeon_ring_write(ring, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
+		radeon_ring_write(ring, sq_config);
+		radeon_ring_write(ring, sq_gpr_resource_mgmt_1);
+		radeon_ring_write(ring, sq_gpr_resource_mgmt_2);
+		radeon_ring_write(ring, sq_gpr_resource_mgmt_3);
+		radeon_ring_write(ring, 0);
+		radeon_ring_write(ring, 0);
+		radeon_ring_write(ring, sq_thread_resource_mgmt);
+		radeon_ring_write(ring, sq_thread_resource_mgmt_2);
+		radeon_ring_write(ring, sq_stack_resource_mgmt_1);
+		radeon_ring_write(ring, sq_stack_resource_mgmt_2);
+		radeon_ring_write(ring, sq_stack_resource_mgmt_3);
 	}
 
 	/* CONTEXT_CONTROL */
-	radeon_ring_write(rdev, 0xc0012800);
-	radeon_ring_write(rdev, 0x80000000);
-	radeon_ring_write(rdev, 0x80000000);
+	radeon_ring_write(ring, 0xc0012800);
+	radeon_ring_write(ring, 0x80000000);
+	radeon_ring_write(ring, 0x80000000);
 
 	/* SQ_VTX_BASE_VTX_LOC */
-	radeon_ring_write(rdev, 0xc0026f00);
-	radeon_ring_write(rdev, 0x00000000);
-	radeon_ring_write(rdev, 0x00000000);
-	radeon_ring_write(rdev, 0x00000000);
+	radeon_ring_write(ring, 0xc0026f00);
+	radeon_ring_write(ring, 0x00000000);
+	radeon_ring_write(ring, 0x00000000);
+	radeon_ring_write(ring, 0x00000000);
 
 	/* SET_SAMPLER */
-	radeon_ring_write(rdev, 0xc0036e00);
-	radeon_ring_write(rdev, 0x00000000);
-	radeon_ring_write(rdev, 0x00000012);
-	radeon_ring_write(rdev, 0x00000000);
-	radeon_ring_write(rdev, 0x00000000);
+	radeon_ring_write(ring, 0xc0036e00);
+	radeon_ring_write(ring, 0x00000000);
+	radeon_ring_write(ring, 0x00000012);
+	radeon_ring_write(ring, 0x00000000);
+	radeon_ring_write(ring, 0x00000000);
 
 	/* set to DX10/11 mode */
-	radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0));
-	radeon_ring_write(rdev, 1);
+	radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
+	radeon_ring_write(ring, 1);
 
 	/* emit an IB pointing at default state */
 	dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
 	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
-	radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
-	radeon_ring_write(rdev, gpu_addr & 0xFFFFFFFC);
-	radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF);
-	radeon_ring_write(rdev, dwords);
+	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+	radeon_ring_write(ring, gpu_addr & 0xFFFFFFFC);
+	radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xFF);
+	radeon_ring_write(ring, dwords);
 
 }
 
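The hunks above are mechanical: every emitter in the blit path now takes the ring it writes to instead of reaching through struct radeon_device, and each helper resolves the GFX ring once at the top. A minimal sketch of the resulting call pattern, using only signatures visible in this diff (the helper name emit_gfx_nop is illustrative, not part of the patch):

	static void emit_gfx_nop(struct radeon_device *rdev)
	{
		/* resolve the ring explicitly, as the converted helpers do */
		struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];

		/* packets now go to the ring object, not the device */
		radeon_ring_write(ring, 0x80000000); /* PACKET2 NOP */
	}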
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index cd4590aae15..f7442e62c03 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -520,7 +520,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 		break;
 	case DB_Z_INFO:
 		track->db_z_info = radeon_get_ib_value(p, idx);
-		if (!p->keep_tiling_flags) {
+		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
 			r = evergreen_cs_packet_next_reloc(p, &reloc);
 			if (r) {
 				dev_warn(p->dev, "bad SET_CONTEXT_REG "
@@ -649,7 +649,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 	case CB_COLOR7_INFO:
 		tmp = (reg - CB_COLOR0_INFO) / 0x3c;
 		track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
-		if (!p->keep_tiling_flags) {
+		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
 			r = evergreen_cs_packet_next_reloc(p, &reloc);
 			if (r) {
 				dev_warn(p->dev, "bad SET_CONTEXT_REG "
@@ -666,7 +666,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 	case CB_COLOR11_INFO:
 		tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8;
 		track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
-		if (!p->keep_tiling_flags) {
+		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
 			r = evergreen_cs_packet_next_reloc(p, &reloc);
 			if (r) {
 				dev_warn(p->dev, "bad SET_CONTEXT_REG "
@@ -1355,7 +1355,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
 				return -EINVAL;
 			}
 			ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
-			if (!p->keep_tiling_flags) {
+			if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
 				ib[idx+1+(i*8)+1] |=
 					TEX_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
 				if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
@@ -1572,3 +1572,241 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
 	return 0;
 }
 
+/* vm parser */
+static bool evergreen_vm_reg_valid(u32 reg)
+{
+	/* context regs are fine */
+	if (reg >= 0x28000)
+		return true;
+
+	/* check config regs */
+	switch (reg) {
+	case GRBM_GFX_INDEX:
+	case VGT_VTX_VECT_EJECT_REG:
+	case VGT_CACHE_INVALIDATION:
+	case VGT_GS_VERTEX_REUSE:
+	case VGT_PRIMITIVE_TYPE:
+	case VGT_INDEX_TYPE:
+	case VGT_NUM_INDICES:
+	case VGT_NUM_INSTANCES:
+	case VGT_COMPUTE_DIM_X:
+	case VGT_COMPUTE_DIM_Y:
+	case VGT_COMPUTE_DIM_Z:
+	case VGT_COMPUTE_START_X:
+	case VGT_COMPUTE_START_Y:
+	case VGT_COMPUTE_START_Z:
+	case VGT_COMPUTE_INDEX:
+	case VGT_COMPUTE_THREAD_GROUP_SIZE:
+	case VGT_HS_OFFCHIP_PARAM:
+	case PA_CL_ENHANCE:
+	case PA_SU_LINE_STIPPLE_VALUE:
+	case PA_SC_LINE_STIPPLE_STATE:
+	case PA_SC_ENHANCE:
+	case SQ_DYN_GPR_CNTL_PS_FLUSH_REQ:
+	case SQ_DYN_GPR_SIMD_LOCK_EN:
+	case SQ_CONFIG:
+	case SQ_GPR_RESOURCE_MGMT_1:
+	case SQ_GLOBAL_GPR_RESOURCE_MGMT_1:
+	case SQ_GLOBAL_GPR_RESOURCE_MGMT_2:
+	case SQ_CONST_MEM_BASE:
+	case SQ_STATIC_THREAD_MGMT_1:
+	case SQ_STATIC_THREAD_MGMT_2:
+	case SQ_STATIC_THREAD_MGMT_3:
+	case SPI_CONFIG_CNTL:
+	case SPI_CONFIG_CNTL_1:
+	case TA_CNTL_AUX:
+	case DB_DEBUG:
+	case DB_DEBUG2:
+	case DB_DEBUG3:
+	case DB_DEBUG4:
+	case DB_WATERMARKS:
+	case TD_PS_BORDER_COLOR_INDEX:
+	case TD_PS_BORDER_COLOR_RED:
+	case TD_PS_BORDER_COLOR_GREEN:
+	case TD_PS_BORDER_COLOR_BLUE:
+	case TD_PS_BORDER_COLOR_ALPHA:
+	case TD_VS_BORDER_COLOR_INDEX:
+	case TD_VS_BORDER_COLOR_RED:
+	case TD_VS_BORDER_COLOR_GREEN:
+	case TD_VS_BORDER_COLOR_BLUE:
+	case TD_VS_BORDER_COLOR_ALPHA:
+	case TD_GS_BORDER_COLOR_INDEX:
+	case TD_GS_BORDER_COLOR_RED:
+	case TD_GS_BORDER_COLOR_GREEN:
+	case TD_GS_BORDER_COLOR_BLUE:
+	case TD_GS_BORDER_COLOR_ALPHA:
+	case TD_HS_BORDER_COLOR_INDEX:
+	case TD_HS_BORDER_COLOR_RED:
+	case TD_HS_BORDER_COLOR_GREEN:
+	case TD_HS_BORDER_COLOR_BLUE:
+	case TD_HS_BORDER_COLOR_ALPHA:
+	case TD_LS_BORDER_COLOR_INDEX:
+	case TD_LS_BORDER_COLOR_RED:
+	case TD_LS_BORDER_COLOR_GREEN:
+	case TD_LS_BORDER_COLOR_BLUE:
+	case TD_LS_BORDER_COLOR_ALPHA:
+	case TD_CS_BORDER_COLOR_INDEX:
+	case TD_CS_BORDER_COLOR_RED:
+	case TD_CS_BORDER_COLOR_GREEN:
+	case TD_CS_BORDER_COLOR_BLUE:
+	case TD_CS_BORDER_COLOR_ALPHA:
+	case SQ_ESGS_RING_SIZE:
+	case SQ_GSVS_RING_SIZE:
+	case SQ_ESTMP_RING_SIZE:
+	case SQ_GSTMP_RING_SIZE:
+	case SQ_HSTMP_RING_SIZE:
+	case SQ_LSTMP_RING_SIZE:
+	case SQ_PSTMP_RING_SIZE:
+	case SQ_VSTMP_RING_SIZE:
+	case SQ_ESGS_RING_ITEMSIZE:
+	case SQ_ESTMP_RING_ITEMSIZE:
+	case SQ_GSTMP_RING_ITEMSIZE:
+	case SQ_GSVS_RING_ITEMSIZE:
+	case SQ_GS_VERT_ITEMSIZE:
+	case SQ_GS_VERT_ITEMSIZE_1:
+	case SQ_GS_VERT_ITEMSIZE_2:
+	case SQ_GS_VERT_ITEMSIZE_3:
+	case SQ_GSVS_RING_OFFSET_1:
+	case SQ_GSVS_RING_OFFSET_2:
+	case SQ_GSVS_RING_OFFSET_3:
+	case SQ_HSTMP_RING_ITEMSIZE:
+	case SQ_LSTMP_RING_ITEMSIZE:
+	case SQ_PSTMP_RING_ITEMSIZE:
+	case SQ_VSTMP_RING_ITEMSIZE:
+	case VGT_TF_RING_SIZE:
+	case SQ_ESGS_RING_BASE:
+	case SQ_GSVS_RING_BASE:
+	case SQ_ESTMP_RING_BASE:
+	case SQ_GSTMP_RING_BASE:
+	case SQ_HSTMP_RING_BASE:
+	case SQ_LSTMP_RING_BASE:
+	case SQ_PSTMP_RING_BASE:
+	case SQ_VSTMP_RING_BASE:
+	case CAYMAN_VGT_OFFCHIP_LDS_BASE:
+	case CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static int evergreen_vm_packet3_check(struct radeon_device *rdev,
+				      u32 *ib, struct radeon_cs_packet *pkt)
+{
+	u32 idx = pkt->idx + 1;
+	u32 idx_value = ib[idx];
+	u32 start_reg, end_reg, reg, i;
+
+	switch (pkt->opcode) {
+	case PACKET3_NOP:
+	case PACKET3_SET_BASE:
+	case PACKET3_CLEAR_STATE:
+	case PACKET3_INDEX_BUFFER_SIZE:
+	case PACKET3_DISPATCH_DIRECT:
+	case PACKET3_DISPATCH_INDIRECT:
+	case PACKET3_MODE_CONTROL:
+	case PACKET3_SET_PREDICATION:
+	case PACKET3_COND_EXEC:
+	case PACKET3_PRED_EXEC:
+	case PACKET3_DRAW_INDIRECT:
+	case PACKET3_DRAW_INDEX_INDIRECT:
+	case PACKET3_INDEX_BASE:
+	case PACKET3_DRAW_INDEX_2:
+	case PACKET3_CONTEXT_CONTROL:
+	case PACKET3_DRAW_INDEX_OFFSET:
+	case PACKET3_INDEX_TYPE:
+	case PACKET3_DRAW_INDEX:
+	case PACKET3_DRAW_INDEX_AUTO:
+	case PACKET3_DRAW_INDEX_IMMD:
+	case PACKET3_NUM_INSTANCES:
+	case PACKET3_DRAW_INDEX_MULTI_AUTO:
+	case PACKET3_STRMOUT_BUFFER_UPDATE:
+	case PACKET3_DRAW_INDEX_OFFSET_2:
+	case PACKET3_DRAW_INDEX_MULTI_ELEMENT:
+	case PACKET3_MPEG_INDEX:
+	case PACKET3_WAIT_REG_MEM:
+	case PACKET3_MEM_WRITE:
+	case PACKET3_SURFACE_SYNC:
+	case PACKET3_EVENT_WRITE:
+	case PACKET3_EVENT_WRITE_EOP:
+	case PACKET3_EVENT_WRITE_EOS:
+	case PACKET3_SET_CONTEXT_REG:
+	case PACKET3_SET_BOOL_CONST:
+	case PACKET3_SET_LOOP_CONST:
+	case PACKET3_SET_RESOURCE:
+	case PACKET3_SET_SAMPLER:
+	case PACKET3_SET_CTL_CONST:
+	case PACKET3_SET_RESOURCE_OFFSET:
+	case PACKET3_SET_CONTEXT_REG_INDIRECT:
+	case PACKET3_SET_RESOURCE_INDIRECT:
+	case CAYMAN_PACKET3_DEALLOC_STATE:
+		break;
+	case PACKET3_COND_WRITE:
+		if (idx_value & 0x100) {
+			reg = ib[idx + 5] * 4;
+			if (!evergreen_vm_reg_valid(reg))
+				return -EINVAL;
+		}
+		break;
+	case PACKET3_COPY_DW:
+		if (idx_value & 0x2) {
+			reg = ib[idx + 3] * 4;
+			if (!evergreen_vm_reg_valid(reg))
+				return -EINVAL;
+		}
+		break;
+	case PACKET3_SET_CONFIG_REG:
+		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
+		end_reg = 4 * pkt->count + start_reg - 4;
+		if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
+		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
+		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
+			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
+			return -EINVAL;
+		}
+		for (i = 0; i < pkt->count; i++) {
+			reg = start_reg + (4 * i);
+			if (!evergreen_vm_reg_valid(reg))
+				return -EINVAL;
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+	int ret = 0;
+	u32 idx = 0;
+	struct radeon_cs_packet pkt;
+
+	do {
+		pkt.idx = idx;
+		pkt.type = CP_PACKET_GET_TYPE(ib->ptr[idx]);
+		pkt.count = CP_PACKET_GET_COUNT(ib->ptr[idx]);
+		pkt.one_reg_wr = 0;
+		switch (pkt.type) {
+		case PACKET_TYPE0:
+			dev_err(rdev->dev, "Packet0 not allowed!\n");
+			ret = -EINVAL;
+			break;
+		case PACKET_TYPE2:
+			break;
+		case PACKET_TYPE3:
+			pkt.opcode = CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
+			ret = evergreen_vm_packet3_check(rdev, ib->ptr, &pkt);
+			break;
+		default:
+			dev_err(rdev->dev, "Unknown packet type %d !\n", pkt.type);
+			ret = -EINVAL;
+			break;
+		}
+		if (ret)
+			break;
+		idx += pkt.count + 2;
+	} while (idx < ib->length_dw);
+
+	return ret;
+}
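The new evergreen_ib_parse() walks the IB one CP packet at a time and advances by pkt.count + 2 dwords: one header dword plus count + 1 payload dwords. A sketch of the header fields it decodes, assuming the usual radeon CP packet layout (the decode_* helpers are illustrative; the driver uses CP_PACKET_GET_* macros of the same shape):

	/* type in bits 31:30, count in bits 29:16, type-3 opcode in bits 15:8 */
	static inline u32 decode_pkt_type(u32 header)    { return (header >> 30) & 0x3; }
	static inline u32 decode_pkt_count(u32 header)   { return (header >> 16) & 0x3fff; }
	static inline u32 decode_pkt3_opcode(u32 header) { return (header >> 8) & 0xff; }

A type-3 packet is therefore decode_pkt_count(h) + 2 dwords long in total, which is exactly the stride the parse loop uses.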
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index 7d7f2155e34..4215de95477 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -35,6 +35,14 @@
 #define EVERGREEN_P1PLL_SS_CNTL                         0x414
 #define EVERGREEN_P2PLL_SS_CNTL                         0x454
 #       define EVERGREEN_PxPLL_SS_EN                    (1 << 12)
+
+#define EVERGREEN_AUDIO_PLL1_MUL			0x5b0
+#define EVERGREEN_AUDIO_PLL1_DIV			0x5b4
+#define EVERGREEN_AUDIO_PLL1_UNK			0x5bc
+
+#define EVERGREEN_AUDIO_ENABLE				0x5e78
+#define EVERGREEN_AUDIO_VENDOR_ID			0x5ec0
+
 /* GRPH blocks at 0x6800, 0x7400, 0x10000, 0x10c00, 0x11800, 0x12400 */
 #define EVERGREEN_GRPH_ENABLE                           0x6800
 #define EVERGREEN_GRPH_CONTROL                          0x6804
@@ -220,4 +228,9 @@
 #define EVERGREEN_DC_GPIO_HPD_EN                        0x64b8
 #define EVERGREEN_DC_GPIO_HPD_Y                         0x64bc
 
+/* HDMI blocks at 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30 */
+#define EVERGREEN_HDMI_BASE				0x7030
+
+#define EVERGREEN_HDMI_CONFIG_OFFSET			0xf0
+
 #endif
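The new HDMI offsets follow the same pattern as the GRPH blocks above: one register layout replicated at several per-encoder bases. An illustrative accessor, assuming only the bases listed in the comment (hdmi_config_reg() is not part of the patch):

	static const u32 evergreen_hdmi_bases[] = {
		0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30,
	};

	static u32 hdmi_config_reg(unsigned int block)
	{
		return evergreen_hdmi_bases[block] + EVERGREEN_HDMI_CONFIG_OFFSET;
	}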
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index e00039e59a7..b502216d42a 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -242,6 +242,7 @@
 #define	PA_CL_ENHANCE					0x8A14
 #define		CLIP_VTX_REORDER_ENA			(1 << 0)
 #define		NUM_CLIP_SEQ(x)				((x) << 1)
+#define	PA_SC_ENHANCE					0x8BF0
 #define	PA_SC_AA_CONFIG					0x28C04
 #define		MSAA_NUM_SAMPLES_SHIFT			0
 #define		MSAA_NUM_SAMPLES_MASK			0x3
@@ -319,6 +320,8 @@
 #define	SQ_GPR_RESOURCE_MGMT_3				0x8C0C
 #define		NUM_HS_GPRS(x)				((x) << 0)
 #define		NUM_LS_GPRS(x)				((x) << 16)
+#define	SQ_GLOBAL_GPR_RESOURCE_MGMT_1			0x8C10
+#define	SQ_GLOBAL_GPR_RESOURCE_MGMT_2			0x8C14
 #define	SQ_THREAD_RESOURCE_MGMT				0x8C18
 #define		NUM_PS_THREADS(x)			((x) << 0)
 #define		NUM_VS_THREADS(x)			((x) << 8)
@@ -337,6 +340,10 @@
 #define		NUM_HS_STACK_ENTRIES(x)			((x) << 0)
 #define		NUM_LS_STACK_ENTRIES(x)			((x) << 16)
 #define	SQ_DYN_GPR_CNTL_PS_FLUSH_REQ			0x8D8C
+#define	SQ_DYN_GPR_SIMD_LOCK_EN				0x8D94
+#define	SQ_STATIC_THREAD_MGMT_1				0x8E20
+#define	SQ_STATIC_THREAD_MGMT_2				0x8E24
+#define	SQ_STATIC_THREAD_MGMT_3				0x8E28
 #define	SQ_LDS_RESOURCE_MGMT				0x8E2C
 
 #define	SQ_MS_FIFO_SIZES				0x8CF0
@@ -691,6 +698,7 @@
 #define	PACKET3_DRAW_INDEX_MULTI_ELEMENT		0x36
 #define	PACKET3_MEM_SEMAPHORE				0x39
 #define	PACKET3_MPEG_INDEX				0x3A
+#define	PACKET3_COPY_DW					0x3B
 #define	PACKET3_WAIT_REG_MEM				0x3C
 #define	PACKET3_MEM_WRITE				0x3D
 #define	PACKET3_INDIRECT_BUFFER				0x32
@@ -768,6 +776,8 @@
 #define	SQ_TEX_VTX_VALID_TEXTURE			0x2
 #define	SQ_TEX_VTX_VALID_BUFFER				0x3
 
+#define VGT_VTX_VECT_EJECT_REG				0x88b0
+
 #define SQ_CONST_MEM_BASE				0x8df8
 
 #define SQ_ESGS_RING_BASE				0x8c40
@@ -892,8 +902,27 @@
 #define	PA_SC_SCREEN_SCISSOR_TL				0x28030
 #define	PA_SC_GENERIC_SCISSOR_TL			0x28240
 #define	PA_SC_WINDOW_SCISSOR_TL				0x28204
-#define	VGT_PRIMITIVE_TYPE				0x8958
 
+#define	VGT_PRIMITIVE_TYPE				0x8958
+#define	VGT_INDEX_TYPE					0x895C
+
+#define	VGT_NUM_INDICES					0x8970
+
+#define	VGT_COMPUTE_DIM_X				0x8990
+#define	VGT_COMPUTE_DIM_Y				0x8994
+#define	VGT_COMPUTE_DIM_Z				0x8998
+#define	VGT_COMPUTE_START_X				0x899C
+#define	VGT_COMPUTE_START_Y				0x89A0
+#define	VGT_COMPUTE_START_Z				0x89A4
+#define	VGT_COMPUTE_INDEX				0x89A8
+#define	VGT_COMPUTE_THREAD_GROUP_SIZE			0x89AC
+#define	VGT_HS_OFFCHIP_PARAM				0x89B0
+
+#define	DB_DEBUG					0x9830
+#define	DB_DEBUG2					0x9834
+#define	DB_DEBUG3					0x9838
+#define	DB_DEBUG4					0x983C
+#define	DB_WATERMARKS					0x9854
 #define	DB_DEPTH_CONTROL				0x28800
 #define	DB_DEPTH_VIEW					0x28008
 #define	DB_HTILE_DATA_BASE				0x28014
@@ -1189,8 +1218,40 @@
 #define	SQ_VTX_CONSTANT_WORD6_0				0x30018
 #define	SQ_VTX_CONSTANT_WORD7_0				0x3001c
 
+#define TD_PS_BORDER_COLOR_INDEX			0xA400
+#define TD_PS_BORDER_COLOR_RED				0xA404
+#define TD_PS_BORDER_COLOR_GREEN			0xA408
+#define TD_PS_BORDER_COLOR_BLUE				0xA40C
+#define TD_PS_BORDER_COLOR_ALPHA			0xA410
+#define TD_VS_BORDER_COLOR_INDEX			0xA414
+#define TD_VS_BORDER_COLOR_RED				0xA418
+#define TD_VS_BORDER_COLOR_GREEN			0xA41C
+#define TD_VS_BORDER_COLOR_BLUE				0xA420
+#define TD_VS_BORDER_COLOR_ALPHA			0xA424
+#define TD_GS_BORDER_COLOR_INDEX			0xA428
+#define TD_GS_BORDER_COLOR_RED				0xA42C
+#define TD_GS_BORDER_COLOR_GREEN			0xA430
+#define TD_GS_BORDER_COLOR_BLUE				0xA434
+#define TD_GS_BORDER_COLOR_ALPHA			0xA438
+#define TD_HS_BORDER_COLOR_INDEX			0xA43C
+#define TD_HS_BORDER_COLOR_RED				0xA440
+#define TD_HS_BORDER_COLOR_GREEN			0xA444
+#define TD_HS_BORDER_COLOR_BLUE				0xA448
+#define TD_HS_BORDER_COLOR_ALPHA			0xA44C
+#define TD_LS_BORDER_COLOR_INDEX			0xA450
+#define TD_LS_BORDER_COLOR_RED				0xA454
+#define TD_LS_BORDER_COLOR_GREEN			0xA458
+#define TD_LS_BORDER_COLOR_BLUE				0xA45C
+#define TD_LS_BORDER_COLOR_ALPHA			0xA460
+#define TD_CS_BORDER_COLOR_INDEX			0xA464
+#define TD_CS_BORDER_COLOR_RED				0xA468
+#define TD_CS_BORDER_COLOR_GREEN			0xA46C
+#define TD_CS_BORDER_COLOR_BLUE				0xA470
+#define TD_CS_BORDER_COLOR_ALPHA			0xA474
+
 /* cayman 3D regs */
-#define	CAYMAN_VGT_OFFCHIP_LDS_BASE			0x89B0
+#define	CAYMAN_VGT_OFFCHIP_LDS_BASE			0x89B4
+#define	CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS			0x8E48
 #define	CAYMAN_DB_EQAA					0x28804
 #define	CAYMAN_DB_DEPTH_INFO				0x2803C
 #define	CAYMAN_PA_SC_AA_CONFIG				0x28BE0
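One behavioural fix hides among the new definitions: CAYMAN_VGT_OFFCHIP_LDS_BASE moves from 0x89B0 to 0x89B4, so it no longer aliases the newly added VGT_HS_OFFCHIP_PARAM (0x89B0). A compile-time guard against such collisions could look like this (illustrative only, not in the patch):

	#include <linux/bug.h>

	static inline void cayman_reg_alias_check(void)
	{
		/* would have fired before this fix, when both were 0x89B0 */
		BUILD_BUG_ON(CAYMAN_VGT_OFFCHIP_LDS_BASE == VGT_HS_OFFCHIP_PARAM);
	}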
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 0e579985746..32113729540 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -934,7 +934,7 @@ void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev)
934 934
935int cayman_pcie_gart_enable(struct radeon_device *rdev) 935int cayman_pcie_gart_enable(struct radeon_device *rdev)
936{ 936{
937 int r; 937 int i, r;
938 938
939 if (rdev->gart.robj == NULL) { 939 if (rdev->gart.robj == NULL) {
940 dev_err(rdev->dev, "No VRAM object for PCIE GART.\n"); 940 dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
@@ -945,9 +945,12 @@ int cayman_pcie_gart_enable(struct radeon_device *rdev)
945 return r; 945 return r;
946 radeon_gart_restore(rdev); 946 radeon_gart_restore(rdev);
947 /* Setup TLB control */ 947 /* Setup TLB control */
948 WREG32(MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB | 948 WREG32(MC_VM_MX_L1_TLB_CNTL,
949 (0xA << 7) |
950 ENABLE_L1_TLB |
949 ENABLE_L1_FRAGMENT_PROCESSING | 951 ENABLE_L1_FRAGMENT_PROCESSING |
950 SYSTEM_ACCESS_MODE_NOT_IN_SYS | 952 SYSTEM_ACCESS_MODE_NOT_IN_SYS |
953 ENABLE_ADVANCED_DRIVER_MODEL |
951 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU); 954 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
952 /* Setup L2 cache */ 955 /* Setup L2 cache */
953 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | 956 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
@@ -967,9 +970,26 @@ int cayman_pcie_gart_enable(struct radeon_device *rdev)
967 WREG32(VM_CONTEXT0_CNTL2, 0); 970 WREG32(VM_CONTEXT0_CNTL2, 0);
968 WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | 971 WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
969 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); 972 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
970 /* disable context1-7 */ 973
974 WREG32(0x15D4, 0);
975 WREG32(0x15D8, 0);
976 WREG32(0x15DC, 0);
977
978 /* empty context1-7 */
979 for (i = 1; i < 8; i++) {
980 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0);
981 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), 0);
982 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
983 rdev->gart.table_addr >> 12);
984 }
985
986 /* enable context1-7 */
987 WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
988 (u32)(rdev->dummy_page.addr >> 12));
971 WREG32(VM_CONTEXT1_CNTL2, 0); 989 WREG32(VM_CONTEXT1_CNTL2, 0);
972 WREG32(VM_CONTEXT1_CNTL, 0); 990 WREG32(VM_CONTEXT1_CNTL, 0);
991 WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
992 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
973 993
974 cayman_pcie_gart_tlb_flush(rdev); 994 cayman_pcie_gart_tlb_flush(rdev);
975 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", 995 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -1006,9 +1026,69 @@ void cayman_pcie_gart_fini(struct radeon_device *rdev)
1006 radeon_gart_fini(rdev); 1026 radeon_gart_fini(rdev);
1007} 1027}
1008 1028
1029void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
1030 int ring, u32 cp_int_cntl)
1031{
1032 u32 srbm_gfx_cntl = RREG32(SRBM_GFX_CNTL) & ~3;
1033
1034 WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl | (ring & 3));
1035 WREG32(CP_INT_CNTL, cp_int_cntl);
1036}
1037
1009/* 1038/*
1010 * CP. 1039 * CP.
1011 */ 1040 */
1041void cayman_fence_ring_emit(struct radeon_device *rdev,
1042 struct radeon_fence *fence)
1043{
1044 struct radeon_ring *ring = &rdev->ring[fence->ring];
1045 u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
1046
1047 /* flush read cache over gart for this vmid */
1048 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
1049 radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
1050 radeon_ring_write(ring, 0);
1051 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
1052 radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
1053 radeon_ring_write(ring, 0xFFFFFFFF);
1054 radeon_ring_write(ring, 0);
1055 radeon_ring_write(ring, 10); /* poll interval */
1056 /* EVENT_WRITE_EOP - flush caches, send int */
1057 radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
1058 radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
1059 radeon_ring_write(ring, addr & 0xffffffff);
1060 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
1061 radeon_ring_write(ring, fence->seq);
1062 radeon_ring_write(ring, 0);
1063}
1064
1065void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
1066{
1067 struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
1068
1069 /* set to DX10/11 mode */
1070 radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
1071 radeon_ring_write(ring, 1);
1072 radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
1073 radeon_ring_write(ring,
1074#ifdef __BIG_ENDIAN
1075 (2 << 0) |
1076#endif
1077 (ib->gpu_addr & 0xFFFFFFFC));
1078 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
1079 radeon_ring_write(ring, ib->length_dw | (ib->vm_id << 24));
1080
1081 /* flush read cache over gart for this vmid */
1082 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
1083 radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
1084 radeon_ring_write(ring, ib->vm_id);
1085 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
1086 radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
1087 radeon_ring_write(ring, 0xFFFFFFFF);
1088 radeon_ring_write(ring, 0);
1089 radeon_ring_write(ring, 10); /* poll interval */
1090}
1091
1012static void cayman_cp_enable(struct radeon_device *rdev, bool enable) 1092static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
1013{ 1093{
1014 if (enable) 1094 if (enable)
@@ -1049,63 +1129,64 @@ static int cayman_cp_load_microcode(struct radeon_device *rdev)
1049 1129
1050static int cayman_cp_start(struct radeon_device *rdev) 1130static int cayman_cp_start(struct radeon_device *rdev)
1051{ 1131{
1132 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
1052 int r, i; 1133 int r, i;
1053 1134
1054 r = radeon_ring_lock(rdev, 7); 1135 r = radeon_ring_lock(rdev, ring, 7);
1055 if (r) { 1136 if (r) {
1056 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r); 1137 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
1057 return r; 1138 return r;
1058 } 1139 }
1059 radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5)); 1140 radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
1060 radeon_ring_write(rdev, 0x1); 1141 radeon_ring_write(ring, 0x1);
1061 radeon_ring_write(rdev, 0x0); 1142 radeon_ring_write(ring, 0x0);
1062 radeon_ring_write(rdev, rdev->config.cayman.max_hw_contexts - 1); 1143 radeon_ring_write(ring, rdev->config.cayman.max_hw_contexts - 1);
1063 radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1)); 1144 radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
1064 radeon_ring_write(rdev, 0); 1145 radeon_ring_write(ring, 0);
1065 radeon_ring_write(rdev, 0); 1146 radeon_ring_write(ring, 0);
1066 radeon_ring_unlock_commit(rdev); 1147 radeon_ring_unlock_commit(rdev, ring);
1067 1148
1068 cayman_cp_enable(rdev, true); 1149 cayman_cp_enable(rdev, true);
1069 1150
1070 r = radeon_ring_lock(rdev, cayman_default_size + 19); 1151 r = radeon_ring_lock(rdev, ring, cayman_default_size + 19);
1071 if (r) { 1152 if (r) {
1072 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r); 1153 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
1073 return r; 1154 return r;
1074 } 1155 }
1075 1156
1076 /* setup clear context state */ 1157 /* setup clear context state */
1077 radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); 1158 radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1078 radeon_ring_write(rdev, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE); 1159 radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
1079 1160
1080 for (i = 0; i < cayman_default_size; i++) 1161 for (i = 0; i < cayman_default_size; i++)
1081 radeon_ring_write(rdev, cayman_default_state[i]); 1162 radeon_ring_write(ring, cayman_default_state[i]);
1082 1163
1083 radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); 1164 radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1084 radeon_ring_write(rdev, PACKET3_PREAMBLE_END_CLEAR_STATE); 1165 radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
1085 1166
1086 /* set clear context state */ 1167 /* set clear context state */
1087 radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0)); 1168 radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
1088 radeon_ring_write(rdev, 0); 1169 radeon_ring_write(ring, 0);
1089 1170
1090 /* SQ_VTX_BASE_VTX_LOC */ 1171 /* SQ_VTX_BASE_VTX_LOC */
1091 radeon_ring_write(rdev, 0xc0026f00); 1172 radeon_ring_write(ring, 0xc0026f00);
1092 radeon_ring_write(rdev, 0x00000000); 1173 radeon_ring_write(ring, 0x00000000);
1093 radeon_ring_write(rdev, 0x00000000); 1174 radeon_ring_write(ring, 0x00000000);
1094 radeon_ring_write(rdev, 0x00000000); 1175 radeon_ring_write(ring, 0x00000000);
1095 1176
1096 /* Clear consts */ 1177 /* Clear consts */
1097 radeon_ring_write(rdev, 0xc0036f00); 1178 radeon_ring_write(ring, 0xc0036f00);
1098 radeon_ring_write(rdev, 0x00000bc4); 1179 radeon_ring_write(ring, 0x00000bc4);
1099 radeon_ring_write(rdev, 0xffffffff); 1180 radeon_ring_write(ring, 0xffffffff);
1100 radeon_ring_write(rdev, 0xffffffff); 1181 radeon_ring_write(ring, 0xffffffff);
1101 radeon_ring_write(rdev, 0xffffffff); 1182 radeon_ring_write(ring, 0xffffffff);
1102 1183
1103 radeon_ring_write(rdev, 0xc0026900); 1184 radeon_ring_write(ring, 0xc0026900);
1104 radeon_ring_write(rdev, 0x00000316); 1185 radeon_ring_write(ring, 0x00000316);
1105 radeon_ring_write(rdev, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */ 1186 radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
1106 radeon_ring_write(rdev, 0x00000010); /* */ 1187 radeon_ring_write(ring, 0x00000010); /* */
1107 1188
1108 radeon_ring_unlock_commit(rdev); 1189 radeon_ring_unlock_commit(rdev, ring);
1109 1190
1110 /* XXX init other rings */ 1191 /* XXX init other rings */
1111 1192
@@ -1115,11 +1196,12 @@ static int cayman_cp_start(struct radeon_device *rdev)
1115static void cayman_cp_fini(struct radeon_device *rdev) 1196static void cayman_cp_fini(struct radeon_device *rdev)
1116{ 1197{
1117 cayman_cp_enable(rdev, false); 1198 cayman_cp_enable(rdev, false);
1118 radeon_ring_fini(rdev); 1199 radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
1119} 1200}
1120 1201
1121int cayman_cp_resume(struct radeon_device *rdev) 1202int cayman_cp_resume(struct radeon_device *rdev)
1122{ 1203{
1204 struct radeon_ring *ring;
1123 u32 tmp; 1205 u32 tmp;
1124 u32 rb_bufsz; 1206 u32 rb_bufsz;
1125 int r; 1207 int r;
@@ -1136,7 +1218,7 @@ int cayman_cp_resume(struct radeon_device *rdev)
1136 WREG32(GRBM_SOFT_RESET, 0); 1218 WREG32(GRBM_SOFT_RESET, 0);
1137 RREG32(GRBM_SOFT_RESET); 1219 RREG32(GRBM_SOFT_RESET);
1138 1220
1139 WREG32(CP_SEM_WAIT_TIMER, 0x4); 1221 WREG32(CP_SEM_WAIT_TIMER, 0x0);
1140 1222
1141 /* Set the write pointer delay */ 1223 /* Set the write pointer delay */
1142 WREG32(CP_RB_WPTR_DELAY, 0); 1224 WREG32(CP_RB_WPTR_DELAY, 0);
@@ -1145,7 +1227,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
1145 1227
1146 /* ring 0 - compute and gfx */ 1228 /* ring 0 - compute and gfx */
1147 /* Set ring buffer size */ 1229 /* Set ring buffer size */
1148 rb_bufsz = drm_order(rdev->cp.ring_size / 8); 1230 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
1231 rb_bufsz = drm_order(ring->ring_size / 8);
1149 tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; 1232 tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
1150#ifdef __BIG_ENDIAN 1233#ifdef __BIG_ENDIAN
1151 tmp |= BUF_SWAP_32BIT; 1234 tmp |= BUF_SWAP_32BIT;
@@ -1154,8 +1237,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
1154 1237
1155 /* Initialize the ring buffer's read and write pointers */ 1238 /* Initialize the ring buffer's read and write pointers */
1156 WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA); 1239 WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
1157 rdev->cp.wptr = 0; 1240 ring->wptr = 0;
1158 WREG32(CP_RB0_WPTR, rdev->cp.wptr); 1241 WREG32(CP_RB0_WPTR, ring->wptr);
1159 1242
1160 /* set the wb address wether it's enabled or not */ 1243 /* set the wb address wether it's enabled or not */
1161 WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC); 1244 WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1172,13 +1255,14 @@ int cayman_cp_resume(struct radeon_device *rdev)
1172 mdelay(1); 1255 mdelay(1);
1173 WREG32(CP_RB0_CNTL, tmp); 1256 WREG32(CP_RB0_CNTL, tmp);
1174 1257
1175 WREG32(CP_RB0_BASE, rdev->cp.gpu_addr >> 8); 1258 WREG32(CP_RB0_BASE, ring->gpu_addr >> 8);
1176 1259
1177 rdev->cp.rptr = RREG32(CP_RB0_RPTR); 1260 ring->rptr = RREG32(CP_RB0_RPTR);
1178 1261
1179 /* ring1 - compute only */ 1262 /* ring1 - compute only */
1180 /* Set ring buffer size */ 1263 /* Set ring buffer size */
1181 rb_bufsz = drm_order(rdev->cp1.ring_size / 8); 1264 ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
1265 rb_bufsz = drm_order(ring->ring_size / 8);
1182 tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; 1266 tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
1183#ifdef __BIG_ENDIAN 1267#ifdef __BIG_ENDIAN
1184 tmp |= BUF_SWAP_32BIT; 1268 tmp |= BUF_SWAP_32BIT;
@@ -1187,8 +1271,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
1187 1271
1188 /* Initialize the ring buffer's read and write pointers */ 1272 /* Initialize the ring buffer's read and write pointers */
1189 WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA); 1273 WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
1190 rdev->cp1.wptr = 0; 1274 ring->wptr = 0;
1191 WREG32(CP_RB1_WPTR, rdev->cp1.wptr); 1275 WREG32(CP_RB1_WPTR, ring->wptr);
1192 1276
1193 /* set the wb address wether it's enabled or not */ 1277 /* set the wb address wether it's enabled or not */
1194 WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC); 1278 WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1197,13 +1281,14 @@ int cayman_cp_resume(struct radeon_device *rdev)
1197 mdelay(1); 1281 mdelay(1);
1198 WREG32(CP_RB1_CNTL, tmp); 1282 WREG32(CP_RB1_CNTL, tmp);
1199 1283
1200 WREG32(CP_RB1_BASE, rdev->cp1.gpu_addr >> 8); 1284 WREG32(CP_RB1_BASE, ring->gpu_addr >> 8);
1201 1285
1202 rdev->cp1.rptr = RREG32(CP_RB1_RPTR); 1286 ring->rptr = RREG32(CP_RB1_RPTR);
1203 1287
1204 /* ring2 - compute only */ 1288 /* ring2 - compute only */
1205 /* Set ring buffer size */ 1289 /* Set ring buffer size */
1206 rb_bufsz = drm_order(rdev->cp2.ring_size / 8); 1290 ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
1291 rb_bufsz = drm_order(ring->ring_size / 8);
1207 tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; 1292 tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
1208#ifdef __BIG_ENDIAN 1293#ifdef __BIG_ENDIAN
1209 tmp |= BUF_SWAP_32BIT; 1294 tmp |= BUF_SWAP_32BIT;
@@ -1212,8 +1297,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
1212 1297
1213 /* Initialize the ring buffer's read and write pointers */ 1298 /* Initialize the ring buffer's read and write pointers */
1214 WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA); 1299 WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
1215 rdev->cp2.wptr = 0; 1300 ring->wptr = 0;
1216 WREG32(CP_RB2_WPTR, rdev->cp2.wptr); 1301 WREG32(CP_RB2_WPTR, ring->wptr);
1217 1302
1218 /* set the wb address wether it's enabled or not */ 1303 /* set the wb address wether it's enabled or not */
1219 WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC); 1304 WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1222,28 +1307,28 @@ int cayman_cp_resume(struct radeon_device *rdev)
1222 mdelay(1); 1307 mdelay(1);
1223 WREG32(CP_RB2_CNTL, tmp); 1308 WREG32(CP_RB2_CNTL, tmp);
1224 1309
1225 WREG32(CP_RB2_BASE, rdev->cp2.gpu_addr >> 8); 1310 WREG32(CP_RB2_BASE, ring->gpu_addr >> 8);
1226 1311
1227 rdev->cp2.rptr = RREG32(CP_RB2_RPTR); 1312 ring->rptr = RREG32(CP_RB2_RPTR);
1228 1313
1229 /* start the rings */ 1314 /* start the rings */
1230 cayman_cp_start(rdev); 1315 cayman_cp_start(rdev);
1231 rdev->cp.ready = true; 1316 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
1232 rdev->cp1.ready = true; 1317 rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
1233 rdev->cp2.ready = true; 1318 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
1234 /* this only test cp0 */ 1319 /* this only test cp0 */
1235 r = radeon_ring_test(rdev); 1320 r = radeon_ring_test(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
1236 if (r) { 1321 if (r) {
1237 rdev->cp.ready = false; 1322 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
1238 rdev->cp1.ready = false; 1323 rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
1239 rdev->cp2.ready = false; 1324 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
1240 return r; 1325 return r;
1241 } 1326 }
1242 1327
1243 return 0; 1328 return 0;
1244} 1329}
1245 1330
1246bool cayman_gpu_is_lockup(struct radeon_device *rdev) 1331bool cayman_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
1247{ 1332{
1248 u32 srbm_status; 1333 u32 srbm_status;
1249 u32 grbm_status; 1334 u32 grbm_status;
@@ -1256,20 +1341,20 @@ bool cayman_gpu_is_lockup(struct radeon_device *rdev)
1256 grbm_status_se0 = RREG32(GRBM_STATUS_SE0); 1341 grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
1257 grbm_status_se1 = RREG32(GRBM_STATUS_SE1); 1342 grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
1258 if (!(grbm_status & GUI_ACTIVE)) { 1343 if (!(grbm_status & GUI_ACTIVE)) {
1259 r100_gpu_lockup_update(lockup, &rdev->cp); 1344 r100_gpu_lockup_update(lockup, ring);
1260 return false; 1345 return false;
1261 } 1346 }
1262 /* force CP activities */ 1347 /* force CP activities */
1263 r = radeon_ring_lock(rdev, 2); 1348 r = radeon_ring_lock(rdev, ring, 2);
1264 if (!r) { 1349 if (!r) {
1265 /* PACKET2 NOP */ 1350 /* PACKET2 NOP */
1266 radeon_ring_write(rdev, 0x80000000); 1351 radeon_ring_write(ring, 0x80000000);
1267 radeon_ring_write(rdev, 0x80000000); 1352 radeon_ring_write(ring, 0x80000000);
1268 radeon_ring_unlock_commit(rdev); 1353 radeon_ring_unlock_commit(rdev, ring);
1269 } 1354 }
1270 /* XXX deal with CP0,1,2 */ 1355 /* XXX deal with CP0,1,2 */
1271 rdev->cp.rptr = RREG32(CP_RB0_RPTR); 1356 ring->rptr = RREG32(ring->rptr_reg);
1272 return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp); 1357 return r100_gpu_cp_is_lockup(rdev, lockup, ring);
1273} 1358}
1274 1359
1275static int cayman_gpu_soft_reset(struct radeon_device *rdev) 1360static int cayman_gpu_soft_reset(struct radeon_device *rdev)
@@ -1289,6 +1374,15 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev)
1289 RREG32(GRBM_STATUS_SE1)); 1374 RREG32(GRBM_STATUS_SE1));
1290 dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n", 1375 dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
1291 RREG32(SRBM_STATUS)); 1376 RREG32(SRBM_STATUS));
1377 dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_ADDR 0x%08X\n",
1378 RREG32(0x14F8));
1379 dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
1380 RREG32(0x14D8));
1381 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
1382 RREG32(0x14FC));
1383 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
1384 RREG32(0x14DC));
1385
1292 evergreen_mc_stop(rdev, &save); 1386 evergreen_mc_stop(rdev, &save);
1293 if (evergreen_mc_wait_for_idle(rdev)) { 1387 if (evergreen_mc_wait_for_idle(rdev)) {
1294 dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); 1388 dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
@@ -1319,6 +1413,7 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev)
1319 (void)RREG32(GRBM_SOFT_RESET); 1413 (void)RREG32(GRBM_SOFT_RESET);
1320 /* Wait a little for things to settle down */ 1414 /* Wait a little for things to settle down */
1321 udelay(50); 1415 udelay(50);
1416
1322 dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n", 1417 dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
1323 RREG32(GRBM_STATUS)); 1418 RREG32(GRBM_STATUS));
1324 dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n", 1419 dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
@@ -1338,6 +1433,7 @@ int cayman_asic_reset(struct radeon_device *rdev)
1338 1433
1339static int cayman_startup(struct radeon_device *rdev) 1434static int cayman_startup(struct radeon_device *rdev)
1340{ 1435{
1436 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
1341 int r; 1437 int r;
1342 1438
1343 /* enable pcie gen2 link */ 1439 /* enable pcie gen2 link */
@@ -1378,6 +1474,24 @@ static int cayman_startup(struct radeon_device *rdev)
1378 if (r) 1474 if (r)
1379 return r; 1475 return r;
1380 1476
1477 r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
1478 if (r) {
1479 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
1480 return r;
1481 }
1482
1483 r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
1484 if (r) {
1485 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
1486 return r;
1487 }
1488
1489 r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
1490 if (r) {
1491 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
1492 return r;
1493 }
1494
1381 /* Enable IRQ */ 1495 /* Enable IRQ */
1382 r = r600_irq_init(rdev); 1496 r = r600_irq_init(rdev);
1383 if (r) { 1497 if (r) {
@@ -1387,7 +1501,9 @@ static int cayman_startup(struct radeon_device *rdev)
1387 } 1501 }
1388 evergreen_irq_set(rdev); 1502 evergreen_irq_set(rdev);
1389 1503
1390 r = radeon_ring_init(rdev, rdev->cp.ring_size); 1504 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
1505 CP_RB0_RPTR, CP_RB0_WPTR,
1506 0, 0xfffff, RADEON_CP_PACKET2);
1391 if (r) 1507 if (r)
1392 return r; 1508 return r;
1393 r = cayman_cp_load_microcode(rdev); 1509 r = cayman_cp_load_microcode(rdev);
@@ -1397,6 +1513,21 @@ static int cayman_startup(struct radeon_device *rdev)
1397 if (r) 1513 if (r)
1398 return r; 1514 return r;
1399 1515
1516 r = radeon_ib_pool_start(rdev);
1517 if (r)
1518 return r;
1519
1520 r = r600_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX);
1521 if (r) {
1522 DRM_ERROR("radeon: failed testing IB (%d).\n", r);
1523 rdev->accel_working = false;
1524 return r;
1525 }
1526
1527 r = radeon_vm_manager_start(rdev);
1528 if (r)
1529 return r;
1530
1400 return 0; 1531 return 0;
1401} 1532}
1402 1533
@@ -1411,32 +1542,26 @@ int cayman_resume(struct radeon_device *rdev)
1411 /* post card */ 1542 /* post card */
1412 atom_asic_init(rdev->mode_info.atom_context); 1543 atom_asic_init(rdev->mode_info.atom_context);
1413 1544
1545 rdev->accel_working = true;
1414 r = cayman_startup(rdev); 1546 r = cayman_startup(rdev);
1415 if (r) { 1547 if (r) {
1416 DRM_ERROR("cayman startup failed on resume\n"); 1548 DRM_ERROR("cayman startup failed on resume\n");
1417 return r; 1549 return r;
1418 } 1550 }
1419
1420 r = r600_ib_test(rdev);
1421 if (r) {
1422 DRM_ERROR("radeon: failled testing IB (%d).\n", r);
1423 return r;
1424 }
1425
1426 return r; 1551 return r;
1427
1428} 1552}
1429 1553
1430int cayman_suspend(struct radeon_device *rdev) 1554int cayman_suspend(struct radeon_device *rdev)
1431{ 1555{
1432 /* FIXME: we should wait for ring to be empty */ 1556 /* FIXME: we should wait for ring to be empty */
1557 radeon_ib_pool_suspend(rdev);
1558 radeon_vm_manager_suspend(rdev);
1559 r600_blit_suspend(rdev);
1433 cayman_cp_enable(rdev, false); 1560 cayman_cp_enable(rdev, false);
1434 rdev->cp.ready = false; 1561 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
1435 evergreen_irq_suspend(rdev); 1562 evergreen_irq_suspend(rdev);
1436 radeon_wb_disable(rdev); 1563 radeon_wb_disable(rdev);
1437 cayman_pcie_gart_disable(rdev); 1564 cayman_pcie_gart_disable(rdev);
1438 r600_blit_suspend(rdev);
1439
1440 return 0; 1565 return 0;
1441} 1566}
1442 1567
@@ -1448,6 +1573,7 @@ int cayman_suspend(struct radeon_device *rdev)
1448 */ 1573 */
1449int cayman_init(struct radeon_device *rdev) 1574int cayman_init(struct radeon_device *rdev)
1450{ 1575{
1576 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
1451 int r; 1577 int r;
1452 1578
1453 /* This don't do much */ 1579 /* This don't do much */
@@ -1500,8 +1626,8 @@ int cayman_init(struct radeon_device *rdev)
1500 if (r) 1626 if (r)
1501 return r; 1627 return r;
1502 1628
1503 rdev->cp.ring_obj = NULL; 1629 ring->ring_obj = NULL;
1504 r600_ring_init(rdev, 1024 * 1024); 1630 r600_ring_init(rdev, ring, 1024 * 1024);
1505 1631
1506 rdev->ih.ring_obj = NULL; 1632 rdev->ih.ring_obj = NULL;
1507 r600_ih_ring_init(rdev, 64 * 1024); 1633 r600_ih_ring_init(rdev, 64 * 1024);
@@ -1510,29 +1636,29 @@ int cayman_init(struct radeon_device *rdev)
1510 if (r) 1636 if (r)
1511 return r; 1637 return r;
1512 1638
1639 r = radeon_ib_pool_init(rdev);
1513 rdev->accel_working = true; 1640 rdev->accel_working = true;
1641 if (r) {
1642 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
1643 rdev->accel_working = false;
1644 }
1645 r = radeon_vm_manager_init(rdev);
1646 if (r) {
1647 dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
1648 }
1649
1514 r = cayman_startup(rdev); 1650 r = cayman_startup(rdev);
1515 if (r) { 1651 if (r) {
1516 dev_err(rdev->dev, "disabling GPU acceleration\n"); 1652 dev_err(rdev->dev, "disabling GPU acceleration\n");
1517 cayman_cp_fini(rdev); 1653 cayman_cp_fini(rdev);
1518 r600_irq_fini(rdev); 1654 r600_irq_fini(rdev);
1519 radeon_wb_fini(rdev); 1655 radeon_wb_fini(rdev);
1656 r100_ib_fini(rdev);
1657 radeon_vm_manager_fini(rdev);
1520 radeon_irq_kms_fini(rdev); 1658 radeon_irq_kms_fini(rdev);
1521 cayman_pcie_gart_fini(rdev); 1659 cayman_pcie_gart_fini(rdev);
1522 rdev->accel_working = false; 1660 rdev->accel_working = false;
1523 } 1661 }
1524 if (rdev->accel_working) {
1525 r = radeon_ib_pool_init(rdev);
1526 if (r) {
1527 DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
1528 rdev->accel_working = false;
1529 }
1530 r = r600_ib_test(rdev);
1531 if (r) {
1532 DRM_ERROR("radeon: failed testing IB (%d).\n", r);
1533 rdev->accel_working = false;
1534 }
1535 }
1536 1662
1537 /* Don't start up if the MC ucode is missing. 1663 /* Don't start up if the MC ucode is missing.
1538 * The default clocks and voltages before the MC ucode 1664 * The default clocks and voltages before the MC ucode
@@ -1552,11 +1678,13 @@ void cayman_fini(struct radeon_device *rdev)
1552 cayman_cp_fini(rdev); 1678 cayman_cp_fini(rdev);
1553 r600_irq_fini(rdev); 1679 r600_irq_fini(rdev);
1554 radeon_wb_fini(rdev); 1680 radeon_wb_fini(rdev);
1555 radeon_ib_pool_fini(rdev); 1681 radeon_vm_manager_fini(rdev);
1682 r100_ib_fini(rdev);
1556 radeon_irq_kms_fini(rdev); 1683 radeon_irq_kms_fini(rdev);
1557 cayman_pcie_gart_fini(rdev); 1684 cayman_pcie_gart_fini(rdev);
1558 r600_vram_scratch_fini(rdev); 1685 r600_vram_scratch_fini(rdev);
1559 radeon_gem_fini(rdev); 1686 radeon_gem_fini(rdev);
1687 radeon_semaphore_driver_fini(rdev);
1560 radeon_fence_driver_fini(rdev); 1688 radeon_fence_driver_fini(rdev);
1561 radeon_bo_fini(rdev); 1689 radeon_bo_fini(rdev);
1562 radeon_atombios_fini(rdev); 1690 radeon_atombios_fini(rdev);
@@ -1564,3 +1692,84 @@ void cayman_fini(struct radeon_device *rdev)
1564 rdev->bios = NULL; 1692 rdev->bios = NULL;
1565} 1693}
1566 1694
1695/*
1696 * vm
1697 */
1698int cayman_vm_init(struct radeon_device *rdev)
1699{
1700 /* number of VMs */
1701 rdev->vm_manager.nvm = 8;
1702 /* base offset of vram pages */
1703 rdev->vm_manager.vram_base_offset = 0;
1704 return 0;
1705}
1706
1707void cayman_vm_fini(struct radeon_device *rdev)
1708{
1709}
1710
1711int cayman_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm, int id)
1712{
1713 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (id << 2), 0);
1714 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (id << 2), vm->last_pfn);
1715 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (id << 2), vm->pt_gpu_addr >> 12);
1716 /* flush hdp cache */
1717 WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
 1718 /* bits 0-7 are the VM contexts 0-7 */
1719 WREG32(VM_INVALIDATE_REQUEST, 1 << id);
1720 return 0;
1721}
1722
1723void cayman_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm)
1724{
1725 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (vm->id << 2), 0);
1726 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (vm->id << 2), 0);
1727 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0);
1728 /* flush hdp cache */
1729 WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
 1730 /* bits 0-7 are the VM contexts 0-7 */
1731 WREG32(VM_INVALIDATE_REQUEST, 1 << vm->id);
1732}
1733
1734void cayman_vm_tlb_flush(struct radeon_device *rdev, struct radeon_vm *vm)
1735{
1736 if (vm->id == -1)
1737 return;
1738
1739 /* flush hdp cache */
1740 WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
 1741 /* bits 0-7 are the VM contexts 0-7 */
1742 WREG32(VM_INVALIDATE_REQUEST, 1 << vm->id);
1743}
1744
1745#define R600_PTE_VALID (1 << 0)
1746#define R600_PTE_SYSTEM (1 << 1)
1747#define R600_PTE_SNOOPED (1 << 2)
1748#define R600_PTE_READABLE (1 << 5)
1749#define R600_PTE_WRITEABLE (1 << 6)
1750
1751uint32_t cayman_vm_page_flags(struct radeon_device *rdev,
1752 struct radeon_vm *vm,
1753 uint32_t flags)
1754{
1755 uint32_t r600_flags = 0;
1756
1757 r600_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
1758 r600_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
1759 r600_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
1760 if (flags & RADEON_VM_PAGE_SYSTEM) {
1761 r600_flags |= R600_PTE_SYSTEM;
1762 r600_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
1763 }
1764 return r600_flags;
1765}
1766
1767void cayman_vm_set_page(struct radeon_device *rdev, struct radeon_vm *vm,
1768 unsigned pfn, uint64_t addr, uint32_t flags)
1769{
1770 void __iomem *ptr = (void *)vm->pt;
1771
1772 addr = addr & 0xFFFFFFFFFFFFF000ULL;
1773 addr |= flags;
1774 writeq(addr, ptr + (pfn * 8));
1775}
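cayman_vm_set_page() composes a page-table entry directly in the CPU-mapped table: the 4 KiB-aligned address occupies the high bits and the R600_PTE_* flags the low twelve. A standalone model of that composition (plain C; the sample address is made up):

#include <stdint.h>
#include <stdio.h>

/* Flag bits copied from the nid.c hunk above. */
#define R600_PTE_VALID		(1 << 0)
#define R600_PTE_SYSTEM		(1 << 1)
#define R600_PTE_SNOOPED	(1 << 2)
#define R600_PTE_READABLE	(1 << 5)
#define R600_PTE_WRITEABLE	(1 << 6)

static uint64_t make_pte(uint64_t addr, uint32_t flags)
{
	addr &= 0xFFFFFFFFFFFFF000ULL;	/* keep bits 12 and up */
	return addr | flags;		/* flags live below bit 12 */
}

int main(void)
{
	/* sample address, made up for the demonstration */
	uint64_t pte = make_pte(0x123456789000ULL,
				R600_PTE_VALID | R600_PTE_SYSTEM |
				R600_PTE_READABLE | R600_PTE_WRITEABLE);

	printf("pte = 0x%016llx\n", (unsigned long long)pte);
	return 0;
}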
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index 4672869cdb2..f9df2a645e7 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -42,6 +42,9 @@
42#define CAYMAN_MAX_TCC_MASK 0xFF 42#define CAYMAN_MAX_TCC_MASK 0xFF
43 43
44#define DMIF_ADDR_CONFIG 0xBD4 44#define DMIF_ADDR_CONFIG 0xBD4
45#define SRBM_GFX_CNTL 0x0E44
46#define RINGID(x) (((x) & 0x3) << 0)
47#define VMID(x) (((x) & 0x7) << 0)
45#define SRBM_STATUS 0x0E50 48#define SRBM_STATUS 0x0E50
46 49
47#define VM_CONTEXT0_REQUEST_RESPONSE 0x1470 50#define VM_CONTEXT0_REQUEST_RESPONSE 0x1470
@@ -219,6 +222,7 @@
219#define SCRATCH_UMSK 0x8540 222#define SCRATCH_UMSK 0x8540
220#define SCRATCH_ADDR 0x8544 223#define SCRATCH_ADDR 0x8544
221#define CP_SEM_WAIT_TIMER 0x85BC 224#define CP_SEM_WAIT_TIMER 0x85BC
225#define CP_COHER_CNTL2 0x85E8
222#define CP_ME_CNTL 0x86D8 226#define CP_ME_CNTL 0x86D8
223#define CP_ME_HALT (1 << 28) 227#define CP_ME_HALT (1 << 28)
224#define CP_PFP_HALT (1 << 26) 228#define CP_PFP_HALT (1 << 26)
@@ -394,6 +398,12 @@
394#define CP_RB0_RPTR_ADDR 0xC10C 398#define CP_RB0_RPTR_ADDR 0xC10C
395#define CP_RB0_RPTR_ADDR_HI 0xC110 399#define CP_RB0_RPTR_ADDR_HI 0xC110
396#define CP_RB0_WPTR 0xC114 400#define CP_RB0_WPTR 0xC114
401
402#define CP_INT_CNTL 0xC124
403# define CNTX_BUSY_INT_ENABLE (1 << 19)
404# define CNTX_EMPTY_INT_ENABLE (1 << 20)
405# define TIME_STAMP_INT_ENABLE (1 << 26)
406
397#define CP_RB1_BASE 0xC180 407#define CP_RB1_BASE 0xC180
398#define CP_RB1_CNTL 0xC184 408#define CP_RB1_CNTL 0xC184
399#define CP_RB1_RPTR_ADDR 0xC188 409#define CP_RB1_RPTR_ADDR 0xC188
@@ -411,6 +421,10 @@
411#define CP_ME_RAM_DATA 0xC160 421#define CP_ME_RAM_DATA 0xC160
412#define CP_DEBUG 0xC1FC 422#define CP_DEBUG 0xC1FC
413 423
424#define VGT_EVENT_INITIATOR 0x28a90
425# define CACHE_FLUSH_AND_INV_EVENT_TS (0x14 << 0)
426# define CACHE_FLUSH_AND_INV_EVENT (0x16 << 0)
427
414/* 428/*
415 * PM4 429 * PM4
416 */ 430 */
@@ -445,6 +459,7 @@
445#define PACKET3_DISPATCH_DIRECT 0x15 459#define PACKET3_DISPATCH_DIRECT 0x15
446#define PACKET3_DISPATCH_INDIRECT 0x16 460#define PACKET3_DISPATCH_INDIRECT 0x16
447#define PACKET3_INDIRECT_BUFFER_END 0x17 461#define PACKET3_INDIRECT_BUFFER_END 0x17
462#define PACKET3_MODE_CONTROL 0x18
448#define PACKET3_SET_PREDICATION 0x20 463#define PACKET3_SET_PREDICATION 0x20
449#define PACKET3_REG_RMW 0x21 464#define PACKET3_REG_RMW 0x21
450#define PACKET3_COND_EXEC 0x22 465#define PACKET3_COND_EXEC 0x22
@@ -494,7 +509,27 @@
494#define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16) 509#define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
495#define PACKET3_COND_WRITE 0x45 510#define PACKET3_COND_WRITE 0x45
496#define PACKET3_EVENT_WRITE 0x46 511#define PACKET3_EVENT_WRITE 0x46
512#define EVENT_TYPE(x) ((x) << 0)
513#define EVENT_INDEX(x) ((x) << 8)
514 /* 0 - any non-TS event
515 * 1 - ZPASS_DONE
516 * 2 - SAMPLE_PIPELINESTAT
517 * 3 - SAMPLE_STREAMOUTSTAT*
518 * 4 - *S_PARTIAL_FLUSH
519 * 5 - TS events
520 */
497#define PACKET3_EVENT_WRITE_EOP 0x47 521#define PACKET3_EVENT_WRITE_EOP 0x47
522#define DATA_SEL(x) ((x) << 29)
523 /* 0 - discard
524 * 1 - send low 32bit data
525 * 2 - send 64bit data
526 * 3 - send 64bit counter value
527 */
528#define INT_SEL(x) ((x) << 24)
529 /* 0 - none
530 * 1 - interrupt only (DATA_SEL = 0)
531 * 2 - interrupt when data write is confirmed
532 */
498#define PACKET3_EVENT_WRITE_EOS 0x48 533#define PACKET3_EVENT_WRITE_EOS 0x48
499#define PACKET3_PREAMBLE_CNTL 0x4A 534#define PACKET3_PREAMBLE_CNTL 0x4A
500# define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE (2 << 28) 535# define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE (2 << 28)
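The new EVENT_TYPE/EVENT_INDEX and DATA_SEL/INT_SEL helpers parameterize the EVENT_WRITE_EOP fence packet: which event fires, what data the CP writes back, and whether an interrupt follows. A sketch of composing those dwords with the values a timestamp fence would plausibly use (the specific combination is an assumption based on the comments above, not taken from this patch):

#include <stdint.h>
#include <stdio.h>

/* Helpers copied from the nid.h hunk above. */
#define EVENT_TYPE(x)	((x) << 0)
#define EVENT_INDEX(x)	((x) << 8)
#define DATA_SEL(x)	((x) << 29)
#define INT_SEL(x)	((x) << 24)
#define CACHE_FLUSH_AND_INV_EVENT_TS	(0x14 << 0)

int main(void)
{
	/* flush caches as a TS event (EVENT_INDEX 5), have the CP write
	 * 64-bit fence data (DATA_SEL 2) and raise an interrupt once the
	 * write is confirmed (INT_SEL 2) */
	uint32_t event_dw = EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) |
			    EVENT_INDEX(5);
	uint32_t sel_dw = DATA_SEL(2) | INT_SEL(2);

	printf("event 0x%08x, data/int select 0x%08x\n", event_dw, sel_dw);
	return 0;
}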
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index bfc08f6320f..3ec81c3d510 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -667,7 +667,7 @@ int r100_irq_set(struct radeon_device *rdev)
667 WREG32(R_000040_GEN_INT_CNTL, 0); 667 WREG32(R_000040_GEN_INT_CNTL, 0);
668 return -EINVAL; 668 return -EINVAL;
669 } 669 }
670 if (rdev->irq.sw_int) { 670 if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
671 tmp |= RADEON_SW_INT_ENABLE; 671 tmp |= RADEON_SW_INT_ENABLE;
672 } 672 }
673 if (rdev->irq.gui_idle) { 673 if (rdev->irq.gui_idle) {
@@ -739,7 +739,7 @@ int r100_irq_process(struct radeon_device *rdev)
739 while (status) { 739 while (status) {
740 /* SW interrupt */ 740 /* SW interrupt */
741 if (status & RADEON_SW_INT_TEST) { 741 if (status & RADEON_SW_INT_TEST) {
742 radeon_fence_process(rdev); 742 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
743 } 743 }
744 /* gui idle interrupt */ 744 /* gui idle interrupt */
745 if (status & RADEON_GUI_IDLE_STAT) { 745 if (status & RADEON_GUI_IDLE_STAT) {
@@ -811,25 +811,36 @@ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
811void r100_fence_ring_emit(struct radeon_device *rdev, 811void r100_fence_ring_emit(struct radeon_device *rdev,
812 struct radeon_fence *fence) 812 struct radeon_fence *fence)
813{ 813{
814 struct radeon_ring *ring = &rdev->ring[fence->ring];
815
814 /* We have to make sure that caches are flushed before 816 /* We have to make sure that caches are flushed before
815 * CPU might read something from VRAM. */ 817 * CPU might read something from VRAM. */
816 radeon_ring_write(rdev, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0)); 818 radeon_ring_write(ring, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
817 radeon_ring_write(rdev, RADEON_RB3D_DC_FLUSH_ALL); 819 radeon_ring_write(ring, RADEON_RB3D_DC_FLUSH_ALL);
818 radeon_ring_write(rdev, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0)); 820 radeon_ring_write(ring, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
819 radeon_ring_write(rdev, RADEON_RB3D_ZC_FLUSH_ALL); 821 radeon_ring_write(ring, RADEON_RB3D_ZC_FLUSH_ALL);
820 /* Wait until IDLE & CLEAN */ 822 /* Wait until IDLE & CLEAN */
821 radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0)); 823 radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
822 radeon_ring_write(rdev, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN); 824 radeon_ring_write(ring, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
823 radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0)); 825 radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
824 radeon_ring_write(rdev, rdev->config.r100.hdp_cntl | 826 radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
825 RADEON_HDP_READ_BUFFER_INVALIDATE); 827 RADEON_HDP_READ_BUFFER_INVALIDATE);
826 radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0)); 828 radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
827 radeon_ring_write(rdev, rdev->config.r100.hdp_cntl); 829 radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
828 /* Emit fence sequence & fire IRQ */ 830 /* Emit fence sequence & fire IRQ */
829 radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0)); 831 radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
830 radeon_ring_write(rdev, fence->seq); 832 radeon_ring_write(ring, fence->seq);
831 radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0)); 833 radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0));
832 radeon_ring_write(rdev, RADEON_SW_INT_FIRE); 834 radeon_ring_write(ring, RADEON_SW_INT_FIRE);
835}
836
837void r100_semaphore_ring_emit(struct radeon_device *rdev,
838 struct radeon_ring *ring,
839 struct radeon_semaphore *semaphore,
840 bool emit_wait)
841{
842 /* Unused on older asics, since we don't have semaphores or multiple rings */
843 BUG();
833} 844}
834 845
835int r100_copy_blit(struct radeon_device *rdev, 846int r100_copy_blit(struct radeon_device *rdev,
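r100_fence_ring_emit() now indexes rdev->fence_drv[] by fence->ring, so each ring gets a private scratch register and sequence instead of one device-wide fence driver. A toy model of that per-ring layout (field names and the register values below are made up for illustration):

#include <stdint.h>
#include <stdio.h>

#define NUM_RINGS 3	/* e.g. GFX plus two compute rings */

/* Toy per-ring fence bookkeeping. */
struct model_fence_drv {
	uint32_t scratch_reg;
	uint32_t last_seq;
};

static struct model_fence_drv fence_drv[NUM_RINGS];

static void fence_emit(int ring, uint32_t seq)
{
	/* the CP would WREG32(fence_drv[ring].scratch_reg, seq) here */
	fence_drv[ring].last_seq = seq;
	printf("ring %d: seq %u -> scratch 0x%x\n", ring, seq,
	       fence_drv[ring].scratch_reg);
}

int main(void)
{
	int i;

	for (i = 0; i < NUM_RINGS; i++)
		fence_drv[i].scratch_reg = 0x15e0 + 4 * i;
	fence_emit(0, 1);
	fence_emit(1, 1);
	return 0;
}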
@@ -838,6 +849,7 @@ int r100_copy_blit(struct radeon_device *rdev,
838 unsigned num_gpu_pages, 849 unsigned num_gpu_pages,
839 struct radeon_fence *fence) 850 struct radeon_fence *fence)
840{ 851{
852 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
841 uint32_t cur_pages; 853 uint32_t cur_pages;
842 uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE; 854 uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;
843 uint32_t pitch; 855 uint32_t pitch;
@@ -855,7 +867,7 @@ int r100_copy_blit(struct radeon_device *rdev,
855 867
856 /* Ask for enough room for blit + flush + fence */ 868 /* Ask for enough room for blit + flush + fence */
857 ndw = 64 + (10 * num_loops); 869 ndw = 64 + (10 * num_loops);
858 r = radeon_ring_lock(rdev, ndw); 870 r = radeon_ring_lock(rdev, ring, ndw);
859 if (r) { 871 if (r) {
860 DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw); 872 DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
861 return -EINVAL; 873 return -EINVAL;
@@ -869,8 +881,8 @@ int r100_copy_blit(struct radeon_device *rdev,
869 881
870 /* pages are in Y direction - height 882 /* pages are in Y direction - height
871 page width in X direction - width */ 883 page width in X direction - width */
872 radeon_ring_write(rdev, PACKET3(PACKET3_BITBLT_MULTI, 8)); 884 radeon_ring_write(ring, PACKET3(PACKET3_BITBLT_MULTI, 8));
873 radeon_ring_write(rdev, 885 radeon_ring_write(ring,
874 RADEON_GMC_SRC_PITCH_OFFSET_CNTL | 886 RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
875 RADEON_GMC_DST_PITCH_OFFSET_CNTL | 887 RADEON_GMC_DST_PITCH_OFFSET_CNTL |
876 RADEON_GMC_SRC_CLIPPING | 888 RADEON_GMC_SRC_CLIPPING |
@@ -882,26 +894,26 @@ int r100_copy_blit(struct radeon_device *rdev,
882 RADEON_DP_SRC_SOURCE_MEMORY | 894 RADEON_DP_SRC_SOURCE_MEMORY |
883 RADEON_GMC_CLR_CMP_CNTL_DIS | 895 RADEON_GMC_CLR_CMP_CNTL_DIS |
884 RADEON_GMC_WR_MSK_DIS); 896 RADEON_GMC_WR_MSK_DIS);
885 radeon_ring_write(rdev, (pitch << 22) | (src_offset >> 10)); 897 radeon_ring_write(ring, (pitch << 22) | (src_offset >> 10));
886 radeon_ring_write(rdev, (pitch << 22) | (dst_offset >> 10)); 898 radeon_ring_write(ring, (pitch << 22) | (dst_offset >> 10));
887 radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16)); 899 radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
888 radeon_ring_write(rdev, 0); 900 radeon_ring_write(ring, 0);
889 radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16)); 901 radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
890 radeon_ring_write(rdev, num_gpu_pages); 902 radeon_ring_write(ring, num_gpu_pages);
891 radeon_ring_write(rdev, num_gpu_pages); 903 radeon_ring_write(ring, num_gpu_pages);
892 radeon_ring_write(rdev, cur_pages | (stride_pixels << 16)); 904 radeon_ring_write(ring, cur_pages | (stride_pixels << 16));
893 } 905 }
894 radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0)); 906 radeon_ring_write(ring, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
895 radeon_ring_write(rdev, RADEON_RB2D_DC_FLUSH_ALL); 907 radeon_ring_write(ring, RADEON_RB2D_DC_FLUSH_ALL);
896 radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0)); 908 radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
897 radeon_ring_write(rdev, 909 radeon_ring_write(ring,
898 RADEON_WAIT_2D_IDLECLEAN | 910 RADEON_WAIT_2D_IDLECLEAN |
899 RADEON_WAIT_HOST_IDLECLEAN | 911 RADEON_WAIT_HOST_IDLECLEAN |
900 RADEON_WAIT_DMA_GUI_IDLE); 912 RADEON_WAIT_DMA_GUI_IDLE);
901 if (fence) { 913 if (fence) {
902 r = radeon_fence_emit(rdev, fence); 914 r = radeon_fence_emit(rdev, fence);
903 } 915 }
904 radeon_ring_unlock_commit(rdev); 916 radeon_ring_unlock_commit(rdev, ring);
905 return r; 917 return r;
906} 918}
907 919
@@ -922,19 +934,20 @@ static int r100_cp_wait_for_idle(struct radeon_device *rdev)
922 934
923void r100_ring_start(struct radeon_device *rdev) 935void r100_ring_start(struct radeon_device *rdev)
924{ 936{
937 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
925 int r; 938 int r;
926 939
927 r = radeon_ring_lock(rdev, 2); 940 r = radeon_ring_lock(rdev, ring, 2);
928 if (r) { 941 if (r) {
929 return; 942 return;
930 } 943 }
931 radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0)); 944 radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0));
932 radeon_ring_write(rdev, 945 radeon_ring_write(ring,
933 RADEON_ISYNC_ANY2D_IDLE3D | 946 RADEON_ISYNC_ANY2D_IDLE3D |
934 RADEON_ISYNC_ANY3D_IDLE2D | 947 RADEON_ISYNC_ANY3D_IDLE2D |
935 RADEON_ISYNC_WAIT_IDLEGUI | 948 RADEON_ISYNC_WAIT_IDLEGUI |
936 RADEON_ISYNC_CPSCRATCH_IDLEGUI); 949 RADEON_ISYNC_CPSCRATCH_IDLEGUI);
937 radeon_ring_unlock_commit(rdev); 950 radeon_ring_unlock_commit(rdev, ring);
938} 951}
939 952
940 953
@@ -1035,6 +1048,7 @@ static void r100_cp_load_microcode(struct radeon_device *rdev)
1035 1048
1036int r100_cp_init(struct radeon_device *rdev, unsigned ring_size) 1049int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
1037{ 1050{
1051 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
1038 unsigned rb_bufsz; 1052 unsigned rb_bufsz;
1039 unsigned rb_blksz; 1053 unsigned rb_blksz;
1040 unsigned max_fetch; 1054 unsigned max_fetch;
@@ -1060,7 +1074,9 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
1060 rb_bufsz = drm_order(ring_size / 8); 1074 rb_bufsz = drm_order(ring_size / 8);
1061 ring_size = (1 << (rb_bufsz + 1)) * 4; 1075 ring_size = (1 << (rb_bufsz + 1)) * 4;
1062 r100_cp_load_microcode(rdev); 1076 r100_cp_load_microcode(rdev);
1063 r = radeon_ring_init(rdev, ring_size); 1077 r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET,
1078 RADEON_CP_RB_RPTR, RADEON_CP_RB_WPTR,
1079 0, 0x7fffff, RADEON_CP_PACKET2);
1064 if (r) { 1080 if (r) {
1065 return r; 1081 return r;
1066 } 1082 }
@@ -1069,7 +1085,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
1069 rb_blksz = 9; 1085 rb_blksz = 9;
 1070 /* cp will read 128 bytes at a time (4 dwords) */ 1086
1071 max_fetch = 1; 1087 max_fetch = 1;
1072 rdev->cp.align_mask = 16 - 1; 1088 ring->align_mask = 16 - 1;
1073 /* Write to CP_RB_WPTR will be delayed for pre_write_timer clocks */ 1089 /* Write to CP_RB_WPTR will be delayed for pre_write_timer clocks */
1074 pre_write_timer = 64; 1090 pre_write_timer = 64;
1075 /* Force CP_RB_WPTR write if written more than one time before the 1091 /* Force CP_RB_WPTR write if written more than one time before the
@@ -1099,13 +1115,13 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
1099 WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_NO_UPDATE); 1115 WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_NO_UPDATE);
1100 1116
1101 /* Set ring address */ 1117 /* Set ring address */
1102 DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)rdev->cp.gpu_addr); 1118 DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)ring->gpu_addr);
1103 WREG32(RADEON_CP_RB_BASE, rdev->cp.gpu_addr); 1119 WREG32(RADEON_CP_RB_BASE, ring->gpu_addr);
1104 /* Force read & write ptr to 0 */ 1120 /* Force read & write ptr to 0 */
1105 WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE); 1121 WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE);
1106 WREG32(RADEON_CP_RB_RPTR_WR, 0); 1122 WREG32(RADEON_CP_RB_RPTR_WR, 0);
1107 rdev->cp.wptr = 0; 1123 ring->wptr = 0;
1108 WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr); 1124 WREG32(RADEON_CP_RB_WPTR, ring->wptr);
1109 1125
1110 /* set the wb address whether it's enabled or not */ 1126 /* set the wb address whether it's enabled or not */
1111 WREG32(R_00070C_CP_RB_RPTR_ADDR, 1127 WREG32(R_00070C_CP_RB_RPTR_ADDR,
@@ -1121,7 +1137,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
1121 1137
1122 WREG32(RADEON_CP_RB_CNTL, tmp); 1138 WREG32(RADEON_CP_RB_CNTL, tmp);
1123 udelay(10); 1139 udelay(10);
1124 rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR); 1140 ring->rptr = RREG32(RADEON_CP_RB_RPTR);
1125 /* Set cp mode to bus mastering & enable cp*/ 1141 /* Set cp mode to bus mastering & enable cp*/
1126 WREG32(RADEON_CP_CSQ_MODE, 1142 WREG32(RADEON_CP_CSQ_MODE,
1127 REG_SET(RADEON_INDIRECT2_START, indirect2_start) | 1143 REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
@@ -1130,12 +1146,12 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
1130 WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D); 1146 WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D);
1131 WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM); 1147 WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);
1132 radeon_ring_start(rdev); 1148 radeon_ring_start(rdev);
1133 r = radeon_ring_test(rdev); 1149 r = radeon_ring_test(rdev, ring);
1134 if (r) { 1150 if (r) {
1135 DRM_ERROR("radeon: cp isn't working (%d).\n", r); 1151 DRM_ERROR("radeon: cp isn't working (%d).\n", r);
1136 return r; 1152 return r;
1137 } 1153 }
1138 rdev->cp.ready = true; 1154 ring->ready = true;
1139 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); 1155 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
1140 return 0; 1156 return 0;
1141} 1157}
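The rb_bufsz math above rounds the requested ring size so the dword count is an exact power of two, which is what lets ptr_mask work as a plain AND. A standalone model of drm_order() (a log2 round-up) and the resulting size:

#include <stdio.h>

/* Stand-in for drm_order(): smallest o with (1 << o) >= x. */
static unsigned order_base_2(unsigned long x)
{
	unsigned o = 0;

	while ((1UL << o) < x)
		o++;
	return o;
}

int main(void)
{
	unsigned ring_size = 1000 * 1024;	/* requested bytes */
	unsigned rb_bufsz = order_base_2(ring_size / 8);
	unsigned rounded = (1u << (rb_bufsz + 1)) * 4;

	printf("rb_bufsz=%u, ring=%u bytes (%u dwords)\n",
	       rb_bufsz, rounded, rounded / 4);
	return 0;
}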
@@ -1147,7 +1163,7 @@ void r100_cp_fini(struct radeon_device *rdev)
1147 } 1163 }
1148 /* Disable ring */ 1164 /* Disable ring */
1149 r100_cp_disable(rdev); 1165 r100_cp_disable(rdev);
1150 radeon_ring_fini(rdev); 1166 radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
1151 DRM_INFO("radeon: cp finalized\n"); 1167 DRM_INFO("radeon: cp finalized\n");
1152} 1168}
1153 1169
@@ -1155,7 +1171,7 @@ void r100_cp_disable(struct radeon_device *rdev)
1155{ 1171{
1156 /* Disable ring */ 1172 /* Disable ring */
1157 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); 1173 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
1158 rdev->cp.ready = false; 1174 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
1159 WREG32(RADEON_CP_CSQ_MODE, 0); 1175 WREG32(RADEON_CP_CSQ_MODE, 0);
1160 WREG32(RADEON_CP_CSQ_CNTL, 0); 1176 WREG32(RADEON_CP_CSQ_CNTL, 0);
1161 WREG32(R_000770_SCRATCH_UMSK, 0); 1177 WREG32(R_000770_SCRATCH_UMSK, 0);
@@ -1165,13 +1181,6 @@ void r100_cp_disable(struct radeon_device *rdev)
1165 } 1181 }
1166} 1182}
1167 1183
1168void r100_cp_commit(struct radeon_device *rdev)
1169{
1170 WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
1171 (void)RREG32(RADEON_CP_RB_WPTR);
1172}
1173
1174
1175/* 1184/*
1176 * CS functions 1185 * CS functions
1177 */ 1186 */
@@ -2099,9 +2108,9 @@ int r100_mc_wait_for_idle(struct radeon_device *rdev)
2099 return -1; 2108 return -1;
2100} 2109}
2101 2110
2102void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp) 2111void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_ring *ring)
2103{ 2112{
2104 lockup->last_cp_rptr = cp->rptr; 2113 lockup->last_cp_rptr = ring->rptr;
2105 lockup->last_jiffies = jiffies; 2114 lockup->last_jiffies = jiffies;
2106} 2115}
2107 2116
@@ -2126,20 +2135,20 @@ void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp
 2126 * false positive when the CP is just given nothing to do. 2135
2127 * 2136 *
2128 **/ 2137 **/
2129bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp) 2138bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_ring *ring)
2130{ 2139{
2131 unsigned long cjiffies, elapsed; 2140 unsigned long cjiffies, elapsed;
2132 2141
2133 cjiffies = jiffies; 2142 cjiffies = jiffies;
2134 if (!time_after(cjiffies, lockup->last_jiffies)) { 2143 if (!time_after(cjiffies, lockup->last_jiffies)) {
2135 /* likely a wrap around */ 2144 /* likely a wrap around */
2136 lockup->last_cp_rptr = cp->rptr; 2145 lockup->last_cp_rptr = ring->rptr;
2137 lockup->last_jiffies = jiffies; 2146 lockup->last_jiffies = jiffies;
2138 return false; 2147 return false;
2139 } 2148 }
2140 if (cp->rptr != lockup->last_cp_rptr) { 2149 if (ring->rptr != lockup->last_cp_rptr) {
2141 /* CP is still working no lockup */ 2150 /* CP is still working no lockup */
2142 lockup->last_cp_rptr = cp->rptr; 2151 lockup->last_cp_rptr = ring->rptr;
2143 lockup->last_jiffies = jiffies; 2152 lockup->last_jiffies = jiffies;
2144 return false; 2153 return false;
2145 } 2154 }
@@ -2152,31 +2161,32 @@ bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *l
2152 return false; 2161 return false;
2153} 2162}
2154 2163
2155bool r100_gpu_is_lockup(struct radeon_device *rdev) 2164bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
2156{ 2165{
2157 u32 rbbm_status; 2166 u32 rbbm_status;
2158 int r; 2167 int r;
2159 2168
2160 rbbm_status = RREG32(R_000E40_RBBM_STATUS); 2169 rbbm_status = RREG32(R_000E40_RBBM_STATUS);
2161 if (!G_000E40_GUI_ACTIVE(rbbm_status)) { 2170 if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
2162 r100_gpu_lockup_update(&rdev->config.r100.lockup, &rdev->cp); 2171 r100_gpu_lockup_update(&rdev->config.r100.lockup, ring);
2163 return false; 2172 return false;
2164 } 2173 }
2165 /* force CP activities */ 2174 /* force CP activities */
2166 r = radeon_ring_lock(rdev, 2); 2175 r = radeon_ring_lock(rdev, ring, 2);
2167 if (!r) { 2176 if (!r) {
2168 /* PACKET2 NOP */ 2177 /* PACKET2 NOP */
2169 radeon_ring_write(rdev, 0x80000000); 2178 radeon_ring_write(ring, 0x80000000);
2170 radeon_ring_write(rdev, 0x80000000); 2179 radeon_ring_write(ring, 0x80000000);
2171 radeon_ring_unlock_commit(rdev); 2180 radeon_ring_unlock_commit(rdev, ring);
2172 } 2181 }
2173 rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR); 2182 ring->rptr = RREG32(ring->rptr_reg);
2174 return r100_gpu_cp_is_lockup(rdev, &rdev->config.r100.lockup, &rdev->cp); 2183 return r100_gpu_cp_is_lockup(rdev, &rdev->config.r100.lockup, ring);
2175} 2184}
2176 2185
2177void r100_bm_disable(struct radeon_device *rdev) 2186void r100_bm_disable(struct radeon_device *rdev)
2178{ 2187{
2179 u32 tmp; 2188 u32 tmp;
2189 u16 tmp16;
2180 2190
2181 /* disable bus mastering */ 2191 /* disable bus mastering */
2182 tmp = RREG32(R_000030_BUS_CNTL); 2192 tmp = RREG32(R_000030_BUS_CNTL);
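The lockup check only declares a hang when the ring's read pointer has stopped advancing for long enough; any rptr movement resets the timer. A simplified standalone model of r100_gpu_cp_is_lockup() (the real code also converts jiffies to milliseconds and handles jiffies wrap-around):

#include <stdbool.h>
#include <stdio.h>

struct lockup {
	unsigned last_cp_rptr;
	unsigned long last_jiffies;
};

static bool cp_is_lockup(struct lockup *l, unsigned rptr,
			 unsigned long now, unsigned long timeout)
{
	if (rptr != l->last_cp_rptr) {
		/* CP made progress: remember it, no lockup */
		l->last_cp_rptr = rptr;
		l->last_jiffies = now;
		return false;
	}
	return now - l->last_jiffies > timeout;
}

int main(void)
{
	struct lockup l = { .last_cp_rptr = 42, .last_jiffies = 0 };

	printf("%d\n", cp_is_lockup(&l, 42, 5, 10));	/* 0: too soon */
	printf("%d\n", cp_is_lockup(&l, 42, 20, 10));	/* 1: stalled  */
	return 0;
}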
@@ -2187,8 +2197,8 @@ void r100_bm_disable(struct radeon_device *rdev)
2187 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040); 2197 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040);
2188 tmp = RREG32(RADEON_BUS_CNTL); 2198 tmp = RREG32(RADEON_BUS_CNTL);
2189 mdelay(1); 2199 mdelay(1);
2190 pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp); 2200 pci_read_config_word(rdev->pdev, 0x4, &tmp16);
2191 pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB); 2201 pci_write_config_word(rdev->pdev, 0x4, tmp16 & 0xFFFB);
2192 mdelay(1); 2202 mdelay(1);
2193} 2203}
2194 2204
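The tmp16 change fixes a type bug: pci_read_config_word() stores 16 bits, so reading through a cast pointer into the u32 that still held BUS_CNTL left stale upper bytes and was endian-unsafe besides. A tiny model of why a dedicated u16 is the right target:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for pci_read_config_word(): it stores exactly 2 bytes. */
static void read_config_word(uint16_t *val)
{
	uint16_t command = 0x0107;	/* pretend PCI command register */

	memcpy(val, &command, sizeof(*val));
}

int main(void)
{
	uint16_t tmp16;

	read_config_word(&tmp16);
	/* clear the bus-master bit, as the hunk above does */
	printf("new command word: 0x%04x\n", tmp16 & 0xFFFB);
	return 0;
}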
@@ -2579,21 +2589,22 @@ static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data)
2579 struct drm_info_node *node = (struct drm_info_node *) m->private; 2589 struct drm_info_node *node = (struct drm_info_node *) m->private;
2580 struct drm_device *dev = node->minor->dev; 2590 struct drm_device *dev = node->minor->dev;
2581 struct radeon_device *rdev = dev->dev_private; 2591 struct radeon_device *rdev = dev->dev_private;
2592 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2582 uint32_t rdp, wdp; 2593 uint32_t rdp, wdp;
2583 unsigned count, i, j; 2594 unsigned count, i, j;
2584 2595
2585 radeon_ring_free_size(rdev); 2596 radeon_ring_free_size(rdev, ring);
2586 rdp = RREG32(RADEON_CP_RB_RPTR); 2597 rdp = RREG32(RADEON_CP_RB_RPTR);
2587 wdp = RREG32(RADEON_CP_RB_WPTR); 2598 wdp = RREG32(RADEON_CP_RB_WPTR);
2588 count = (rdp + rdev->cp.ring_size - wdp) & rdev->cp.ptr_mask; 2599 count = (rdp + ring->ring_size - wdp) & ring->ptr_mask;
2589 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT)); 2600 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
2590 seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp); 2601 seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
2591 seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp); 2602 seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
2592 seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw); 2603 seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
2593 seq_printf(m, "%u dwords in ring\n", count); 2604 seq_printf(m, "%u dwords in ring\n", count);
2594 for (j = 0; j <= count; j++) { 2605 for (j = 0; j <= count; j++) {
2595 i = (rdp + j) & rdev->cp.ptr_mask; 2606 i = (rdp + j) & ring->ptr_mask;
2596 seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]); 2607 seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
2597 } 2608 }
2598 return 0; 2609 return 0;
2599} 2610}
@@ -3635,7 +3646,7 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track
3635 } 3646 }
3636} 3647}
3637 3648
3638int r100_ring_test(struct radeon_device *rdev) 3649int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
3639{ 3650{
3640 uint32_t scratch; 3651 uint32_t scratch;
3641 uint32_t tmp = 0; 3652 uint32_t tmp = 0;
@@ -3648,15 +3659,15 @@ int r100_ring_test(struct radeon_device *rdev)
3648 return r; 3659 return r;
3649 } 3660 }
3650 WREG32(scratch, 0xCAFEDEAD); 3661 WREG32(scratch, 0xCAFEDEAD);
3651 r = radeon_ring_lock(rdev, 2); 3662 r = radeon_ring_lock(rdev, ring, 2);
3652 if (r) { 3663 if (r) {
3653 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r); 3664 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
3654 radeon_scratch_free(rdev, scratch); 3665 radeon_scratch_free(rdev, scratch);
3655 return r; 3666 return r;
3656 } 3667 }
3657 radeon_ring_write(rdev, PACKET0(scratch, 0)); 3668 radeon_ring_write(ring, PACKET0(scratch, 0));
3658 radeon_ring_write(rdev, 0xDEADBEEF); 3669 radeon_ring_write(ring, 0xDEADBEEF);
3659 radeon_ring_unlock_commit(rdev); 3670 radeon_ring_unlock_commit(rdev, ring);
3660 for (i = 0; i < rdev->usec_timeout; i++) { 3671 for (i = 0; i < rdev->usec_timeout; i++) {
3661 tmp = RREG32(scratch); 3672 tmp = RREG32(scratch);
3662 if (tmp == 0xDEADBEEF) { 3673 if (tmp == 0xDEADBEEF) {
@@ -3677,9 +3688,11 @@ int r100_ring_test(struct radeon_device *rdev)
3677 3688
3678void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) 3689void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
3679{ 3690{
3680 radeon_ring_write(rdev, PACKET0(RADEON_CP_IB_BASE, 1)); 3691 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3681 radeon_ring_write(rdev, ib->gpu_addr); 3692
3682 radeon_ring_write(rdev, ib->length_dw); 3693 radeon_ring_write(ring, PACKET0(RADEON_CP_IB_BASE, 1));
3694 radeon_ring_write(ring, ib->gpu_addr);
3695 radeon_ring_write(ring, ib->length_dw);
3683} 3696}
3684 3697
3685int r100_ib_test(struct radeon_device *rdev) 3698int r100_ib_test(struct radeon_device *rdev)
@@ -3696,7 +3709,7 @@ int r100_ib_test(struct radeon_device *rdev)
3696 return r; 3709 return r;
3697 } 3710 }
3698 WREG32(scratch, 0xCAFEDEAD); 3711 WREG32(scratch, 0xCAFEDEAD);
3699 r = radeon_ib_get(rdev, &ib); 3712 r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, 256);
3700 if (r) { 3713 if (r) {
3701 return r; 3714 return r;
3702 } 3715 }
@@ -3740,34 +3753,16 @@ int r100_ib_test(struct radeon_device *rdev)
3740 3753
3741void r100_ib_fini(struct radeon_device *rdev) 3754void r100_ib_fini(struct radeon_device *rdev)
3742{ 3755{
3756 radeon_ib_pool_suspend(rdev);
3743 radeon_ib_pool_fini(rdev); 3757 radeon_ib_pool_fini(rdev);
3744} 3758}
3745 3759
3746int r100_ib_init(struct radeon_device *rdev)
3747{
3748 int r;
3749
3750 r = radeon_ib_pool_init(rdev);
3751 if (r) {
3752 dev_err(rdev->dev, "failed initializing IB pool (%d).\n", r);
3753 r100_ib_fini(rdev);
3754 return r;
3755 }
3756 r = r100_ib_test(rdev);
3757 if (r) {
3758 dev_err(rdev->dev, "failed testing IB (%d).\n", r);
3759 r100_ib_fini(rdev);
3760 return r;
3761 }
3762 return 0;
3763}
3764
3765void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save) 3760void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
3766{ 3761{
 3767 /* Shut down the CP; we shouldn't need to do that, but better safe than 3762
 3768 * sorry. 3763
3769 */ 3764 */
3770 rdev->cp.ready = false; 3765 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
3771 WREG32(R_000740_CP_CSQ_CNTL, 0); 3766 WREG32(R_000740_CP_CSQ_CNTL, 0);
3772 3767
 3773 /* Save a few CRTC registers */ 3768
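Dropping r100_ib_init() splits responsibilities: one-time IB pool allocation moves to the *_init() path, while starting the pool and running the test IB happen on every *_startup(), so resume exercises the same checks as boot. A sketch of the split with stub functions:

#include <stdio.h>

/* Stubs for the radeon helpers; only the init/startup split matters. */
static int ib_pool_init(void)  { puts("allocate IB pool (once)"); return 0; }
static int ib_pool_start(void) { puts("start IB pool");           return 0; }
static int ib_test(void)       { puts("submit test IB");          return 0; }

static int startup(void)	/* runs at boot and on every resume */
{
	int r = ib_pool_start();

	if (r)
		return r;
	return ib_test();
}

int main(void)
{
	if (ib_pool_init())	/* device init, once */
		return 1;
	return startup();
}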
@@ -3905,6 +3900,12 @@ static int r100_startup(struct radeon_device *rdev)
3905 if (r) 3900 if (r)
3906 return r; 3901 return r;
3907 3902
3903 r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
3904 if (r) {
3905 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
3906 return r;
3907 }
3908
3908 /* Enable IRQ */ 3909 /* Enable IRQ */
3909 r100_irq_set(rdev); 3910 r100_irq_set(rdev);
3910 rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); 3911 rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -3914,11 +3915,18 @@ static int r100_startup(struct radeon_device *rdev)
3914 dev_err(rdev->dev, "failed initializing CP (%d).\n", r); 3915 dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
3915 return r; 3916 return r;
3916 } 3917 }
3917 r = r100_ib_init(rdev); 3918
3919 r = radeon_ib_pool_start(rdev);
3920 if (r)
3921 return r;
3922
3923 r = r100_ib_test(rdev);
3918 if (r) { 3924 if (r) {
3919 dev_err(rdev->dev, "failed initializing IB (%d).\n", r); 3925 dev_err(rdev->dev, "failed testing IB (%d).\n", r);
3926 rdev->accel_working = false;
3920 return r; 3927 return r;
3921 } 3928 }
3929
3922 return 0; 3930 return 0;
3923} 3931}
3924 3932
@@ -3941,11 +3949,14 @@ int r100_resume(struct radeon_device *rdev)
3941 r100_clock_startup(rdev); 3949 r100_clock_startup(rdev);
3942 /* Initialize surface registers */ 3950 /* Initialize surface registers */
3943 radeon_surface_init(rdev); 3951 radeon_surface_init(rdev);
3952
3953 rdev->accel_working = true;
3944 return r100_startup(rdev); 3954 return r100_startup(rdev);
3945} 3955}
3946 3956
3947int r100_suspend(struct radeon_device *rdev) 3957int r100_suspend(struct radeon_device *rdev)
3948{ 3958{
3959 radeon_ib_pool_suspend(rdev);
3949 r100_cp_disable(rdev); 3960 r100_cp_disable(rdev);
3950 radeon_wb_disable(rdev); 3961 radeon_wb_disable(rdev);
3951 r100_irq_disable(rdev); 3962 r100_irq_disable(rdev);
@@ -4064,7 +4075,14 @@ int r100_init(struct radeon_device *rdev)
4064 return r; 4075 return r;
4065 } 4076 }
4066 r100_set_safe_registers(rdev); 4077 r100_set_safe_registers(rdev);
4078
4079 r = radeon_ib_pool_init(rdev);
4067 rdev->accel_working = true; 4080 rdev->accel_working = true;
4081 if (r) {
4082 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
4083 rdev->accel_working = false;
4084 }
4085
4068 r = r100_startup(rdev); 4086 r = r100_startup(rdev);
4069 if (r) { 4087 if (r) {
 4070 /* Something went wrong with the accel init; stop accel */ 4088
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index a1f3ba063c2..eba4cbfa78f 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -87,6 +87,7 @@ int r200_copy_dma(struct radeon_device *rdev,
87 unsigned num_gpu_pages, 87 unsigned num_gpu_pages,
88 struct radeon_fence *fence) 88 struct radeon_fence *fence)
89{ 89{
90 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
90 uint32_t size; 91 uint32_t size;
91 uint32_t cur_size; 92 uint32_t cur_size;
92 int i, num_loops; 93 int i, num_loops;
@@ -95,33 +96,33 @@ int r200_copy_dma(struct radeon_device *rdev,
95 /* radeon pitch is /64 */ 96 /* radeon pitch is /64 */
96 size = num_gpu_pages << RADEON_GPU_PAGE_SHIFT; 97 size = num_gpu_pages << RADEON_GPU_PAGE_SHIFT;
97 num_loops = DIV_ROUND_UP(size, 0x1FFFFF); 98 num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
98 r = radeon_ring_lock(rdev, num_loops * 4 + 64); 99 r = radeon_ring_lock(rdev, ring, num_loops * 4 + 64);
99 if (r) { 100 if (r) {
100 DRM_ERROR("radeon: moving bo (%d).\n", r); 101 DRM_ERROR("radeon: moving bo (%d).\n", r);
101 return r; 102 return r;
102 } 103 }
103 /* Must wait for 2D idle & clean before DMA or hangs might happen */ 104 /* Must wait for 2D idle & clean before DMA or hangs might happen */
104 radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0)); 105 radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
105 radeon_ring_write(rdev, (1 << 16)); 106 radeon_ring_write(ring, (1 << 16));
106 for (i = 0; i < num_loops; i++) { 107 for (i = 0; i < num_loops; i++) {
107 cur_size = size; 108 cur_size = size;
108 if (cur_size > 0x1FFFFF) { 109 if (cur_size > 0x1FFFFF) {
109 cur_size = 0x1FFFFF; 110 cur_size = 0x1FFFFF;
110 } 111 }
111 size -= cur_size; 112 size -= cur_size;
112 radeon_ring_write(rdev, PACKET0(0x720, 2)); 113 radeon_ring_write(ring, PACKET0(0x720, 2));
113 radeon_ring_write(rdev, src_offset); 114 radeon_ring_write(ring, src_offset);
114 radeon_ring_write(rdev, dst_offset); 115 radeon_ring_write(ring, dst_offset);
115 radeon_ring_write(rdev, cur_size | (1 << 31) | (1 << 30)); 116 radeon_ring_write(ring, cur_size | (1 << 31) | (1 << 30));
116 src_offset += cur_size; 117 src_offset += cur_size;
117 dst_offset += cur_size; 118 dst_offset += cur_size;
118 } 119 }
119 radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0)); 120 radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
120 radeon_ring_write(rdev, RADEON_WAIT_DMA_GUI_IDLE); 121 radeon_ring_write(ring, RADEON_WAIT_DMA_GUI_IDLE);
121 if (fence) { 122 if (fence) {
122 r = radeon_fence_emit(rdev, fence); 123 r = radeon_fence_emit(rdev, fence);
123 } 124 }
124 radeon_ring_unlock_commit(rdev); 125 radeon_ring_unlock_commit(rdev, ring);
125 return r; 126 return r;
126} 127}
127 128
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index c93bc64707e..3fc0d29a5f3 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -175,37 +175,40 @@ void rv370_pcie_gart_fini(struct radeon_device *rdev)
175void r300_fence_ring_emit(struct radeon_device *rdev, 175void r300_fence_ring_emit(struct radeon_device *rdev,
176 struct radeon_fence *fence) 176 struct radeon_fence *fence)
177{ 177{
178 struct radeon_ring *ring = &rdev->ring[fence->ring];
179
 178 /* Whoever calls radeon_fence_emit should call ring_lock and ask 180
 179 * for enough space (today the callers are ib schedule and buffer move) */ 181
180 /* Write SC register so SC & US assert idle */ 182 /* Write SC register so SC & US assert idle */
181 radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_TL, 0)); 183 radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_TL, 0));
182 radeon_ring_write(rdev, 0); 184 radeon_ring_write(ring, 0);
183 radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_BR, 0)); 185 radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_BR, 0));
184 radeon_ring_write(rdev, 0); 186 radeon_ring_write(ring, 0);
185 /* Flush 3D cache */ 187 /* Flush 3D cache */
186 radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); 188 radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
187 radeon_ring_write(rdev, R300_RB3D_DC_FLUSH); 189 radeon_ring_write(ring, R300_RB3D_DC_FLUSH);
188 radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0)); 190 radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
189 radeon_ring_write(rdev, R300_ZC_FLUSH); 191 radeon_ring_write(ring, R300_ZC_FLUSH);
190 /* Wait until IDLE & CLEAN */ 192 /* Wait until IDLE & CLEAN */
191 radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0)); 193 radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
192 radeon_ring_write(rdev, (RADEON_WAIT_3D_IDLECLEAN | 194 radeon_ring_write(ring, (RADEON_WAIT_3D_IDLECLEAN |
193 RADEON_WAIT_2D_IDLECLEAN | 195 RADEON_WAIT_2D_IDLECLEAN |
194 RADEON_WAIT_DMA_GUI_IDLE)); 196 RADEON_WAIT_DMA_GUI_IDLE));
195 radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0)); 197 radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
196 radeon_ring_write(rdev, rdev->config.r300.hdp_cntl | 198 radeon_ring_write(ring, rdev->config.r300.hdp_cntl |
197 RADEON_HDP_READ_BUFFER_INVALIDATE); 199 RADEON_HDP_READ_BUFFER_INVALIDATE);
198 radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0)); 200 radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
199 radeon_ring_write(rdev, rdev->config.r300.hdp_cntl); 201 radeon_ring_write(ring, rdev->config.r300.hdp_cntl);
200 /* Emit fence sequence & fire IRQ */ 202 /* Emit fence sequence & fire IRQ */
201 radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0)); 203 radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
202 radeon_ring_write(rdev, fence->seq); 204 radeon_ring_write(ring, fence->seq);
203 radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0)); 205 radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0));
204 radeon_ring_write(rdev, RADEON_SW_INT_FIRE); 206 radeon_ring_write(ring, RADEON_SW_INT_FIRE);
205} 207}
206 208
207void r300_ring_start(struct radeon_device *rdev) 209void r300_ring_start(struct radeon_device *rdev)
208{ 210{
211 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
209 unsigned gb_tile_config; 212 unsigned gb_tile_config;
210 int r; 213 int r;
211 214
@@ -227,44 +230,44 @@ void r300_ring_start(struct radeon_device *rdev)
227 break; 230 break;
228 } 231 }
229 232
230 r = radeon_ring_lock(rdev, 64); 233 r = radeon_ring_lock(rdev, ring, 64);
231 if (r) { 234 if (r) {
232 return; 235 return;
233 } 236 }
234 radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0)); 237 radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0));
235 radeon_ring_write(rdev, 238 radeon_ring_write(ring,
236 RADEON_ISYNC_ANY2D_IDLE3D | 239 RADEON_ISYNC_ANY2D_IDLE3D |
237 RADEON_ISYNC_ANY3D_IDLE2D | 240 RADEON_ISYNC_ANY3D_IDLE2D |
238 RADEON_ISYNC_WAIT_IDLEGUI | 241 RADEON_ISYNC_WAIT_IDLEGUI |
239 RADEON_ISYNC_CPSCRATCH_IDLEGUI); 242 RADEON_ISYNC_CPSCRATCH_IDLEGUI);
240 radeon_ring_write(rdev, PACKET0(R300_GB_TILE_CONFIG, 0)); 243 radeon_ring_write(ring, PACKET0(R300_GB_TILE_CONFIG, 0));
241 radeon_ring_write(rdev, gb_tile_config); 244 radeon_ring_write(ring, gb_tile_config);
242 radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0)); 245 radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
243 radeon_ring_write(rdev, 246 radeon_ring_write(ring,
244 RADEON_WAIT_2D_IDLECLEAN | 247 RADEON_WAIT_2D_IDLECLEAN |
245 RADEON_WAIT_3D_IDLECLEAN); 248 RADEON_WAIT_3D_IDLECLEAN);
246 radeon_ring_write(rdev, PACKET0(R300_DST_PIPE_CONFIG, 0)); 249 radeon_ring_write(ring, PACKET0(R300_DST_PIPE_CONFIG, 0));
247 radeon_ring_write(rdev, R300_PIPE_AUTO_CONFIG); 250 radeon_ring_write(ring, R300_PIPE_AUTO_CONFIG);
248 radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0)); 251 radeon_ring_write(ring, PACKET0(R300_GB_SELECT, 0));
249 radeon_ring_write(rdev, 0); 252 radeon_ring_write(ring, 0);
250 radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0)); 253 radeon_ring_write(ring, PACKET0(R300_GB_ENABLE, 0));
251 radeon_ring_write(rdev, 0); 254 radeon_ring_write(ring, 0);
252 radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); 255 radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
253 radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE); 256 radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
254 radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0)); 257 radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
255 radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE); 258 radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE);
256 radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0)); 259 radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
257 radeon_ring_write(rdev, 260 radeon_ring_write(ring,
258 RADEON_WAIT_2D_IDLECLEAN | 261 RADEON_WAIT_2D_IDLECLEAN |
259 RADEON_WAIT_3D_IDLECLEAN); 262 RADEON_WAIT_3D_IDLECLEAN);
260 radeon_ring_write(rdev, PACKET0(R300_GB_AA_CONFIG, 0)); 263 radeon_ring_write(ring, PACKET0(R300_GB_AA_CONFIG, 0));
261 radeon_ring_write(rdev, 0); 264 radeon_ring_write(ring, 0);
262 radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); 265 radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
263 radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE); 266 radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
264 radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0)); 267 radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
265 radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE); 268 radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE);
266 radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS0, 0)); 269 radeon_ring_write(ring, PACKET0(R300_GB_MSPOS0, 0));
267 radeon_ring_write(rdev, 270 radeon_ring_write(ring,
268 ((6 << R300_MS_X0_SHIFT) | 271 ((6 << R300_MS_X0_SHIFT) |
269 (6 << R300_MS_Y0_SHIFT) | 272 (6 << R300_MS_Y0_SHIFT) |
270 (6 << R300_MS_X1_SHIFT) | 273 (6 << R300_MS_X1_SHIFT) |
@@ -273,8 +276,8 @@ void r300_ring_start(struct radeon_device *rdev)
273 (6 << R300_MS_Y2_SHIFT) | 276 (6 << R300_MS_Y2_SHIFT) |
274 (6 << R300_MSBD0_Y_SHIFT) | 277 (6 << R300_MSBD0_Y_SHIFT) |
275 (6 << R300_MSBD0_X_SHIFT))); 278 (6 << R300_MSBD0_X_SHIFT)));
276 radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS1, 0)); 279 radeon_ring_write(ring, PACKET0(R300_GB_MSPOS1, 0));
277 radeon_ring_write(rdev, 280 radeon_ring_write(ring,
278 ((6 << R300_MS_X3_SHIFT) | 281 ((6 << R300_MS_X3_SHIFT) |
279 (6 << R300_MS_Y3_SHIFT) | 282 (6 << R300_MS_Y3_SHIFT) |
280 (6 << R300_MS_X4_SHIFT) | 283 (6 << R300_MS_X4_SHIFT) |
@@ -282,16 +285,16 @@ void r300_ring_start(struct radeon_device *rdev)
282 (6 << R300_MS_X5_SHIFT) | 285 (6 << R300_MS_X5_SHIFT) |
283 (6 << R300_MS_Y5_SHIFT) | 286 (6 << R300_MS_Y5_SHIFT) |
284 (6 << R300_MSBD1_SHIFT))); 287 (6 << R300_MSBD1_SHIFT)));
285 radeon_ring_write(rdev, PACKET0(R300_GA_ENHANCE, 0)); 288 radeon_ring_write(ring, PACKET0(R300_GA_ENHANCE, 0));
286 radeon_ring_write(rdev, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL); 289 radeon_ring_write(ring, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
287 radeon_ring_write(rdev, PACKET0(R300_GA_POLY_MODE, 0)); 290 radeon_ring_write(ring, PACKET0(R300_GA_POLY_MODE, 0));
288 radeon_ring_write(rdev, 291 radeon_ring_write(ring,
289 R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE); 292 R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE);
290 radeon_ring_write(rdev, PACKET0(R300_GA_ROUND_MODE, 0)); 293 radeon_ring_write(ring, PACKET0(R300_GA_ROUND_MODE, 0));
291 radeon_ring_write(rdev, 294 radeon_ring_write(ring,
292 R300_GEOMETRY_ROUND_NEAREST | 295 R300_GEOMETRY_ROUND_NEAREST |
293 R300_COLOR_ROUND_NEAREST); 296 R300_COLOR_ROUND_NEAREST);
294 radeon_ring_unlock_commit(rdev); 297 radeon_ring_unlock_commit(rdev, ring);
295} 298}
296 299
297void r300_errata(struct radeon_device *rdev) 300void r300_errata(struct radeon_device *rdev)
@@ -375,26 +378,26 @@ void r300_gpu_init(struct radeon_device *rdev)
375 rdev->num_gb_pipes, rdev->num_z_pipes); 378 rdev->num_gb_pipes, rdev->num_z_pipes);
376} 379}
377 380
378bool r300_gpu_is_lockup(struct radeon_device *rdev) 381bool r300_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
379{ 382{
380 u32 rbbm_status; 383 u32 rbbm_status;
381 int r; 384 int r;
382 385
383 rbbm_status = RREG32(R_000E40_RBBM_STATUS); 386 rbbm_status = RREG32(R_000E40_RBBM_STATUS);
384 if (!G_000E40_GUI_ACTIVE(rbbm_status)) { 387 if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
385 r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp); 388 r100_gpu_lockup_update(&rdev->config.r300.lockup, ring);
386 return false; 389 return false;
387 } 390 }
388 /* force CP activities */ 391 /* force CP activities */
389 r = radeon_ring_lock(rdev, 2); 392 r = radeon_ring_lock(rdev, ring, 2);
390 if (!r) { 393 if (!r) {
391 /* PACKET2 NOP */ 394 /* PACKET2 NOP */
392 radeon_ring_write(rdev, 0x80000000); 395 radeon_ring_write(ring, 0x80000000);
393 radeon_ring_write(rdev, 0x80000000); 396 radeon_ring_write(ring, 0x80000000);
394 radeon_ring_unlock_commit(rdev); 397 radeon_ring_unlock_commit(rdev, ring);
395 } 398 }
396 rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR); 399 ring->rptr = RREG32(RADEON_CP_RB_RPTR);
397 return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp); 400 return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, ring);
398} 401}
399 402
400int r300_asic_reset(struct radeon_device *rdev) 403int r300_asic_reset(struct radeon_device *rdev)
@@ -701,7 +704,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
701 return r; 704 return r;
702 } 705 }
703 706
704 if (p->keep_tiling_flags) { 707 if (p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) {
705 ib[idx] = (idx_value & 31) | /* keep the 1st 5 bits */ 708 ib[idx] = (idx_value & 31) | /* keep the 1st 5 bits */
706 ((idx_value & ~31) + (u32)reloc->lobj.gpu_offset); 709 ((idx_value & ~31) + (u32)reloc->lobj.gpu_offset);
707 } else { 710 } else {
@@ -765,7 +768,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
765 /* RB3D_COLORPITCH1 */ 768 /* RB3D_COLORPITCH1 */
766 /* RB3D_COLORPITCH2 */ 769 /* RB3D_COLORPITCH2 */
767 /* RB3D_COLORPITCH3 */ 770 /* RB3D_COLORPITCH3 */
768 if (!p->keep_tiling_flags) { 771 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
769 r = r100_cs_packet_next_reloc(p, &reloc); 772 r = r100_cs_packet_next_reloc(p, &reloc);
770 if (r) { 773 if (r) {
771 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 774 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
@@ -850,7 +853,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
850 break; 853 break;
851 case 0x4F24: 854 case 0x4F24:
852 /* ZB_DEPTHPITCH */ 855 /* ZB_DEPTHPITCH */
853 if (!p->keep_tiling_flags) { 856 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
854 r = r100_cs_packet_next_reloc(p, &reloc); 857 r = r100_cs_packet_next_reloc(p, &reloc);
855 if (r) { 858 if (r) {
856 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 859 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
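The parser change above folds the old keep_tiling_flags bool into a cs_flags bitmask tested with RADEON_CS_KEEP_TILING_FLAGS, leaving room for more per-submission flags. A minimal model (the flag's value is assumed here, not quoted from the patch):

#include <stdint.h>
#include <stdio.h>

/* Assumed value for illustration; the patch does not show it. */
#define RADEON_CS_KEEP_TILING_FLAGS	(1 << 0)

struct model_cs_parser {
	uint32_t cs_flags;	/* replaces the old bool keep_tiling_flags */
};

int main(void)
{
	struct model_cs_parser p = {
		.cs_flags = RADEON_CS_KEEP_TILING_FLAGS,
	};

	if (p.cs_flags & RADEON_CS_KEEP_TILING_FLAGS)
		puts("keep userspace tiling bits in relocated registers");
	return 0;
}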
@@ -1396,6 +1399,12 @@ static int r300_startup(struct radeon_device *rdev)
1396 if (r) 1399 if (r)
1397 return r; 1400 return r;
1398 1401
1402 r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
1403 if (r) {
1404 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
1405 return r;
1406 }
1407
1399 /* Enable IRQ */ 1408 /* Enable IRQ */
1400 r100_irq_set(rdev); 1409 r100_irq_set(rdev);
1401 rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); 1410 rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -1405,11 +1414,18 @@ static int r300_startup(struct radeon_device *rdev)
1405 dev_err(rdev->dev, "failed initializing CP (%d).\n", r); 1414 dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
1406 return r; 1415 return r;
1407 } 1416 }
1408 r = r100_ib_init(rdev); 1417
1418 r = radeon_ib_pool_start(rdev);
1419 if (r)
1420 return r;
1421
1422 r = r100_ib_test(rdev);
1409 if (r) { 1423 if (r) {
1410 dev_err(rdev->dev, "failed initializing IB (%d).\n", r); 1424 dev_err(rdev->dev, "failed testing IB (%d).\n", r);
1425 rdev->accel_working = false;
1411 return r; 1426 return r;
1412 } 1427 }
1428
1413 return 0; 1429 return 0;
1414} 1430}
1415 1431
@@ -1434,11 +1450,14 @@ int r300_resume(struct radeon_device *rdev)
1434 r300_clock_startup(rdev); 1450 r300_clock_startup(rdev);
1435 /* Initialize surface registers */ 1451 /* Initialize surface registers */
1436 radeon_surface_init(rdev); 1452 radeon_surface_init(rdev);
1453
1454 rdev->accel_working = true;
1437 return r300_startup(rdev); 1455 return r300_startup(rdev);
1438} 1456}
1439 1457
1440int r300_suspend(struct radeon_device *rdev) 1458int r300_suspend(struct radeon_device *rdev)
1441{ 1459{
1460 radeon_ib_pool_suspend(rdev);
1442 r100_cp_disable(rdev); 1461 r100_cp_disable(rdev);
1443 radeon_wb_disable(rdev); 1462 radeon_wb_disable(rdev);
1444 r100_irq_disable(rdev); 1463 r100_irq_disable(rdev);
@@ -1539,7 +1558,14 @@ int r300_init(struct radeon_device *rdev)
1539 return r; 1558 return r;
1540 } 1559 }
1541 r300_set_reg_safe(rdev); 1560 r300_set_reg_safe(rdev);
1561
1562 r = radeon_ib_pool_init(rdev);
1542 rdev->accel_working = true; 1563 rdev->accel_working = true;
1564 if (r) {
1565 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
1566 rdev->accel_working = false;
1567 }
1568
1543 r = r300_startup(rdev); 1569 r = r300_startup(rdev);
1544 if (r) { 1570 if (r) {
 1545 /* Something went wrong with the accel init; stop accel */ 1571
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 417fab81812..666e28fe509 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -199,6 +199,8 @@ static void r420_clock_resume(struct radeon_device *rdev)
199 199
200static void r420_cp_errata_init(struct radeon_device *rdev) 200static void r420_cp_errata_init(struct radeon_device *rdev)
201{ 201{
202 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
203
202 /* RV410 and R420 can lock up if CP DMA to host memory happens 204 /* RV410 and R420 can lock up if CP DMA to host memory happens
203 * while the 2D engine is busy. 205 * while the 2D engine is busy.
204 * 206 *
@@ -206,22 +208,24 @@ static void r420_cp_errata_init(struct radeon_device *rdev)
206 * of the CP init, apparently. 208 * of the CP init, apparently.
207 */ 209 */
208 radeon_scratch_get(rdev, &rdev->config.r300.resync_scratch); 210 radeon_scratch_get(rdev, &rdev->config.r300.resync_scratch);
209 radeon_ring_lock(rdev, 8); 211 radeon_ring_lock(rdev, ring, 8);
210 radeon_ring_write(rdev, PACKET0(R300_CP_RESYNC_ADDR, 1)); 212 radeon_ring_write(ring, PACKET0(R300_CP_RESYNC_ADDR, 1));
211 radeon_ring_write(rdev, rdev->config.r300.resync_scratch); 213 radeon_ring_write(ring, rdev->config.r300.resync_scratch);
212 radeon_ring_write(rdev, 0xDEADBEEF); 214 radeon_ring_write(ring, 0xDEADBEEF);
213 radeon_ring_unlock_commit(rdev); 215 radeon_ring_unlock_commit(rdev, ring);
214} 216}
215 217
216static void r420_cp_errata_fini(struct radeon_device *rdev) 218static void r420_cp_errata_fini(struct radeon_device *rdev)
217{ 219{
220 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
221
218 /* Catch the RESYNC we dispatched all the way back, 222 /* Catch the RESYNC we dispatched all the way back,
219 * at the very beginning of the CP init. 223 * at the very beginning of the CP init.
220 */ 224 */
221 radeon_ring_lock(rdev, 8); 225 radeon_ring_lock(rdev, ring, 8);
222 radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); 226 radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
223 radeon_ring_write(rdev, R300_RB3D_DC_FINISH); 227 radeon_ring_write(ring, R300_RB3D_DC_FINISH);
224 radeon_ring_unlock_commit(rdev); 228 radeon_ring_unlock_commit(rdev, ring);
225 radeon_scratch_free(rdev, rdev->config.r300.resync_scratch); 229 radeon_scratch_free(rdev, rdev->config.r300.resync_scratch);
226} 230}
227 231
@@ -254,6 +258,12 @@ static int r420_startup(struct radeon_device *rdev)
254 if (r) 258 if (r)
255 return r; 259 return r;
256 260
261 r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
262 if (r) {
263 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
264 return r;
265 }
266
257 /* Enable IRQ */ 267 /* Enable IRQ */
258 r100_irq_set(rdev); 268 r100_irq_set(rdev);
259 rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); 269 rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -264,11 +274,18 @@ static int r420_startup(struct radeon_device *rdev)
264 return r; 274 return r;
265 } 275 }
266 r420_cp_errata_init(rdev); 276 r420_cp_errata_init(rdev);
267 r = r100_ib_init(rdev); 277
278 r = radeon_ib_pool_start(rdev);
279 if (r)
280 return r;
281
282 r = r100_ib_test(rdev);
268 if (r) { 283 if (r) {
269 dev_err(rdev->dev, "failed initializing IB (%d).\n", r); 284 dev_err(rdev->dev, "failed testing IB (%d).\n", r);
285 rdev->accel_working = false;
270 return r; 286 return r;
271 } 287 }
288
272 return 0; 289 return 0;
273} 290}
274 291
@@ -297,11 +314,14 @@ int r420_resume(struct radeon_device *rdev)
 	r420_clock_resume(rdev);
 	/* Initialize surface registers */
 	radeon_surface_init(rdev);
+
+	rdev->accel_working = true;
 	return r420_startup(rdev);
 }
 
 int r420_suspend(struct radeon_device *rdev)
 {
+	radeon_ib_pool_suspend(rdev);
 	r420_cp_errata_fini(rdev);
 	r100_cp_disable(rdev);
 	radeon_wb_disable(rdev);
@@ -414,7 +434,14 @@ int r420_init(struct radeon_device *rdev)
 		return r;
 	}
 	r420_set_reg_safe(rdev);
+
+	r = radeon_ib_pool_init(rdev);
 	rdev->accel_working = true;
+	if (r) {
+		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+		rdev->accel_working = false;
+	}
+
 	r = r420_startup(rdev);
 	if (r) {
 		/* Something went wrong with the accel init, so stop accel */
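
Note for reviewers: the r420 hunks above show the shape of the whole series — every radeon_ring_lock()/radeon_ring_write()/radeon_ring_unlock_commit() call now names the ring it targets instead of implicitly using the single rdev->cp. A minimal sketch of the new emit pattern; example_emit_scratch_write and its register argument are hypothetical, only the ring API calls come from this patch:

	/* Hypothetical helper: emit one register write on the GFX ring
	 * using the explicit-ring API introduced by this series. */
	static void example_emit_scratch_write(struct radeon_device *rdev,
					       u32 reg, u32 value)
	{
		struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];

		if (radeon_ring_lock(rdev, ring, 8))
			return;	/* could not reserve ring space */
		radeon_ring_write(ring, PACKET0(reg, 0));
		radeon_ring_write(ring, value);
		radeon_ring_unlock_commit(rdev, ring);
	}
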
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
index fc437059918..3bd8f1b1c60 100644
--- a/drivers/gpu/drm/radeon/r500_reg.h
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -573,6 +573,7 @@
 
 #define AVIVO_TMDSA_CNTL			0x7880
 #   define AVIVO_TMDSA_CNTL_ENABLE		(1 << 0)
+#   define AVIVO_TMDSA_CNTL_HDMI_EN		(1 << 2)
 #   define AVIVO_TMDSA_CNTL_HPD_MASK		(1 << 4)
 #   define AVIVO_TMDSA_CNTL_HPD_SELECT		(1 << 8)
 #   define AVIVO_TMDSA_CNTL_SYNC_PHASE		(1 << 12)
@@ -633,6 +634,7 @@
 
 #define AVIVO_LVTMA_CNTL			0x7a80
 #   define AVIVO_LVTMA_CNTL_ENABLE		(1 << 0)
+#   define AVIVO_LVTMA_CNTL_HDMI_EN		(1 << 2)
 #   define AVIVO_LVTMA_CNTL_HPD_MASK		(1 << 4)
 #   define AVIVO_LVTMA_CNTL_HPD_SELECT		(1 << 8)
 #   define AVIVO_LVTMA_CNTL_SYNC_PHASE		(1 << 12)
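
The two new HDMI_EN bits sit alongside the existing enable/HPD bits, so they can be driven with the driver's usual read-modify-write register accessors. A hedged sketch — the helper is hypothetical, only the register and bit names are from this patch:

	/* Hypothetical helper: toggle HDMI on the TMDSA encoder block. */
	static void example_tmdsa_set_hdmi(struct radeon_device *rdev, bool on)
	{
		u32 cntl = RREG32(AVIVO_TMDSA_CNTL);

		if (on)
			cntl |= AVIVO_TMDSA_CNTL_HDMI_EN;	/* bit 2 */
		else
			cntl &= ~AVIVO_TMDSA_CNTL_HDMI_EN;
		WREG32(AVIVO_TMDSA_CNTL, cntl);
	}
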
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 3081d07f8de..4ae1615e752 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -187,6 +187,12 @@ static int r520_startup(struct radeon_device *rdev)
 	if (r)
 		return r;
 
+	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
 	/* Enable IRQ */
 	rs600_irq_set(rdev);
 	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -196,9 +202,15 @@ static int r520_startup(struct radeon_device *rdev)
 		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
 		return r;
 	}
-	r = r100_ib_init(rdev);
+
+	r = radeon_ib_pool_start(rdev);
+	if (r)
+		return r;
+
+	r = r100_ib_test(rdev);
 	if (r) {
-		dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
+		dev_err(rdev->dev, "failed testing IB (%d).\n", r);
+		rdev->accel_working = false;
 		return r;
 	}
 	return 0;
@@ -223,6 +235,8 @@ int r520_resume(struct radeon_device *rdev)
 	rv515_clock_startup(rdev);
 	/* Initialize surface registers */
 	radeon_surface_init(rdev);
+
+	rdev->accel_working = true;
 	return r520_startup(rdev);
 }
 
@@ -292,7 +306,14 @@ int r520_init(struct radeon_device *rdev)
 	if (r)
 		return r;
 	rv515_set_safe_registers(rdev);
+
+	r = radeon_ib_pool_init(rdev);
 	rdev->accel_working = true;
+	if (r) {
+		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+		rdev->accel_working = false;
+	}
+
 	r = r520_startup(rdev);
 	if (r) {
 		/* Something went wrong with the accel init, so stop accel */
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 9cdda0b3b08..4f08e5e6ee9 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1344,7 +1344,7 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
 	return 0;
 }
 
-bool r600_gpu_is_lockup(struct radeon_device *rdev)
+bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 {
 	u32 srbm_status;
 	u32 grbm_status;
@@ -1361,19 +1361,19 @@ bool r600_gpu_is_lockup(struct radeon_device *rdev)
 	grbm_status = RREG32(R_008010_GRBM_STATUS);
 	grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
 	if (!G_008010_GUI_ACTIVE(grbm_status)) {
-		r100_gpu_lockup_update(lockup, &rdev->cp);
+		r100_gpu_lockup_update(lockup, ring);
 		return false;
 	}
 	/* force CP activities */
-	r = radeon_ring_lock(rdev, 2);
+	r = radeon_ring_lock(rdev, ring, 2);
 	if (!r) {
 		/* PACKET2 NOP */
-		radeon_ring_write(rdev, 0x80000000);
-		radeon_ring_write(rdev, 0x80000000);
-		radeon_ring_unlock_commit(rdev);
+		radeon_ring_write(ring, 0x80000000);
+		radeon_ring_write(ring, 0x80000000);
+		radeon_ring_unlock_commit(rdev, ring);
 	}
-	rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
-	return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
+	ring->rptr = RREG32(ring->rptr_reg);
+	return r100_gpu_cp_is_lockup(rdev, lockup, ring);
 }
 
 int r600_asic_reset(struct radeon_device *rdev)
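
The reworked lockup check reads as: an idle GPU is never reported as locked up; a busy one gets two PACKET2 NOPs forced through the ring, and the verdict comes from whether the read pointer keeps advancing. A condensed sketch — gpu_is_busy() and rptr_stalled() are hypothetical stand-ins for the GRBM status test and r100_gpu_cp_is_lockup():

	/* Sketch only; the real code also tracks per-ring lockup state. */
	static bool example_is_lockup(struct radeon_device *rdev,
				      struct radeon_ring *ring)
	{
		if (!gpu_is_busy(rdev))			/* hypothetical */
			return false;			/* idle: no lockup */
		if (!radeon_ring_lock(rdev, ring, 2)) {
			radeon_ring_write(ring, 0x80000000); /* PACKET2 NOP */
			radeon_ring_write(ring, 0x80000000);
			radeon_ring_unlock_commit(rdev, ring);
		}
		ring->rptr = RREG32(ring->rptr_reg);
		return rptr_stalled(ring);		/* hypothetical */
	}
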
@@ -2144,27 +2144,28 @@ static int r600_cp_load_microcode(struct radeon_device *rdev)
 
 int r600_cp_start(struct radeon_device *rdev)
 {
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	int r;
 	uint32_t cp_me;
 
-	r = radeon_ring_lock(rdev, 7);
+	r = radeon_ring_lock(rdev, ring, 7);
 	if (r) {
 		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
 		return r;
 	}
-	radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
-	radeon_ring_write(rdev, 0x1);
+	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
+	radeon_ring_write(ring, 0x1);
 	if (rdev->family >= CHIP_RV770) {
-		radeon_ring_write(rdev, 0x0);
-		radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
+		radeon_ring_write(ring, 0x0);
+		radeon_ring_write(ring, rdev->config.rv770.max_hw_contexts - 1);
 	} else {
-		radeon_ring_write(rdev, 0x3);
-		radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
+		radeon_ring_write(ring, 0x3);
+		radeon_ring_write(ring, rdev->config.r600.max_hw_contexts - 1);
 	}
-	radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
-	radeon_ring_write(rdev, 0);
-	radeon_ring_write(rdev, 0);
-	radeon_ring_unlock_commit(rdev);
+	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_unlock_commit(rdev, ring);
 
 	cp_me = 0xff;
 	WREG32(R_0086D8_CP_ME_CNTL, cp_me);
@@ -2173,6 +2174,7 @@ int r600_cp_start(struct radeon_device *rdev)
 
 int r600_cp_resume(struct radeon_device *rdev)
 {
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	u32 tmp;
 	u32 rb_bufsz;
 	int r;
@@ -2184,13 +2186,13 @@ int r600_cp_resume(struct radeon_device *rdev)
 	WREG32(GRBM_SOFT_RESET, 0);
 
 	/* Set ring buffer size */
-	rb_bufsz = drm_order(rdev->cp.ring_size / 8);
+	rb_bufsz = drm_order(ring->ring_size / 8);
 	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
 	tmp |= BUF_SWAP_32BIT;
 #endif
 	WREG32(CP_RB_CNTL, tmp);
-	WREG32(CP_SEM_WAIT_TIMER, 0x4);
+	WREG32(CP_SEM_WAIT_TIMER, 0x0);
 
 	/* Set the write pointer delay */
 	WREG32(CP_RB_WPTR_DELAY, 0);
@@ -2198,8 +2200,8 @@ int r600_cp_resume(struct radeon_device *rdev)
 	/* Initialize the ring buffer's read and write pointers */
 	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
 	WREG32(CP_RB_RPTR_WR, 0);
-	rdev->cp.wptr = 0;
-	WREG32(CP_RB_WPTR, rdev->cp.wptr);
+	ring->wptr = 0;
+	WREG32(CP_RB_WPTR, ring->wptr);
 
 	/* set the wb address whether it's enabled or not */
 	WREG32(CP_RB_RPTR_ADDR,
@@ -2217,42 +2219,36 @@ int r600_cp_resume(struct radeon_device *rdev)
 	mdelay(1);
 	WREG32(CP_RB_CNTL, tmp);
 
-	WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
+	WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
 	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
 
-	rdev->cp.rptr = RREG32(CP_RB_RPTR);
+	ring->rptr = RREG32(CP_RB_RPTR);
 
 	r600_cp_start(rdev);
-	rdev->cp.ready = true;
-	r = radeon_ring_test(rdev);
+	ring->ready = true;
+	r = radeon_ring_test(rdev, ring);
 	if (r) {
-		rdev->cp.ready = false;
+		ring->ready = false;
 		return r;
 	}
 	return 0;
 }
 
-void r600_cp_commit(struct radeon_device *rdev)
-{
-	WREG32(CP_RB_WPTR, rdev->cp.wptr);
-	(void)RREG32(CP_RB_WPTR);
-}
-
-void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
+void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size)
 {
 	u32 rb_bufsz;
 
 	/* Align ring size */
 	rb_bufsz = drm_order(ring_size / 8);
 	ring_size = (1 << (rb_bufsz + 1)) * 4;
-	rdev->cp.ring_size = ring_size;
-	rdev->cp.align_mask = 16 - 1;
+	ring->ring_size = ring_size;
+	ring->align_mask = 16 - 1;
 }
 
 void r600_cp_fini(struct radeon_device *rdev)
 {
 	r600_cp_stop(rdev);
-	radeon_ring_fini(rdev);
+	radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
 }
 
 
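
r600_ring_init() rounds the requested size up to a power of two: drm_order() returns the smallest order whose power of two covers size/8, and the final size is (1 << (order + 1)) * 4 bytes. A standalone model of that arithmetic; order() approximates drm_order(), and the 1 MiB request matches the r600_init() call later in this patch:

	#include <stdio.h>

	/* Smallest o with (1UL << o) >= size, like drm_order(). */
	static unsigned order(unsigned long size)
	{
		unsigned o = 0;

		while ((1UL << o) < size)
			o++;
		return o;
	}

	int main(void)
	{
		unsigned long request = 1024 * 1024;	/* bytes requested */
		unsigned rb_bufsz = order(request / 8);
		unsigned long aligned = (1UL << (rb_bufsz + 1)) * 4;

		/* A 1 MiB request is already aligned: prints 1048576. */
		printf("aligned ring size: %lu bytes\n", aligned);
		return 0;
	}
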
@@ -2271,11 +2267,11 @@ void r600_scratch_init(struct radeon_device *rdev)
 	}
 }
 
-int r600_ring_test(struct radeon_device *rdev)
+int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
 {
 	uint32_t scratch;
 	uint32_t tmp = 0;
-	unsigned i;
+	unsigned i, ridx = radeon_ring_index(rdev, ring);
 	int r;
 
 	r = radeon_scratch_get(rdev, &scratch);
@@ -2284,16 +2280,16 @@ int r600_ring_test(struct radeon_device *rdev)
 		return r;
 	}
 	WREG32(scratch, 0xCAFEDEAD);
-	r = radeon_ring_lock(rdev, 3);
+	r = radeon_ring_lock(rdev, ring, 3);
 	if (r) {
-		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+		DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ridx, r);
 		radeon_scratch_free(rdev, scratch);
 		return r;
 	}
-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
-	radeon_ring_write(rdev, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
-	radeon_ring_write(rdev, 0xDEADBEEF);
-	radeon_ring_unlock_commit(rdev);
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+	radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
+	radeon_ring_write(ring, 0xDEADBEEF);
+	radeon_ring_unlock_commit(rdev, ring);
 	for (i = 0; i < rdev->usec_timeout; i++) {
 		tmp = RREG32(scratch);
 		if (tmp == 0xDEADBEEF)
@@ -2301,10 +2297,10 @@ int r600_ring_test(struct radeon_device *rdev)
 		DRM_UDELAY(1);
 	}
 	if (i < rdev->usec_timeout) {
-		DRM_INFO("ring test succeeded in %d usecs\n", i);
+		DRM_INFO("ring test on %d succeeded in %d usecs\n", ridx, i);
 	} else {
-		DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
-			  scratch, tmp);
+		DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
+			  ridx, scratch, tmp);
 		r = -EINVAL;
 	}
 	radeon_scratch_free(rdev, scratch);
@@ -2314,49 +2310,63 @@ int r600_ring_test(struct radeon_device *rdev)
 void r600_fence_ring_emit(struct radeon_device *rdev,
 			  struct radeon_fence *fence)
 {
+	struct radeon_ring *ring = &rdev->ring[fence->ring];
+
 	if (rdev->wb.use_event) {
-		u64 addr = rdev->wb.gpu_addr + R600_WB_EVENT_OFFSET +
-			(u64)(rdev->fence_drv.scratch_reg - rdev->scratch.reg_base);
+		u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
 		/* flush read cache over gart */
-		radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
-		radeon_ring_write(rdev, PACKET3_TC_ACTION_ENA |
+		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+		radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
 					PACKET3_VC_ACTION_ENA |
 					PACKET3_SH_ACTION_ENA);
-		radeon_ring_write(rdev, 0xFFFFFFFF);
-		radeon_ring_write(rdev, 0);
-		radeon_ring_write(rdev, 10); /* poll interval */
+		radeon_ring_write(ring, 0xFFFFFFFF);
+		radeon_ring_write(ring, 0);
+		radeon_ring_write(ring, 10); /* poll interval */
 		/* EVENT_WRITE_EOP - flush caches, send int */
-		radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
-		radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
-		radeon_ring_write(rdev, addr & 0xffffffff);
-		radeon_ring_write(rdev, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
-		radeon_ring_write(rdev, fence->seq);
-		radeon_ring_write(rdev, 0);
+		radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+		radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
+		radeon_ring_write(ring, addr & 0xffffffff);
+		radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
+		radeon_ring_write(ring, fence->seq);
+		radeon_ring_write(ring, 0);
 	} else {
 		/* flush read cache over gart */
-		radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
-		radeon_ring_write(rdev, PACKET3_TC_ACTION_ENA |
+		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+		radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
 					PACKET3_VC_ACTION_ENA |
 					PACKET3_SH_ACTION_ENA);
-		radeon_ring_write(rdev, 0xFFFFFFFF);
-		radeon_ring_write(rdev, 0);
-		radeon_ring_write(rdev, 10); /* poll interval */
-		radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
-		radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
+		radeon_ring_write(ring, 0xFFFFFFFF);
+		radeon_ring_write(ring, 0);
+		radeon_ring_write(ring, 10); /* poll interval */
+		radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
+		radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
 		/* wait for 3D idle clean */
-		radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
-		radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
-		radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
+		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+		radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+		radeon_ring_write(ring, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
 		/* Emit fence sequence & fire IRQ */
-		radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
-		radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
-		radeon_ring_write(rdev, fence->seq);
+		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+		radeon_ring_write(ring, ((rdev->fence_drv[fence->ring].scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
+		radeon_ring_write(ring, fence->seq);
 		/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
-		radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
-		radeon_ring_write(rdev, RB_INT_STAT);
+		radeon_ring_write(ring, PACKET0(CP_INT_STATUS, 0));
+		radeon_ring_write(ring, RB_INT_STAT);
 	}
 }
 
+void r600_semaphore_ring_emit(struct radeon_device *rdev,
+			      struct radeon_ring *ring,
+			      struct radeon_semaphore *semaphore,
+			      bool emit_wait)
+{
+	uint64_t addr = semaphore->gpu_addr;
+	unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
+
+	radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
+	radeon_ring_write(ring, addr & 0xffffffff);
+	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
+}
+
 int r600_copy_blit(struct radeon_device *rdev,
 		   uint64_t src_offset,
 		   uint64_t dst_offset,
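
r600_semaphore_ring_emit() is the building block for ordering work across rings: the same MEM_SEMAPHORE packet either signals or waits on a GPU address depending on emit_wait. A hedged usage sketch — the pairing helper is hypothetical, the emit calls and SEM_SEL semantics come from this patch:

	/* Hypothetical pairing: producer signals, consumer waits. Both
	 * rings are assumed to be locked by the caller. */
	static void example_order_rings(struct radeon_device *rdev,
					struct radeon_ring *producer,
					struct radeon_ring *consumer,
					struct radeon_semaphore *sem)
	{
		r600_semaphore_ring_emit(rdev, producer, sem, false); /* signal */
		r600_semaphore_ring_emit(rdev, consumer, sem, true);  /* wait */
	}
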
@@ -2409,6 +2419,7 @@ void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
 
 int r600_startup(struct radeon_device *rdev)
 {
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	int r;
 
 	/* enable pcie gen2 link */
@@ -2447,6 +2458,12 @@ int r600_startup(struct radeon_device *rdev)
 	if (r)
 		return r;
 
+	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
 	/* Enable IRQ */
 	r = r600_irq_init(rdev);
 	if (r) {
@@ -2456,7 +2473,10 @@ int r600_startup(struct radeon_device *rdev)
 	}
 	r600_irq_set(rdev);
 
-	r = radeon_ring_init(rdev, rdev->cp.ring_size);
+	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
+			     R600_CP_RB_RPTR, R600_CP_RB_WPTR,
+			     0, 0xfffff, RADEON_CP_PACKET2);
+
 	if (r)
 		return r;
 	r = r600_cp_load_microcode(rdev);
@@ -2466,6 +2486,17 @@ int r600_startup(struct radeon_device *rdev)
 	if (r)
 		return r;
 
+	r = radeon_ib_pool_start(rdev);
+	if (r)
+		return r;
+
+	r = r600_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
+		rdev->accel_working = false;
+		return r;
+	}
+
 	return 0;
 }
 
@@ -2494,18 +2525,13 @@ int r600_resume(struct radeon_device *rdev)
 	/* post card */
 	atom_asic_init(rdev->mode_info.atom_context);
 
+	rdev->accel_working = true;
 	r = r600_startup(rdev);
 	if (r) {
 		DRM_ERROR("r600 startup failed on resume\n");
 		return r;
 	}
 
-	r = r600_ib_test(rdev);
-	if (r) {
-		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
-		return r;
-	}
-
 	r = r600_audio_init(rdev);
 	if (r) {
 		DRM_ERROR("radeon: audio resume failed\n");
@@ -2518,13 +2544,14 @@ int r600_resume(struct radeon_device *rdev)
 int r600_suspend(struct radeon_device *rdev)
 {
 	r600_audio_fini(rdev);
+	radeon_ib_pool_suspend(rdev);
+	r600_blit_suspend(rdev);
 	/* FIXME: we should wait for ring to be empty */
 	r600_cp_stop(rdev);
-	rdev->cp.ready = false;
+	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
 	r600_irq_suspend(rdev);
 	radeon_wb_disable(rdev);
 	r600_pcie_gart_disable(rdev);
-	r600_blit_suspend(rdev);
 
 	return 0;
 }
@@ -2595,8 +2622,8 @@ int r600_init(struct radeon_device *rdev)
 	if (r)
 		return r;
 
-	rdev->cp.ring_obj = NULL;
-	r600_ring_init(rdev, 1024 * 1024);
+	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
 
 	rdev->ih.ring_obj = NULL;
 	r600_ih_ring_init(rdev, 64 * 1024);
@@ -2605,30 +2632,24 @@ int r600_init(struct radeon_device *rdev)
 	if (r)
 		return r;
 
+	r = radeon_ib_pool_init(rdev);
 	rdev->accel_working = true;
+	if (r) {
+		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+		rdev->accel_working = false;
+	}
+
 	r = r600_startup(rdev);
 	if (r) {
 		dev_err(rdev->dev, "disabling GPU acceleration\n");
 		r600_cp_fini(rdev);
 		r600_irq_fini(rdev);
 		radeon_wb_fini(rdev);
+		r100_ib_fini(rdev);
 		radeon_irq_kms_fini(rdev);
 		r600_pcie_gart_fini(rdev);
 		rdev->accel_working = false;
 	}
-	if (rdev->accel_working) {
-		r = radeon_ib_pool_init(rdev);
-		if (r) {
-			dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
-			rdev->accel_working = false;
-		} else {
-			r = r600_ib_test(rdev);
-			if (r) {
-				dev_err(rdev->dev, "IB test failed (%d).\n", r);
-				rdev->accel_working = false;
-			}
-		}
-	}
 
 	r = r600_audio_init(rdev);
 	if (r)
@@ -2643,12 +2664,13 @@ void r600_fini(struct radeon_device *rdev)
 	r600_cp_fini(rdev);
 	r600_irq_fini(rdev);
 	radeon_wb_fini(rdev);
-	radeon_ib_pool_fini(rdev);
+	r100_ib_fini(rdev);
 	radeon_irq_kms_fini(rdev);
 	r600_pcie_gart_fini(rdev);
 	r600_vram_scratch_fini(rdev);
 	radeon_agp_fini(rdev);
 	radeon_gem_fini(rdev);
+	radeon_semaphore_driver_fini(rdev);
 	radeon_fence_driver_fini(rdev);
 	radeon_bo_fini(rdev);
 	radeon_atombios_fini(rdev);
@@ -2662,18 +2684,20 @@ void r600_fini(struct radeon_device *rdev)
  */
 void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 {
+	struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
+
 	/* FIXME: implement */
-	radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
-	radeon_ring_write(rdev,
+	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+	radeon_ring_write(ring,
 #ifdef __BIG_ENDIAN
 			  (2 << 0) |
 #endif
 			  (ib->gpu_addr & 0xFFFFFFFC));
-	radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
-	radeon_ring_write(rdev, ib->length_dw);
+	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
+	radeon_ring_write(ring, ib->length_dw);
 }
 
-int r600_ib_test(struct radeon_device *rdev)
+int r600_ib_test(struct radeon_device *rdev, int ring)
 {
 	struct radeon_ib *ib;
 	uint32_t scratch;
@@ -2687,7 +2711,7 @@ int r600_ib_test(struct radeon_device *rdev)
 		return r;
 	}
 	WREG32(scratch, 0xCAFEDEAD);
-	r = radeon_ib_get(rdev, &ib);
+	r = radeon_ib_get(rdev, ring, &ib, 256);
 	if (r) {
 		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
 		return r;
@@ -2728,7 +2752,7 @@ int r600_ib_test(struct radeon_device *rdev)
 		DRM_UDELAY(1);
 	}
 	if (i < rdev->usec_timeout) {
-		DRM_INFO("ib test succeeded in %u usecs\n", i);
+		DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib->fence->ring, i);
 	} else {
 		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
 			  scratch, tmp);
@@ -3075,7 +3099,7 @@ int r600_irq_set(struct radeon_device *rdev)
 		hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
 	}
 
-	if (rdev->irq.sw_int) {
+	if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
 		DRM_DEBUG("r600_irq_set: sw int\n");
 		cp_int_cntl |= RB_INT_ENABLE;
 		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
@@ -3459,11 +3483,11 @@ restart_ih:
 	case 177: /* CP_INT in IB1 */
 	case 178: /* CP_INT in IB2 */
 		DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
-		radeon_fence_process(rdev);
+		radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
 		break;
 	case 181: /* CP EOP event */
 		DRM_DEBUG("IH: CP EOP\n");
-		radeon_fence_process(rdev);
+		radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
 		break;
 	case 233: /* GUI IDLE */
 		DRM_DEBUG("IH: GUI idle\n");
@@ -3496,30 +3520,6 @@ restart_ih:
  */
 #if defined(CONFIG_DEBUG_FS)
 
-static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
-{
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct radeon_device *rdev = dev->dev_private;
-	unsigned count, i, j;
-
-	radeon_ring_free_size(rdev);
-	count = (rdev->cp.ring_size / 4) - rdev->cp.ring_free_dw;
-	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
-	seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR));
-	seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR));
-	seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", rdev->cp.wptr);
-	seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", rdev->cp.rptr);
-	seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
-	seq_printf(m, "%u dwords in ring\n", count);
-	i = rdev->cp.rptr;
-	for (j = 0; j <= count; j++) {
-		seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
-		i = (i + 1) & rdev->cp.ptr_mask;
-	}
-	return 0;
-}
-
 static int r600_debugfs_mc_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -3533,7 +3533,6 @@ static int r600_debugfs_mc_info(struct seq_file *m, void *data)
 
 static struct drm_info_list r600_mc_info_list[] = {
 	{"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
-	{"r600_ring_info", r600_debugfs_cp_ring_info, 0, NULL},
 };
 #endif
 
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index 846fae57639..ba66f3093d4 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -36,7 +36,7 @@
  */
 static int r600_audio_chipset_supported(struct radeon_device *rdev)
 {
-	return (rdev->family >= CHIP_R600 && rdev->family < CHIP_CEDAR)
+	return (rdev->family >= CHIP_R600 && !ASIC_IS_DCE5(rdev))
 		|| rdev->family == CHIP_RS600
 		|| rdev->family == CHIP_RS690
 		|| rdev->family == CHIP_RS740;
@@ -161,8 +161,18 @@ static void r600_audio_update_hdmi(unsigned long param)
  */
 static void r600_audio_engine_enable(struct radeon_device *rdev, bool enable)
 {
+	u32 value = 0;
 	DRM_INFO("%s audio support\n", enable ? "Enabling" : "Disabling");
-	WREG32_P(R600_AUDIO_ENABLE, enable ? 0x81000000 : 0x0, ~0x81000000);
+	if (ASIC_IS_DCE4(rdev)) {
+		if (enable) {
+			value |= 0x81000000; /* Required to enable audio */
+			value |= 0x0e1000f0; /* fglrx sets that too */
+		}
+		WREG32(EVERGREEN_AUDIO_ENABLE, value);
+	} else {
+		WREG32_P(R600_AUDIO_ENABLE,
+			 enable ? 0x81000000 : 0x0, ~0x81000000);
+	}
 	rdev->audio_enabled = enable;
 }
 
@@ -248,22 +258,33 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
 		return;
 	}
 
-	switch (dig->dig_encoder) {
-	case 0:
-		WREG32(R600_AUDIO_PLL1_MUL, base_rate * 50);
-		WREG32(R600_AUDIO_PLL1_DIV, clock * 100);
-		WREG32(R600_AUDIO_CLK_SRCSEL, 0);
-		break;
-
-	case 1:
-		WREG32(R600_AUDIO_PLL2_MUL, base_rate * 50);
-		WREG32(R600_AUDIO_PLL2_DIV, clock * 100);
-		WREG32(R600_AUDIO_CLK_SRCSEL, 1);
-		break;
-	default:
-		dev_err(rdev->dev, "Unsupported DIG on encoder 0x%02X\n",
-			radeon_encoder->encoder_id);
-		return;
+	if (ASIC_IS_DCE4(rdev)) {
+		/* TODO: other PLLs? */
+		WREG32(EVERGREEN_AUDIO_PLL1_MUL, base_rate * 10);
+		WREG32(EVERGREEN_AUDIO_PLL1_DIV, clock * 10);
+		WREG32(EVERGREEN_AUDIO_PLL1_UNK, 0x00000071);
+
+		/* Some magic trigger or src sel? */
+		WREG32_P(0x5ac, 0x01, ~0x77);
+	} else {
+		switch (dig->dig_encoder) {
+		case 0:
+			WREG32(R600_AUDIO_PLL1_MUL, base_rate * 50);
+			WREG32(R600_AUDIO_PLL1_DIV, clock * 100);
+			WREG32(R600_AUDIO_CLK_SRCSEL, 0);
+			break;
+
+		case 1:
+			WREG32(R600_AUDIO_PLL2_MUL, base_rate * 50);
+			WREG32(R600_AUDIO_PLL2_DIV, clock * 100);
+			WREG32(R600_AUDIO_CLK_SRCSEL, 1);
+			break;
+		default:
+			dev_err(rdev->dev,
+				"Unsupported DIG on encoder 0x%02X\n",
+				radeon_encoder->encoder_id);
+			return;
+		}
 	}
 }
 
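
The DCE4 branch programs the same MUL/DIV pair as the pre-DCE4 path but with different scale factors and a fixed 0x71 constant in the third register. A standalone print of the values each branch would program for one mode; the sample rate and pixel clock are illustrative, only the formulas and the 0x71 constant come from the patch:

	#include <stdio.h>

	int main(void)
	{
		unsigned base_rate = 48000;	/* illustrative base rate */
		unsigned clock = 74250;		/* illustrative pixel clock */

		printf("r600: PLL1_MUL=%u PLL1_DIV=%u\n",
		       base_rate * 50, clock * 100);
		printf("dce4: PLL1_MUL=%u PLL1_DIV=%u UNK=0x00000071\n",
		       base_rate * 10, clock * 10);
		return 0;
	}
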
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index e09d2818f94..d996f438113 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -50,6 +50,7 @@ static void
 set_render_target(struct radeon_device *rdev, int format,
 		  int w, int h, u64 gpu_addr)
 {
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	u32 cb_color_info;
 	int pitch, slice;
 
@@ -63,38 +64,38 @@ set_render_target(struct radeon_device *rdev, int format,
 	pitch = (w / 8) - 1;
 	slice = ((w * h) / 64) - 1;
 
-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
-	radeon_ring_write(rdev, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-	radeon_ring_write(rdev, gpu_addr >> 8);
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+	radeon_ring_write(ring, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, gpu_addr >> 8);
 
 	if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770) {
-		radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_BASE_UPDATE, 0));
-		radeon_ring_write(rdev, 2 << 0);
+		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_BASE_UPDATE, 0));
+		radeon_ring_write(ring, 2 << 0);
 	}
 
-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
-	radeon_ring_write(rdev, (CB_COLOR0_SIZE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-	radeon_ring_write(rdev, (pitch << 0) | (slice << 10));
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+	radeon_ring_write(ring, (CB_COLOR0_SIZE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, (pitch << 0) | (slice << 10));
 
-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
-	radeon_ring_write(rdev, (CB_COLOR0_VIEW - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-	radeon_ring_write(rdev, 0);
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+	radeon_ring_write(ring, (CB_COLOR0_VIEW - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, 0);
 
-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
-	radeon_ring_write(rdev, (CB_COLOR0_INFO - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-	radeon_ring_write(rdev, cb_color_info);
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+	radeon_ring_write(ring, (CB_COLOR0_INFO - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, cb_color_info);
 
-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
-	radeon_ring_write(rdev, (CB_COLOR0_TILE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-	radeon_ring_write(rdev, 0);
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+	radeon_ring_write(ring, (CB_COLOR0_TILE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, 0);
 
-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
-	radeon_ring_write(rdev, (CB_COLOR0_FRAG - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-	radeon_ring_write(rdev, 0);
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+	radeon_ring_write(ring, (CB_COLOR0_FRAG - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, 0);
 
-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
-	radeon_ring_write(rdev, (CB_COLOR0_MASK - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-	radeon_ring_write(rdev, 0);
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+	radeon_ring_write(ring, (CB_COLOR0_MASK - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, 0);
 }
 
 /* emits 5dw */
@@ -103,6 +104,7 @@ cp_set_surface_sync(struct radeon_device *rdev,
 		    u32 sync_type, u32 size,
 		    u64 mc_addr)
 {
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	u32 cp_coher_size;
 
 	if (size == 0xffffffff)
@@ -110,17 +112,18 @@ cp_set_surface_sync(struct radeon_device *rdev,
 	else
 		cp_coher_size = ((size + 255) >> 8);
 
-	radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
-	radeon_ring_write(rdev, sync_type);
-	radeon_ring_write(rdev, cp_coher_size);
-	radeon_ring_write(rdev, mc_addr >> 8);
-	radeon_ring_write(rdev, 10); /* poll interval */
+	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+	radeon_ring_write(ring, sync_type);
+	radeon_ring_write(ring, cp_coher_size);
+	radeon_ring_write(ring, mc_addr >> 8);
+	radeon_ring_write(ring, 10); /* poll interval */
 }
 
 /* emits 21dw + 1 surface sync = 26dw */
 static void
 set_shaders(struct radeon_device *rdev)
 {
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	u64 gpu_addr;
 	u32 sq_pgm_resources;
 
@@ -129,35 +132,35 @@ set_shaders(struct radeon_device *rdev)
 
 	/* VS */
 	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
-	radeon_ring_write(rdev, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-	radeon_ring_write(rdev, gpu_addr >> 8);
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+	radeon_ring_write(ring, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, gpu_addr >> 8);
 
-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
-	radeon_ring_write(rdev, (SQ_PGM_RESOURCES_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-	radeon_ring_write(rdev, sq_pgm_resources);
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+	radeon_ring_write(ring, (SQ_PGM_RESOURCES_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, sq_pgm_resources);
 
-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
-	radeon_ring_write(rdev, (SQ_PGM_CF_OFFSET_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-	radeon_ring_write(rdev, 0);
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+	radeon_ring_write(ring, (SQ_PGM_CF_OFFSET_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, 0);
 
 	/* PS */
 	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset;
-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
-	radeon_ring_write(rdev, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-	radeon_ring_write(rdev, gpu_addr >> 8);
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+	radeon_ring_write(ring, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, gpu_addr >> 8);
 
-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
-	radeon_ring_write(rdev, (SQ_PGM_RESOURCES_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-	radeon_ring_write(rdev, sq_pgm_resources | (1 << 28));
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+	radeon_ring_write(ring, (SQ_PGM_RESOURCES_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, sq_pgm_resources | (1 << 28));
 
-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
-	radeon_ring_write(rdev, (SQ_PGM_EXPORTS_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-	radeon_ring_write(rdev, 2);
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+	radeon_ring_write(ring, (SQ_PGM_EXPORTS_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, 2);
 
-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
-	radeon_ring_write(rdev, (SQ_PGM_CF_OFFSET_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-	radeon_ring_write(rdev, 0);
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+	radeon_ring_write(ring, (SQ_PGM_CF_OFFSET_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, 0);
 
 	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
 	cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr);
@@ -167,6 +170,7 @@ set_shaders(struct radeon_device *rdev)
 static void
 set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
 {
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	u32 sq_vtx_constant_word2;
 
 	sq_vtx_constant_word2 = SQ_VTXC_BASE_ADDR_HI(upper_32_bits(gpu_addr) & 0xff) |
@@ -175,15 +179,15 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
 	sq_vtx_constant_word2 |= SQ_VTXC_ENDIAN_SWAP(SQ_ENDIAN_8IN32);
 #endif
 
-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7));
-	radeon_ring_write(rdev, 0x460);
-	radeon_ring_write(rdev, gpu_addr & 0xffffffff);
-	radeon_ring_write(rdev, 48 - 1);
-	radeon_ring_write(rdev, sq_vtx_constant_word2);
-	radeon_ring_write(rdev, 1 << 0);
-	radeon_ring_write(rdev, 0);
-	radeon_ring_write(rdev, 0);
-	radeon_ring_write(rdev, SQ_TEX_VTX_VALID_BUFFER << 30);
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 7));
+	radeon_ring_write(ring, 0x460);
+	radeon_ring_write(ring, gpu_addr & 0xffffffff);
+	radeon_ring_write(ring, 48 - 1);
+	radeon_ring_write(ring, sq_vtx_constant_word2);
+	radeon_ring_write(ring, 1 << 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, SQ_TEX_VTX_VALID_BUFFER << 30);
 
 	if ((rdev->family == CHIP_RV610) ||
 	    (rdev->family == CHIP_RV620) ||
@@ -203,6 +207,7 @@ set_tex_resource(struct radeon_device *rdev,
 		 int format, int w, int h, int pitch,
 		 u64 gpu_addr, u32 size)
 {
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4;
 
 	if (h < 1)
@@ -225,15 +230,15 @@ set_tex_resource(struct radeon_device *rdev,
 	cp_set_surface_sync(rdev,
 			    PACKET3_TC_ACTION_ENA, size, gpu_addr);
 
-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7));
-	radeon_ring_write(rdev, 0);
-	radeon_ring_write(rdev, sq_tex_resource_word0);
-	radeon_ring_write(rdev, sq_tex_resource_word1);
-	radeon_ring_write(rdev, gpu_addr >> 8);
-	radeon_ring_write(rdev, gpu_addr >> 8);
-	radeon_ring_write(rdev, sq_tex_resource_word4);
-	radeon_ring_write(rdev, 0);
-	radeon_ring_write(rdev, SQ_TEX_VTX_VALID_TEXTURE << 30);
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 7));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, sq_tex_resource_word0);
+	radeon_ring_write(ring, sq_tex_resource_word1);
+	radeon_ring_write(ring, gpu_addr >> 8);
+	radeon_ring_write(ring, gpu_addr >> 8);
+	radeon_ring_write(ring, sq_tex_resource_word4);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, SQ_TEX_VTX_VALID_TEXTURE << 30);
 }
 
 /* emits 12 */
@@ -241,43 +246,45 @@ static void
 set_scissors(struct radeon_device *rdev, int x1, int y1,
 	     int x2, int y2)
 {
-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
-	radeon_ring_write(rdev, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-	radeon_ring_write(rdev, (x1 << 0) | (y1 << 16));
-	radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
-
-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
-	radeon_ring_write(rdev, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-	radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
-	radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
-
-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
-	radeon_ring_write(rdev, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-	radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
-	radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+	radeon_ring_write(ring, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, (x1 << 0) | (y1 << 16));
+	radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
+
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+	radeon_ring_write(ring, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
+	radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
+
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+	radeon_ring_write(ring, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
+	radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
 }
 
 /* emits 10 */
 static void
 draw_auto(struct radeon_device *rdev)
 {
-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
-	radeon_ring_write(rdev, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
-	radeon_ring_write(rdev, DI_PT_RECTLIST);
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+	radeon_ring_write(ring, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, DI_PT_RECTLIST);
 
-	radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0));
-	radeon_ring_write(rdev,
+	radeon_ring_write(ring, PACKET3(PACKET3_INDEX_TYPE, 0));
+	radeon_ring_write(ring,
 #ifdef __BIG_ENDIAN
 			  (2 << 2) |
 #endif
 			  DI_INDEX_SIZE_16_BIT);
 
-	radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0));
-	radeon_ring_write(rdev, 1);
+	radeon_ring_write(ring, PACKET3(PACKET3_NUM_INSTANCES, 0));
+	radeon_ring_write(ring, 1);
 
-	radeon_ring_write(rdev, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
-	radeon_ring_write(rdev, 3);
-	radeon_ring_write(rdev, DI_SRC_SEL_AUTO_INDEX);
+	radeon_ring_write(ring, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
+	radeon_ring_write(ring, 3);
+	radeon_ring_write(ring, DI_SRC_SEL_AUTO_INDEX);
 
 }
 
@@ -285,6 +292,7 @@ draw_auto(struct radeon_device *rdev)
 static void
 set_default_state(struct radeon_device *rdev)
 {
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2;
 	u32 sq_thread_resource_mgmt, sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2;
 	int num_ps_gprs, num_vs_gprs, num_temp_gprs, num_gs_gprs, num_es_gprs;
@@ -440,24 +448,24 @@ set_default_state(struct radeon_device *rdev)
 	/* emit an IB pointing at default state */
 	dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
 	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
-	radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
-	radeon_ring_write(rdev,
+	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+	radeon_ring_write(ring,
 #ifdef __BIG_ENDIAN
 			  (2 << 0) |
 #endif
 			  (gpu_addr & 0xFFFFFFFC));
-	radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF);
-	radeon_ring_write(rdev, dwords);
+	radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xFF);
+	radeon_ring_write(ring, dwords);
 
 	/* SQ config */
-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 6));
-	radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
-	radeon_ring_write(rdev, sq_config);
-	radeon_ring_write(rdev, sq_gpr_resource_mgmt_1);
-	radeon_ring_write(rdev, sq_gpr_resource_mgmt_2);
-	radeon_ring_write(rdev, sq_thread_resource_mgmt);
-	radeon_ring_write(rdev, sq_stack_resource_mgmt_1);
-	radeon_ring_write(rdev, sq_stack_resource_mgmt_2);
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 6));
+	radeon_ring_write(ring, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, sq_config);
+	radeon_ring_write(ring, sq_gpr_resource_mgmt_1);
+	radeon_ring_write(ring, sq_gpr_resource_mgmt_2);
+	radeon_ring_write(ring, sq_thread_resource_mgmt);
+	radeon_ring_write(ring, sq_stack_resource_mgmt_1);
+	radeon_ring_write(ring, sq_stack_resource_mgmt_2);
 }
 
 static uint32_t i2f(uint32_t input)
@@ -611,16 +619,17 @@ void r600_blit_fini(struct radeon_device *rdev)
 	radeon_bo_unref(&rdev->r600_blit.shader_obj);
 }
 
-static int r600_vb_ib_get(struct radeon_device *rdev)
+static int r600_vb_ib_get(struct radeon_device *rdev, unsigned size)
 {
 	int r;
-	r = radeon_ib_get(rdev, &rdev->r600_blit.vb_ib);
+	r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX,
+			  &rdev->r600_blit.vb_ib, size);
 	if (r) {
 		DRM_ERROR("failed to get IB for vertex buffer\n");
 		return r;
 	}
 
-	rdev->r600_blit.vb_total = 64*1024;
+	rdev->r600_blit.vb_total = size;
 	rdev->r600_blit.vb_used = 0;
 	return 0;
 }
@@ -679,15 +688,12 @@ static unsigned r600_blit_create_rect(unsigned num_gpu_pages,
 
 int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages)
 {
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	int r;
 	int ring_size;
 	int num_loops = 0;
 	int dwords_per_loop = rdev->r600_blit.ring_size_per_loop;
 
-	r = r600_vb_ib_get(rdev);
-	if (r)
-		return r;
-
 	/* num loops */
 	while (num_gpu_pages) {
 		num_gpu_pages -=
@@ -696,10 +702,15 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages)
 		num_loops++;
 	}
 
+	/* 48 bytes for vertex per loop */
+	r = r600_vb_ib_get(rdev, (num_loops*48)+256);
+	if (r)
+		return r;
+
 	/* calculate number of loops correctly */
 	ring_size = num_loops * dwords_per_loop;
 	ring_size += rdev->r600_blit.ring_size_common;
-	r = radeon_ring_lock(rdev, ring_size);
+	r = radeon_ring_lock(rdev, ring, ring_size);
 	if (r)
 		return r;
 
@@ -718,7 +729,7 @@ void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence)
 	if (fence)
 		r = radeon_fence_emit(rdev, fence);
 
-	radeon_ring_unlock_commit(rdev);
+	radeon_ring_unlock_commit(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
 }
 
 void r600_kms_blit_copy(struct radeon_device *rdev,
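
The prepare-copy path now sizes the vertex IB from the workload: each blit loop consumes 48 bytes of vertex data plus 256 bytes of slack, while the ring reservation stays num_loops * dwords_per_loop plus the common preamble. A standalone model of the arithmetic; the per-loop and common dword counts are illustrative, not the driver's actual values:

	#include <stdio.h>

	int main(void)
	{
		unsigned num_loops = 4;		/* from splitting num_gpu_pages */
		unsigned dwords_per_loop = 76;	/* illustrative */
		unsigned common_dwords = 99;	/* illustrative preamble */

		unsigned vb_bytes = num_loops * 48 + 256;	/* from the patch */
		unsigned ring_dwords = num_loops * dwords_per_loop + common_dwords;

		printf("vertex IB: %u bytes, ring: %u dwords\n",
		       vb_bytes, ring_dwords);
		return 0;
	}
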
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index c9db4931913..84c54625095 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -1815,7 +1815,7 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev,
 					  dev_priv->ring.size_l2qw);
 #endif
 
-	RADEON_WRITE(R600_CP_SEM_WAIT_TIMER, 0x4);
+	RADEON_WRITE(R600_CP_SEM_WAIT_TIMER, 0x0);
 
 	/* Set the write pointer delay */
 	RADEON_WRITE(R600_CP_RB_WPTR_DELAY, 0);
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index cb1acffd243..38ce5d0427e 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -941,7 +941,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 		track->db_depth_control = radeon_get_ib_value(p, idx);
 		break;
 	case R_028010_DB_DEPTH_INFO:
-		if (!p->keep_tiling_flags &&
+		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
 		    r600_cs_packet_next_is_pkt3_nop(p)) {
 			r = r600_cs_packet_next_reloc(p, &reloc);
 			if (r) {
@@ -993,7 +993,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 	case R_0280B4_CB_COLOR5_INFO:
 	case R_0280B8_CB_COLOR6_INFO:
 	case R_0280BC_CB_COLOR7_INFO:
-		if (!p->keep_tiling_flags &&
+		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
 		    r600_cs_packet_next_is_pkt3_nop(p)) {
 			r = r600_cs_packet_next_reloc(p, &reloc);
 			if (r) {
@@ -1293,7 +1293,7 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
1293 mip_offset <<= 8; 1293 mip_offset <<= 8;
1294 1294
1295 word0 = radeon_get_ib_value(p, idx + 0); 1295 word0 = radeon_get_ib_value(p, idx + 0);
1296 if (!p->keep_tiling_flags) { 1296 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
1297 if (tiling_flags & RADEON_TILING_MACRO) 1297 if (tiling_flags & RADEON_TILING_MACRO)
1298 word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1); 1298 word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
1299 else if (tiling_flags & RADEON_TILING_MICRO) 1299 else if (tiling_flags & RADEON_TILING_MICRO)
@@ -1625,7 +1625,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
1625 return -EINVAL; 1625 return -EINVAL;
1626 } 1626 }
1627 base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1627 base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1628 if (!p->keep_tiling_flags) { 1628 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
1629 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) 1629 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
1630 ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1); 1630 ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
1631 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 1631 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index f5ac7e788d8..0b592067145 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -196,6 +196,13 @@ static void r600_hdmi_videoinfoframe(
196 frame[0xD] = (right_bar >> 8); 196 frame[0xD] = (right_bar >> 8);
197 197
198 r600_hdmi_infoframe_checksum(0x82, 0x02, 0x0D, frame); 198 r600_hdmi_infoframe_checksum(0x82, 0x02, 0x0D, frame);
 199 /* Our header values (type, version, length) should be alright; Intel
 200 * uses the same ones. The checksum function also seems to be OK, as it
 201 * works fine for the audio infoframe. However, the calculated value is
 202 * always lower by 2 than fglrx's, which breaks display entirely on TVs
 203 * that strictly check the checksum. Hack it manually here to work
 204 * around this issue. */
205 frame[0x0] += 2;
199 206
200 WREG32(offset+R600_HDMI_VIDEOINFOFRAME_0, 207 WREG32(offset+R600_HDMI_VIDEOINFOFRAME_0,
201 frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24)); 208 frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
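
For context, a CEA-861 infoframe checksum is chosen so that all header and payload bytes, including the checksum itself, sum to zero modulo 256. A minimal sketch of that rule, as a hypothetical helper rather than the driver's r600_hdmi_infoframe_checksum():

	static u8 infoframe_checksum(u8 type, u8 version, u8 length,
				     const u8 *payload)
	{
		/* the checksum byte makes header + payload sum to 0 mod 256 */
		unsigned int sum = type + version + length;
		int i;

		for (i = 0; i < length; i++)
			sum += payload[i];
		return (0x100 - (sum & 0xff)) & 0xff;
	}

An off-by-2 against fglrx would then point at two bytes being treated differently in the sum; the patch sidesteps the root cause by nudging frame[0x0] after the fact.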
@@ -313,7 +320,7 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
313 struct radeon_device *rdev = dev->dev_private; 320 struct radeon_device *rdev = dev->dev_private;
314 uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset; 321 uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
315 322
316 if (ASIC_IS_DCE4(rdev)) 323 if (ASIC_IS_DCE5(rdev))
317 return; 324 return;
318 325
319 if (!offset) 326 if (!offset)
@@ -455,13 +462,31 @@ static void r600_hdmi_assign_block(struct drm_encoder *encoder)
455 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 462 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
456 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 463 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
457 464
465 u16 eg_offsets[] = {
466 EVERGREEN_CRTC0_REGISTER_OFFSET,
467 EVERGREEN_CRTC1_REGISTER_OFFSET,
468 EVERGREEN_CRTC2_REGISTER_OFFSET,
469 EVERGREEN_CRTC3_REGISTER_OFFSET,
470 EVERGREEN_CRTC4_REGISTER_OFFSET,
471 EVERGREEN_CRTC5_REGISTER_OFFSET,
472 };
473
458 if (!dig) { 474 if (!dig) {
459 dev_err(rdev->dev, "Enabling HDMI on non-dig encoder\n"); 475 dev_err(rdev->dev, "Enabling HDMI on non-dig encoder\n");
460 return; 476 return;
461 } 477 }
462 478
463 if (ASIC_IS_DCE4(rdev)) { 479 if (ASIC_IS_DCE5(rdev)) {
464 /* TODO */ 480 /* TODO */
481 } else if (ASIC_IS_DCE4(rdev)) {
482 if (dig->dig_encoder >= ARRAY_SIZE(eg_offsets)) {
483 dev_err(rdev->dev, "Enabling HDMI on unknown dig\n");
484 return;
485 }
486 radeon_encoder->hdmi_offset = EVERGREEN_HDMI_BASE +
487 eg_offsets[dig->dig_encoder];
488 radeon_encoder->hdmi_config_offset = radeon_encoder->hdmi_offset
489 + EVERGREEN_HDMI_CONFIG_OFFSET;
465 } else if (ASIC_IS_DCE3(rdev)) { 490 } else if (ASIC_IS_DCE3(rdev)) {
466 radeon_encoder->hdmi_offset = dig->dig_encoder ? 491 radeon_encoder->hdmi_offset = dig->dig_encoder ?
467 R600_HDMI_BLOCK3 : R600_HDMI_BLOCK1; 492 R600_HDMI_BLOCK3 : R600_HDMI_BLOCK1;
@@ -484,7 +509,7 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
484 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 509 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
485 uint32_t offset; 510 uint32_t offset;
486 511
487 if (ASIC_IS_DCE4(rdev)) 512 if (ASIC_IS_DCE5(rdev))
488 return; 513 return;
489 514
490 if (!radeon_encoder->hdmi_offset) { 515 if (!radeon_encoder->hdmi_offset) {
@@ -497,16 +522,24 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
497 } 522 }
498 523
499 offset = radeon_encoder->hdmi_offset; 524 offset = radeon_encoder->hdmi_offset;
500 if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) { 525 if (ASIC_IS_DCE5(rdev)) {
526 /* TODO */
527 } else if (ASIC_IS_DCE4(rdev)) {
528 WREG32_P(radeon_encoder->hdmi_config_offset + 0xc, 0x1, ~0x1);
529 } else if (ASIC_IS_DCE32(rdev)) {
501 WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0x1, ~0x1); 530 WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0x1, ~0x1);
502 } else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) { 531 } else if (ASIC_IS_DCE3(rdev)) {
532 /* TODO */
533 } else if (rdev->family >= CHIP_R600) {
503 switch (radeon_encoder->encoder_id) { 534 switch (radeon_encoder->encoder_id) {
504 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: 535 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
505 WREG32_P(AVIVO_TMDSA_CNTL, 0x4, ~0x4); 536 WREG32_P(AVIVO_TMDSA_CNTL, AVIVO_TMDSA_CNTL_HDMI_EN,
537 ~AVIVO_TMDSA_CNTL_HDMI_EN);
506 WREG32(offset + R600_HDMI_ENABLE, 0x101); 538 WREG32(offset + R600_HDMI_ENABLE, 0x101);
507 break; 539 break;
508 case ENCODER_OBJECT_ID_INTERNAL_LVTM1: 540 case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
509 WREG32_P(AVIVO_LVTMA_CNTL, 0x4, ~0x4); 541 WREG32_P(AVIVO_LVTMA_CNTL, AVIVO_LVTMA_CNTL_HDMI_EN,
542 ~AVIVO_LVTMA_CNTL_HDMI_EN);
510 WREG32(offset + R600_HDMI_ENABLE, 0x105); 543 WREG32(offset + R600_HDMI_ENABLE, 0x105);
511 break; 544 break;
512 default: 545 default:
@@ -518,8 +551,8 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
518 if (rdev->irq.installed 551 if (rdev->irq.installed
519 && rdev->family != CHIP_RS600 552 && rdev->family != CHIP_RS600
520 && rdev->family != CHIP_RS690 553 && rdev->family != CHIP_RS690
521 && rdev->family != CHIP_RS740) { 554 && rdev->family != CHIP_RS740
522 555 && !ASIC_IS_DCE4(rdev)) {
523 /* if irq is available use it */ 556 /* if irq is available use it */
524 rdev->irq.hdmi[offset == R600_HDMI_BLOCK1 ? 0 : 1] = true; 557 rdev->irq.hdmi[offset == R600_HDMI_BLOCK1 ? 0 : 1] = true;
525 radeon_irq_set(rdev); 558 radeon_irq_set(rdev);
@@ -544,7 +577,7 @@ void r600_hdmi_disable(struct drm_encoder *encoder)
544 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 577 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
545 uint32_t offset; 578 uint32_t offset;
546 579
547 if (ASIC_IS_DCE4(rdev)) 580 if (ASIC_IS_DCE5(rdev))
548 return; 581 return;
549 582
550 offset = radeon_encoder->hdmi_offset; 583 offset = radeon_encoder->hdmi_offset;
@@ -563,16 +596,22 @@ void r600_hdmi_disable(struct drm_encoder *encoder)
563 /* disable polling */ 596 /* disable polling */
564 r600_audio_disable_polling(encoder); 597 r600_audio_disable_polling(encoder);
565 598
566 if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) { 599 if (ASIC_IS_DCE5(rdev)) {
600 /* TODO */
601 } else if (ASIC_IS_DCE4(rdev)) {
602 WREG32_P(radeon_encoder->hdmi_config_offset + 0xc, 0, ~0x1);
603 } else if (ASIC_IS_DCE32(rdev)) {
567 WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0, ~0x1); 604 WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0, ~0x1);
568 } else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) { 605 } else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
569 switch (radeon_encoder->encoder_id) { 606 switch (radeon_encoder->encoder_id) {
570 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: 607 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
571 WREG32_P(AVIVO_TMDSA_CNTL, 0, ~0x4); 608 WREG32_P(AVIVO_TMDSA_CNTL, 0,
609 ~AVIVO_TMDSA_CNTL_HDMI_EN);
572 WREG32(offset + R600_HDMI_ENABLE, 0); 610 WREG32(offset + R600_HDMI_ENABLE, 0);
573 break; 611 break;
574 case ENCODER_OBJECT_ID_INTERNAL_LVTM1: 612 case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
575 WREG32_P(AVIVO_LVTMA_CNTL, 0, ~0x4); 613 WREG32_P(AVIVO_LVTMA_CNTL, 0,
614 ~AVIVO_LVTMA_CNTL_HDMI_EN);
576 WREG32(offset + R600_HDMI_ENABLE, 0); 615 WREG32(offset + R600_HDMI_ENABLE, 0);
577 break; 616 break;
578 default: 617 default:
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index bfe1b5d92af..3ee1fd7ef39 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -831,6 +831,8 @@
831#define PACKET3_STRMOUT_BUFFER_UPDATE 0x34 831#define PACKET3_STRMOUT_BUFFER_UPDATE 0x34
832#define PACKET3_INDIRECT_BUFFER_MP 0x38 832#define PACKET3_INDIRECT_BUFFER_MP 0x38
833#define PACKET3_MEM_SEMAPHORE 0x39 833#define PACKET3_MEM_SEMAPHORE 0x39
834# define PACKET3_SEM_SEL_SIGNAL (0x6 << 29)
835# define PACKET3_SEM_SEL_WAIT (0x7 << 29)
834#define PACKET3_MPEG_INDEX 0x3A 836#define PACKET3_MPEG_INDEX 0x3A
835#define PACKET3_WAIT_REG_MEM 0x3C 837#define PACKET3_WAIT_REG_MEM 0x3C
836#define PACKET3_MEM_WRITE 0x3D 838#define PACKET3_MEM_WRITE 0x3D
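
The two select encodings added above occupy the high bits of the packet's final dword. A hedged sketch of an emit routine using them, assuming the existing PACKET3() macro and the radeon_ring_write() helper from this series (not copied from the patch itself):

	uint64_t addr = semaphore->gpu_addr;
	unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;

	radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
	radeon_ring_write(ring, addr & 0xffffffff);		/* address, low 32 bits */
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);	/* high bits + select */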
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 8227e76b5c7..73e05cb85ec 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -107,6 +107,21 @@ extern int radeon_msi;
107#define RADEONFB_CONN_LIMIT 4 107#define RADEONFB_CONN_LIMIT 4
108#define RADEON_BIOS_NUM_SCRATCH 8 108#define RADEON_BIOS_NUM_SCRATCH 8
109 109
110/* max number of rings */
111#define RADEON_NUM_RINGS 3
112
113/* internal ring indices */
114/* r1xx+ has gfx CP ring */
115#define RADEON_RING_TYPE_GFX_INDEX 0
116
117/* cayman has 2 compute CP rings */
118#define CAYMAN_RING_TYPE_CP1_INDEX 1
119#define CAYMAN_RING_TYPE_CP2_INDEX 2
120
121/* hardcode those limit for now */
122#define RADEON_VA_RESERVED_SIZE (8 << 20)
123#define RADEON_IB_VM_MAX_SIZE (64 << 10)
124
110/* 125/*
111 * Errata workarounds. 126 * Errata workarounds.
112 */ 127 */
@@ -192,14 +207,15 @@ extern int sumo_get_temp(struct radeon_device *rdev);
192 */ 207 */
193struct radeon_fence_driver { 208struct radeon_fence_driver {
194 uint32_t scratch_reg; 209 uint32_t scratch_reg;
210 uint64_t gpu_addr;
211 volatile uint32_t *cpu_addr;
195 atomic_t seq; 212 atomic_t seq;
196 uint32_t last_seq; 213 uint32_t last_seq;
197 unsigned long last_jiffies; 214 unsigned long last_jiffies;
198 unsigned long last_timeout; 215 unsigned long last_timeout;
199 wait_queue_head_t queue; 216 wait_queue_head_t queue;
200 rwlock_t lock;
201 struct list_head created; 217 struct list_head created;
202 struct list_head emited; 218 struct list_head emitted;
203 struct list_head signaled; 219 struct list_head signaled;
204 bool initialized; 220 bool initialized;
205}; 221};
@@ -210,21 +226,26 @@ struct radeon_fence {
210 struct list_head list; 226 struct list_head list;
211 /* protected by radeon_fence.lock */ 227 /* protected by radeon_fence.lock */
212 uint32_t seq; 228 uint32_t seq;
213 bool emited; 229 bool emitted;
214 bool signaled; 230 bool signaled;
231 /* RB, DMA, etc. */
232 int ring;
233 struct radeon_semaphore *semaphore;
215}; 234};
216 235
236int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
217int radeon_fence_driver_init(struct radeon_device *rdev); 237int radeon_fence_driver_init(struct radeon_device *rdev);
218void radeon_fence_driver_fini(struct radeon_device *rdev); 238void radeon_fence_driver_fini(struct radeon_device *rdev);
219int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence); 239int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
220int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence); 240int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence);
221void radeon_fence_process(struct radeon_device *rdev); 241void radeon_fence_process(struct radeon_device *rdev, int ring);
222bool radeon_fence_signaled(struct radeon_fence *fence); 242bool radeon_fence_signaled(struct radeon_fence *fence);
223int radeon_fence_wait(struct radeon_fence *fence, bool interruptible); 243int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
224int radeon_fence_wait_next(struct radeon_device *rdev); 244int radeon_fence_wait_next(struct radeon_device *rdev, int ring);
225int radeon_fence_wait_last(struct radeon_device *rdev); 245int radeon_fence_wait_last(struct radeon_device *rdev, int ring);
226struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence); 246struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
227void radeon_fence_unref(struct radeon_fence **fence); 247void radeon_fence_unref(struct radeon_fence **fence);
248int radeon_fence_count_emitted(struct radeon_device *rdev, int ring);
228 249
229/* 250/*
230 * Tiling registers 251 * Tiling registers
@@ -246,6 +267,21 @@ struct radeon_mman {
246 bool initialized; 267 bool initialized;
247}; 268};
248 269
270/* bo virtual address in a specific vm */
271struct radeon_bo_va {
272 /* bo list is protected by bo being reserved */
273 struct list_head bo_list;
274 /* vm list is protected by vm mutex */
275 struct list_head vm_list;
276 /* constant after initialization */
277 struct radeon_vm *vm;
278 struct radeon_bo *bo;
279 uint64_t soffset;
280 uint64_t eoffset;
281 uint32_t flags;
282 bool valid;
283};
284
249struct radeon_bo { 285struct radeon_bo {
250 /* Protected by gem.mutex */ 286 /* Protected by gem.mutex */
251 struct list_head list; 287 struct list_head list;
@@ -259,6 +295,10 @@ struct radeon_bo {
259 u32 tiling_flags; 295 u32 tiling_flags;
260 u32 pitch; 296 u32 pitch;
261 int surface_reg; 297 int surface_reg;
 298 /* list of all virtual addresses to which this
 299 * bo is associated
 300 */
301 struct list_head va;
262 /* Constant after initialization */ 302 /* Constant after initialization */
263 struct radeon_device *rdev; 303 struct radeon_device *rdev;
264 struct drm_gem_object gem_base; 304 struct drm_gem_object gem_base;
@@ -274,6 +314,48 @@ struct radeon_bo_list {
274 u32 tiling_flags; 314 u32 tiling_flags;
275}; 315};
276 316
 317/* The sub-allocation manager has to be protected by an outer lock;
 318 * by design it is a helper for other parts of the driver, such as
 319 * the indirect buffer or semaphore code, which provide their own
 320 * locking.
 321 *
 322 * The principle is simple: we keep a list of sub-allocations in
 323 * offset order (the first entry has offset == 0, the last entry
 324 * has the highest offset).
 325 *
 326 * When allocating a new object we first check whether there is room
 327 * at the end, i.e. total_size - (last_object_offset +
 328 * last_object_size) >= alloc_size. If so, we allocate the new object there.
 329 *
 330 * When there is not enough room at the end, we start waiting on each
 331 * sub-object until object_offset + object_size >= alloc_size; that
 332 * object then becomes the sub-object we return.
 333 *
 334 * Alignment can't be bigger than the page size.
 335 *
 336 * Holes are not considered for allocation, to keep things simple;
 337 * the assumption is that there won't be any (all objects use the
 338 * same alignment). A sketch of this rule follows the structs below.
 339 */
340struct radeon_sa_manager {
341 struct radeon_bo *bo;
342 struct list_head sa_bo;
343 unsigned size;
344 uint64_t gpu_addr;
345 void *cpu_ptr;
346 uint32_t domain;
347};
348
349struct radeon_sa_bo;
350
351/* sub-allocation buffer */
352struct radeon_sa_bo {
353 struct list_head list;
354 struct radeon_sa_manager *manager;
355 unsigned offset;
356 unsigned size;
357};
358
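
The end-of-buffer rule described in the comment above reduces to a check against the last entry of the offset-ordered list. A hypothetical sketch, eliding the wait-on-sub-objects path:

	static int sa_try_alloc_at_end(struct radeon_sa_manager *sa_manager,
				       unsigned size, unsigned *offset)
	{
		struct radeon_sa_bo *last;

		if (list_empty(&sa_manager->sa_bo)) {
			*offset = 0;	/* first entry always starts at offset 0 */
			return 0;
		}
		last = list_entry(sa_manager->sa_bo.prev,
				  struct radeon_sa_bo, list);
		if (sa_manager->size - (last->offset + last->size) >= size) {
			*offset = last->offset + last->size;
			return 0;
		}
		return -ENOMEM;	/* caller must wait for sub-objects to be freed */
	}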
277/* 359/*
278 * GEM objects. 360 * GEM objects.
279 */ 361 */
@@ -303,6 +385,46 @@ int radeon_mode_dumb_destroy(struct drm_file *file_priv,
303 uint32_t handle); 385 uint32_t handle);
304 386
305/* 387/*
388 * Semaphores.
389 */
390struct radeon_ring;
391
392#define RADEON_SEMAPHORE_BO_SIZE 256
393
394struct radeon_semaphore_driver {
395 rwlock_t lock;
396 struct list_head bo;
397};
398
399struct radeon_semaphore_bo;
400
401/* everything here is constant */
402struct radeon_semaphore {
403 struct list_head list;
404 uint64_t gpu_addr;
405 uint32_t *cpu_ptr;
406 struct radeon_semaphore_bo *bo;
407};
408
409struct radeon_semaphore_bo {
410 struct list_head list;
411 struct radeon_ib *ib;
412 struct list_head free;
413 struct radeon_semaphore semaphores[RADEON_SEMAPHORE_BO_SIZE/8];
414 unsigned nused;
415};
416
417void radeon_semaphore_driver_fini(struct radeon_device *rdev);
418int radeon_semaphore_create(struct radeon_device *rdev,
419 struct radeon_semaphore **semaphore);
420void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
421 struct radeon_semaphore *semaphore);
422void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
423 struct radeon_semaphore *semaphore);
424void radeon_semaphore_free(struct radeon_device *rdev,
425 struct radeon_semaphore *semaphore);
426
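
A hedged usage sketch of the API above, syncing the gfx ring against one of the cayman compute rings (ring indices from the new defines; any ring locking the emit helpers presumably require is omitted):

	struct radeon_semaphore *sem;

	if (!radeon_semaphore_create(rdev, &sem)) {
		/* the gfx ring signals the semaphore ... */
		radeon_semaphore_emit_signal(rdev, RADEON_RING_TYPE_GFX_INDEX, sem);
		/* ... and CP1 blocks until it does */
		radeon_semaphore_emit_wait(rdev, CAYMAN_RING_TYPE_CP1_INDEX, sem);
		/* freed only once both rings have passed the semaphore */
		radeon_semaphore_free(rdev, sem);
	}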
427/*
306 * GART structures, functions & helpers 428 * GART structures, functions & helpers
307 */ 429 */
308struct radeon_mc; 430struct radeon_mc;
@@ -310,6 +432,7 @@ struct radeon_mc;
310#define RADEON_GPU_PAGE_SIZE 4096 432#define RADEON_GPU_PAGE_SIZE 4096
311#define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1) 433#define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1)
312#define RADEON_GPU_PAGE_SHIFT 12 434#define RADEON_GPU_PAGE_SHIFT 12
435#define RADEON_GPU_PAGE_ALIGN(a) (((a) + RADEON_GPU_PAGE_MASK) & ~RADEON_GPU_PAGE_MASK)
313 436
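
The new align macro rounds a size up to the 4 KiB GPU page boundary; for example, RADEON_GPU_PAGE_ALIGN(4097) evaluates to 8192.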
314struct radeon_gart { 437struct radeon_gart {
315 dma_addr_t table_addr; 438 dma_addr_t table_addr;
@@ -320,7 +443,6 @@ struct radeon_gart {
320 unsigned table_size; 443 unsigned table_size;
321 struct page **pages; 444 struct page **pages;
322 dma_addr_t *pages_addr; 445 dma_addr_t *pages_addr;
323 bool *ttm_alloced;
324 bool ready; 446 bool ready;
325}; 447};
326 448
@@ -434,7 +556,7 @@ union radeon_irq_stat_regs {
434 556
435struct radeon_irq { 557struct radeon_irq {
436 bool installed; 558 bool installed;
437 bool sw_int; 559 bool sw_int[RADEON_NUM_RINGS];
438 bool crtc_vblank_int[RADEON_MAX_CRTCS]; 560 bool crtc_vblank_int[RADEON_MAX_CRTCS];
439 bool pflip[RADEON_MAX_CRTCS]; 561 bool pflip[RADEON_MAX_CRTCS];
440 wait_queue_head_t vblank_queue; 562 wait_queue_head_t vblank_queue;
@@ -444,7 +566,7 @@ struct radeon_irq {
444 wait_queue_head_t idle_queue; 566 wait_queue_head_t idle_queue;
445 bool hdmi[RADEON_MAX_HDMI_BLOCKS]; 567 bool hdmi[RADEON_MAX_HDMI_BLOCKS];
446 spinlock_t sw_lock; 568 spinlock_t sw_lock;
447 int sw_refcount; 569 int sw_refcount[RADEON_NUM_RINGS];
448 union radeon_irq_stat_regs stat_regs; 570 union radeon_irq_stat_regs stat_regs;
449 spinlock_t pflip_lock[RADEON_MAX_CRTCS]; 571 spinlock_t pflip_lock[RADEON_MAX_CRTCS];
450 int pflip_refcount[RADEON_MAX_CRTCS]; 572 int pflip_refcount[RADEON_MAX_CRTCS];
@@ -452,22 +574,23 @@ struct radeon_irq {
452 574
453int radeon_irq_kms_init(struct radeon_device *rdev); 575int radeon_irq_kms_init(struct radeon_device *rdev);
454void radeon_irq_kms_fini(struct radeon_device *rdev); 576void radeon_irq_kms_fini(struct radeon_device *rdev);
455void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev); 577void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring);
456void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev); 578void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring);
457void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc); 579void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc);
458void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc); 580void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);
459 581
460/* 582/*
461 * CP & ring. 583 * CP & rings.
462 */ 584 */
585
463struct radeon_ib { 586struct radeon_ib {
464 struct list_head list; 587 struct radeon_sa_bo sa_bo;
465 unsigned idx; 588 unsigned idx;
589 uint32_t length_dw;
466 uint64_t gpu_addr; 590 uint64_t gpu_addr;
467 struct radeon_fence *fence;
468 uint32_t *ptr; 591 uint32_t *ptr;
469 uint32_t length_dw; 592 struct radeon_fence *fence;
470 bool free; 593 unsigned vm_id;
471}; 594};
472 595
473/* 596/*
@@ -475,20 +598,22 @@ struct radeon_ib {
475 * mutex protects scheduled_ibs, ready, alloc_bm 598 * mutex protects scheduled_ibs, ready, alloc_bm
476 */ 599 */
477struct radeon_ib_pool { 600struct radeon_ib_pool {
478 struct mutex mutex; 601 struct mutex mutex;
479 struct radeon_bo *robj; 602 struct radeon_sa_manager sa_manager;
480 struct list_head bogus_ib; 603 struct radeon_ib ibs[RADEON_IB_POOL_SIZE];
481 struct radeon_ib ibs[RADEON_IB_POOL_SIZE]; 604 bool ready;
482 bool ready; 605 unsigned head_id;
483 unsigned head_id;
484}; 606};
485 607
486struct radeon_cp { 608struct radeon_ring {
487 struct radeon_bo *ring_obj; 609 struct radeon_bo *ring_obj;
488 volatile uint32_t *ring; 610 volatile uint32_t *ring;
489 unsigned rptr; 611 unsigned rptr;
612 unsigned rptr_offs;
613 unsigned rptr_reg;
490 unsigned wptr; 614 unsigned wptr;
491 unsigned wptr_old; 615 unsigned wptr_old;
616 unsigned wptr_reg;
492 unsigned ring_size; 617 unsigned ring_size;
493 unsigned ring_free_dw; 618 unsigned ring_free_dw;
494 int count_dw; 619 int count_dw;
@@ -497,6 +622,61 @@ struct radeon_cp {
497 uint32_t ptr_mask; 622 uint32_t ptr_mask;
498 struct mutex mutex; 623 struct mutex mutex;
499 bool ready; 624 bool ready;
625 u32 ptr_reg_shift;
626 u32 ptr_reg_mask;
627 u32 nop;
628};
629
630/*
631 * VM
632 */
633struct radeon_vm {
634 struct list_head list;
635 struct list_head va;
636 int id;
637 unsigned last_pfn;
638 u64 pt_gpu_addr;
639 u64 *pt;
640 struct radeon_sa_bo sa_bo;
641 struct mutex mutex;
642 /* last fence for cs using this vm */
643 struct radeon_fence *fence;
644};
645
646struct radeon_vm_funcs {
647 int (*init)(struct radeon_device *rdev);
648 void (*fini)(struct radeon_device *rdev);
 649 /* cs mutex must be locked for schedule_ib */
650 int (*bind)(struct radeon_device *rdev, struct radeon_vm *vm, int id);
651 void (*unbind)(struct radeon_device *rdev, struct radeon_vm *vm);
652 void (*tlb_flush)(struct radeon_device *rdev, struct radeon_vm *vm);
653 uint32_t (*page_flags)(struct radeon_device *rdev,
654 struct radeon_vm *vm,
655 uint32_t flags);
656 void (*set_page)(struct radeon_device *rdev, struct radeon_vm *vm,
657 unsigned pfn, uint64_t addr, uint32_t flags);
658};
659
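
A sketch of how the per-ASIC callbacks above might be driven when rewriting the page table entries for a mapping; the names first_pfn, last_pfn, addr and bo_va are hypothetical, and the real update path is radeon_vm_bo_update_pte() declared further down:

	uint32_t flags = rdev->vm_manager.funcs->page_flags(rdev, vm,
							    bo_va->flags);
	unsigned pfn;

	for (pfn = first_pfn; pfn <= last_pfn;
	     pfn++, addr += RADEON_GPU_PAGE_SIZE)
		rdev->vm_manager.funcs->set_page(rdev, vm, pfn, addr, flags);
	rdev->vm_manager.funcs->tlb_flush(rdev, vm);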
660struct radeon_vm_manager {
661 struct list_head lru_vm;
662 uint32_t use_bitmap;
663 struct radeon_sa_manager sa_manager;
664 uint32_t max_pfn;
665 /* fields constant after init */
666 const struct radeon_vm_funcs *funcs;
667 /* number of VMIDs */
668 unsigned nvm;
 670 /* vram base address for page table entries */
670 u64 vram_base_offset;
671 /* is vm enabled? */
672 bool enabled;
673};
674
675/*
676 * file private structure
677 */
678struct radeon_fpriv {
679 struct radeon_vm vm;
500}; 680};
501 681
502/* 682/*
@@ -506,6 +686,7 @@ struct r600_ih {
506 struct radeon_bo *ring_obj; 686 struct radeon_bo *ring_obj;
507 volatile uint32_t *ring; 687 volatile uint32_t *ring;
508 unsigned rptr; 688 unsigned rptr;
689 unsigned rptr_offs;
509 unsigned wptr; 690 unsigned wptr;
510 unsigned wptr_old; 691 unsigned wptr_old;
511 unsigned ring_size; 692 unsigned ring_size;
@@ -549,23 +730,29 @@ struct r600_blit {
549 730
550void r600_blit_suspend(struct radeon_device *rdev); 731void r600_blit_suspend(struct radeon_device *rdev);
551 732
552int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib); 733int radeon_ib_get(struct radeon_device *rdev, int ring,
734 struct radeon_ib **ib, unsigned size);
553void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib); 735void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib);
736bool radeon_ib_try_free(struct radeon_device *rdev, struct radeon_ib *ib);
554int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib); 737int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib);
555int radeon_ib_pool_init(struct radeon_device *rdev); 738int radeon_ib_pool_init(struct radeon_device *rdev);
556void radeon_ib_pool_fini(struct radeon_device *rdev); 739void radeon_ib_pool_fini(struct radeon_device *rdev);
740int radeon_ib_pool_start(struct radeon_device *rdev);
741int radeon_ib_pool_suspend(struct radeon_device *rdev);
557int radeon_ib_test(struct radeon_device *rdev); 742int radeon_ib_test(struct radeon_device *rdev);
558extern void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib);
559/* Ring access between begin & end cannot sleep */ 743/* Ring access between begin & end cannot sleep */
560void radeon_ring_free_size(struct radeon_device *rdev); 744int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *cp);
561int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw); 745void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp);
562int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw); 746int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
563void radeon_ring_commit(struct radeon_device *rdev); 747int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
564void radeon_ring_unlock_commit(struct radeon_device *rdev); 748void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp);
565void radeon_ring_unlock_undo(struct radeon_device *rdev); 749void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp);
566int radeon_ring_test(struct radeon_device *rdev); 750void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp);
567int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size); 751int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
568void radeon_ring_fini(struct radeon_device *rdev); 752int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size,
753 unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
754 u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop);
755void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp);
569 756
570 757
571/* 758/*
@@ -582,12 +769,12 @@ struct radeon_cs_reloc {
582struct radeon_cs_chunk { 769struct radeon_cs_chunk {
583 uint32_t chunk_id; 770 uint32_t chunk_id;
584 uint32_t length_dw; 771 uint32_t length_dw;
585 int kpage_idx[2]; 772 int kpage_idx[2];
586 uint32_t *kpage[2]; 773 uint32_t *kpage[2];
587 uint32_t *kdata; 774 uint32_t *kdata;
588 void __user *user_ptr; 775 void __user *user_ptr;
589 int last_copied_page; 776 int last_copied_page;
590 int last_page_index; 777 int last_page_index;
591}; 778};
592 779
593struct radeon_cs_parser { 780struct radeon_cs_parser {
@@ -605,14 +792,18 @@ struct radeon_cs_parser {
605 struct radeon_cs_reloc *relocs; 792 struct radeon_cs_reloc *relocs;
606 struct radeon_cs_reloc **relocs_ptr; 793 struct radeon_cs_reloc **relocs_ptr;
607 struct list_head validated; 794 struct list_head validated;
795 bool sync_to_ring[RADEON_NUM_RINGS];
608 /* indices of various chunks */ 796 /* indices of various chunks */
609 int chunk_ib_idx; 797 int chunk_ib_idx;
610 int chunk_relocs_idx; 798 int chunk_relocs_idx;
799 int chunk_flags_idx;
611 struct radeon_ib *ib; 800 struct radeon_ib *ib;
612 void *track; 801 void *track;
613 unsigned family; 802 unsigned family;
614 int parser_error; 803 int parser_error;
615 bool keep_tiling_flags; 804 u32 cs_flags;
805 u32 ring;
806 s32 priority;
616}; 807};
617 808
618extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx); 809extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx);
@@ -869,11 +1060,20 @@ void radeon_benchmark(struct radeon_device *rdev, int test_number);
869 * Testing 1060 * Testing
870 */ 1061 */
871void radeon_test_moves(struct radeon_device *rdev); 1062void radeon_test_moves(struct radeon_device *rdev);
1063void radeon_test_ring_sync(struct radeon_device *rdev,
1064 struct radeon_ring *cpA,
1065 struct radeon_ring *cpB);
1066void radeon_test_syncing(struct radeon_device *rdev);
872 1067
873 1068
874/* 1069/*
875 * Debugfs 1070 * Debugfs
876 */ 1071 */
1072struct radeon_debugfs {
1073 struct drm_info_list *files;
1074 unsigned num_files;
1075};
1076
877int radeon_debugfs_add_files(struct radeon_device *rdev, 1077int radeon_debugfs_add_files(struct radeon_device *rdev,
878 struct drm_info_list *files, 1078 struct drm_info_list *files,
879 unsigned nfiles); 1079 unsigned nfiles);
@@ -889,21 +1089,27 @@ struct radeon_asic {
889 int (*resume)(struct radeon_device *rdev); 1089 int (*resume)(struct radeon_device *rdev);
890 int (*suspend)(struct radeon_device *rdev); 1090 int (*suspend)(struct radeon_device *rdev);
891 void (*vga_set_state)(struct radeon_device *rdev, bool state); 1091 void (*vga_set_state)(struct radeon_device *rdev, bool state);
892 bool (*gpu_is_lockup)(struct radeon_device *rdev); 1092 bool (*gpu_is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
893 int (*asic_reset)(struct radeon_device *rdev); 1093 int (*asic_reset)(struct radeon_device *rdev);
894 void (*gart_tlb_flush)(struct radeon_device *rdev); 1094 void (*gart_tlb_flush)(struct radeon_device *rdev);
895 int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr); 1095 int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr);
896 int (*cp_init)(struct radeon_device *rdev, unsigned ring_size); 1096 int (*cp_init)(struct radeon_device *rdev, unsigned ring_size);
897 void (*cp_fini)(struct radeon_device *rdev); 1097 void (*cp_fini)(struct radeon_device *rdev);
898 void (*cp_disable)(struct radeon_device *rdev); 1098 void (*cp_disable)(struct radeon_device *rdev);
899 void (*cp_commit)(struct radeon_device *rdev);
900 void (*ring_start)(struct radeon_device *rdev); 1099 void (*ring_start)(struct radeon_device *rdev);
901 int (*ring_test)(struct radeon_device *rdev); 1100
902 void (*ring_ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib); 1101 struct {
1102 void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
1103 int (*ib_parse)(struct radeon_device *rdev, struct radeon_ib *ib);
1104 void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
1105 void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
1106 struct radeon_semaphore *semaphore, bool emit_wait);
1107 } ring[RADEON_NUM_RINGS];
1108
1109 int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
903 int (*irq_set)(struct radeon_device *rdev); 1110 int (*irq_set)(struct radeon_device *rdev);
904 int (*irq_process)(struct radeon_device *rdev); 1111 int (*irq_process)(struct radeon_device *rdev);
905 u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc); 1112 u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc);
906 void (*fence_ring_emit)(struct radeon_device *rdev, struct radeon_fence *fence);
907 int (*cs_parse)(struct radeon_cs_parser *p); 1113 int (*cs_parse)(struct radeon_cs_parser *p);
908 int (*copy_blit)(struct radeon_device *rdev, 1114 int (*copy_blit)(struct radeon_device *rdev,
909 uint64_t src_offset, 1115 uint64_t src_offset,
@@ -1132,6 +1338,8 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
1132 struct drm_file *filp); 1338 struct drm_file *filp);
1133int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, 1339int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
1134 struct drm_file *filp); 1340 struct drm_file *filp);
1341int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
1342 struct drm_file *filp);
1135int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); 1343int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
1136int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data, 1344int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
1137 struct drm_file *filp); 1345 struct drm_file *filp);
@@ -1231,11 +1439,10 @@ struct radeon_device {
1231 struct radeon_mode_info mode_info; 1439 struct radeon_mode_info mode_info;
1232 struct radeon_scratch scratch; 1440 struct radeon_scratch scratch;
1233 struct radeon_mman mman; 1441 struct radeon_mman mman;
1234 struct radeon_fence_driver fence_drv; 1442 rwlock_t fence_lock;
1235 struct radeon_cp cp; 1443 struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS];
1236 /* cayman compute rings */ 1444 struct radeon_semaphore_driver semaphore_drv;
1237 struct radeon_cp cp1; 1445 struct radeon_ring ring[RADEON_NUM_RINGS];
1238 struct radeon_cp cp2;
1239 struct radeon_ib_pool ib_pool; 1446 struct radeon_ib_pool ib_pool;
1240 struct radeon_irq irq; 1447 struct radeon_irq irq;
1241 struct radeon_asic *asic; 1448 struct radeon_asic *asic;
@@ -1279,6 +1486,13 @@ struct radeon_device {
1279 struct drm_file *cmask_filp; 1486 struct drm_file *cmask_filp;
1280 /* i2c buses */ 1487 /* i2c buses */
1281 struct radeon_i2c_chan *i2c_bus[RADEON_MAX_I2C_BUS]; 1488 struct radeon_i2c_chan *i2c_bus[RADEON_MAX_I2C_BUS];
1489 /* debugfs */
1490 struct radeon_debugfs debugfs[RADEON_DEBUGFS_MAX_COMPONENTS];
1491 unsigned debugfs_count;
1492 /* virtual memory */
1493 struct radeon_vm_manager vm_manager;
1494 /* ring used for bo copies */
1495 u32 copy_ring;
1282}; 1496};
1283 1497
1284int radeon_device_init(struct radeon_device *rdev, 1498int radeon_device_init(struct radeon_device *rdev,
@@ -1414,18 +1628,17 @@ void radeon_atombios_fini(struct radeon_device *rdev);
1414/* 1628/*
1415 * RING helpers. 1629 * RING helpers.
1416 */ 1630 */
1417
1418#if DRM_DEBUG_CODE == 0 1631#if DRM_DEBUG_CODE == 0
1419static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v) 1632static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
1420{ 1633{
1421 rdev->cp.ring[rdev->cp.wptr++] = v; 1634 ring->ring[ring->wptr++] = v;
1422 rdev->cp.wptr &= rdev->cp.ptr_mask; 1635 ring->wptr &= ring->ptr_mask;
1423 rdev->cp.count_dw--; 1636 ring->count_dw--;
1424 rdev->cp.ring_free_dw--; 1637 ring->ring_free_dw--;
1425} 1638}
1426#else 1639#else
1427/* With debugging this is just too big to inline */ 1640/* With debugging this is just too big to inline */
1428void radeon_ring_write(struct radeon_device *rdev, uint32_t v); 1641void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
1429#endif 1642#endif
1430 1643
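
Putting the reworked API together, a typical emit sequence against one of the per-device rings now looks like the sketch below; the two ring->nop filler dwords stand in for real packets:

	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r;

	r = radeon_ring_lock(rdev, ring, 2);	/* reserve 2 dwords */
	if (r == 0) {
		radeon_ring_write(ring, ring->nop);
		radeon_ring_write(ring, ring->nop);
		radeon_ring_unlock_commit(rdev, ring);
	}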
1431/* 1644/*
@@ -1437,18 +1650,19 @@ void radeon_ring_write(struct radeon_device *rdev, uint32_t v);
1437#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev)) 1650#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
1438#define radeon_cs_parse(p) rdev->asic->cs_parse((p)) 1651#define radeon_cs_parse(p) rdev->asic->cs_parse((p))
1439#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state)) 1652#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
1440#define radeon_gpu_is_lockup(rdev) (rdev)->asic->gpu_is_lockup((rdev)) 1653#define radeon_gpu_is_lockup(rdev, cp) (rdev)->asic->gpu_is_lockup((rdev), (cp))
1441#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev)) 1654#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
1442#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev)) 1655#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev))
1443#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p)) 1656#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p))
1444#define radeon_cp_commit(rdev) (rdev)->asic->cp_commit((rdev))
1445#define radeon_ring_start(rdev) (rdev)->asic->ring_start((rdev)) 1657#define radeon_ring_start(rdev) (rdev)->asic->ring_start((rdev))
1446#define radeon_ring_test(rdev) (rdev)->asic->ring_test((rdev)) 1658#define radeon_ring_test(rdev, cp) (rdev)->asic->ring_test((rdev), (cp))
1447#define radeon_ring_ib_execute(rdev, ib) (rdev)->asic->ring_ib_execute((rdev), (ib)) 1659#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)].ib_execute((rdev), (ib))
1660#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)].ib_parse((rdev), (ib))
1448#define radeon_irq_set(rdev) (rdev)->asic->irq_set((rdev)) 1661#define radeon_irq_set(rdev) (rdev)->asic->irq_set((rdev))
1449#define radeon_irq_process(rdev) (rdev)->asic->irq_process((rdev)) 1662#define radeon_irq_process(rdev) (rdev)->asic->irq_process((rdev))
1450#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->get_vblank_counter((rdev), (crtc)) 1663#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->get_vblank_counter((rdev), (crtc))
1451#define radeon_fence_ring_emit(rdev, fence) (rdev)->asic->fence_ring_emit((rdev), (fence)) 1664#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)].emit_fence((rdev), (fence))
1665#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)].emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
1452#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy_blit((rdev), (s), (d), (np), (f)) 1666#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy_blit((rdev), (s), (d), (np), (f))
1453#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy_dma((rdev), (s), (d), (np), (f)) 1667#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy_dma((rdev), (s), (d), (np), (f))
1454#define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy((rdev), (s), (d), (np), (f)) 1668#define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy((rdev), (s), (d), (np), (f))
@@ -1503,6 +1717,33 @@ extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
1503extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size); 1717extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);
1504 1718
1505/* 1719/*
1720 * vm
1721 */
1722int radeon_vm_manager_init(struct radeon_device *rdev);
1723void radeon_vm_manager_fini(struct radeon_device *rdev);
1724int radeon_vm_manager_start(struct radeon_device *rdev);
1725int radeon_vm_manager_suspend(struct radeon_device *rdev);
1726int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
1727void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
1728int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm);
1729void radeon_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm);
1730int radeon_vm_bo_update_pte(struct radeon_device *rdev,
1731 struct radeon_vm *vm,
1732 struct radeon_bo *bo,
1733 struct ttm_mem_reg *mem);
1734void radeon_vm_bo_invalidate(struct radeon_device *rdev,
1735 struct radeon_bo *bo);
1736int radeon_vm_bo_add(struct radeon_device *rdev,
1737 struct radeon_vm *vm,
1738 struct radeon_bo *bo,
1739 uint64_t offset,
1740 uint32_t flags);
1741int radeon_vm_bo_rmv(struct radeon_device *rdev,
1742 struct radeon_vm *vm,
1743 struct radeon_bo *bo);
1744
1745
1746/*
1506 * R600 vram scratch functions 1747 * R600 vram scratch functions
1507 */ 1748 */
1508int r600_vram_scratch_init(struct radeon_device *rdev); 1749int r600_vram_scratch_init(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index a2e1eae114e..36a6192ce86 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -138,14 +138,18 @@ static struct radeon_asic r100_asic = {
138 .asic_reset = &r100_asic_reset, 138 .asic_reset = &r100_asic_reset,
139 .gart_tlb_flush = &r100_pci_gart_tlb_flush, 139 .gart_tlb_flush = &r100_pci_gart_tlb_flush,
140 .gart_set_page = &r100_pci_gart_set_page, 140 .gart_set_page = &r100_pci_gart_set_page,
141 .cp_commit = &r100_cp_commit,
142 .ring_start = &r100_ring_start, 141 .ring_start = &r100_ring_start,
143 .ring_test = &r100_ring_test, 142 .ring_test = &r100_ring_test,
144 .ring_ib_execute = &r100_ring_ib_execute, 143 .ring = {
144 [RADEON_RING_TYPE_GFX_INDEX] = {
145 .ib_execute = &r100_ring_ib_execute,
146 .emit_fence = &r100_fence_ring_emit,
147 .emit_semaphore = &r100_semaphore_ring_emit,
148 }
149 },
145 .irq_set = &r100_irq_set, 150 .irq_set = &r100_irq_set,
146 .irq_process = &r100_irq_process, 151 .irq_process = &r100_irq_process,
147 .get_vblank_counter = &r100_get_vblank_counter, 152 .get_vblank_counter = &r100_get_vblank_counter,
148 .fence_ring_emit = &r100_fence_ring_emit,
149 .cs_parse = &r100_cs_parse, 153 .cs_parse = &r100_cs_parse,
150 .copy_blit = &r100_copy_blit, 154 .copy_blit = &r100_copy_blit,
151 .copy_dma = NULL, 155 .copy_dma = NULL,
@@ -186,14 +190,18 @@ static struct radeon_asic r200_asic = {
186 .asic_reset = &r100_asic_reset, 190 .asic_reset = &r100_asic_reset,
187 .gart_tlb_flush = &r100_pci_gart_tlb_flush, 191 .gart_tlb_flush = &r100_pci_gart_tlb_flush,
188 .gart_set_page = &r100_pci_gart_set_page, 192 .gart_set_page = &r100_pci_gart_set_page,
189 .cp_commit = &r100_cp_commit,
190 .ring_start = &r100_ring_start, 193 .ring_start = &r100_ring_start,
191 .ring_test = &r100_ring_test, 194 .ring_test = &r100_ring_test,
192 .ring_ib_execute = &r100_ring_ib_execute, 195 .ring = {
196 [RADEON_RING_TYPE_GFX_INDEX] = {
197 .ib_execute = &r100_ring_ib_execute,
198 .emit_fence = &r100_fence_ring_emit,
199 .emit_semaphore = &r100_semaphore_ring_emit,
200 }
201 },
193 .irq_set = &r100_irq_set, 202 .irq_set = &r100_irq_set,
194 .irq_process = &r100_irq_process, 203 .irq_process = &r100_irq_process,
195 .get_vblank_counter = &r100_get_vblank_counter, 204 .get_vblank_counter = &r100_get_vblank_counter,
196 .fence_ring_emit = &r100_fence_ring_emit,
197 .cs_parse = &r100_cs_parse, 205 .cs_parse = &r100_cs_parse,
198 .copy_blit = &r100_copy_blit, 206 .copy_blit = &r100_copy_blit,
199 .copy_dma = &r200_copy_dma, 207 .copy_dma = &r200_copy_dma,
@@ -233,14 +241,18 @@ static struct radeon_asic r300_asic = {
233 .asic_reset = &r300_asic_reset, 241 .asic_reset = &r300_asic_reset,
234 .gart_tlb_flush = &r100_pci_gart_tlb_flush, 242 .gart_tlb_flush = &r100_pci_gart_tlb_flush,
235 .gart_set_page = &r100_pci_gart_set_page, 243 .gart_set_page = &r100_pci_gart_set_page,
236 .cp_commit = &r100_cp_commit,
237 .ring_start = &r300_ring_start, 244 .ring_start = &r300_ring_start,
238 .ring_test = &r100_ring_test, 245 .ring_test = &r100_ring_test,
239 .ring_ib_execute = &r100_ring_ib_execute, 246 .ring = {
247 [RADEON_RING_TYPE_GFX_INDEX] = {
248 .ib_execute = &r100_ring_ib_execute,
249 .emit_fence = &r300_fence_ring_emit,
250 .emit_semaphore = &r100_semaphore_ring_emit,
251 }
252 },
240 .irq_set = &r100_irq_set, 253 .irq_set = &r100_irq_set,
241 .irq_process = &r100_irq_process, 254 .irq_process = &r100_irq_process,
242 .get_vblank_counter = &r100_get_vblank_counter, 255 .get_vblank_counter = &r100_get_vblank_counter,
243 .fence_ring_emit = &r300_fence_ring_emit,
244 .cs_parse = &r300_cs_parse, 256 .cs_parse = &r300_cs_parse,
245 .copy_blit = &r100_copy_blit, 257 .copy_blit = &r100_copy_blit,
246 .copy_dma = &r200_copy_dma, 258 .copy_dma = &r200_copy_dma,
@@ -281,14 +293,18 @@ static struct radeon_asic r300_asic_pcie = {
281 .asic_reset = &r300_asic_reset, 293 .asic_reset = &r300_asic_reset,
282 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, 294 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
283 .gart_set_page = &rv370_pcie_gart_set_page, 295 .gart_set_page = &rv370_pcie_gart_set_page,
284 .cp_commit = &r100_cp_commit,
285 .ring_start = &r300_ring_start, 296 .ring_start = &r300_ring_start,
286 .ring_test = &r100_ring_test, 297 .ring_test = &r100_ring_test,
287 .ring_ib_execute = &r100_ring_ib_execute, 298 .ring = {
299 [RADEON_RING_TYPE_GFX_INDEX] = {
300 .ib_execute = &r100_ring_ib_execute,
301 .emit_fence = &r300_fence_ring_emit,
302 .emit_semaphore = &r100_semaphore_ring_emit,
303 }
304 },
288 .irq_set = &r100_irq_set, 305 .irq_set = &r100_irq_set,
289 .irq_process = &r100_irq_process, 306 .irq_process = &r100_irq_process,
290 .get_vblank_counter = &r100_get_vblank_counter, 307 .get_vblank_counter = &r100_get_vblank_counter,
291 .fence_ring_emit = &r300_fence_ring_emit,
292 .cs_parse = &r300_cs_parse, 308 .cs_parse = &r300_cs_parse,
293 .copy_blit = &r100_copy_blit, 309 .copy_blit = &r100_copy_blit,
294 .copy_dma = &r200_copy_dma, 310 .copy_dma = &r200_copy_dma,
@@ -328,14 +344,18 @@ static struct radeon_asic r420_asic = {
328 .asic_reset = &r300_asic_reset, 344 .asic_reset = &r300_asic_reset,
329 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, 345 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
330 .gart_set_page = &rv370_pcie_gart_set_page, 346 .gart_set_page = &rv370_pcie_gart_set_page,
331 .cp_commit = &r100_cp_commit,
332 .ring_start = &r300_ring_start, 347 .ring_start = &r300_ring_start,
333 .ring_test = &r100_ring_test, 348 .ring_test = &r100_ring_test,
334 .ring_ib_execute = &r100_ring_ib_execute, 349 .ring = {
350 [RADEON_RING_TYPE_GFX_INDEX] = {
351 .ib_execute = &r100_ring_ib_execute,
352 .emit_fence = &r300_fence_ring_emit,
353 .emit_semaphore = &r100_semaphore_ring_emit,
354 }
355 },
335 .irq_set = &r100_irq_set, 356 .irq_set = &r100_irq_set,
336 .irq_process = &r100_irq_process, 357 .irq_process = &r100_irq_process,
337 .get_vblank_counter = &r100_get_vblank_counter, 358 .get_vblank_counter = &r100_get_vblank_counter,
338 .fence_ring_emit = &r300_fence_ring_emit,
339 .cs_parse = &r300_cs_parse, 359 .cs_parse = &r300_cs_parse,
340 .copy_blit = &r100_copy_blit, 360 .copy_blit = &r100_copy_blit,
341 .copy_dma = &r200_copy_dma, 361 .copy_dma = &r200_copy_dma,
@@ -376,14 +396,18 @@ static struct radeon_asic rs400_asic = {
376 .asic_reset = &r300_asic_reset, 396 .asic_reset = &r300_asic_reset,
377 .gart_tlb_flush = &rs400_gart_tlb_flush, 397 .gart_tlb_flush = &rs400_gart_tlb_flush,
378 .gart_set_page = &rs400_gart_set_page, 398 .gart_set_page = &rs400_gart_set_page,
379 .cp_commit = &r100_cp_commit,
380 .ring_start = &r300_ring_start, 399 .ring_start = &r300_ring_start,
381 .ring_test = &r100_ring_test, 400 .ring_test = &r100_ring_test,
382 .ring_ib_execute = &r100_ring_ib_execute, 401 .ring = {
402 [RADEON_RING_TYPE_GFX_INDEX] = {
403 .ib_execute = &r100_ring_ib_execute,
404 .emit_fence = &r300_fence_ring_emit,
405 .emit_semaphore = &r100_semaphore_ring_emit,
406 }
407 },
383 .irq_set = &r100_irq_set, 408 .irq_set = &r100_irq_set,
384 .irq_process = &r100_irq_process, 409 .irq_process = &r100_irq_process,
385 .get_vblank_counter = &r100_get_vblank_counter, 410 .get_vblank_counter = &r100_get_vblank_counter,
386 .fence_ring_emit = &r300_fence_ring_emit,
387 .cs_parse = &r300_cs_parse, 411 .cs_parse = &r300_cs_parse,
388 .copy_blit = &r100_copy_blit, 412 .copy_blit = &r100_copy_blit,
389 .copy_dma = &r200_copy_dma, 413 .copy_dma = &r200_copy_dma,
@@ -424,14 +448,18 @@ static struct radeon_asic rs600_asic = {
424 .asic_reset = &rs600_asic_reset, 448 .asic_reset = &rs600_asic_reset,
425 .gart_tlb_flush = &rs600_gart_tlb_flush, 449 .gart_tlb_flush = &rs600_gart_tlb_flush,
426 .gart_set_page = &rs600_gart_set_page, 450 .gart_set_page = &rs600_gart_set_page,
427 .cp_commit = &r100_cp_commit,
428 .ring_start = &r300_ring_start, 451 .ring_start = &r300_ring_start,
429 .ring_test = &r100_ring_test, 452 .ring_test = &r100_ring_test,
430 .ring_ib_execute = &r100_ring_ib_execute, 453 .ring = {
454 [RADEON_RING_TYPE_GFX_INDEX] = {
455 .ib_execute = &r100_ring_ib_execute,
456 .emit_fence = &r300_fence_ring_emit,
457 .emit_semaphore = &r100_semaphore_ring_emit,
458 }
459 },
431 .irq_set = &rs600_irq_set, 460 .irq_set = &rs600_irq_set,
432 .irq_process = &rs600_irq_process, 461 .irq_process = &rs600_irq_process,
433 .get_vblank_counter = &rs600_get_vblank_counter, 462 .get_vblank_counter = &rs600_get_vblank_counter,
434 .fence_ring_emit = &r300_fence_ring_emit,
435 .cs_parse = &r300_cs_parse, 463 .cs_parse = &r300_cs_parse,
436 .copy_blit = &r100_copy_blit, 464 .copy_blit = &r100_copy_blit,
437 .copy_dma = &r200_copy_dma, 465 .copy_dma = &r200_copy_dma,
@@ -472,14 +500,18 @@ static struct radeon_asic rs690_asic = {
472 .asic_reset = &rs600_asic_reset, 500 .asic_reset = &rs600_asic_reset,
473 .gart_tlb_flush = &rs400_gart_tlb_flush, 501 .gart_tlb_flush = &rs400_gart_tlb_flush,
474 .gart_set_page = &rs400_gart_set_page, 502 .gart_set_page = &rs400_gart_set_page,
475 .cp_commit = &r100_cp_commit,
476 .ring_start = &r300_ring_start, 503 .ring_start = &r300_ring_start,
477 .ring_test = &r100_ring_test, 504 .ring_test = &r100_ring_test,
478 .ring_ib_execute = &r100_ring_ib_execute, 505 .ring = {
506 [RADEON_RING_TYPE_GFX_INDEX] = {
507 .ib_execute = &r100_ring_ib_execute,
508 .emit_fence = &r300_fence_ring_emit,
509 .emit_semaphore = &r100_semaphore_ring_emit,
510 }
511 },
479 .irq_set = &rs600_irq_set, 512 .irq_set = &rs600_irq_set,
480 .irq_process = &rs600_irq_process, 513 .irq_process = &rs600_irq_process,
481 .get_vblank_counter = &rs600_get_vblank_counter, 514 .get_vblank_counter = &rs600_get_vblank_counter,
482 .fence_ring_emit = &r300_fence_ring_emit,
483 .cs_parse = &r300_cs_parse, 515 .cs_parse = &r300_cs_parse,
484 .copy_blit = &r100_copy_blit, 516 .copy_blit = &r100_copy_blit,
485 .copy_dma = &r200_copy_dma, 517 .copy_dma = &r200_copy_dma,
@@ -520,14 +552,18 @@ static struct radeon_asic rv515_asic = {
520 .asic_reset = &rs600_asic_reset, 552 .asic_reset = &rs600_asic_reset,
521 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, 553 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
522 .gart_set_page = &rv370_pcie_gart_set_page, 554 .gart_set_page = &rv370_pcie_gart_set_page,
523 .cp_commit = &r100_cp_commit,
524 .ring_start = &rv515_ring_start, 555 .ring_start = &rv515_ring_start,
525 .ring_test = &r100_ring_test, 556 .ring_test = &r100_ring_test,
526 .ring_ib_execute = &r100_ring_ib_execute, 557 .ring = {
558 [RADEON_RING_TYPE_GFX_INDEX] = {
559 .ib_execute = &r100_ring_ib_execute,
560 .emit_fence = &r300_fence_ring_emit,
561 .emit_semaphore = &r100_semaphore_ring_emit,
562 }
563 },
527 .irq_set = &rs600_irq_set, 564 .irq_set = &rs600_irq_set,
528 .irq_process = &rs600_irq_process, 565 .irq_process = &rs600_irq_process,
529 .get_vblank_counter = &rs600_get_vblank_counter, 566 .get_vblank_counter = &rs600_get_vblank_counter,
530 .fence_ring_emit = &r300_fence_ring_emit,
531 .cs_parse = &r300_cs_parse, 567 .cs_parse = &r300_cs_parse,
532 .copy_blit = &r100_copy_blit, 568 .copy_blit = &r100_copy_blit,
533 .copy_dma = &r200_copy_dma, 569 .copy_dma = &r200_copy_dma,
@@ -568,14 +604,18 @@ static struct radeon_asic r520_asic = {
568 .asic_reset = &rs600_asic_reset, 604 .asic_reset = &rs600_asic_reset,
569 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, 605 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
570 .gart_set_page = &rv370_pcie_gart_set_page, 606 .gart_set_page = &rv370_pcie_gart_set_page,
571 .cp_commit = &r100_cp_commit,
572 .ring_start = &rv515_ring_start, 607 .ring_start = &rv515_ring_start,
573 .ring_test = &r100_ring_test, 608 .ring_test = &r100_ring_test,
574 .ring_ib_execute = &r100_ring_ib_execute, 609 .ring = {
610 [RADEON_RING_TYPE_GFX_INDEX] = {
611 .ib_execute = &r100_ring_ib_execute,
612 .emit_fence = &r300_fence_ring_emit,
613 .emit_semaphore = &r100_semaphore_ring_emit,
614 }
615 },
575 .irq_set = &rs600_irq_set, 616 .irq_set = &rs600_irq_set,
576 .irq_process = &rs600_irq_process, 617 .irq_process = &rs600_irq_process,
577 .get_vblank_counter = &rs600_get_vblank_counter, 618 .get_vblank_counter = &rs600_get_vblank_counter,
578 .fence_ring_emit = &r300_fence_ring_emit,
579 .cs_parse = &r300_cs_parse, 619 .cs_parse = &r300_cs_parse,
580 .copy_blit = &r100_copy_blit, 620 .copy_blit = &r100_copy_blit,
581 .copy_dma = &r200_copy_dma, 621 .copy_dma = &r200_copy_dma,
@@ -611,18 +651,22 @@ static struct radeon_asic r600_asic = {
611 .fini = &r600_fini, 651 .fini = &r600_fini,
612 .suspend = &r600_suspend, 652 .suspend = &r600_suspend,
613 .resume = &r600_resume, 653 .resume = &r600_resume,
614 .cp_commit = &r600_cp_commit,
615 .vga_set_state = &r600_vga_set_state, 654 .vga_set_state = &r600_vga_set_state,
616 .gpu_is_lockup = &r600_gpu_is_lockup, 655 .gpu_is_lockup = &r600_gpu_is_lockup,
617 .asic_reset = &r600_asic_reset, 656 .asic_reset = &r600_asic_reset,
618 .gart_tlb_flush = &r600_pcie_gart_tlb_flush, 657 .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
619 .gart_set_page = &rs600_gart_set_page, 658 .gart_set_page = &rs600_gart_set_page,
620 .ring_test = &r600_ring_test, 659 .ring_test = &r600_ring_test,
621 .ring_ib_execute = &r600_ring_ib_execute, 660 .ring = {
661 [RADEON_RING_TYPE_GFX_INDEX] = {
662 .ib_execute = &r600_ring_ib_execute,
663 .emit_fence = &r600_fence_ring_emit,
664 .emit_semaphore = &r600_semaphore_ring_emit,
665 }
666 },
622 .irq_set = &r600_irq_set, 667 .irq_set = &r600_irq_set,
623 .irq_process = &r600_irq_process, 668 .irq_process = &r600_irq_process,
624 .get_vblank_counter = &rs600_get_vblank_counter, 669 .get_vblank_counter = &rs600_get_vblank_counter,
625 .fence_ring_emit = &r600_fence_ring_emit,
626 .cs_parse = &r600_cs_parse, 670 .cs_parse = &r600_cs_parse,
627 .copy_blit = &r600_copy_blit, 671 .copy_blit = &r600_copy_blit,
628 .copy_dma = NULL, 672 .copy_dma = NULL,
@@ -658,18 +702,22 @@ static struct radeon_asic rs780_asic = {
658 .fini = &r600_fini, 702 .fini = &r600_fini,
659 .suspend = &r600_suspend, 703 .suspend = &r600_suspend,
660 .resume = &r600_resume, 704 .resume = &r600_resume,
661 .cp_commit = &r600_cp_commit,
662 .gpu_is_lockup = &r600_gpu_is_lockup, 705 .gpu_is_lockup = &r600_gpu_is_lockup,
663 .vga_set_state = &r600_vga_set_state, 706 .vga_set_state = &r600_vga_set_state,
664 .asic_reset = &r600_asic_reset, 707 .asic_reset = &r600_asic_reset,
665 .gart_tlb_flush = &r600_pcie_gart_tlb_flush, 708 .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
666 .gart_set_page = &rs600_gart_set_page, 709 .gart_set_page = &rs600_gart_set_page,
667 .ring_test = &r600_ring_test, 710 .ring_test = &r600_ring_test,
668 .ring_ib_execute = &r600_ring_ib_execute, 711 .ring = {
712 [RADEON_RING_TYPE_GFX_INDEX] = {
713 .ib_execute = &r600_ring_ib_execute,
714 .emit_fence = &r600_fence_ring_emit,
715 .emit_semaphore = &r600_semaphore_ring_emit,
716 }
717 },
669 .irq_set = &r600_irq_set, 718 .irq_set = &r600_irq_set,
670 .irq_process = &r600_irq_process, 719 .irq_process = &r600_irq_process,
671 .get_vblank_counter = &rs600_get_vblank_counter, 720 .get_vblank_counter = &rs600_get_vblank_counter,
672 .fence_ring_emit = &r600_fence_ring_emit,
673 .cs_parse = &r600_cs_parse, 721 .cs_parse = &r600_cs_parse,
674 .copy_blit = &r600_copy_blit, 722 .copy_blit = &r600_copy_blit,
675 .copy_dma = NULL, 723 .copy_dma = NULL,
@@ -705,18 +753,22 @@ static struct radeon_asic rv770_asic = {
705 .fini = &rv770_fini, 753 .fini = &rv770_fini,
706 .suspend = &rv770_suspend, 754 .suspend = &rv770_suspend,
707 .resume = &rv770_resume, 755 .resume = &rv770_resume,
708 .cp_commit = &r600_cp_commit,
709 .asic_reset = &r600_asic_reset, 756 .asic_reset = &r600_asic_reset,
710 .gpu_is_lockup = &r600_gpu_is_lockup, 757 .gpu_is_lockup = &r600_gpu_is_lockup,
711 .vga_set_state = &r600_vga_set_state, 758 .vga_set_state = &r600_vga_set_state,
712 .gart_tlb_flush = &r600_pcie_gart_tlb_flush, 759 .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
713 .gart_set_page = &rs600_gart_set_page, 760 .gart_set_page = &rs600_gart_set_page,
714 .ring_test = &r600_ring_test, 761 .ring_test = &r600_ring_test,
715 .ring_ib_execute = &r600_ring_ib_execute, 762 .ring = {
763 [RADEON_RING_TYPE_GFX_INDEX] = {
764 .ib_execute = &r600_ring_ib_execute,
765 .emit_fence = &r600_fence_ring_emit,
766 .emit_semaphore = &r600_semaphore_ring_emit,
767 }
768 },
716 .irq_set = &r600_irq_set, 769 .irq_set = &r600_irq_set,
717 .irq_process = &r600_irq_process, 770 .irq_process = &r600_irq_process,
718 .get_vblank_counter = &rs600_get_vblank_counter, 771 .get_vblank_counter = &rs600_get_vblank_counter,
719 .fence_ring_emit = &r600_fence_ring_emit,
720 .cs_parse = &r600_cs_parse, 772 .cs_parse = &r600_cs_parse,
721 .copy_blit = &r600_copy_blit, 773 .copy_blit = &r600_copy_blit,
722 .copy_dma = NULL, 774 .copy_dma = NULL,
@@ -752,18 +804,22 @@ static struct radeon_asic evergreen_asic = {
752 .fini = &evergreen_fini, 804 .fini = &evergreen_fini,
753 .suspend = &evergreen_suspend, 805 .suspend = &evergreen_suspend,
754 .resume = &evergreen_resume, 806 .resume = &evergreen_resume,
755 .cp_commit = &r600_cp_commit,
756 .gpu_is_lockup = &evergreen_gpu_is_lockup, 807 .gpu_is_lockup = &evergreen_gpu_is_lockup,
757 .asic_reset = &evergreen_asic_reset, 808 .asic_reset = &evergreen_asic_reset,
758 .vga_set_state = &r600_vga_set_state, 809 .vga_set_state = &r600_vga_set_state,
759 .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush, 810 .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
760 .gart_set_page = &rs600_gart_set_page, 811 .gart_set_page = &rs600_gart_set_page,
761 .ring_test = &r600_ring_test, 812 .ring_test = &r600_ring_test,
762 .ring_ib_execute = &evergreen_ring_ib_execute, 813 .ring = {
814 [RADEON_RING_TYPE_GFX_INDEX] = {
815 .ib_execute = &evergreen_ring_ib_execute,
816 .emit_fence = &r600_fence_ring_emit,
817 .emit_semaphore = &r600_semaphore_ring_emit,
818 }
819 },
763 .irq_set = &evergreen_irq_set, 820 .irq_set = &evergreen_irq_set,
764 .irq_process = &evergreen_irq_process, 821 .irq_process = &evergreen_irq_process,
765 .get_vblank_counter = &evergreen_get_vblank_counter, 822 .get_vblank_counter = &evergreen_get_vblank_counter,
766 .fence_ring_emit = &r600_fence_ring_emit,
767 .cs_parse = &evergreen_cs_parse, 823 .cs_parse = &evergreen_cs_parse,
768 .copy_blit = &r600_copy_blit, 824 .copy_blit = &r600_copy_blit,
769 .copy_dma = NULL, 825 .copy_dma = NULL,
@@ -799,18 +855,22 @@ static struct radeon_asic sumo_asic = {
799 .fini = &evergreen_fini, 855 .fini = &evergreen_fini,
800 .suspend = &evergreen_suspend, 856 .suspend = &evergreen_suspend,
801 .resume = &evergreen_resume, 857 .resume = &evergreen_resume,
802 .cp_commit = &r600_cp_commit,
803 .gpu_is_lockup = &evergreen_gpu_is_lockup, 858 .gpu_is_lockup = &evergreen_gpu_is_lockup,
804 .asic_reset = &evergreen_asic_reset, 859 .asic_reset = &evergreen_asic_reset,
805 .vga_set_state = &r600_vga_set_state, 860 .vga_set_state = &r600_vga_set_state,
806 .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush, 861 .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
807 .gart_set_page = &rs600_gart_set_page, 862 .gart_set_page = &rs600_gart_set_page,
808 .ring_test = &r600_ring_test, 863 .ring_test = &r600_ring_test,
809 .ring_ib_execute = &evergreen_ring_ib_execute, 864 .ring = {
865 [RADEON_RING_TYPE_GFX_INDEX] = {
866 .ib_execute = &evergreen_ring_ib_execute,
867 .emit_fence = &r600_fence_ring_emit,
868 .emit_semaphore = &r600_semaphore_ring_emit,
869 }
870 },
810 .irq_set = &evergreen_irq_set, 871 .irq_set = &evergreen_irq_set,
811 .irq_process = &evergreen_irq_process, 872 .irq_process = &evergreen_irq_process,
812 .get_vblank_counter = &evergreen_get_vblank_counter, 873 .get_vblank_counter = &evergreen_get_vblank_counter,
813 .fence_ring_emit = &r600_fence_ring_emit,
814 .cs_parse = &evergreen_cs_parse, 874 .cs_parse = &evergreen_cs_parse,
815 .copy_blit = &r600_copy_blit, 875 .copy_blit = &r600_copy_blit,
816 .copy_dma = NULL, 876 .copy_dma = NULL,
@@ -846,18 +906,22 @@ static struct radeon_asic btc_asic = {
846 .fini = &evergreen_fini, 906 .fini = &evergreen_fini,
847 .suspend = &evergreen_suspend, 907 .suspend = &evergreen_suspend,
848 .resume = &evergreen_resume, 908 .resume = &evergreen_resume,
849 .cp_commit = &r600_cp_commit,
850 .gpu_is_lockup = &evergreen_gpu_is_lockup, 909 .gpu_is_lockup = &evergreen_gpu_is_lockup,
851 .asic_reset = &evergreen_asic_reset, 910 .asic_reset = &evergreen_asic_reset,
852 .vga_set_state = &r600_vga_set_state, 911 .vga_set_state = &r600_vga_set_state,
853 .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush, 912 .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
854 .gart_set_page = &rs600_gart_set_page, 913 .gart_set_page = &rs600_gart_set_page,
855 .ring_test = &r600_ring_test, 914 .ring_test = &r600_ring_test,
856 .ring_ib_execute = &evergreen_ring_ib_execute, 915 .ring = {
916 [RADEON_RING_TYPE_GFX_INDEX] = {
917 .ib_execute = &evergreen_ring_ib_execute,
918 .emit_fence = &r600_fence_ring_emit,
919 .emit_semaphore = &r600_semaphore_ring_emit,
920 }
921 },
857 .irq_set = &evergreen_irq_set, 922 .irq_set = &evergreen_irq_set,
858 .irq_process = &evergreen_irq_process, 923 .irq_process = &evergreen_irq_process,
859 .get_vblank_counter = &evergreen_get_vblank_counter, 924 .get_vblank_counter = &evergreen_get_vblank_counter,
860 .fence_ring_emit = &r600_fence_ring_emit,
861 .cs_parse = &evergreen_cs_parse, 925 .cs_parse = &evergreen_cs_parse,
862 .copy_blit = &r600_copy_blit, 926 .copy_blit = &r600_copy_blit,
863 .copy_dma = NULL, 927 .copy_dma = NULL,
@@ -888,23 +952,50 @@ static struct radeon_asic btc_asic = {
888 .post_page_flip = &evergreen_post_page_flip, 952 .post_page_flip = &evergreen_post_page_flip,
889}; 953};
890 954
955static const struct radeon_vm_funcs cayman_vm_funcs = {
956 .init = &cayman_vm_init,
957 .fini = &cayman_vm_fini,
958 .bind = &cayman_vm_bind,
959 .unbind = &cayman_vm_unbind,
960 .tlb_flush = &cayman_vm_tlb_flush,
961 .page_flags = &cayman_vm_page_flags,
962 .set_page = &cayman_vm_set_page,
963};
964
891static struct radeon_asic cayman_asic = { 965static struct radeon_asic cayman_asic = {
892 .init = &cayman_init, 966 .init = &cayman_init,
893 .fini = &cayman_fini, 967 .fini = &cayman_fini,
894 .suspend = &cayman_suspend, 968 .suspend = &cayman_suspend,
895 .resume = &cayman_resume, 969 .resume = &cayman_resume,
896 .cp_commit = &r600_cp_commit,
897 .gpu_is_lockup = &cayman_gpu_is_lockup, 970 .gpu_is_lockup = &cayman_gpu_is_lockup,
898 .asic_reset = &cayman_asic_reset, 971 .asic_reset = &cayman_asic_reset,
899 .vga_set_state = &r600_vga_set_state, 972 .vga_set_state = &r600_vga_set_state,
900 .gart_tlb_flush = &cayman_pcie_gart_tlb_flush, 973 .gart_tlb_flush = &cayman_pcie_gart_tlb_flush,
901 .gart_set_page = &rs600_gart_set_page, 974 .gart_set_page = &rs600_gart_set_page,
902 .ring_test = &r600_ring_test, 975 .ring_test = &r600_ring_test,
903 .ring_ib_execute = &evergreen_ring_ib_execute, 976 .ring = {
977 [RADEON_RING_TYPE_GFX_INDEX] = {
978 .ib_execute = &cayman_ring_ib_execute,
979 .ib_parse = &evergreen_ib_parse,
980 .emit_fence = &cayman_fence_ring_emit,
981 .emit_semaphore = &r600_semaphore_ring_emit,
982 },
983 [CAYMAN_RING_TYPE_CP1_INDEX] = {
984 .ib_execute = &cayman_ring_ib_execute,
985 .ib_parse = &evergreen_ib_parse,
986 .emit_fence = &cayman_fence_ring_emit,
987 .emit_semaphore = &r600_semaphore_ring_emit,
988 },
989 [CAYMAN_RING_TYPE_CP2_INDEX] = {
990 .ib_execute = &cayman_ring_ib_execute,
991 .ib_parse = &evergreen_ib_parse,
992 .emit_fence = &cayman_fence_ring_emit,
993 .emit_semaphore = &r600_semaphore_ring_emit,
994 }
995 },
904 .irq_set = &evergreen_irq_set, 996 .irq_set = &evergreen_irq_set,
905 .irq_process = &evergreen_irq_process, 997 .irq_process = &evergreen_irq_process,
906 .get_vblank_counter = &evergreen_get_vblank_counter, 998 .get_vblank_counter = &evergreen_get_vblank_counter,
907 .fence_ring_emit = &r600_fence_ring_emit,
908 .cs_parse = &evergreen_cs_parse, 999 .cs_parse = &evergreen_cs_parse,
909 .copy_blit = &r600_copy_blit, 1000 .copy_blit = &r600_copy_blit,
910 .copy_dma = NULL, 1001 .copy_dma = NULL,
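
The pattern repeated across every asic table above is the core of this patch: the flat .ring_ib_execute / .fence_ring_emit hooks become a .ring[] array indexed by ring id, which is what lets Cayman expose its GFX ring and two compute command processors through one structure. A compilable userspace sketch of the dispatch (simplified stand-in types; the fake_* callbacks are illustrative, not driver functions):

#include <stdio.h>

enum { RING_GFX = 0, RING_CP1 = 1, RING_CP2 = 2, NUM_RINGS = 3 };

struct ib { int ring; };

/* per-ring hooks, mirroring the new .ring[] member of radeon_asic */
struct ring_funcs {
	void (*ib_execute)(struct ib *ib);
	void (*emit_fence)(int ring);
};

struct asic {
	struct ring_funcs ring[NUM_RINGS];
};

static void fake_ib_execute(struct ib *ib)
{
	printf("executing IB on ring %d\n", ib->ring);
}

static void fake_emit_fence(int ring)
{
	printf("fence emitted on ring %d\n", ring);
}

int main(void)
{
	/* same designated-initializer style the driver uses */
	struct asic cayman = {
		.ring = {
			[RING_GFX] = { fake_ib_execute, fake_emit_fence },
			[RING_CP1] = { fake_ib_execute, fake_emit_fence },
			[RING_CP2] = { fake_ib_execute, fake_emit_fence },
		},
	};
	struct ib ib = { .ring = RING_CP1 };

	/* dispatch is now indexed by the IB's ring instead of hard-wired */
	cayman.ring[ib.ring].ib_execute(&ib);
	cayman.ring[ib.ring].emit_fence(ib.ring);
	return 0;
}
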
@@ -945,6 +1036,9 @@ int radeon_asic_init(struct radeon_device *rdev)
945 else 1036 else
946 rdev->num_crtc = 2; 1037 rdev->num_crtc = 2;
947 1038
1039 /* set the ring used for bo copies */
1040 rdev->copy_ring = RADEON_RING_TYPE_GFX_INDEX;
1041
948 switch (rdev->family) { 1042 switch (rdev->family) {
949 case CHIP_R100: 1043 case CHIP_R100:
950 case CHIP_RV100: 1044 case CHIP_RV100:
@@ -1050,6 +1144,7 @@ int radeon_asic_init(struct radeon_device *rdev)
1050 rdev->asic = &cayman_asic; 1144 rdev->asic = &cayman_asic;
1051 /* set num crtcs */ 1145 /* set num crtcs */
1052 rdev->num_crtc = 6; 1146 rdev->num_crtc = 6;
1147 rdev->vm_manager.funcs = &cayman_vm_funcs;
1053 break; 1148 break;
1054 default: 1149 default:
1055 /* FIXME: not supported yet */ 1150 /* FIXME: not supported yet */
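
Note that cayman_vm_funcs is only installed for the Cayman branch of the family switch; older asics leave vm_manager.funcs unset, and the CS path separately refuses VM submissions when the manager is not enabled. A sketch of that ops-table convention (simplified types and names; the real driver gates on vm_manager.enabled rather than a NULL pointer):

#include <stdio.h>

struct vm;

/* mirrors the shape of radeon_vm_funcs: an absent table means no VM support */
struct vm_funcs {
	int (*init)(void);
	void (*tlb_flush)(struct vm *vm);
};

struct device {
	const struct vm_funcs *vm_funcs; /* set per asic family */
};

static int cayman_like_vm_init(void) { puts("vm init"); return 0; }
static void cayman_like_tlb_flush(struct vm *vm) { (void)vm; puts("tlb flush"); }

static const struct vm_funcs cayman_like_funcs = {
	.init = cayman_like_vm_init,
	.tlb_flush = cayman_like_tlb_flush,
};

int main(void)
{
	struct device older = { .vm_funcs = NULL };          /* pre-Cayman */
	struct device cayman = { .vm_funcs = &cayman_like_funcs };

	/* a capability check of this kind guards the VM CS path */
	if (!older.vm_funcs)
		puts("VM not active on asic!");
	if (cayman.vm_funcs)
		cayman.vm_funcs->init();
	return 0;
}
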
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 59914842a72..6304aef0d9b 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -58,17 +58,20 @@ void r100_fini(struct radeon_device *rdev);
58int r100_suspend(struct radeon_device *rdev); 58int r100_suspend(struct radeon_device *rdev);
59int r100_resume(struct radeon_device *rdev); 59int r100_resume(struct radeon_device *rdev);
60void r100_vga_set_state(struct radeon_device *rdev, bool state); 60void r100_vga_set_state(struct radeon_device *rdev, bool state);
61bool r100_gpu_is_lockup(struct radeon_device *rdev); 61bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
62int r100_asic_reset(struct radeon_device *rdev); 62int r100_asic_reset(struct radeon_device *rdev);
63u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc); 63u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
64void r100_pci_gart_tlb_flush(struct radeon_device *rdev); 64void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
65int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); 65int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
66void r100_cp_commit(struct radeon_device *rdev);
67void r100_ring_start(struct radeon_device *rdev); 66void r100_ring_start(struct radeon_device *rdev);
68int r100_irq_set(struct radeon_device *rdev); 67int r100_irq_set(struct radeon_device *rdev);
69int r100_irq_process(struct radeon_device *rdev); 68int r100_irq_process(struct radeon_device *rdev);
70void r100_fence_ring_emit(struct radeon_device *rdev, 69void r100_fence_ring_emit(struct radeon_device *rdev,
71 struct radeon_fence *fence); 70 struct radeon_fence *fence);
71void r100_semaphore_ring_emit(struct radeon_device *rdev,
72 struct radeon_ring *cp,
73 struct radeon_semaphore *semaphore,
74 bool emit_wait);
72int r100_cs_parse(struct radeon_cs_parser *p); 75int r100_cs_parse(struct radeon_cs_parser *p);
73void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 76void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
74uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg); 77uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg);
@@ -83,7 +86,7 @@ int r100_set_surface_reg(struct radeon_device *rdev, int reg,
83void r100_clear_surface_reg(struct radeon_device *rdev, int reg); 86void r100_clear_surface_reg(struct radeon_device *rdev, int reg);
84void r100_bandwidth_update(struct radeon_device *rdev); 87void r100_bandwidth_update(struct radeon_device *rdev);
85void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); 88void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
86int r100_ring_test(struct radeon_device *rdev); 89int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
87void r100_hpd_init(struct radeon_device *rdev); 90void r100_hpd_init(struct radeon_device *rdev);
88void r100_hpd_fini(struct radeon_device *rdev); 91void r100_hpd_fini(struct radeon_device *rdev);
89bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); 92bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@ -101,12 +104,12 @@ void r100_pci_gart_disable(struct radeon_device *rdev);
101int r100_debugfs_mc_info_init(struct radeon_device *rdev); 104int r100_debugfs_mc_info_init(struct radeon_device *rdev);
102int r100_gui_wait_for_idle(struct radeon_device *rdev); 105int r100_gui_wait_for_idle(struct radeon_device *rdev);
103void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, 106void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup,
104 struct radeon_cp *cp); 107 struct radeon_ring *cp);
105bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, 108bool r100_gpu_cp_is_lockup(struct radeon_device *rdev,
106 struct r100_gpu_lockup *lockup, 109 struct r100_gpu_lockup *lockup,
107 struct radeon_cp *cp); 110 struct radeon_ring *cp);
108void r100_ib_fini(struct radeon_device *rdev); 111void r100_ib_fini(struct radeon_device *rdev);
109int r100_ib_init(struct radeon_device *rdev); 112int r100_ib_test(struct radeon_device *rdev);
110void r100_irq_disable(struct radeon_device *rdev); 113void r100_irq_disable(struct radeon_device *rdev);
111void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save); 114void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save);
112void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save); 115void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save);
@@ -154,7 +157,7 @@ extern int r300_init(struct radeon_device *rdev);
154extern void r300_fini(struct radeon_device *rdev); 157extern void r300_fini(struct radeon_device *rdev);
155extern int r300_suspend(struct radeon_device *rdev); 158extern int r300_suspend(struct radeon_device *rdev);
156extern int r300_resume(struct radeon_device *rdev); 159extern int r300_resume(struct radeon_device *rdev);
157extern bool r300_gpu_is_lockup(struct radeon_device *rdev); 160extern bool r300_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
158extern int r300_asic_reset(struct radeon_device *rdev); 161extern int r300_asic_reset(struct radeon_device *rdev);
159extern void r300_ring_start(struct radeon_device *rdev); 162extern void r300_ring_start(struct radeon_device *rdev);
160extern void r300_fence_ring_emit(struct radeon_device *rdev, 163extern void r300_fence_ring_emit(struct radeon_device *rdev,
@@ -293,22 +296,25 @@ int r600_resume(struct radeon_device *rdev);
293void r600_vga_set_state(struct radeon_device *rdev, bool state); 296void r600_vga_set_state(struct radeon_device *rdev, bool state);
294int r600_wb_init(struct radeon_device *rdev); 297int r600_wb_init(struct radeon_device *rdev);
295void r600_wb_fini(struct radeon_device *rdev); 298void r600_wb_fini(struct radeon_device *rdev);
296void r600_cp_commit(struct radeon_device *rdev);
297void r600_pcie_gart_tlb_flush(struct radeon_device *rdev); 299void r600_pcie_gart_tlb_flush(struct radeon_device *rdev);
298uint32_t r600_pciep_rreg(struct radeon_device *rdev, uint32_t reg); 300uint32_t r600_pciep_rreg(struct radeon_device *rdev, uint32_t reg);
299void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 301void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
300int r600_cs_parse(struct radeon_cs_parser *p); 302int r600_cs_parse(struct radeon_cs_parser *p);
301void r600_fence_ring_emit(struct radeon_device *rdev, 303void r600_fence_ring_emit(struct radeon_device *rdev,
302 struct radeon_fence *fence); 304 struct radeon_fence *fence);
303bool r600_gpu_is_lockup(struct radeon_device *rdev); 305void r600_semaphore_ring_emit(struct radeon_device *rdev,
306 struct radeon_ring *cp,
307 struct radeon_semaphore *semaphore,
308 bool emit_wait);
309bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
304int r600_asic_reset(struct radeon_device *rdev); 310int r600_asic_reset(struct radeon_device *rdev);
305int r600_set_surface_reg(struct radeon_device *rdev, int reg, 311int r600_set_surface_reg(struct radeon_device *rdev, int reg,
306 uint32_t tiling_flags, uint32_t pitch, 312 uint32_t tiling_flags, uint32_t pitch,
307 uint32_t offset, uint32_t obj_size); 313 uint32_t offset, uint32_t obj_size);
308void r600_clear_surface_reg(struct radeon_device *rdev, int reg); 314void r600_clear_surface_reg(struct radeon_device *rdev, int reg);
309int r600_ib_test(struct radeon_device *rdev); 315int r600_ib_test(struct radeon_device *rdev, int ring);
310void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); 316void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
311int r600_ring_test(struct radeon_device *rdev); 317int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
312int r600_copy_blit(struct radeon_device *rdev, 318int r600_copy_blit(struct radeon_device *rdev,
313 uint64_t src_offset, uint64_t dst_offset, 319 uint64_t src_offset, uint64_t dst_offset,
314 unsigned num_gpu_pages, struct radeon_fence *fence); 320 unsigned num_gpu_pages, struct radeon_fence *fence);
@@ -328,7 +334,7 @@ extern int r600_get_pcie_lanes(struct radeon_device *rdev);
328bool r600_card_posted(struct radeon_device *rdev); 334bool r600_card_posted(struct radeon_device *rdev);
329void r600_cp_stop(struct radeon_device *rdev); 335void r600_cp_stop(struct radeon_device *rdev);
330int r600_cp_start(struct radeon_device *rdev); 336int r600_cp_start(struct radeon_device *rdev);
331void r600_ring_init(struct radeon_device *rdev, unsigned ring_size); 337void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size);
332int r600_cp_resume(struct radeon_device *rdev); 338int r600_cp_resume(struct radeon_device *rdev);
333void r600_cp_fini(struct radeon_device *rdev); 339void r600_cp_fini(struct radeon_device *rdev);
334int r600_count_pipe_bits(uint32_t val); 340int r600_count_pipe_bits(uint32_t val);
@@ -397,7 +403,7 @@ int evergreen_init(struct radeon_device *rdev);
397void evergreen_fini(struct radeon_device *rdev); 403void evergreen_fini(struct radeon_device *rdev);
398int evergreen_suspend(struct radeon_device *rdev); 404int evergreen_suspend(struct radeon_device *rdev);
399int evergreen_resume(struct radeon_device *rdev); 405int evergreen_resume(struct radeon_device *rdev);
400bool evergreen_gpu_is_lockup(struct radeon_device *rdev); 406bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
401int evergreen_asic_reset(struct radeon_device *rdev); 407int evergreen_asic_reset(struct radeon_device *rdev);
402void evergreen_bandwidth_update(struct radeon_device *rdev); 408void evergreen_bandwidth_update(struct radeon_device *rdev);
403void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); 409void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
@@ -423,12 +429,26 @@ int evergreen_blit_init(struct radeon_device *rdev);
423/* 429/*
424 * cayman 430 * cayman
425 */ 431 */
432void cayman_fence_ring_emit(struct radeon_device *rdev,
433 struct radeon_fence *fence);
426void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev); 434void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev);
427int cayman_init(struct radeon_device *rdev); 435int cayman_init(struct radeon_device *rdev);
428void cayman_fini(struct radeon_device *rdev); 436void cayman_fini(struct radeon_device *rdev);
429int cayman_suspend(struct radeon_device *rdev); 437int cayman_suspend(struct radeon_device *rdev);
430int cayman_resume(struct radeon_device *rdev); 438int cayman_resume(struct radeon_device *rdev);
431bool cayman_gpu_is_lockup(struct radeon_device *rdev); 439bool cayman_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
432int cayman_asic_reset(struct radeon_device *rdev); 440int cayman_asic_reset(struct radeon_device *rdev);
441void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
442int cayman_vm_init(struct radeon_device *rdev);
443void cayman_vm_fini(struct radeon_device *rdev);
444int cayman_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm, int id);
445void cayman_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm);
446void cayman_vm_tlb_flush(struct radeon_device *rdev, struct radeon_vm *vm);
447uint32_t cayman_vm_page_flags(struct radeon_device *rdev,
448 struct radeon_vm *vm,
449 uint32_t flags);
450void cayman_vm_set_page(struct radeon_device *rdev, struct radeon_vm *vm,
451 unsigned pfn, uint64_t addr, uint32_t flags);
452int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
433 453
434#endif 454#endif
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index 17e1a9b2d8f..815f2341ab9 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -43,7 +43,7 @@ static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size,
43 43
44 start_jiffies = jiffies; 44 start_jiffies = jiffies;
45 for (i = 0; i < n; i++) { 45 for (i = 0; i < n; i++) {
46 r = radeon_fence_create(rdev, &fence); 46 r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
47 if (r) 47 if (r)
48 return r; 48 return r;
49 49
@@ -229,21 +229,21 @@ void radeon_benchmark(struct radeon_device *rdev, int test_number)
229 break; 229 break;
230 case 6: 230 case 6:
231 /* GTT to VRAM, buffer size sweep, common modes */ 231 /* GTT to VRAM, buffer size sweep, common modes */
232 for (i = 1; i < RADEON_BENCHMARK_COMMON_MODES_N; i++) 232 for (i = 0; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
233 radeon_benchmark_move(rdev, common_modes[i], 233 radeon_benchmark_move(rdev, common_modes[i],
234 RADEON_GEM_DOMAIN_GTT, 234 RADEON_GEM_DOMAIN_GTT,
235 RADEON_GEM_DOMAIN_VRAM); 235 RADEON_GEM_DOMAIN_VRAM);
236 break; 236 break;
237 case 7: 237 case 7:
238 /* VRAM to GTT, buffer size sweep, common modes */ 238 /* VRAM to GTT, buffer size sweep, common modes */
239 for (i = 1; i < RADEON_BENCHMARK_COMMON_MODES_N; i++) 239 for (i = 0; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
240 radeon_benchmark_move(rdev, common_modes[i], 240 radeon_benchmark_move(rdev, common_modes[i],
241 RADEON_GEM_DOMAIN_VRAM, 241 RADEON_GEM_DOMAIN_VRAM,
242 RADEON_GEM_DOMAIN_GTT); 242 RADEON_GEM_DOMAIN_GTT);
243 break; 243 break;
244 case 8: 244 case 8:
245 /* VRAM to VRAM, buffer size sweep, common modes */ 245 /* VRAM to VRAM, buffer size sweep, common modes */
246 for (i = 1; i < RADEON_BENCHMARK_COMMON_MODES_N; i++) 246 for (i = 0; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
247 radeon_benchmark_move(rdev, common_modes[i], 247 radeon_benchmark_move(rdev, common_modes[i],
248 RADEON_GEM_DOMAIN_VRAM, 248 RADEON_GEM_DOMAIN_VRAM,
249 RADEON_GEM_DOMAIN_VRAM); 249 RADEON_GEM_DOMAIN_VRAM);
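
These three hunks fix the same off-by-one: the buffer-size sweeps started at i = 1, so common_modes[0] was never benchmarked. A minimal reproduction of the fix:

#include <stdio.h>

#define N 4

int main(void)
{
	int common_modes[N] = { 640, 800, 1024, 1280 };
	int i, tested = 0;

	/* old loop: for (i = 1; i < N; i++) -- skipped common_modes[0] */
	for (i = 0; i < N; i++) {        /* fixed: sweep every entry */
		printf("benchmarking mode %d\n", common_modes[i]);
		tested++;
	}
	printf("%d of %d modes tested\n", tested, N);
	return 0;
}
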
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 29afd71e084..435a3d970ab 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -58,7 +58,7 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
58 58
59 duplicate = false; 59 duplicate = false;
60 r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4]; 60 r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
61 for (j = 0; j < p->nrelocs; j++) { 61 for (j = 0; j < i; j++) {
62 if (r->handle == p->relocs[j].handle) { 62 if (r->handle == p->relocs[j].handle) {
63 p->relocs_ptr[i] = &p->relocs[j]; 63 p->relocs_ptr[i] = &p->relocs[j];
64 duplicate = true; 64 duplicate = true;
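
Bounding the duplicate scan by j < i instead of j < p->nrelocs matters because only the first i entries of relocs[] have been filled in at that point; the old bound compared against slots that had not been initialized yet. A simplified model of the corrected dedup pass (plain structs, hypothetical handle values):

#include <stdbool.h>
#include <stdio.h>

struct reloc { unsigned handle; };

int main(void)
{
	unsigned handles[] = { 3, 5, 3, 7 };
	struct reloc relocs[4];
	unsigned i, j, n = 4;

	for (i = 0; i < n; i++) {
		bool duplicate = false;
		/* only scan entries [0, i): later slots are still unfilled,
		 * which is exactly what the j < i bound protects against */
		for (j = 0; j < i; j++) {
			if (handles[i] == relocs[j].handle) {
				duplicate = true;
				break;
			}
		}
		if (duplicate)
			relocs[i].handle = 0;  /* clear so later scans see no stale data */
		else
			relocs[i].handle = handles[i];
		printf("reloc %u handle %u%s\n", i, handles[i],
		       duplicate ? " (duplicate)" : "");
	}
	return 0;
}
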
@@ -84,16 +84,75 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
84 p->relocs[i].flags = r->flags; 84 p->relocs[i].flags = r->flags;
85 radeon_bo_list_add_object(&p->relocs[i].lobj, 85 radeon_bo_list_add_object(&p->relocs[i].lobj,
86 &p->validated); 86 &p->validated);
87 } 87
88 if (p->relocs[i].robj->tbo.sync_obj && !(r->flags & RADEON_RELOC_DONT_SYNC)) {
89 struct radeon_fence *fence = p->relocs[i].robj->tbo.sync_obj;
90 if (!radeon_fence_signaled(fence)) {
91 p->sync_to_ring[fence->ring] = true;
92 }
93 }
94 } else
95 p->relocs[i].handle = 0;
88 } 96 }
89 return radeon_bo_list_validate(&p->validated); 97 return radeon_bo_list_validate(&p->validated);
90} 98}
91 99
100static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
101{
102 p->priority = priority;
103
104 switch (ring) {
105 default:
106 DRM_ERROR("unknown ring id: %d\n", ring);
107 return -EINVAL;
108 case RADEON_CS_RING_GFX:
109 p->ring = RADEON_RING_TYPE_GFX_INDEX;
110 break;
111 case RADEON_CS_RING_COMPUTE:
112 /* for now */
113 p->ring = RADEON_RING_TYPE_GFX_INDEX;
114 break;
115 }
116 return 0;
117}
118
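radeon_cs_get_ring translates the userspace ring id from the FLAGS chunk into a hardware ring index, with compute deliberately aliased to the GFX ring until a real compute ring is wired up. In miniature (constants reduced; error handling folded into a return code):

#include <stdio.h>

enum { CS_RING_GFX = 0, CS_RING_COMPUTE = 1 };   /* userspace ABI values */
enum { RING_TYPE_GFX_INDEX = 0 };                /* hardware ring index */

static int get_ring(unsigned ring, int *hw_ring)
{
	switch (ring) {
	case CS_RING_GFX:
		*hw_ring = RING_TYPE_GFX_INDEX;
		return 0;
	case CS_RING_COMPUTE:
		/* no dedicated compute ring yet: fall back to GFX, "for now" */
		*hw_ring = RING_TYPE_GFX_INDEX;
		return 0;
	default:
		fprintf(stderr, "unknown ring id: %u\n", ring);
		return -1;
	}
}

int main(void)
{
	int hw;

	if (get_ring(CS_RING_COMPUTE, &hw) == 0)
		printf("compute CS scheduled on hw ring %d\n", hw);
	return 0;
}
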
119static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
120{
121 int i, r;
122
123 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
124 /* no need to sync to our own or unused rings */
125 if (i == p->ring || !p->sync_to_ring[i] || !p->rdev->ring[i].ready)
126 continue;
127
128 if (!p->ib->fence->semaphore) {
129 r = radeon_semaphore_create(p->rdev, &p->ib->fence->semaphore);
130 if (r)
131 return r;
132 }
133
134 r = radeon_ring_lock(p->rdev, &p->rdev->ring[i], 3);
135 if (r)
136 return r;
137 radeon_semaphore_emit_signal(p->rdev, i, p->ib->fence->semaphore);
138 radeon_ring_unlock_commit(p->rdev, &p->rdev->ring[i]);
139
140 r = radeon_ring_lock(p->rdev, &p->rdev->ring[p->ring], 3);
141 if (r)
142 return r;
143 radeon_semaphore_emit_wait(p->rdev, p->ring, p->ib->fence->semaphore);
144 radeon_ring_unlock_commit(p->rdev, &p->rdev->ring[p->ring]);
145 }
146 return 0;
147}
148
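radeon_cs_sync_rings pairs one semaphore signal on each ring the submission depends on with a matching wait on the submitting ring, so the new IB cannot start until those rings reach their signal points. A sketch of just the pairing logic (booleans stand in for fence state and GPU semaphores):

#include <stdbool.h>
#include <stdio.h>

#define NUM_RINGS 3

int main(void)
{
	bool sync_to_ring[NUM_RINGS] = { false, true, true };
	bool ring_ready[NUM_RINGS]   = { true, true, false };
	int submit_ring = 0, i;

	for (i = 0; i < NUM_RINGS; i++) {
		/* no need to sync to our own or unused rings */
		if (i == submit_ring || !sync_to_ring[i] || !ring_ready[i])
			continue;

		/* one signal on the dependency ring ... */
		printf("ring %d: emit semaphore signal\n", i);
		/* ... matched by one wait on the ring that runs the IB */
		printf("ring %d: emit semaphore wait\n", submit_ring);
	}
	return 0;
}
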
92int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) 149int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
93{ 150{
94 struct drm_radeon_cs *cs = data; 151 struct drm_radeon_cs *cs = data;
95 uint64_t *chunk_array_ptr; 152 uint64_t *chunk_array_ptr;
96 unsigned size, i, flags = 0; 153 unsigned size, i;
154 u32 ring = RADEON_CS_RING_GFX;
155 s32 priority = 0;
97 156
98 if (!cs->num_chunks) { 157 if (!cs->num_chunks) {
99 return 0; 158 return 0;
@@ -103,6 +162,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
103 p->idx = 0; 162 p->idx = 0;
104 p->chunk_ib_idx = -1; 163 p->chunk_ib_idx = -1;
105 p->chunk_relocs_idx = -1; 164 p->chunk_relocs_idx = -1;
165 p->chunk_flags_idx = -1;
106 p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL); 166 p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
107 if (p->chunks_array == NULL) { 167 if (p->chunks_array == NULL) {
108 return -ENOMEM; 168 return -ENOMEM;
@@ -112,6 +172,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
112 sizeof(uint64_t)*cs->num_chunks)) { 172 sizeof(uint64_t)*cs->num_chunks)) {
113 return -EFAULT; 173 return -EFAULT;
114 } 174 }
175 p->cs_flags = 0;
115 p->nchunks = cs->num_chunks; 176 p->nchunks = cs->num_chunks;
116 p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL); 177 p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
117 if (p->chunks == NULL) { 178 if (p->chunks == NULL) {
@@ -140,16 +201,19 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
140 if (p->chunks[i].length_dw == 0) 201 if (p->chunks[i].length_dw == 0)
141 return -EINVAL; 202 return -EINVAL;
142 } 203 }
143 if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS && 204 if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
144 !p->chunks[i].length_dw) { 205 p->chunk_flags_idx = i;
145 return -EINVAL; 206 /* zero length flags aren't useful */
207 if (p->chunks[i].length_dw == 0)
208 return -EINVAL;
146 } 209 }
147 210
148 p->chunks[i].length_dw = user_chunk.length_dw; 211 p->chunks[i].length_dw = user_chunk.length_dw;
149 p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data; 212 p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;
150 213
151 cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data; 214 cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data;
152 if (p->chunks[i].chunk_id != RADEON_CHUNK_ID_IB) { 215 if ((p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) ||
216 (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS)) {
153 size = p->chunks[i].length_dw * sizeof(uint32_t); 217 size = p->chunks[i].length_dw * sizeof(uint32_t);
154 p->chunks[i].kdata = kmalloc(size, GFP_KERNEL); 218 p->chunks[i].kdata = kmalloc(size, GFP_KERNEL);
155 if (p->chunks[i].kdata == NULL) { 219 if (p->chunks[i].kdata == NULL) {
@@ -160,29 +224,58 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
160 return -EFAULT; 224 return -EFAULT;
161 } 225 }
162 if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) { 226 if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
163 flags = p->chunks[i].kdata[0]; 227 p->cs_flags = p->chunks[i].kdata[0];
228 if (p->chunks[i].length_dw > 1)
229 ring = p->chunks[i].kdata[1];
230 if (p->chunks[i].length_dw > 2)
231 priority = (s32)p->chunks[i].kdata[2];
164 } 232 }
165 } else {
166 p->chunks[i].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
167 p->chunks[i].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
168 if (p->chunks[i].kpage[0] == NULL || p->chunks[i].kpage[1] == NULL) {
169 kfree(p->chunks[i].kpage[0]);
170 kfree(p->chunks[i].kpage[1]);
171 return -ENOMEM;
172 }
173 p->chunks[i].kpage_idx[0] = -1;
174 p->chunks[i].kpage_idx[1] = -1;
175 p->chunks[i].last_copied_page = -1;
176 p->chunks[i].last_page_index = ((p->chunks[i].length_dw * 4) - 1) / PAGE_SIZE;
177 } 233 }
178 } 234 }
179 if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) { 235
180 DRM_ERROR("cs IB too big: %d\n", 236 if ((p->cs_flags & RADEON_CS_USE_VM) &&
181 p->chunks[p->chunk_ib_idx].length_dw); 237 !p->rdev->vm_manager.enabled) {
238 DRM_ERROR("VM not active on asic!\n");
239 if (p->chunk_relocs_idx != -1)
240 kfree(p->chunks[p->chunk_relocs_idx].kdata);
241 if (p->chunk_flags_idx != -1)
242 kfree(p->chunks[p->chunk_flags_idx].kdata);
182 return -EINVAL; 243 return -EINVAL;
183 } 244 }
184 245
185 p->keep_tiling_flags = (flags & RADEON_CS_KEEP_TILING_FLAGS) != 0; 246 if (radeon_cs_get_ring(p, ring, priority)) {
247 if (p->chunk_relocs_idx != -1)
248 kfree(p->chunks[p->chunk_relocs_idx].kdata);
249 if (p->chunk_flags_idx != -1)
250 kfree(p->chunks[p->chunk_flags_idx].kdata);
251 return -EINVAL;
252 }
253
254
255 /* deal with non-vm */
256 if ((p->chunk_ib_idx != -1) &&
257 ((p->cs_flags & RADEON_CS_USE_VM) == 0) &&
258 (p->chunks[p->chunk_ib_idx].chunk_id == RADEON_CHUNK_ID_IB)) {
259 if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) {
260 DRM_ERROR("cs IB too big: %d\n",
261 p->chunks[p->chunk_ib_idx].length_dw);
262 return -EINVAL;
263 }
264 p->chunks[p->chunk_ib_idx].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
265 p->chunks[p->chunk_ib_idx].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
266 if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL ||
267 p->chunks[p->chunk_ib_idx].kpage[1] == NULL) {
268 kfree(p->chunks[p->chunk_ib_idx].kpage[0]);
269 kfree(p->chunks[p->chunk_ib_idx].kpage[1]);
270 return -ENOMEM;
271 }
272 p->chunks[p->chunk_ib_idx].kpage_idx[0] = -1;
273 p->chunks[p->chunk_ib_idx].kpage_idx[1] = -1;
274 p->chunks[p->chunk_ib_idx].last_copied_page = -1;
275 p->chunks[p->chunk_ib_idx].last_page_index =
276 ((p->chunks[p->chunk_ib_idx].length_dw * 4) - 1) / PAGE_SIZE;
277 }
278
186 return 0; 279 return 0;
187} 280}
188 281
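The FLAGS chunk now carries up to three dwords: dword 0 is cs_flags, dword 1 optionally selects the ring, dword 2 optionally sets a priority, and short chunks fall back to defaults. Decoding it looks like this (the 0x2 flag value matches RADEON_CS_USE_VM in the published ABI, used here only as example data):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* FLAGS chunk as the updated parser reads it:
	 * dword 0: cs_flags, dword 1: ring, dword 2: priority (both optional) */
	uint32_t kdata[] = { 0x2 /* e.g. RADEON_CS_USE_VM */, 0 /* GFX */, 5 };
	unsigned length_dw = 3;

	uint32_t cs_flags = kdata[0];
	uint32_t ring = 0;          /* defaults when the chunk is short */
	int32_t priority = 0;

	if (length_dw > 1)
		ring = kdata[1];
	if (length_dw > 2)
		priority = (int32_t)kdata[2];

	printf("flags=0x%x ring=%u priority=%d\n",
	       cs_flags, (unsigned)ring, priority);
	return 0;
}
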
@@ -224,11 +317,139 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
224 radeon_ib_free(parser->rdev, &parser->ib); 317 radeon_ib_free(parser->rdev, &parser->ib);
225} 318}
226 319
320static int radeon_cs_ib_chunk(struct radeon_device *rdev,
321 struct radeon_cs_parser *parser)
322{
323 struct radeon_cs_chunk *ib_chunk;
324 int r;
325
326 if (parser->chunk_ib_idx == -1)
327 return 0;
328
329 if (parser->cs_flags & RADEON_CS_USE_VM)
330 return 0;
331
332 ib_chunk = &parser->chunks[parser->chunk_ib_idx];
333 /* Copy the packet into the IB, the parser will read from the
334 * input memory (cached) and write to the IB (which can be
335 * uncached).
336 */
337 r = radeon_ib_get(rdev, parser->ring, &parser->ib,
338 ib_chunk->length_dw * 4);
339 if (r) {
340 DRM_ERROR("Failed to get ib !\n");
341 return r;
342 }
343 parser->ib->length_dw = ib_chunk->length_dw;
344 r = radeon_cs_parse(parser);
345 if (r || parser->parser_error) {
346 DRM_ERROR("Invalid command stream !\n");
347 return r;
348 }
349 r = radeon_cs_finish_pages(parser);
350 if (r) {
351 DRM_ERROR("Invalid command stream !\n");
352 return r;
353 }
354 r = radeon_cs_sync_rings(parser);
355 if (r) {
356 DRM_ERROR("Failed to synchronize rings !\n");
357 }
358 parser->ib->vm_id = 0;
359 r = radeon_ib_schedule(rdev, parser->ib);
360 if (r) {
361 DRM_ERROR("Failed to schedule IB !\n");
362 }
363 return 0;
364}
365
366static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser,
367 struct radeon_vm *vm)
368{
369 struct radeon_bo_list *lobj;
370 struct radeon_bo *bo;
371 int r;
372
373 list_for_each_entry(lobj, &parser->validated, tv.head) {
374 bo = lobj->bo;
375 r = radeon_vm_bo_update_pte(parser->rdev, vm, bo, &bo->tbo.mem);
376 if (r) {
377 return r;
378 }
379 }
380 return 0;
381}
382
383static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
384 struct radeon_cs_parser *parser)
385{
386 struct radeon_cs_chunk *ib_chunk;
387 struct radeon_fpriv *fpriv = parser->filp->driver_priv;
388 struct radeon_vm *vm = &fpriv->vm;
389 int r;
390
391 if (parser->chunk_ib_idx == -1)
392 return 0;
393
394 if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
395 return 0;
396
397 ib_chunk = &parser->chunks[parser->chunk_ib_idx];
398 if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
399 DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
400 return -EINVAL;
401 }
402 r = radeon_ib_get(rdev, parser->ring, &parser->ib,
403 ib_chunk->length_dw * 4);
404 if (r) {
405 DRM_ERROR("Failed to get ib !\n");
406 return r;
407 }
408 parser->ib->length_dw = ib_chunk->length_dw;
409 /* Copy the packet into the IB */
410 if (DRM_COPY_FROM_USER(parser->ib->ptr, ib_chunk->user_ptr,
411 ib_chunk->length_dw * 4)) {
412 return -EFAULT;
413 }
414 r = radeon_ring_ib_parse(rdev, parser->ring, parser->ib);
415 if (r) {
416 return r;
417 }
418
419 mutex_lock(&vm->mutex);
420 r = radeon_vm_bind(rdev, vm);
421 if (r) {
422 goto out;
423 }
424 r = radeon_bo_vm_update_pte(parser, vm);
425 if (r) {
426 goto out;
427 }
428 r = radeon_cs_sync_rings(parser);
429 if (r) {
430 DRM_ERROR("Failed to synchronize rings !\n");
431 }
432 parser->ib->vm_id = vm->id;
433 /* ib pool is bind at 0 in virtual address space to gpu_addr is the
434 * offset inside the pool bo
435 */
436 parser->ib->gpu_addr = parser->ib->sa_bo.offset;
437 r = radeon_ib_schedule(rdev, parser->ib);
438out:
439 if (!r) {
440 if (vm->fence) {
441 radeon_fence_unref(&vm->fence);
442 }
443 vm->fence = radeon_fence_ref(parser->ib->fence);
444 }
445 mutex_unlock(&fpriv->vm.mutex);
446 return r;
447}
448
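At the end of the VM path the VM's fence pointer is swapped to the new IB's fence: drop the reference on the previous fence, take one on the new, so the VM always tracks the last work submitted in it. The refcounting pattern, reduced to userspace (toy fence type, not the driver's):

#include <stdio.h>
#include <stdlib.h>

struct fence { int refs; int id; };

static struct fence *fence_ref(struct fence *f) { f->refs++; return f; }

static void fence_unref(struct fence **f)
{
	if (*f && --(*f)->refs == 0) {
		printf("fence %d freed\n", (*f)->id);
		free(*f);
	}
	*f = NULL;
}

int main(void)
{
	struct fence *ib_fence = calloc(1, sizeof(*ib_fence));
	struct fence *vm_fence = calloc(1, sizeof(*vm_fence));

	ib_fence->refs = 1; ib_fence->id = 2;
	vm_fence->refs = 1; vm_fence->id = 1;

	/* on success the VM drops its old fence and keeps a reference to
	 * the new IB's fence, marking the last work done in this VM */
	fence_unref(&vm_fence);
	vm_fence = fence_ref(ib_fence);
	printf("vm now tracks fence %d (refs=%d)\n", vm_fence->id, vm_fence->refs);

	fence_unref(&vm_fence);
	fence_unref(&ib_fence);
	return 0;
}
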
227int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) 449int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
228{ 450{
229 struct radeon_device *rdev = dev->dev_private; 451 struct radeon_device *rdev = dev->dev_private;
230 struct radeon_cs_parser parser; 452 struct radeon_cs_parser parser;
231 struct radeon_cs_chunk *ib_chunk;
232 int r; 453 int r;
233 454
234 radeon_mutex_lock(&rdev->cs_mutex); 455 radeon_mutex_lock(&rdev->cs_mutex);
@@ -245,13 +466,6 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
245 radeon_mutex_unlock(&rdev->cs_mutex); 466 radeon_mutex_unlock(&rdev->cs_mutex);
246 return r; 467 return r;
247 } 468 }
248 r = radeon_ib_get(rdev, &parser.ib);
249 if (r) {
250 DRM_ERROR("Failed to get ib !\n");
251 radeon_cs_parser_fini(&parser, r);
252 radeon_mutex_unlock(&rdev->cs_mutex);
253 return r;
254 }
255 r = radeon_cs_parser_relocs(&parser); 469 r = radeon_cs_parser_relocs(&parser);
256 if (r) { 470 if (r) {
257 if (r != -ERESTARTSYS) 471 if (r != -ERESTARTSYS)
@@ -260,29 +474,15 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
260 radeon_mutex_unlock(&rdev->cs_mutex); 474 radeon_mutex_unlock(&rdev->cs_mutex);
261 return r; 475 return r;
262 } 476 }
263 /* Copy the packet into the IB, the parser will read from the 477 r = radeon_cs_ib_chunk(rdev, &parser);
264 * input memory (cached) and write to the IB (which can be
265 * uncached). */
266 ib_chunk = &parser.chunks[parser.chunk_ib_idx];
267 parser.ib->length_dw = ib_chunk->length_dw;
268 r = radeon_cs_parse(&parser);
269 if (r || parser.parser_error) {
270 DRM_ERROR("Invalid command stream !\n");
271 radeon_cs_parser_fini(&parser, r);
272 radeon_mutex_unlock(&rdev->cs_mutex);
273 return r;
274 }
275 r = radeon_cs_finish_pages(&parser);
276 if (r) { 478 if (r) {
277 DRM_ERROR("Invalid command stream !\n"); 479 goto out;
278 radeon_cs_parser_fini(&parser, r);
279 radeon_mutex_unlock(&rdev->cs_mutex);
280 return r;
281 } 480 }
282 r = radeon_ib_schedule(rdev, parser.ib); 481 r = radeon_cs_ib_vm_chunk(rdev, &parser);
283 if (r) { 482 if (r) {
284 DRM_ERROR("Failed to schedule IB !\n"); 483 goto out;
285 } 484 }
485out:
286 radeon_cs_parser_fini(&parser, r); 486 radeon_cs_parser_fini(&parser, r);
287 radeon_mutex_unlock(&rdev->cs_mutex); 487 radeon_mutex_unlock(&rdev->cs_mutex);
288 return r; 488 return r;
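
After this restructuring the ioctl simply calls both chunk handlers in sequence; each one checks RADEON_CS_USE_VM and returns early when the submission is not its case, so exactly one of them does real work. Schematically (hypothetical helper names; flag value shown for illustration):

#include <stdio.h>

#define CS_USE_VM 0x2   /* example flag value for illustration */

static int ib_chunk(unsigned flags)
{
	if (flags & CS_USE_VM)
		return 0;               /* not our case: succeed, do nothing */
	puts("legacy path: parse + validate IB");
	return 0;
}

static int ib_vm_chunk(unsigned flags)
{
	if (!(flags & CS_USE_VM))
		return 0;               /* not our case either */
	puts("VM path: copy IB, bind VM, update PTEs");
	return 0;
}

int main(void)
{
	unsigned cs_flags = CS_USE_VM;

	/* the ioctl calls both handlers; the flag guards pick one */
	if (ib_chunk(cs_flags) || ib_vm_chunk(cs_flags))
		return 1;
	return 0;
}
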
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index c4d00a17141..0afb13bd8dc 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -224,8 +224,11 @@ int radeon_wb_init(struct radeon_device *rdev)
224 if (radeon_no_wb == 1) 224 if (radeon_no_wb == 1)
225 rdev->wb.enabled = false; 225 rdev->wb.enabled = false;
226 else { 226 else {
227 /* often unreliable on AGP */
228 if (rdev->flags & RADEON_IS_AGP) { 227 if (rdev->flags & RADEON_IS_AGP) {
228 /* often unreliable on AGP */
229 rdev->wb.enabled = false;
230 } else if (rdev->family < CHIP_R300) {
231 /* often unreliable on pre-r300 */
229 rdev->wb.enabled = false; 232 rdev->wb.enabled = false;
230 } else { 233 } else {
231 rdev->wb.enabled = true; 234 rdev->wb.enabled = true;
@@ -718,17 +721,24 @@ int radeon_device_init(struct radeon_device *rdev,
718 * can recall function without having locking issues */ 721 * can recall function without having locking issues */
719 radeon_mutex_init(&rdev->cs_mutex); 722 radeon_mutex_init(&rdev->cs_mutex);
720 mutex_init(&rdev->ib_pool.mutex); 723 mutex_init(&rdev->ib_pool.mutex);
721 mutex_init(&rdev->cp.mutex); 724 for (i = 0; i < RADEON_NUM_RINGS; ++i)
725 mutex_init(&rdev->ring[i].mutex);
722 mutex_init(&rdev->dc_hw_i2c_mutex); 726 mutex_init(&rdev->dc_hw_i2c_mutex);
723 if (rdev->family >= CHIP_R600) 727 if (rdev->family >= CHIP_R600)
724 spin_lock_init(&rdev->ih.lock); 728 spin_lock_init(&rdev->ih.lock);
725 mutex_init(&rdev->gem.mutex); 729 mutex_init(&rdev->gem.mutex);
726 mutex_init(&rdev->pm.mutex); 730 mutex_init(&rdev->pm.mutex);
727 mutex_init(&rdev->vram_mutex); 731 mutex_init(&rdev->vram_mutex);
728 rwlock_init(&rdev->fence_drv.lock); 732 rwlock_init(&rdev->fence_lock);
733 rwlock_init(&rdev->semaphore_drv.lock);
729 INIT_LIST_HEAD(&rdev->gem.objects); 734 INIT_LIST_HEAD(&rdev->gem.objects);
730 init_waitqueue_head(&rdev->irq.vblank_queue); 735 init_waitqueue_head(&rdev->irq.vblank_queue);
731 init_waitqueue_head(&rdev->irq.idle_queue); 736 init_waitqueue_head(&rdev->irq.idle_queue);
737 INIT_LIST_HEAD(&rdev->semaphore_drv.bo);
738 /* initialize vm here */
739 rdev->vm_manager.use_bitmap = 1;
740 rdev->vm_manager.max_pfn = 1 << 20;
741 INIT_LIST_HEAD(&rdev->vm_manager.lru_vm);
732 742
733 /* Set asic functions */ 743 /* Set asic functions */
734 r = radeon_asic_init(rdev); 744 r = radeon_asic_init(rdev);
@@ -765,8 +775,14 @@ int radeon_device_init(struct radeon_device *rdev,
765 r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits)); 775 r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
766 if (r) { 776 if (r) {
767 rdev->need_dma32 = true; 777 rdev->need_dma32 = true;
778 dma_bits = 32;
768 printk(KERN_WARNING "radeon: No suitable DMA available.\n"); 779 printk(KERN_WARNING "radeon: No suitable DMA available.\n");
769 } 780 }
781 r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
782 if (r) {
783 pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
784 printk(KERN_WARNING "radeon: No coherent DMA available.\n");
785 }
770 786
771 /* Registers mapping */ 787 /* Registers mapping */
772 /* TODO: block userspace mapping of io register */ 788 /* TODO: block userspace mapping of io register */
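
The new hunk also sets a coherent (consistent) DMA mask to match the streaming one, falling back to 32 bits with a warning when the wider mask is rejected. The retry shape, modeled in plain C (set_coherent_mask stands in for pci_set_consistent_dma_mask):

#include <stdio.h>

/* stand-in for pci_set_consistent_dma_mask(): pretend only 32 bits work */
static int set_coherent_mask(unsigned bits)
{
	return bits > 32 ? -1 : 0;
}

int main(void)
{
	unsigned dma_bits = 40;

	if (set_coherent_mask(dma_bits)) {
		/* mirror the fallback: retry with a 32-bit mask and warn */
		set_coherent_mask(32);
		fprintf(stderr, "radeon: No coherent DMA available.\n");
	}
	return 0;
}
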
@@ -814,15 +830,20 @@ int radeon_device_init(struct radeon_device *rdev,
814 if (r) 830 if (r)
815 return r; 831 return r;
816 } 832 }
817 if (radeon_testing) { 833 if ((radeon_testing & 1)) {
818 radeon_test_moves(rdev); 834 radeon_test_moves(rdev);
819 } 835 }
836 if ((radeon_testing & 2)) {
837 radeon_test_syncing(rdev);
838 }
820 if (radeon_benchmarking) { 839 if (radeon_benchmarking) {
821 radeon_benchmark(rdev, radeon_benchmarking); 840 radeon_benchmark(rdev, radeon_benchmarking);
822 } 841 }
823 return 0; 842 return 0;
824} 843}
825 844
845static void radeon_debugfs_remove_files(struct radeon_device *rdev);
846
826void radeon_device_fini(struct radeon_device *rdev) 847void radeon_device_fini(struct radeon_device *rdev)
827{ 848{
828 DRM_INFO("radeon: finishing device.\n"); 849 DRM_INFO("radeon: finishing device.\n");
@@ -837,6 +858,7 @@ void radeon_device_fini(struct radeon_device *rdev)
837 rdev->rio_mem = NULL; 858 rdev->rio_mem = NULL;
838 iounmap(rdev->rmmio); 859 iounmap(rdev->rmmio);
839 rdev->rmmio = NULL; 860 rdev->rmmio = NULL;
861 radeon_debugfs_remove_files(rdev);
840} 862}
841 863
842 864
@@ -848,7 +870,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
848 struct radeon_device *rdev; 870 struct radeon_device *rdev;
849 struct drm_crtc *crtc; 871 struct drm_crtc *crtc;
850 struct drm_connector *connector; 872 struct drm_connector *connector;
851 int r; 873 int i, r;
852 874
853 if (dev == NULL || dev->dev_private == NULL) { 875 if (dev == NULL || dev->dev_private == NULL) {
854 return -ENODEV; 876 return -ENODEV;
@@ -887,7 +909,8 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
887 /* evict vram memory */ 909 /* evict vram memory */
888 radeon_bo_evict_vram(rdev); 910 radeon_bo_evict_vram(rdev);
889 /* wait for gpu to finish processing current batch */ 911 /* wait for gpu to finish processing current batch */
890 radeon_fence_wait_last(rdev); 912 for (i = 0; i < RADEON_NUM_RINGS; i++)
913 radeon_fence_wait_last(rdev, i);
891 914
892 radeon_save_bios_scratch_regs(rdev); 915 radeon_save_bios_scratch_regs(rdev);
893 916
@@ -986,36 +1009,29 @@ int radeon_gpu_reset(struct radeon_device *rdev)
986/* 1009/*
987 * Debugfs 1010 * Debugfs
988 */ 1011 */
989struct radeon_debugfs {
990 struct drm_info_list *files;
991 unsigned num_files;
992};
993static struct radeon_debugfs _radeon_debugfs[RADEON_DEBUGFS_MAX_COMPONENTS];
994static unsigned _radeon_debugfs_count = 0;
995
996int radeon_debugfs_add_files(struct radeon_device *rdev, 1012int radeon_debugfs_add_files(struct radeon_device *rdev,
997 struct drm_info_list *files, 1013 struct drm_info_list *files,
998 unsigned nfiles) 1014 unsigned nfiles)
999{ 1015{
1000 unsigned i; 1016 unsigned i;
1001 1017
1002 for (i = 0; i < _radeon_debugfs_count; i++) { 1018 for (i = 0; i < rdev->debugfs_count; i++) {
1003 if (_radeon_debugfs[i].files == files) { 1019 if (rdev->debugfs[i].files == files) {
1004 /* Already registered */ 1020 /* Already registered */
1005 return 0; 1021 return 0;
1006 } 1022 }
1007 } 1023 }
1008 1024
1009 i = _radeon_debugfs_count + 1; 1025 i = rdev->debugfs_count + 1;
1010 if (i > RADEON_DEBUGFS_MAX_COMPONENTS) { 1026 if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
1011 DRM_ERROR("Reached maximum number of debugfs components.\n"); 1027 DRM_ERROR("Reached maximum number of debugfs components.\n");
1012 DRM_ERROR("Report so we increase " 1028 DRM_ERROR("Report so we increase "
1013 "RADEON_DEBUGFS_MAX_COMPONENTS.\n"); 1029 "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
1014 return -EINVAL; 1030 return -EINVAL;
1015 } 1031 }
1016 _radeon_debugfs[_radeon_debugfs_count].files = files; 1032 rdev->debugfs[rdev->debugfs_count].files = files;
1017 _radeon_debugfs[_radeon_debugfs_count].num_files = nfiles; 1033 rdev->debugfs[rdev->debugfs_count].num_files = nfiles;
1018 _radeon_debugfs_count = i; 1034 rdev->debugfs_count = i;
1019#if defined(CONFIG_DEBUG_FS) 1035#if defined(CONFIG_DEBUG_FS)
1020 drm_debugfs_create_files(files, nfiles, 1036 drm_debugfs_create_files(files, nfiles,
1021 rdev->ddev->control->debugfs_root, 1037 rdev->ddev->control->debugfs_root,
@@ -1027,6 +1043,22 @@ int radeon_debugfs_add_files(struct radeon_device *rdev,
1027 return 0; 1043 return 0;
1028} 1044}
1029 1045
1046static void radeon_debugfs_remove_files(struct radeon_device *rdev)
1047{
1048#if defined(CONFIG_DEBUG_FS)
1049 unsigned i;
1050
1051 for (i = 0; i < rdev->debugfs_count; i++) {
1052 drm_debugfs_remove_files(rdev->debugfs[i].files,
1053 rdev->debugfs[i].num_files,
1054 rdev->ddev->control);
1055 drm_debugfs_remove_files(rdev->debugfs[i].files,
1056 rdev->debugfs[i].num_files,
1057 rdev->ddev->primary);
1058 }
1059#endif
1060}
1061
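Moving the debugfs table from file-scope statics into struct radeon_device means two radeon cards no longer share (and clobber) one registration array, and teardown can walk the device's own list, which is what the new radeon_debugfs_remove_files does. The bookkeeping, reduced to a standalone sketch:

#include <stdio.h>

#define MAX_COMPONENTS 32

struct debugfs_entry { const char *name; };

struct device {
	struct debugfs_entry debugfs[MAX_COMPONENTS];
	unsigned debugfs_count;     /* was a file-scope global before */
};

static int add_files(struct device *dev, const char *name)
{
	unsigned i;

	for (i = 0; i < dev->debugfs_count; i++)
		if (dev->debugfs[i].name == name)
			return 0;   /* already registered (pointer identity,
				     * like the kernel comparing files arrays) */
	if (dev->debugfs_count + 1 > MAX_COMPONENTS)
		return -1;
	dev->debugfs[dev->debugfs_count++].name = name;
	return 0;
}

static void remove_files(struct device *dev)
{
	unsigned i;

	for (i = 0; i < dev->debugfs_count; i++)
		printf("unregistering %s\n", dev->debugfs[i].name);
	dev->debugfs_count = 0;
}

int main(void)
{
	struct device a = { {{0}}, 0 }, b = { {{0}}, 0 };

	/* each device tracks its own files now */
	add_files(&a, "radeon_fence_info");
	add_files(&b, "radeon_fence_info");
	remove_files(&a);           /* leaves b's registration intact */
	remove_files(&b);
	return 0;
}
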
1030#if defined(CONFIG_DEBUG_FS) 1062#if defined(CONFIG_DEBUG_FS)
1031int radeon_debugfs_init(struct drm_minor *minor) 1063int radeon_debugfs_init(struct drm_minor *minor)
1032{ 1064{
@@ -1035,11 +1067,5 @@ int radeon_debugfs_init(struct drm_minor *minor)
1035 1067
1036void radeon_debugfs_cleanup(struct drm_minor *minor) 1068void radeon_debugfs_cleanup(struct drm_minor *minor)
1037{ 1069{
1038 unsigned i;
1039
1040 for (i = 0; i < _radeon_debugfs_count; i++) {
1041 drm_debugfs_remove_files(_radeon_debugfs[i].files,
1042 _radeon_debugfs[i].num_files, minor);
1043 }
1044} 1070}
1045#endif 1071#endif
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index a22d6e6a49a..d3ffc18774a 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -406,7 +406,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
406 if (!ASIC_IS_AVIVO(rdev)) { 406 if (!ASIC_IS_AVIVO(rdev)) {
407 /* crtc offset is from display base addr not FB location */ 407 /* crtc offset is from display base addr not FB location */
408 base -= radeon_crtc->legacy_display_base_addr; 408 base -= radeon_crtc->legacy_display_base_addr;
409 pitch_pixels = fb->pitch / (fb->bits_per_pixel / 8); 409 pitch_pixels = fb->pitches[0] / (fb->bits_per_pixel / 8);
410 410
411 if (tiling_flags & RADEON_TILING_MACRO) { 411 if (tiling_flags & RADEON_TILING_MACRO) {
412 if (ASIC_IS_R300(rdev)) { 412 if (ASIC_IS_R300(rdev)) {
@@ -1081,7 +1081,7 @@ static const struct drm_framebuffer_funcs radeon_fb_funcs = {
1081void 1081void
1082radeon_framebuffer_init(struct drm_device *dev, 1082radeon_framebuffer_init(struct drm_device *dev,
1083 struct radeon_framebuffer *rfb, 1083 struct radeon_framebuffer *rfb,
1084 struct drm_mode_fb_cmd *mode_cmd, 1084 struct drm_mode_fb_cmd2 *mode_cmd,
1085 struct drm_gem_object *obj) 1085 struct drm_gem_object *obj)
1086{ 1086{
1087 rfb->obj = obj; 1087 rfb->obj = obj;
@@ -1092,15 +1092,15 @@ radeon_framebuffer_init(struct drm_device *dev,
1092static struct drm_framebuffer * 1092static struct drm_framebuffer *
1093radeon_user_framebuffer_create(struct drm_device *dev, 1093radeon_user_framebuffer_create(struct drm_device *dev,
1094 struct drm_file *file_priv, 1094 struct drm_file *file_priv,
1095 struct drm_mode_fb_cmd *mode_cmd) 1095 struct drm_mode_fb_cmd2 *mode_cmd)
1096{ 1096{
1097 struct drm_gem_object *obj; 1097 struct drm_gem_object *obj;
1098 struct radeon_framebuffer *radeon_fb; 1098 struct radeon_framebuffer *radeon_fb;
1099 1099
1100 obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle); 1100 obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
1101 if (obj == NULL) { 1101 if (obj == NULL) {
1102 dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, " 1102 dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, "
1103 "can't create framebuffer\n", mode_cmd->handle); 1103 "can't create framebuffer\n", mode_cmd->handles[0]);
1104 return ERR_PTR(-ENOENT); 1104 return ERR_PTR(-ENOENT);
1105 } 1105 }
1106 1106
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 71499fc3daf..31da622eef6 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -54,9 +54,10 @@
54 * 2.10.0 - fusion 2D tiling 54 * 2.10.0 - fusion 2D tiling
55 * 2.11.0 - backend map, initial compute support for the CS checker 55 * 2.11.0 - backend map, initial compute support for the CS checker
56 * 2.12.0 - RADEON_CS_KEEP_TILING_FLAGS 56 * 2.12.0 - RADEON_CS_KEEP_TILING_FLAGS
57 * 2.13.0 - virtual memory support
57 */ 58 */
58#define KMS_DRIVER_MAJOR 2 59#define KMS_DRIVER_MAJOR 2
59#define KMS_DRIVER_MINOR 12 60#define KMS_DRIVER_MINOR 13
60#define KMS_DRIVER_PATCHLEVEL 0 61#define KMS_DRIVER_PATCHLEVEL 0
61int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); 62int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
62int radeon_driver_unload_kms(struct drm_device *dev); 63int radeon_driver_unload_kms(struct drm_device *dev);
@@ -84,6 +85,10 @@ int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
84 struct drm_file *file_priv); 85 struct drm_file *file_priv);
85int radeon_gem_object_init(struct drm_gem_object *obj); 86int radeon_gem_object_init(struct drm_gem_object *obj);
86void radeon_gem_object_free(struct drm_gem_object *obj); 87void radeon_gem_object_free(struct drm_gem_object *obj);
88int radeon_gem_object_open(struct drm_gem_object *obj,
89 struct drm_file *file_priv);
90void radeon_gem_object_close(struct drm_gem_object *obj,
91 struct drm_file *file_priv);
87extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, 92extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
88 int *vpos, int *hpos); 93 int *vpos, int *hpos);
89extern struct drm_ioctl_desc radeon_ioctls_kms[]; 94extern struct drm_ioctl_desc radeon_ioctls_kms[];
@@ -206,6 +211,21 @@ static struct pci_device_id pciidlist[] = {
206MODULE_DEVICE_TABLE(pci, pciidlist); 211MODULE_DEVICE_TABLE(pci, pciidlist);
207#endif 212#endif
208 213
214static const struct file_operations radeon_driver_old_fops = {
215 .owner = THIS_MODULE,
216 .open = drm_open,
217 .release = drm_release,
218 .unlocked_ioctl = drm_ioctl,
219 .mmap = drm_mmap,
220 .poll = drm_poll,
221 .fasync = drm_fasync,
222 .read = drm_read,
223#ifdef CONFIG_COMPAT
224 .compat_ioctl = radeon_compat_ioctl,
225#endif
226 .llseek = noop_llseek,
227};
228
209static struct drm_driver driver_old = { 229static struct drm_driver driver_old = {
210 .driver_features = 230 .driver_features =
211 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | 231 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
@@ -232,21 +252,7 @@ static struct drm_driver driver_old = {
232 .reclaim_buffers = drm_core_reclaim_buffers, 252 .reclaim_buffers = drm_core_reclaim_buffers,
233 .ioctls = radeon_ioctls, 253 .ioctls = radeon_ioctls,
234 .dma_ioctl = radeon_cp_buffers, 254 .dma_ioctl = radeon_cp_buffers,
235 .fops = { 255 .fops = &radeon_driver_old_fops,
236 .owner = THIS_MODULE,
237 .open = drm_open,
238 .release = drm_release,
239 .unlocked_ioctl = drm_ioctl,
240 .mmap = drm_mmap,
241 .poll = drm_poll,
242 .fasync = drm_fasync,
243 .read = drm_read,
244#ifdef CONFIG_COMPAT
245 .compat_ioctl = radeon_compat_ioctl,
246#endif
247 .llseek = noop_llseek,
248 },
249
250 .name = DRIVER_NAME, 256 .name = DRIVER_NAME,
251 .desc = DRIVER_DESC, 257 .desc = DRIVER_DESC,
252 .date = DRIVER_DATE, 258 .date = DRIVER_DATE,
@@ -304,6 +310,20 @@ radeon_pci_resume(struct pci_dev *pdev)
304 return radeon_resume_kms(dev); 310 return radeon_resume_kms(dev);
305} 311}
306 312
313static const struct file_operations radeon_driver_kms_fops = {
314 .owner = THIS_MODULE,
315 .open = drm_open,
316 .release = drm_release,
317 .unlocked_ioctl = drm_ioctl,
318 .mmap = radeon_mmap,
319 .poll = drm_poll,
320 .fasync = drm_fasync,
321 .read = drm_read,
322#ifdef CONFIG_COMPAT
323 .compat_ioctl = radeon_kms_compat_ioctl,
324#endif
325};
326
307static struct drm_driver kms_driver = { 327static struct drm_driver kms_driver = {
308 .driver_features = 328 .driver_features =
309 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | 329 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
@@ -335,24 +355,13 @@ static struct drm_driver kms_driver = {
335 .ioctls = radeon_ioctls_kms, 355 .ioctls = radeon_ioctls_kms,
336 .gem_init_object = radeon_gem_object_init, 356 .gem_init_object = radeon_gem_object_init,
337 .gem_free_object = radeon_gem_object_free, 357 .gem_free_object = radeon_gem_object_free,
358 .gem_open_object = radeon_gem_object_open,
359 .gem_close_object = radeon_gem_object_close,
338 .dma_ioctl = radeon_dma_ioctl_kms, 360 .dma_ioctl = radeon_dma_ioctl_kms,
339 .dumb_create = radeon_mode_dumb_create, 361 .dumb_create = radeon_mode_dumb_create,
340 .dumb_map_offset = radeon_mode_dumb_mmap, 362 .dumb_map_offset = radeon_mode_dumb_mmap,
341 .dumb_destroy = radeon_mode_dumb_destroy, 363 .dumb_destroy = radeon_mode_dumb_destroy,
342 .fops = { 364 .fops = &radeon_driver_kms_fops,
343 .owner = THIS_MODULE,
344 .open = drm_open,
345 .release = drm_release,
346 .unlocked_ioctl = drm_ioctl,
347 .mmap = radeon_mmap,
348 .poll = drm_poll,
349 .fasync = drm_fasync,
350 .read = drm_read,
351#ifdef CONFIG_COMPAT
352 .compat_ioctl = radeon_kms_compat_ioctl,
353#endif
354 },
355
356 .name = DRIVER_NAME, 365 .name = DRIVER_NAME,
357 .desc = DRIVER_DESC, 366 .desc = DRIVER_DESC,
358 .date = DRIVER_DATE, 367 .date = DRIVER_DATE,
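
Both driver structures get the same treatment: the file_operations instance embedded in struct drm_driver is hoisted into a static const table referenced by pointer, so it can live in read-only data and be shared. The shape of the change (toy types, not the DRM structs):

#include <stdio.h>

struct file_operations { void (*open)(void); };

static void my_open(void) { puts("open"); }

/* one shared, read-only ops table instead of a per-driver embedded copy */
static const struct file_operations driver_fops = {
	.open = my_open,
};

struct drm_driver {
	const struct file_operations *fops;  /* was: struct file_operations fops; */
};

int main(void)
{
	struct drm_driver drv = { .fops = &driver_fops };

	drv.fops->open();
	return 0;
}
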
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 0b7b486c97e..cf2bf35b56b 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -103,7 +103,7 @@ static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
103} 103}
104 104
105static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev, 105static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
106 struct drm_mode_fb_cmd *mode_cmd, 106 struct drm_mode_fb_cmd2 *mode_cmd,
107 struct drm_gem_object **gobj_p) 107 struct drm_gem_object **gobj_p)
108{ 108{
 	struct radeon_device *rdev = rfbdev->rdev;
@@ -114,13 +114,17 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
 	int ret;
 	int aligned_size, size;
 	int height = mode_cmd->height;
+	u32 bpp, depth;
+
+	drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
 
 	/* need to align pitch with crtc limits */
-	mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8);
+	mode_cmd->pitches[0] = radeon_align_pitch(rdev, mode_cmd->width, bpp,
+						  fb_tiled) * ((bpp + 1) / 8);
 
 	if (rdev->family >= CHIP_R600)
 		height = ALIGN(mode_cmd->height, 8);
-	size = mode_cmd->pitch * height;
+	size = mode_cmd->pitches[0] * height;
 	aligned_size = ALIGN(size, PAGE_SIZE);
 	ret = radeon_gem_object_create(rdev, aligned_size, 0,
 				       RADEON_GEM_DOMAIN_VRAM,
@@ -137,7 +141,7 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
 		tiling_flags = RADEON_TILING_MACRO;
 
 #ifdef __BIG_ENDIAN
-	switch (mode_cmd->bpp) {
+	switch (bpp) {
 	case 32:
 		tiling_flags |= RADEON_TILING_SWAP_32BIT;
 		break;
@@ -151,7 +155,7 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
 	if (tiling_flags) {
 		ret = radeon_bo_set_tiling_flags(rbo,
 						 tiling_flags | RADEON_TILING_SURFACE,
-						 mode_cmd->pitch);
+						 mode_cmd->pitches[0]);
 		if (ret)
 			dev_err(rdev->dev, "FB failed to set tiling flags\n");
 	}
@@ -187,7 +191,7 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
 	struct radeon_device *rdev = rfbdev->rdev;
 	struct fb_info *info;
 	struct drm_framebuffer *fb = NULL;
-	struct drm_mode_fb_cmd mode_cmd;
+	struct drm_mode_fb_cmd2 mode_cmd;
 	struct drm_gem_object *gobj = NULL;
 	struct radeon_bo *rbo = NULL;
 	struct device *device = &rdev->pdev->dev;
@@ -201,8 +205,8 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
 	if ((sizes->surface_bpp == 24) && ASIC_IS_AVIVO(rdev))
 		sizes->surface_bpp = 32;
 
-	mode_cmd.bpp = sizes->surface_bpp;
-	mode_cmd.depth = sizes->surface_depth;
+	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+							  sizes->surface_depth);
 
 	ret = radeonfb_create_pinned_object(rfbdev, &mode_cmd, &gobj);
 	rbo = gem_to_radeon_bo(gobj);
@@ -228,7 +232,7 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
 
 	strcpy(info->fix.id, "radeondrmfb");
 
-	drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
+	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
 
 	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
 	info->fbops = &radeonfb_ops;
@@ -271,7 +275,7 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
 	DRM_INFO("vram aperture at 0x%lX\n", (unsigned long)rdev->mc.aper_base);
 	DRM_INFO("size %lu\n", (unsigned long)radeon_bo_size(rbo));
 	DRM_INFO("fb depth is %d\n", fb->depth);
-	DRM_INFO(" pitch is %d\n", fb->pitch);
+	DRM_INFO(" pitch is %d\n", fb->pitches[0]);
 
 	vga_switcheroo_client_fb_set(rdev->ddev->pdev, info);
 	return 0;
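
A note on the pitch math above: radeon_align_pitch() rounds the scanline width in pixels up to the CRTC's alignment requirement, and ((bpp + 1) / 8) converts bits per pixel into whole bytes, rounding the odd 15 bpp mode up to two bytes. A stand-alone sketch of that arithmetic; align_w is a hypothetical stand-in for what the kernel helper returns, not a real function:

    #include <stdio.h>

    /* Mirrors the pitch computation in the hunk above: aligned width in
     * pixels times the per-pixel byte count, where (bpp + 1) / 8 rounds
     * 15 bpp up to 2 bytes while leaving 8/16/24/32 bpp untouched. */
    static unsigned pitch_bytes(unsigned align_w, unsigned bpp)
    {
        return align_w * ((bpp + 1) / 8);
    }

    int main(void)
    {
        printf("%u\n", pitch_bytes(1024, 32));  /* 4096 bytes per scanline */
        printf("%u\n", pitch_bytes(1024, 15));  /* 2048: 15 bpp still needs 2 bytes */
        return 0;
    }
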
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 76ec0e9ed8a..64ea3dd9e6f 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -40,32 +40,24 @@
 #include "radeon.h"
 #include "radeon_trace.h"
 
-static void radeon_fence_write(struct radeon_device *rdev, u32 seq)
+static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
 {
 	if (rdev->wb.enabled) {
-		u32 scratch_index;
-		if (rdev->wb.use_event)
-			scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
-		else
-			scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
-		rdev->wb.wb[scratch_index/4] = cpu_to_le32(seq);
-	} else
-		WREG32(rdev->fence_drv.scratch_reg, seq);
+		*rdev->fence_drv[ring].cpu_addr = cpu_to_le32(seq);
+	} else {
+		WREG32(rdev->fence_drv[ring].scratch_reg, seq);
+	}
 }
 
-static u32 radeon_fence_read(struct radeon_device *rdev)
+static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
 {
-	u32 seq;
+	u32 seq = 0;
 
 	if (rdev->wb.enabled) {
-		u32 scratch_index;
-		if (rdev->wb.use_event)
-			scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
-		else
-			scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
-		seq = le32_to_cpu(rdev->wb.wb[scratch_index/4]);
-	} else
-		seq = RREG32(rdev->fence_drv.scratch_reg);
+		seq = le32_to_cpu(*rdev->fence_drv[ring].cpu_addr);
+	} else {
+		seq = RREG32(rdev->fence_drv[ring].scratch_reg);
+	}
 	return seq;
 }
 
@@ -73,28 +65,28 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
 {
 	unsigned long irq_flags;
 
-	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
-	if (fence->emited) {
-		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+	write_lock_irqsave(&rdev->fence_lock, irq_flags);
+	if (fence->emitted) {
+		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
 		return 0;
 	}
-	fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
-	if (!rdev->cp.ready)
+	fence->seq = atomic_add_return(1, &rdev->fence_drv[fence->ring].seq);
+	if (!rdev->ring[fence->ring].ready)
 		/* FIXME: cp is not running assume everything is done right
 		 * away
 		 */
-		radeon_fence_write(rdev, fence->seq);
+		radeon_fence_write(rdev, fence->seq, fence->ring);
 	else
-		radeon_fence_ring_emit(rdev, fence);
+		radeon_fence_ring_emit(rdev, fence->ring, fence);
 
 	trace_radeon_fence_emit(rdev->ddev, fence->seq);
-	fence->emited = true;
-	list_move_tail(&fence->list, &rdev->fence_drv.emited);
-	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+	fence->emitted = true;
+	list_move_tail(&fence->list, &rdev->fence_drv[fence->ring].emitted);
+	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
 	return 0;
 }
 
-static bool radeon_fence_poll_locked(struct radeon_device *rdev)
+static bool radeon_fence_poll_locked(struct radeon_device *rdev, int ring)
 {
 	struct radeon_fence *fence;
 	struct list_head *i, *n;
@@ -102,34 +94,34 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
 	bool wake = false;
 	unsigned long cjiffies;
 
-	seq = radeon_fence_read(rdev);
-	if (seq != rdev->fence_drv.last_seq) {
-		rdev->fence_drv.last_seq = seq;
-		rdev->fence_drv.last_jiffies = jiffies;
-		rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
+	seq = radeon_fence_read(rdev, ring);
+	if (seq != rdev->fence_drv[ring].last_seq) {
+		rdev->fence_drv[ring].last_seq = seq;
+		rdev->fence_drv[ring].last_jiffies = jiffies;
+		rdev->fence_drv[ring].last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
 	} else {
 		cjiffies = jiffies;
-		if (time_after(cjiffies, rdev->fence_drv.last_jiffies)) {
-			cjiffies -= rdev->fence_drv.last_jiffies;
-			if (time_after(rdev->fence_drv.last_timeout, cjiffies)) {
+		if (time_after(cjiffies, rdev->fence_drv[ring].last_jiffies)) {
+			cjiffies -= rdev->fence_drv[ring].last_jiffies;
+			if (time_after(rdev->fence_drv[ring].last_timeout, cjiffies)) {
 				/* update the timeout */
-				rdev->fence_drv.last_timeout -= cjiffies;
+				rdev->fence_drv[ring].last_timeout -= cjiffies;
 			} else {
 				/* the 500ms timeout is elapsed we should test
 				 * for GPU lockup
 				 */
-				rdev->fence_drv.last_timeout = 1;
+				rdev->fence_drv[ring].last_timeout = 1;
 			}
 		} else {
 			/* wrap around update last jiffies, we will just wait
 			 * a little longer
 			 */
-			rdev->fence_drv.last_jiffies = cjiffies;
+			rdev->fence_drv[ring].last_jiffies = cjiffies;
 		}
 		return false;
 	}
 	n = NULL;
-	list_for_each(i, &rdev->fence_drv.emited) {
+	list_for_each(i, &rdev->fence_drv[ring].emitted) {
 		fence = list_entry(i, struct radeon_fence, list);
 		if (fence->seq == seq) {
 			n = i;
@@ -141,11 +133,11 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
 		i = n;
 		do {
 			n = i->prev;
-			list_move_tail(i, &rdev->fence_drv.signaled);
+			list_move_tail(i, &rdev->fence_drv[ring].signaled);
 			fence = list_entry(i, struct radeon_fence, list);
 			fence->signaled = true;
 			i = n;
-		} while (i != &rdev->fence_drv.emited);
+		} while (i != &rdev->fence_drv[ring].emitted);
 		wake = true;
 	}
 	return wake;
@@ -157,14 +149,18 @@ static void radeon_fence_destroy(struct kref *kref)
 	struct radeon_fence *fence;
 
 	fence = container_of(kref, struct radeon_fence, kref);
-	write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
+	write_lock_irqsave(&fence->rdev->fence_lock, irq_flags);
 	list_del(&fence->list);
-	fence->emited = false;
-	write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
+	fence->emitted = false;
+	write_unlock_irqrestore(&fence->rdev->fence_lock, irq_flags);
+	if (fence->semaphore)
+		radeon_semaphore_free(fence->rdev, fence->semaphore);
 	kfree(fence);
 }
 
-int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence)
+int radeon_fence_create(struct radeon_device *rdev,
+			struct radeon_fence **fence,
+			int ring)
 {
 	unsigned long irq_flags;
 
@@ -174,18 +170,19 @@ int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence)
 	}
 	kref_init(&((*fence)->kref));
 	(*fence)->rdev = rdev;
-	(*fence)->emited = false;
+	(*fence)->emitted = false;
 	(*fence)->signaled = false;
 	(*fence)->seq = 0;
+	(*fence)->ring = ring;
+	(*fence)->semaphore = NULL;
 	INIT_LIST_HEAD(&(*fence)->list);
 
-	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
-	list_add_tail(&(*fence)->list, &rdev->fence_drv.created);
-	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+	write_lock_irqsave(&rdev->fence_lock, irq_flags);
+	list_add_tail(&(*fence)->list, &rdev->fence_drv[ring].created);
+	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
 	return 0;
 }
 
-
 bool radeon_fence_signaled(struct radeon_fence *fence)
 {
 	unsigned long irq_flags;
@@ -197,21 +194,21 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
 	if (fence->rdev->gpu_lockup)
 		return true;
 
-	write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
+	write_lock_irqsave(&fence->rdev->fence_lock, irq_flags);
 	signaled = fence->signaled;
 	/* if we are shutting down report all fences as signaled */
 	if (fence->rdev->shutdown) {
 		signaled = true;
 	}
-	if (!fence->emited) {
-		WARN(1, "Querying an unemited fence : %p !\n", fence);
+	if (!fence->emitted) {
+		WARN(1, "Querying an unemitted fence : %p !\n", fence);
 		signaled = true;
 	}
 	if (!signaled) {
-		radeon_fence_poll_locked(fence->rdev);
+		radeon_fence_poll_locked(fence->rdev, fence->ring);
 		signaled = fence->signaled;
 	}
-	write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
+	write_unlock_irqrestore(&fence->rdev->fence_lock, irq_flags);
 	return signaled;
 }
 
@@ -230,24 +227,24 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
 	if (radeon_fence_signaled(fence)) {
 		return 0;
 	}
-	timeout = rdev->fence_drv.last_timeout;
+	timeout = rdev->fence_drv[fence->ring].last_timeout;
 retry:
 	/* save current sequence used to check for GPU lockup */
-	seq = rdev->fence_drv.last_seq;
+	seq = rdev->fence_drv[fence->ring].last_seq;
 	trace_radeon_fence_wait_begin(rdev->ddev, seq);
 	if (intr) {
-		radeon_irq_kms_sw_irq_get(rdev);
-		r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
+		radeon_irq_kms_sw_irq_get(rdev, fence->ring);
+		r = wait_event_interruptible_timeout(rdev->fence_drv[fence->ring].queue,
 			radeon_fence_signaled(fence), timeout);
-		radeon_irq_kms_sw_irq_put(rdev);
+		radeon_irq_kms_sw_irq_put(rdev, fence->ring);
 		if (unlikely(r < 0)) {
 			return r;
 		}
 	} else {
-		radeon_irq_kms_sw_irq_get(rdev);
-		r = wait_event_timeout(rdev->fence_drv.queue,
+		radeon_irq_kms_sw_irq_get(rdev, fence->ring);
+		r = wait_event_timeout(rdev->fence_drv[fence->ring].queue,
 			radeon_fence_signaled(fence), timeout);
-		radeon_irq_kms_sw_irq_put(rdev);
+		radeon_irq_kms_sw_irq_put(rdev, fence->ring);
 	}
 	trace_radeon_fence_wait_end(rdev->ddev, seq);
 	if (unlikely(!radeon_fence_signaled(fence))) {
@@ -258,10 +255,11 @@ retry:
 		timeout = r;
 		goto retry;
 	}
-	/* don't protect read access to rdev->fence_drv.last_seq
+	/* don't protect read access to rdev->fence_drv[t].last_seq
 	 * if we are experiencing a lockup the value doesn't change
 	 */
-	if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) {
+	if (seq == rdev->fence_drv[fence->ring].last_seq &&
+	    radeon_gpu_is_lockup(rdev, &rdev->ring[fence->ring])) {
 		/* good news we believe it's a lockup */
 		printk(KERN_WARNING "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
 		       fence->seq, seq);
@@ -272,20 +270,20 @@ retry:
 		r = radeon_gpu_reset(rdev);
 		if (r)
 			return r;
-		radeon_fence_write(rdev, fence->seq);
+		radeon_fence_write(rdev, fence->seq, fence->ring);
 		rdev->gpu_lockup = false;
 	}
 	timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
-	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
-	rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
-	rdev->fence_drv.last_jiffies = jiffies;
-	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+	write_lock_irqsave(&rdev->fence_lock, irq_flags);
+	rdev->fence_drv[fence->ring].last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
+	rdev->fence_drv[fence->ring].last_jiffies = jiffies;
+	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
 	goto retry;
 	}
 	return 0;
 }
 
-int radeon_fence_wait_next(struct radeon_device *rdev)
+int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
 {
 	unsigned long irq_flags;
 	struct radeon_fence *fence;
@@ -294,21 +292,21 @@ int radeon_fence_wait_next(struct radeon_device *rdev)
 	if (rdev->gpu_lockup) {
 		return 0;
 	}
-	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
-	if (list_empty(&rdev->fence_drv.emited)) {
-		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+	write_lock_irqsave(&rdev->fence_lock, irq_flags);
+	if (list_empty(&rdev->fence_drv[ring].emitted)) {
+		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
 		return 0;
 	}
-	fence = list_entry(rdev->fence_drv.emited.next,
+	fence = list_entry(rdev->fence_drv[ring].emitted.next,
 			   struct radeon_fence, list);
 	radeon_fence_ref(fence);
-	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
 	r = radeon_fence_wait(fence, false);
 	radeon_fence_unref(&fence);
 	return r;
 }
 
-int radeon_fence_wait_last(struct radeon_device *rdev)
+int radeon_fence_wait_last(struct radeon_device *rdev, int ring)
 {
 	unsigned long irq_flags;
 	struct radeon_fence *fence;
@@ -317,15 +315,15 @@ int radeon_fence_wait_last(struct radeon_device *rdev)
 	if (rdev->gpu_lockup) {
 		return 0;
 	}
-	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
-	if (list_empty(&rdev->fence_drv.emited)) {
-		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+	write_lock_irqsave(&rdev->fence_lock, irq_flags);
+	if (list_empty(&rdev->fence_drv[ring].emitted)) {
+		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
 		return 0;
 	}
-	fence = list_entry(rdev->fence_drv.emited.prev,
+	fence = list_entry(rdev->fence_drv[ring].emitted.prev,
 			   struct radeon_fence, list);
 	radeon_fence_ref(fence);
-	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
 	r = radeon_fence_wait(fence, false);
 	radeon_fence_unref(&fence);
 	return r;
@@ -347,39 +345,97 @@ void radeon_fence_unref(struct radeon_fence **fence)
 	}
 }
 
-void radeon_fence_process(struct radeon_device *rdev)
+void radeon_fence_process(struct radeon_device *rdev, int ring)
 {
 	unsigned long irq_flags;
 	bool wake;
 
-	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
-	wake = radeon_fence_poll_locked(rdev);
-	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+	write_lock_irqsave(&rdev->fence_lock, irq_flags);
+	wake = radeon_fence_poll_locked(rdev, ring);
+	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
 	if (wake) {
-		wake_up_all(&rdev->fence_drv.queue);
+		wake_up_all(&rdev->fence_drv[ring].queue);
 	}
 }
 
-int radeon_fence_driver_init(struct radeon_device *rdev)
+int radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
+{
+	unsigned long irq_flags;
+	int not_processed = 0;
+
+	read_lock_irqsave(&rdev->fence_lock, irq_flags);
+	if (!rdev->fence_drv[ring].initialized) {
+		read_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+		return 0;
+	}
+
+	if (!list_empty(&rdev->fence_drv[ring].emitted)) {
+		struct list_head *ptr;
+		list_for_each(ptr, &rdev->fence_drv[ring].emitted) {
+			/* count up to 3, that's enough info */
+			if (++not_processed >= 3)
+				break;
+		}
+	}
+	read_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+	return not_processed;
+}
+
+int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
 {
 	unsigned long irq_flags;
+	uint64_t index;
 	int r;
 
-	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
-	r = radeon_scratch_get(rdev, &rdev->fence_drv.scratch_reg);
-	if (r) {
-		dev_err(rdev->dev, "fence failed to get scratch register\n");
-		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
-		return r;
+	write_lock_irqsave(&rdev->fence_lock, irq_flags);
+	radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
+	if (rdev->wb.use_event) {
+		rdev->fence_drv[ring].scratch_reg = 0;
+		index = R600_WB_EVENT_OFFSET + ring * 4;
+	} else {
+		r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
+		if (r) {
+			dev_err(rdev->dev, "fence failed to get scratch register\n");
+			write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+			return r;
+		}
+		index = RADEON_WB_SCRATCH_OFFSET +
+			rdev->fence_drv[ring].scratch_reg -
+			rdev->scratch.reg_base;
 	}
-	radeon_fence_write(rdev, 0);
-	atomic_set(&rdev->fence_drv.seq, 0);
-	INIT_LIST_HEAD(&rdev->fence_drv.created);
-	INIT_LIST_HEAD(&rdev->fence_drv.emited);
-	INIT_LIST_HEAD(&rdev->fence_drv.signaled);
-	init_waitqueue_head(&rdev->fence_drv.queue);
-	rdev->fence_drv.initialized = true;
-	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+	rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
+	rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
+	radeon_fence_write(rdev, atomic_read(&rdev->fence_drv[ring].seq), ring);
+	rdev->fence_drv[ring].initialized = true;
+	DRM_INFO("fence driver on ring %d uses gpu addr 0x%08Lx and cpu addr 0x%p\n",
+		 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
+	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+	return 0;
+}
+
+static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
+{
+	rdev->fence_drv[ring].scratch_reg = -1;
+	rdev->fence_drv[ring].cpu_addr = NULL;
+	rdev->fence_drv[ring].gpu_addr = 0;
+	atomic_set(&rdev->fence_drv[ring].seq, 0);
+	INIT_LIST_HEAD(&rdev->fence_drv[ring].created);
+	INIT_LIST_HEAD(&rdev->fence_drv[ring].emitted);
+	INIT_LIST_HEAD(&rdev->fence_drv[ring].signaled);
+	init_waitqueue_head(&rdev->fence_drv[ring].queue);
+	rdev->fence_drv[ring].initialized = false;
+}
+
+int radeon_fence_driver_init(struct radeon_device *rdev)
+{
+	unsigned long irq_flags;
+	int ring;
+
+	write_lock_irqsave(&rdev->fence_lock, irq_flags);
+	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
+		radeon_fence_driver_init_ring(rdev, ring);
	}
+	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
 	if (radeon_debugfs_fence_init(rdev)) {
 		dev_err(rdev->dev, "fence debugfs file creation failed\n");
 	}
@@ -389,14 +445,18 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
 void radeon_fence_driver_fini(struct radeon_device *rdev)
 {
 	unsigned long irq_flags;
-
-	if (!rdev->fence_drv.initialized)
-		return;
-	wake_up_all(&rdev->fence_drv.queue);
-	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
-	radeon_scratch_free(rdev, rdev->fence_drv.scratch_reg);
-	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
-	rdev->fence_drv.initialized = false;
+	int ring;
+
+	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
+		if (!rdev->fence_drv[ring].initialized)
+			continue;
+		radeon_fence_wait_last(rdev, ring);
+		wake_up_all(&rdev->fence_drv[ring].queue);
+		write_lock_irqsave(&rdev->fence_lock, irq_flags);
+		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
+		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+		rdev->fence_drv[ring].initialized = false;
+	}
 }
 
 
@@ -410,14 +470,21 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
 	struct drm_device *dev = node->minor->dev;
 	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_fence *fence;
-
-	seq_printf(m, "Last signaled fence 0x%08X\n",
-		   radeon_fence_read(rdev));
-	if (!list_empty(&rdev->fence_drv.emited)) {
-		fence = list_entry(rdev->fence_drv.emited.prev,
-				   struct radeon_fence, list);
-		seq_printf(m, "Last emited fence %p with 0x%08X\n",
-			   fence, fence->seq);
+	int i;
+
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		if (!rdev->fence_drv[i].initialized)
+			continue;
+
+		seq_printf(m, "--- ring %d ---\n", i);
+		seq_printf(m, "Last signaled fence 0x%08X\n",
+			   radeon_fence_read(rdev, i));
+		if (!list_empty(&rdev->fence_drv[i].emitted)) {
+			fence = list_entry(rdev->fence_drv[i].emitted.prev,
					   struct radeon_fence, list);
+			seq_printf(m, "Last emitted fence %p with 0x%08X\n",
+				   fence, fence->seq);
+		}
 	}
 	return 0;
 }
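
The key idea in radeon_fence_driver_start_ring above is that every ring now owns its own fence location: with write-back events enabled each ring gets a slot in the shared write-back page (R600_WB_EVENT_OFFSET + ring * 4), otherwise a per-ring scratch register is reserved. A minimal user-space model of that selection, not the kernel code — NUM_RINGS, the struct names and the identity stand-in for cpu_to_le32 are all assumptions here:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NUM_RINGS 3

    struct fence_drv {
        uint32_t *cpu_addr;     /* per-ring slot in the write-back page */
        uint32_t  scratch_reg;  /* fallback scratch register, modeled as a field */
    };

    struct device_model {
        int wb_enabled;
        struct fence_drv fence_drv[NUM_RINGS];
    };

    /* With write-back on, each ring writes its sequence to its own
     * CPU-visible slot (byte-swapping elided); otherwise it goes to the
     * ring's scratch register. */
    static void fence_write(struct device_model *d, uint32_t seq, int ring)
    {
        if (d->wb_enabled)
            *d->fence_drv[ring].cpu_addr = seq;
        else
            d->fence_drv[ring].scratch_reg = seq;
    }

    static uint32_t fence_read(struct device_model *d, int ring)
    {
        return d->wb_enabled ? *d->fence_drv[ring].cpu_addr
                             : d->fence_drv[ring].scratch_reg;
    }

    int main(void)
    {
        static uint32_t wb[NUM_RINGS];  /* stands in for rdev->wb.wb */
        struct device_model d = { .wb_enabled = 1 };

        for (int r = 0; r < NUM_RINGS; r++)
            d.fence_drv[r].cpu_addr = &wb[r];

        fence_write(&d, 42, 2);
        printf("ring 2 seq = %" PRIu32 "\n", fence_read(&d, 2)); /* 42 */
        return 0;
    }
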
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index ba7ab79e12c..010dad8b66a 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -157,9 +157,6 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
 	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
 	for (i = 0; i < pages; i++, p++) {
 		if (rdev->gart.pages[p]) {
-			if (!rdev->gart.ttm_alloced[p])
-				pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
-					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 			rdev->gart.pages[p] = NULL;
 			rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
 			page_base = rdev->gart.pages_addr[p];
@@ -191,23 +188,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
 	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
 
 	for (i = 0; i < pages; i++, p++) {
-		/* we reverted the patch using dma_addr in TTM for now but this
-		 * code stops building on alpha so just comment it out for now */
-		if (0) { /*dma_addr[i] != DMA_ERROR_CODE) */
-			rdev->gart.ttm_alloced[p] = true;
-			rdev->gart.pages_addr[p] = dma_addr[i];
-		} else {
-			/* we need to support large memory configurations */
-			/* assume that unbind have already been call on the range */
-			rdev->gart.pages_addr[p] = pci_map_page(rdev->pdev, pagelist[i],
-								0, PAGE_SIZE,
-								PCI_DMA_BIDIRECTIONAL);
-			if (pci_dma_mapping_error(rdev->pdev, rdev->gart.pages_addr[p])) {
-				/* FIXME: failed to map page (return -ENOMEM?) */
-				radeon_gart_unbind(rdev, offset, pages);
-				return -ENOMEM;
-			}
-		}
+		rdev->gart.pages_addr[p] = dma_addr[i];
 		rdev->gart.pages[p] = pagelist[i];
 		if (rdev->gart.ptr) {
 			page_base = rdev->gart.pages_addr[p];
@@ -274,12 +255,6 @@ int radeon_gart_init(struct radeon_device *rdev)
 		radeon_gart_fini(rdev);
 		return -ENOMEM;
 	}
-	rdev->gart.ttm_alloced = kzalloc(sizeof(bool) *
-					 rdev->gart.num_cpu_pages, GFP_KERNEL);
-	if (rdev->gart.ttm_alloced == NULL) {
-		radeon_gart_fini(rdev);
-		return -ENOMEM;
-	}
 	/* set GART entry to point to the dummy page by default */
 	for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
 		rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
@@ -296,10 +271,404 @@ void radeon_gart_fini(struct radeon_device *rdev)
 	rdev->gart.ready = false;
 	kfree(rdev->gart.pages);
 	kfree(rdev->gart.pages_addr);
-	kfree(rdev->gart.ttm_alloced);
 	rdev->gart.pages = NULL;
 	rdev->gart.pages_addr = NULL;
-	rdev->gart.ttm_alloced = NULL;
 
 	radeon_dummy_page_fini(rdev);
 }
+
+/*
+ * vm helpers
+ *
+ * TODO bind a default page at vm initialization for default address
+ */
+int radeon_vm_manager_init(struct radeon_device *rdev)
+{
+	int r;
+
+	rdev->vm_manager.enabled = false;
+
+	/* mark first vm as always in use, it's the system one */
+	r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
+				      rdev->vm_manager.max_pfn * 8,
+				      RADEON_GEM_DOMAIN_VRAM);
+	if (r) {
+		dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
+			(rdev->vm_manager.max_pfn * 8) >> 10);
+		return r;
+	}
+
+	r = rdev->vm_manager.funcs->init(rdev);
+	if (r == 0)
+		rdev->vm_manager.enabled = true;
+
+	return r;
+}
+
+/* cs mutex must be locked */
+static void radeon_vm_unbind_locked(struct radeon_device *rdev,
+				    struct radeon_vm *vm)
+{
+	struct radeon_bo_va *bo_va;
+
+	if (vm->id == -1) {
+		return;
+	}
+
+	/* wait for vm use to end */
+	if (vm->fence) {
+		radeon_fence_wait(vm->fence, false);
+		radeon_fence_unref(&vm->fence);
+	}
+
+	/* hw unbind */
+	rdev->vm_manager.funcs->unbind(rdev, vm);
+	rdev->vm_manager.use_bitmap &= ~(1 << vm->id);
+	list_del_init(&vm->list);
+	vm->id = -1;
+	radeon_sa_bo_free(rdev, &vm->sa_bo);
+	vm->pt = NULL;
+
+	list_for_each_entry(bo_va, &vm->va, vm_list) {
+		bo_va->valid = false;
+	}
+}
+
+void radeon_vm_manager_fini(struct radeon_device *rdev)
+{
+	if (rdev->vm_manager.sa_manager.bo == NULL)
+		return;
+	radeon_vm_manager_suspend(rdev);
+	rdev->vm_manager.funcs->fini(rdev);
+	radeon_sa_bo_manager_fini(rdev, &rdev->vm_manager.sa_manager);
+	rdev->vm_manager.enabled = false;
+}
+
+int radeon_vm_manager_start(struct radeon_device *rdev)
+{
+	if (rdev->vm_manager.sa_manager.bo == NULL) {
+		return -EINVAL;
+	}
+	return radeon_sa_bo_manager_start(rdev, &rdev->vm_manager.sa_manager);
+}
+
+int radeon_vm_manager_suspend(struct radeon_device *rdev)
+{
+	struct radeon_vm *vm, *tmp;
+
+	radeon_mutex_lock(&rdev->cs_mutex);
+	/* unbind all active vm */
+	list_for_each_entry_safe(vm, tmp, &rdev->vm_manager.lru_vm, list) {
+		radeon_vm_unbind_locked(rdev, vm);
+	}
+	rdev->vm_manager.funcs->fini(rdev);
+	radeon_mutex_unlock(&rdev->cs_mutex);
+	return radeon_sa_bo_manager_suspend(rdev, &rdev->vm_manager.sa_manager);
+}
+
+/* cs mutex must be locked */
+void radeon_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+	mutex_lock(&vm->mutex);
+	radeon_vm_unbind_locked(rdev, vm);
+	mutex_unlock(&vm->mutex);
+}
+
+/* cs mutex and vm mutex must be locked */
+int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+	struct radeon_vm *vm_evict;
+	unsigned i;
+	int id = -1, r;
+
+	if (vm == NULL) {
+		return -EINVAL;
+	}
+
+	if (vm->id != -1) {
+		/* update lru */
+		list_del_init(&vm->list);
+		list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
+		return 0;
+	}
+
+retry:
+	r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, &vm->sa_bo,
+			     RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8),
+			     RADEON_GPU_PAGE_SIZE);
+	if (r) {
+		if (list_empty(&rdev->vm_manager.lru_vm)) {
+			return r;
+		}
+		vm_evict = list_first_entry(&rdev->vm_manager.lru_vm, struct radeon_vm, list);
+		radeon_vm_unbind(rdev, vm_evict);
+		goto retry;
+	}
+	vm->pt = rdev->vm_manager.sa_manager.cpu_ptr;
+	vm->pt += (vm->sa_bo.offset >> 3);
+	vm->pt_gpu_addr = rdev->vm_manager.sa_manager.gpu_addr;
+	vm->pt_gpu_addr += vm->sa_bo.offset;
+	memset(vm->pt, 0, RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8));
+
+retry_id:
+	/* search for free vm */
+	for (i = 0; i < rdev->vm_manager.nvm; i++) {
+		if (!(rdev->vm_manager.use_bitmap & (1 << i))) {
+			id = i;
+			break;
+		}
+	}
+	/* evict vm if necessary */
+	if (id == -1) {
+		vm_evict = list_first_entry(&rdev->vm_manager.lru_vm, struct radeon_vm, list);
+		radeon_vm_unbind(rdev, vm_evict);
+		goto retry_id;
+	}
+
+	/* do hw bind */
+	r = rdev->vm_manager.funcs->bind(rdev, vm, id);
+	if (r) {
+		radeon_sa_bo_free(rdev, &vm->sa_bo);
+		return r;
+	}
+	rdev->vm_manager.use_bitmap |= 1 << id;
+	vm->id = id;
+	list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
+	return radeon_vm_bo_update_pte(rdev, vm, rdev->ib_pool.sa_manager.bo,
+				       &rdev->ib_pool.sa_manager.bo->tbo.mem);
+}
+
+/* object has to be reserved */
+int radeon_vm_bo_add(struct radeon_device *rdev,
+		     struct radeon_vm *vm,
+		     struct radeon_bo *bo,
+		     uint64_t offset,
+		     uint32_t flags)
+{
+	struct radeon_bo_va *bo_va, *tmp;
+	struct list_head *head;
+	uint64_t size = radeon_bo_size(bo), last_offset = 0;
+	unsigned last_pfn;
+
+	bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
+	if (bo_va == NULL) {
+		return -ENOMEM;
+	}
+	bo_va->vm = vm;
+	bo_va->bo = bo;
+	bo_va->soffset = offset;
+	bo_va->eoffset = offset + size;
+	bo_va->flags = flags;
+	bo_va->valid = false;
+	INIT_LIST_HEAD(&bo_va->bo_list);
+	INIT_LIST_HEAD(&bo_va->vm_list);
+	/* make sure object fits at this offset */
+	if (bo_va->soffset >= bo_va->eoffset) {
+		kfree(bo_va);
+		return -EINVAL;
+	}
+
+	last_pfn = bo_va->eoffset / RADEON_GPU_PAGE_SIZE;
+	if (last_pfn > rdev->vm_manager.max_pfn) {
+		kfree(bo_va);
+		dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
+			last_pfn, rdev->vm_manager.max_pfn);
+		return -EINVAL;
+	}
+
+	mutex_lock(&vm->mutex);
+	if (last_pfn > vm->last_pfn) {
+		/* grow va space 32M by 32M */
+		unsigned align = ((32 << 20) >> 12) - 1;
+		radeon_mutex_lock(&rdev->cs_mutex);
+		radeon_vm_unbind_locked(rdev, vm);
+		radeon_mutex_unlock(&rdev->cs_mutex);
+		vm->last_pfn = (last_pfn + align) & ~align;
+	}
+	head = &vm->va;
+	last_offset = 0;
+	list_for_each_entry(tmp, &vm->va, vm_list) {
+		if (bo_va->soffset >= last_offset && bo_va->eoffset < tmp->soffset) {
+			/* bo can be added before this one */
+			break;
+		}
+		if (bo_va->soffset >= tmp->soffset && bo_va->soffset < tmp->eoffset) {
+			/* bo and tmp overlap, invalid offset */
+			dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n",
+				bo, (unsigned)bo_va->soffset, tmp->bo,
+				(unsigned)tmp->soffset, (unsigned)tmp->eoffset);
+			kfree(bo_va);
+			mutex_unlock(&vm->mutex);
+			return -EINVAL;
+		}
+		last_offset = tmp->eoffset;
+		head = &tmp->vm_list;
+	}
+	list_add(&bo_va->vm_list, head);
+	list_add_tail(&bo_va->bo_list, &bo->va);
+	mutex_unlock(&vm->mutex);
+	return 0;
+}
+
+static u64 radeon_vm_get_addr(struct radeon_device *rdev,
+			      struct ttm_mem_reg *mem,
+			      unsigned pfn)
+{
+	u64 addr = 0;
+
+	switch (mem->mem_type) {
+	case TTM_PL_VRAM:
+		addr = (mem->start << PAGE_SHIFT);
+		addr += pfn * RADEON_GPU_PAGE_SIZE;
+		addr += rdev->vm_manager.vram_base_offset;
+		break;
+	case TTM_PL_TT:
+		/* offset inside page table */
+		addr = mem->start << PAGE_SHIFT;
+		addr += pfn * RADEON_GPU_PAGE_SIZE;
+		addr = addr >> PAGE_SHIFT;
+		/* page table offset */
+		addr = rdev->gart.pages_addr[addr];
+		/* in case cpu page size != gpu page size */
+		addr += (pfn * RADEON_GPU_PAGE_SIZE) & (~PAGE_MASK);
+		break;
+	default:
+		break;
+	}
+	return addr;
+}
+
+/* object has to be reserved & cs mutex and vm mutex must be held */
+int radeon_vm_bo_update_pte(struct radeon_device *rdev,
+			    struct radeon_vm *vm,
+			    struct radeon_bo *bo,
+			    struct ttm_mem_reg *mem)
+{
+	struct radeon_bo_va *bo_va;
+	unsigned ngpu_pages, i;
+	uint64_t addr = 0, pfn;
+	uint32_t flags;
+
+	/* nothing to do if vm isn't bound */
+	if (vm->id == -1)
+		return 0;
+
+	bo_va = radeon_bo_va(bo, vm);
+	if (bo_va == NULL) {
+		dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
+		return -EINVAL;
+	}
+
+	if (bo_va->valid)
+		return 0;
+
+	ngpu_pages = radeon_bo_ngpu_pages(bo);
+	bo_va->flags &= ~RADEON_VM_PAGE_VALID;
+	bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
+	if (mem) {
+		if (mem->mem_type != TTM_PL_SYSTEM) {
+			bo_va->flags |= RADEON_VM_PAGE_VALID;
+			bo_va->valid = true;
+		}
+		if (mem->mem_type == TTM_PL_TT) {
+			bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
+		}
+	}
+	pfn = bo_va->soffset / RADEON_GPU_PAGE_SIZE;
+	flags = rdev->vm_manager.funcs->page_flags(rdev, bo_va->vm, bo_va->flags);
+	for (i = 0, addr = 0; i < ngpu_pages; i++) {
+		if (mem && bo_va->valid) {
+			addr = radeon_vm_get_addr(rdev, mem, i);
+		}
+		rdev->vm_manager.funcs->set_page(rdev, bo_va->vm, i + pfn, addr, flags);
+	}
+	rdev->vm_manager.funcs->tlb_flush(rdev, bo_va->vm);
+	return 0;
+}
+
+/* object has to be reserved */
+int radeon_vm_bo_rmv(struct radeon_device *rdev,
+		     struct radeon_vm *vm,
+		     struct radeon_bo *bo)
+{
+	struct radeon_bo_va *bo_va;
+
+	bo_va = radeon_bo_va(bo, vm);
+	if (bo_va == NULL)
+		return 0;
+
+	list_del(&bo_va->bo_list);
+	mutex_lock(&vm->mutex);
+	radeon_mutex_lock(&rdev->cs_mutex);
+	radeon_vm_bo_update_pte(rdev, vm, bo, NULL);
+	radeon_mutex_unlock(&rdev->cs_mutex);
+	list_del(&bo_va->vm_list);
+	mutex_unlock(&vm->mutex);
+
+	kfree(bo_va);
+	return 0;
+}
+
+void radeon_vm_bo_invalidate(struct radeon_device *rdev,
+			     struct radeon_bo *bo)
+{
+	struct radeon_bo_va *bo_va;
+
+	BUG_ON(!atomic_read(&bo->tbo.reserved));
+	list_for_each_entry(bo_va, &bo->va, bo_list) {
+		bo_va->valid = false;
+	}
+}
+
+int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+	int r;
+
+	vm->id = -1;
+	vm->fence = NULL;
+	mutex_init(&vm->mutex);
+	INIT_LIST_HEAD(&vm->list);
+	INIT_LIST_HEAD(&vm->va);
+	vm->last_pfn = 0;
+	/* map the ib pool buffer at 0 in virtual address space, set
+	 * read only
+	 */
+	r = radeon_vm_bo_add(rdev, vm, rdev->ib_pool.sa_manager.bo, 0,
+			     RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_SNOOPED);
+	return r;
+}
+
+void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+	struct radeon_bo_va *bo_va, *tmp;
+	int r;
+
+	mutex_lock(&vm->mutex);
+
+	radeon_mutex_lock(&rdev->cs_mutex);
+	radeon_vm_unbind_locked(rdev, vm);
+	radeon_mutex_unlock(&rdev->cs_mutex);
+
+	/* remove all bo */
+	r = radeon_bo_reserve(rdev->ib_pool.sa_manager.bo, false);
+	if (!r) {
+		bo_va = radeon_bo_va(rdev->ib_pool.sa_manager.bo, vm);
+		list_del_init(&bo_va->bo_list);
+		list_del_init(&bo_va->vm_list);
+		radeon_bo_unreserve(rdev->ib_pool.sa_manager.bo);
+		kfree(bo_va);
+	}
+	if (!list_empty(&vm->va)) {
+		dev_err(rdev->dev, "still active bo inside vm\n");
+	}
+	list_for_each_entry_safe(bo_va, tmp, &vm->va, vm_list) {
+		list_del_init(&bo_va->vm_list);
+		r = radeon_bo_reserve(bo_va->bo, false);
+		if (!r) {
+			list_del_init(&bo_va->bo_list);
+			radeon_bo_unreserve(bo_va->bo);
+			kfree(bo_va);
+		}
+	}
+	mutex_unlock(&vm->mutex);
+}
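
The TTM_PL_TT branch of radeon_vm_get_addr above does a double translation: the GPU pfn inside the mapping is first turned into a byte offset in the GART, the GART table then yields the DMA address of the backing CPU page, and the sub-page offset is re-added in case CPU and GPU page sizes differ. A user-space sketch of just that arithmetic, assuming 4 KiB CPU pages and the 4 KiB GPU page size; pages_addr is a stand-in for rdev->gart.pages_addr:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define CPU_PAGE_SHIFT 12
    #define CPU_PAGE_SIZE  (1ull << CPU_PAGE_SHIFT)
    #define GPU_PAGE_SIZE  4096ull  /* mirrors RADEON_GPU_PAGE_SIZE */

    /* GTT case: GPU pfn -> GART byte offset -> backing CPU page's DMA
     * address -> plus the sub-page offset. */
    static uint64_t vm_get_addr_tt(uint64_t mem_start_pfn, unsigned pfn,
                                   const uint64_t *pages_addr)
    {
        uint64_t addr = (mem_start_pfn << CPU_PAGE_SHIFT) + pfn * GPU_PAGE_SIZE;
        addr = pages_addr[addr >> CPU_PAGE_SHIFT];          /* DMA addr of page */
        addr += (pfn * GPU_PAGE_SIZE) & (CPU_PAGE_SIZE - 1); /* sub-page offset */
        return addr;
    }

    int main(void)
    {
        /* two CPU pages backing an 8 KiB mapping starting at GART page 0 */
        uint64_t pages_addr[2] = { 0x10000000ull, 0x2a000000ull };

        for (unsigned pfn = 0; pfn < 2; pfn++)
            printf("gpu pfn %u -> dma 0x%" PRIx64 "\n",
                   pfn, vm_get_addr_tt(0, pfn, pages_addr));
        return 0;
    }
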
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index aa1ca2dea42..7337850af2f 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -142,6 +142,44 @@ void radeon_gem_fini(struct radeon_device *rdev)
 	radeon_bo_force_delete(rdev);
 }
 
+/*
+ * Called from drm_gem_handle_create which appears in both the new and
+ * open ioctl cases.
+ */
+int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
+{
+	return 0;
+}
+
+void radeon_gem_object_close(struct drm_gem_object *obj,
+			     struct drm_file *file_priv)
+{
+	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
+	struct radeon_device *rdev = rbo->rdev;
+	struct radeon_fpriv *fpriv = file_priv->driver_priv;
+	struct radeon_vm *vm = &fpriv->vm;
+	struct radeon_bo_va *bo_va, *tmp;
+
+	if (rdev->family < CHIP_CAYMAN) {
+		return;
+	}
+
+	if (radeon_bo_reserve(rbo, false)) {
+		return;
+	}
+	list_for_each_entry_safe(bo_va, tmp, &rbo->va, bo_list) {
+		if (bo_va->vm == vm) {
+			/* remove from this vm address space */
+			mutex_lock(&vm->mutex);
+			list_del(&bo_va->vm_list);
+			mutex_unlock(&vm->mutex);
+			list_del(&bo_va->bo_list);
+			kfree(bo_va);
+		}
+	}
+	radeon_bo_unreserve(rbo);
+}
+
 
 /*
  * GEM ioctls.
@@ -152,6 +190,7 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
 	struct radeon_device *rdev = dev->dev_private;
 	struct drm_radeon_gem_info *args = data;
 	struct ttm_mem_type_manager *man;
+	unsigned i;
 
 	man = &rdev->mman.bdev.man[TTM_PL_VRAM];
 
@@ -160,8 +199,9 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
 	if (rdev->stollen_vga_memory)
 		args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
 	args->vram_visible -= radeon_fbdev_total_size(rdev);
-	args->gart_size = rdev->mc.gtt_size - rdev->cp.ring_size - 4096 -
-		RADEON_IB_POOL_SIZE*64*1024;
+	args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024;
+	for (i = 0; i < RADEON_NUM_RINGS; ++i)
+		args->gart_size -= rdev->ring[i].ring_size;
 	return 0;
 }
 
@@ -352,6 +392,109 @@ out:
 	return r;
 }
 
+int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *filp)
+{
+	struct drm_radeon_gem_va *args = data;
+	struct drm_gem_object *gobj;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_fpriv *fpriv = filp->driver_priv;
+	struct radeon_bo *rbo;
+	struct radeon_bo_va *bo_va;
+	u32 invalid_flags;
+	int r = 0;
+
+	if (!rdev->vm_manager.enabled) {
+		args->operation = RADEON_VA_RESULT_ERROR;
+		return -ENOTTY;
+	}
+
+	/* !! DONT REMOVE !!
+	 * We don't support vm_id yet; to be sure we don't have broken
+	 * userspace, reject anyone trying to use a non-zero value, so that
+	 * moving forward we can use those fields without breaking existing
+	 * userspace.
+	 */
+	if (args->vm_id) {
+		args->operation = RADEON_VA_RESULT_ERROR;
+		return -EINVAL;
+	}
+
+	if (args->offset < RADEON_VA_RESERVED_SIZE) {
+		dev_err(&dev->pdev->dev,
+			"offset 0x%lX is in reserved area 0x%X\n",
+			(unsigned long)args->offset,
+			RADEON_VA_RESERVED_SIZE);
+		args->operation = RADEON_VA_RESULT_ERROR;
+		return -EINVAL;
+	}
+
+	/* don't remove, we need to enforce userspace to set the snooped flag,
+	 * otherwise we will end up with broken userspace and we won't be able
+	 * to enable this feature without adding a new interface
+	 */
+	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
+	if ((args->flags & invalid_flags)) {
+		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
+			args->flags, invalid_flags);
+		args->operation = RADEON_VA_RESULT_ERROR;
+		return -EINVAL;
+	}
+	if (!(args->flags & RADEON_VM_PAGE_SNOOPED)) {
+		dev_err(&dev->pdev->dev, "only snooped mappings are supported for now\n");
+		args->operation = RADEON_VA_RESULT_ERROR;
+		return -EINVAL;
+	}
+
+	switch (args->operation) {
+	case RADEON_VA_MAP:
+	case RADEON_VA_UNMAP:
+		break;
+	default:
+		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
+			args->operation);
+		args->operation = RADEON_VA_RESULT_ERROR;
+		return -EINVAL;
+	}
+
+	gobj = drm_gem_object_lookup(dev, filp, args->handle);
+	if (gobj == NULL) {
+		args->operation = RADEON_VA_RESULT_ERROR;
+		return -ENOENT;
+	}
+	rbo = gem_to_radeon_bo(gobj);
+	r = radeon_bo_reserve(rbo, false);
+	if (r) {
+		args->operation = RADEON_VA_RESULT_ERROR;
+		drm_gem_object_unreference_unlocked(gobj);
+		return r;
+	}
+	switch (args->operation) {
+	case RADEON_VA_MAP:
+		bo_va = radeon_bo_va(rbo, &fpriv->vm);
+		if (bo_va) {
+			args->operation = RADEON_VA_RESULT_VA_EXIST;
+			args->offset = bo_va->soffset;
+			goto out;
+		}
+		r = radeon_vm_bo_add(rdev, &fpriv->vm, rbo,
+				     args->offset, args->flags);
+		break;
+	case RADEON_VA_UNMAP:
+		r = radeon_vm_bo_rmv(rdev, &fpriv->vm, rbo);
+		break;
+	default:
+		break;
+	}
+	args->operation = RADEON_VA_RESULT_OK;
+	if (r) {
+		args->operation = RADEON_VA_RESULT_ERROR;
+	}
+out:
+	radeon_bo_unreserve(rbo);
+	drm_gem_object_unreference_unlocked(gobj);
+	return r;
+}
+
 int radeon_mode_dumb_create(struct drm_file *file_priv,
 			    struct drm_device *dev,
 			    struct drm_mode_create_dumb *args)
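
The early-out order in radeon_gem_va_ioctl above is easy to restate on its own: reject a non-zero vm_id, offsets inside the kernel-reserved area, any kernel-managed flag, and any mapping that is not snooped, all before the BO is even looked up. A hedged stand-alone sketch of those checks; the constants and the args struct here are simplified stand-ins, not the real UAPI:

    #include <stdint.h>
    #include <stdio.h>

    #define VA_RESERVED_SIZE (1 << 20)  /* stand-in for RADEON_VA_RESERVED_SIZE */
    #define PAGE_VALID       (1u << 0)  /* kernel-managed, rejected from userspace */
    #define PAGE_SYSTEM      (1u << 1)  /* kernel-managed, rejected from userspace */
    #define PAGE_SNOOPED     (1u << 2)  /* the only mapping type accepted so far */

    struct va_args {
        uint64_t offset;
        uint32_t vm_id;
        uint32_t flags;
    };

    /* Mirrors the ioctl's argument validation: 0 on success, -1 on reject. */
    static int va_args_check(const struct va_args *a)
    {
        if (a->vm_id)                     /* reserved until per-process ids exist */
            return -1;
        if (a->offset < VA_RESERVED_SIZE) /* low range belongs to the kernel */
            return -1;
        if (a->flags & (PAGE_VALID | PAGE_SYSTEM))
            return -1;
        if (!(a->flags & PAGE_SNOOPED))   /* snooped is mandatory for now */
            return -1;
        return 0;
    }

    int main(void)
    {
        struct va_args ok  = { .offset = 2 << 20, .flags = PAGE_SNOOPED };
        struct va_args bad = { .offset = 0,       .flags = PAGE_SNOOPED };

        printf("ok: %d, bad: %d\n", va_args_check(&ok), va_args_check(&bad));
        return 0;
    }
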
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 8f86aeb2669..be38921bf76 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -65,7 +65,8 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
 	unsigned i;
 
 	/* Disable *all* interrupts */
-	rdev->irq.sw_int = false;
+	for (i = 0; i < RADEON_NUM_RINGS; i++)
+		rdev->irq.sw_int[i] = false;
 	rdev->irq.gui_idle = false;
 	for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
 		rdev->irq.hpd[i] = false;
@@ -81,9 +82,11 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
 int radeon_driver_irq_postinstall_kms(struct drm_device *dev)
 {
 	struct radeon_device *rdev = dev->dev_private;
+	unsigned i;
 
 	dev->max_vblank_count = 0x001fffff;
-	rdev->irq.sw_int = true;
+	for (i = 0; i < RADEON_NUM_RINGS; i++)
+		rdev->irq.sw_int[i] = true;
 	radeon_irq_set(rdev);
 	return 0;
 }
@@ -97,7 +100,8 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
 		return;
 	}
 	/* Disable *all* interrupts */
-	rdev->irq.sw_int = false;
+	for (i = 0; i < RADEON_NUM_RINGS; i++)
+		rdev->irq.sw_int[i] = false;
 	rdev->irq.gui_idle = false;
 	for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
 		rdev->irq.hpd[i] = false;
@@ -194,26 +198,26 @@ void radeon_irq_kms_fini(struct radeon_device *rdev)
 	flush_work_sync(&rdev->hotplug_work);
 }
 
-void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev)
+void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring)
 {
 	unsigned long irqflags;
 
 	spin_lock_irqsave(&rdev->irq.sw_lock, irqflags);
-	if (rdev->ddev->irq_enabled && (++rdev->irq.sw_refcount == 1)) {
-		rdev->irq.sw_int = true;
+	if (rdev->ddev->irq_enabled && (++rdev->irq.sw_refcount[ring] == 1)) {
+		rdev->irq.sw_int[ring] = true;
 		radeon_irq_set(rdev);
 	}
 	spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
 }
 
-void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev)
+void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring)
 {
 	unsigned long irqflags;
 
 	spin_lock_irqsave(&rdev->irq.sw_lock, irqflags);
-	BUG_ON(rdev->ddev->irq_enabled && rdev->irq.sw_refcount <= 0);
-	if (rdev->ddev->irq_enabled && (--rdev->irq.sw_refcount == 0)) {
-		rdev->irq.sw_int = false;
+	BUG_ON(rdev->ddev->irq_enabled && rdev->irq.sw_refcount[ring] <= 0);
+	if (rdev->ddev->irq_enabled && (--rdev->irq.sw_refcount[ring] == 0)) {
+		rdev->irq.sw_int[ring] = false;
 		radeon_irq_set(rdev);
 	}
 	spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
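
The get/put pair above is a classic refcount-gated toggle: the interrupt source for a ring is switched on by its first waiter and off by its last. A minimal single-threaded model of the pattern (the spinlock and the radeon_irq_set() call are elided; all names here are stand-ins):

    #include <stdbool.h>
    #include <stdio.h>

    #define NUM_RINGS 3

    struct irq_model {
        int  refcount[NUM_RINGS];
        bool sw_int[NUM_RINGS];
    };

    /* First reference enables the ring's interrupt... */
    static void sw_irq_get(struct irq_model *irq, int ring)
    {
        if (++irq->refcount[ring] == 1)
            irq->sw_int[ring] = true;   /* kernel would reprogram hw here */
    }

    /* ...and dropping the last reference disables it again. */
    static void sw_irq_put(struct irq_model *irq, int ring)
    {
        if (--irq->refcount[ring] == 0)
            irq->sw_int[ring] = false;
    }

    int main(void)
    {
        struct irq_model irq = { {0}, {false} };

        sw_irq_get(&irq, 1);
        sw_irq_get(&irq, 1);
        sw_irq_put(&irq, 1);
        printf("ring 1 enabled: %d\n", irq.sw_int[1]);  /* still 1 */
        sw_irq_put(&irq, 1);
        printf("ring 1 enabled: %d\n", irq.sw_int[1]);  /* now 0 */
        return 0;
    }
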
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index be2c1224e68..d3352889a87 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -250,6 +250,18 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 			return -EINVAL;
 		}
 		break;
+	case RADEON_INFO_VA_START:
+		/* this is where we report if vm is supported or not */
+		if (rdev->family < CHIP_CAYMAN)
+			return -EINVAL;
+		value = RADEON_VA_RESERVED_SIZE;
+		break;
+	case RADEON_INFO_IB_VM_MAX_SIZE:
+		/* this is where we report if vm is supported or not */
+		if (rdev->family < CHIP_CAYMAN)
+			return -EINVAL;
+		value = RADEON_IB_VM_MAX_SIZE;
+		break;
 	default:
 		DRM_DEBUG_KMS("Invalid request %d\n", info->request);
 		return -EINVAL;
@@ -270,7 +282,6 @@ int radeon_driver_firstopen_kms(struct drm_device *dev)
 	return 0;
 }
 
-
 void radeon_driver_lastclose_kms(struct drm_device *dev)
 {
 	vga_switcheroo_process_delayed_switch();
@@ -278,12 +289,45 @@ void radeon_driver_lastclose_kms(struct drm_device *dev)
 
 int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 {
+	struct radeon_device *rdev = dev->dev_private;
+
+	file_priv->driver_priv = NULL;
+
+	/* new GPUs have virtual address space support */
+	if (rdev->family >= CHIP_CAYMAN) {
+		struct radeon_fpriv *fpriv;
+		int r;
+
+		fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
+		if (unlikely(!fpriv)) {
+			return -ENOMEM;
+		}
+
+		r = radeon_vm_init(rdev, &fpriv->vm);
+		if (r) {
+			radeon_vm_fini(rdev, &fpriv->vm);
+			kfree(fpriv);
+			return r;
+		}
+
+		file_priv->driver_priv = fpriv;
+	}
 	return 0;
 }
 
 void radeon_driver_postclose_kms(struct drm_device *dev,
 				 struct drm_file *file_priv)
 {
+	struct radeon_device *rdev = dev->dev_private;
+
+	/* new GPUs have virtual address space support */
+	if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
+		struct radeon_fpriv *fpriv = file_priv->driver_priv;
+
+		radeon_vm_fini(rdev, &fpriv->vm);
+		kfree(fpriv);
+		file_priv->driver_priv = NULL;
+	}
 }
 
 void radeon_driver_preclose_kms(struct drm_device *dev,
@@ -451,5 +495,6 @@ struct drm_ioctl_desc radeon_ioctls_kms[] = {
 	DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED),
 };
 int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms);
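
The open/postclose hooks above tie the virtual address space to a file descriptor: open() allocates a per-fd private with its own VM on Cayman and later, and postclose() tears it down. A minimal user-space model of that lifecycle under simplified assumptions (vm_init here just marks the VM unbound; the real error path and structures differ):

    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-ins for the kernel structures; only the lifecycle is modeled. */
    struct vm    { int id; };
    struct fpriv { struct vm vm; };
    struct file  { void *driver_priv; };

    static int vm_init(struct vm *vm)  { vm->id = -1; return 0; }
    static void vm_fini(struct vm *vm) { (void)vm; }

    /* open: newer ASICs get a private address space per file descriptor */
    static int driver_open(struct file *f, int is_cayman_or_later)
    {
        f->driver_priv = NULL;
        if (!is_cayman_or_later)
            return 0;

        struct fpriv *p = calloc(1, sizeof(*p));
        if (!p)
            return -1;
        if (vm_init(&p->vm)) {
            free(p);
            return -1;
        }
        f->driver_priv = p;
        return 0;
    }

    /* postclose: the address space dies with the file */
    static void driver_postclose(struct file *f)
    {
        struct fpriv *p = f->driver_priv;
        if (p) {
            vm_fini(&p->vm);
            free(p);
            f->driver_priv = NULL;
        }
    }

    int main(void)
    {
        struct file f;
        if (driver_open(&f, 1) == 0)
            printf("vm starts unbound, id = %d\n",
                   ((struct fpriv *)f.driver_priv)->vm.id);
        driver_postclose(&f);
        return 0;
    }
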
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index daadf211104..25a19c48307 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -437,7 +437,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
 
 	crtc_offset_cntl = 0;
 
-	pitch_pixels = target_fb->pitch / (target_fb->bits_per_pixel / 8);
+	pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
 	crtc_pitch = (((pitch_pixels * target_fb->bits_per_pixel) +
 		       ((target_fb->bits_per_pixel * 8) - 1)) /
 		      (target_fb->bits_per_pixel * 8));
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 2c2e75ef8a3..08ff857c8fd 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -643,7 +643,7 @@ extern void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
 			  u16 *blue, int regno);
 void radeon_framebuffer_init(struct drm_device *dev,
 			     struct radeon_framebuffer *rfb,
-			     struct drm_mode_fb_cmd *mode_cmd,
+			     struct drm_mode_fb_cmd2 *mode_cmd,
 			     struct drm_gem_object *obj);
 
 int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 1c851521f45..d45df176359 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -46,6 +46,20 @@ static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);
 * functions are calling it.
 */
 
+void radeon_bo_clear_va(struct radeon_bo *bo)
+{
+	struct radeon_bo_va *bo_va, *tmp;
+
+	list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) {
+		/* remove from all vm address spaces */
+		mutex_lock(&bo_va->vm->mutex);
+		list_del(&bo_va->vm_list);
+		mutex_unlock(&bo_va->vm->mutex);
+		list_del(&bo_va->bo_list);
+		kfree(bo_va);
+	}
+}
+
 static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 {
 	struct radeon_bo *bo;
@@ -55,6 +69,7 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 	list_del_init(&bo->list);
 	mutex_unlock(&bo->rdev->gem.mutex);
 	radeon_bo_clear_surface_reg(bo);
+	radeon_bo_clear_va(bo);
 	drm_gem_object_release(&bo->gem_base);
 	kfree(bo);
 }
@@ -95,6 +110,7 @@ int radeon_bo_create(struct radeon_device *rdev,
 	enum ttm_bo_type type;
 	unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
 	unsigned long max_size = 0;
+	size_t acc_size;
 	int r;
 
 	size = ALIGN(size, PAGE_SIZE);
@@ -117,6 +133,9 @@ int radeon_bo_create(struct radeon_device *rdev,
 		return -ENOMEM;
 	}
 
+	acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
+				       sizeof(struct radeon_bo));
+
retry:
 	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
 	if (bo == NULL)
@@ -130,12 +149,13 @@ retry:
 	bo->gem_base.driver_private = NULL;
 	bo->surface_reg = -1;
 	INIT_LIST_HEAD(&bo->list);
+	INIT_LIST_HEAD(&bo->va);
 	radeon_ttm_placement_from_domain(bo, domain);
 	/* Kernel allocations are uninterruptible */
 	mutex_lock(&rdev->vram_mutex);
 	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
-			&bo->placement, page_align, 0, !kernel, NULL, size,
-			&radeon_ttm_bo_destroy);
+			&bo->placement, page_align, 0, !kernel, NULL,
+			acc_size, &radeon_ttm_bo_destroy);
 	mutex_unlock(&rdev->vram_mutex);
 	if (unlikely(r != 0)) {
 		if (r != -ERESTARTSYS) {
@@ -483,6 +503,7 @@ void radeon_bo_move_notify(struct ttm_buffer_object *bo,
 		return;
 	rbo = container_of(bo, struct radeon_bo, tbo);
 	radeon_bo_check_tiling(rbo, 0, 1);
+	radeon_vm_bo_invalidate(rbo->rdev, rbo);
 }
 
 int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
@@ -556,3 +577,16 @@ int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait)
 	}
 	return 0;
 }
+
+/* object has to be reserved */
+struct radeon_bo_va *radeon_bo_va(struct radeon_bo *rbo, struct radeon_vm *vm)
+{
+	struct radeon_bo_va *bo_va;
+
+	list_for_each_entry(bo_va, &rbo->va, bo_list) {
+		if (bo_va->vm == vm) {
+			return bo_va;
+		}
+	}
+	return NULL;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index b07f0f9b862..cde43030887 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -83,6 +83,16 @@ static inline bool radeon_bo_is_reserved(struct radeon_bo *bo)
83 return !!atomic_read(&bo->tbo.reserved); 83 return !!atomic_read(&bo->tbo.reserved);
84} 84}
85 85
86static inline unsigned radeon_bo_ngpu_pages(struct radeon_bo *bo)
87{
88 return (bo->tbo.num_pages << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
89}
90
91static inline unsigned radeon_bo_gpu_page_alignment(struct radeon_bo *bo)
92{
93 return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
94}
95
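Both helpers convert TTM's CPU-page quantities into GPU-page units. A quick worked example under assumed values (4 KiB CPU pages, i.e. PAGE_SHIFT == 12, and the 4096-byte RADEON_GPU_PAGE_SIZE used elsewhere in the driver):

    /* tbo.num_pages == 4  ->  4 << 12 = 16384 bytes, so
     * radeon_bo_ngpu_pages() returns 16384 / 4096 = 4 GPU pages.
     * On a hypothetical 64 KiB-page kernel (PAGE_SHIFT == 16) the same
     * four CPU pages would count as 262144 / 4096 = 64 GPU pages. */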
86/** 96/**
87 * radeon_bo_mmap_offset - return mmap offset of bo 97 * radeon_bo_mmap_offset - return mmap offset of bo
88 * @bo: radeon object for which we query the offset 98 * @bo: radeon object for which we query the offset
@@ -128,4 +138,26 @@ extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
128 struct ttm_mem_reg *mem); 138 struct ttm_mem_reg *mem);
129extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo); 139extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
130extern int radeon_bo_get_surface_reg(struct radeon_bo *bo); 140extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
141extern struct radeon_bo_va *radeon_bo_va(struct radeon_bo *rbo,
142 struct radeon_vm *vm);
143
144/*
145 * sub allocation
146 */
147extern int radeon_sa_bo_manager_init(struct radeon_device *rdev,
148 struct radeon_sa_manager *sa_manager,
149 unsigned size, u32 domain);
150extern void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
151 struct radeon_sa_manager *sa_manager);
152extern int radeon_sa_bo_manager_start(struct radeon_device *rdev,
153 struct radeon_sa_manager *sa_manager);
154extern int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
155 struct radeon_sa_manager *sa_manager);
156extern int radeon_sa_bo_new(struct radeon_device *rdev,
157 struct radeon_sa_manager *sa_manager,
158 struct radeon_sa_bo *sa_bo,
159 unsigned size, unsigned align);
160extern void radeon_sa_bo_free(struct radeon_device *rdev,
161 struct radeon_sa_bo *sa_bo);
162
131#endif 163#endif
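Taken together, the declarations above imply a manager life cycle that the IB pool code later in this patch follows; a sketch of the assumed usage (error handling elided, sizes illustrative):

    struct radeon_sa_manager mgr;
    struct radeon_sa_bo sa_bo;

    radeon_sa_bo_manager_init(rdev, &mgr, 64 * 1024, RADEON_GEM_DOMAIN_GTT);
    radeon_sa_bo_manager_start(rdev, &mgr);          /* pin + kmap the backing BO */

    radeon_sa_bo_new(rdev, &mgr, &sa_bo, 256, 256);  /* 256 bytes, 256-aligned */
    /* use mgr.gpu_addr + sa_bo.offset on the GPU side and
     * mgr.cpu_ptr + (sa_bo.offset >> 2) on the CPU side */
    radeon_sa_bo_free(rdev, &sa_bo);

    radeon_sa_bo_manager_suspend(rdev, &mgr);        /* unpin across suspend */
    radeon_sa_bo_manager_fini(rdev, &mgr);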
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 78a665bd951..095148e29a1 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -252,7 +252,10 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
252 252
253 mutex_lock(&rdev->ddev->struct_mutex); 253 mutex_lock(&rdev->ddev->struct_mutex);
254 mutex_lock(&rdev->vram_mutex); 254 mutex_lock(&rdev->vram_mutex);
255 mutex_lock(&rdev->cp.mutex); 255 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
256 if (rdev->ring[i].ring_obj)
257 mutex_lock(&rdev->ring[i].mutex);
258 }
256 259
257 /* gui idle int has issues on older chips it seems */ 260 /* gui idle int has issues on older chips it seems */
258 if (rdev->family >= CHIP_R600) { 261 if (rdev->family >= CHIP_R600) {
@@ -268,12 +271,13 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
268 radeon_irq_set(rdev); 271 radeon_irq_set(rdev);
269 } 272 }
270 } else { 273 } else {
271 if (rdev->cp.ready) { 274 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
275 if (ring->ready) {
272 struct radeon_fence *fence; 276 struct radeon_fence *fence;
273 radeon_ring_alloc(rdev, 64); 277 radeon_ring_alloc(rdev, ring, 64);
274 radeon_fence_create(rdev, &fence); 278 radeon_fence_create(rdev, &fence, radeon_ring_index(rdev, ring));
275 radeon_fence_emit(rdev, fence); 279 radeon_fence_emit(rdev, fence);
276 radeon_ring_commit(rdev); 280 radeon_ring_commit(rdev, ring);
277 radeon_fence_wait(fence, false); 281 radeon_fence_wait(fence, false);
278 radeon_fence_unref(&fence); 282 radeon_fence_unref(&fence);
279 } 283 }
@@ -307,7 +311,10 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
307 311
308 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; 312 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
309 313
310 mutex_unlock(&rdev->cp.mutex); 314 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
315 if (rdev->ring[i].ring_obj)
316 mutex_unlock(&rdev->ring[i].mutex);
317 }
311 mutex_unlock(&rdev->vram_mutex); 318 mutex_unlock(&rdev->vram_mutex);
312 mutex_unlock(&rdev->ddev->struct_mutex); 319 mutex_unlock(&rdev->ddev->struct_mutex);
313} 320}
@@ -795,19 +802,14 @@ static void radeon_dynpm_idle_work_handler(struct work_struct *work)
795 resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); 802 resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
796 mutex_lock(&rdev->pm.mutex); 803 mutex_lock(&rdev->pm.mutex);
797 if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) { 804 if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
798 unsigned long irq_flags;
799 int not_processed = 0; 805 int not_processed = 0;
806 int i;
800 807
801 read_lock_irqsave(&rdev->fence_drv.lock, irq_flags); 808 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
802 if (!list_empty(&rdev->fence_drv.emited)) { 809 not_processed += radeon_fence_count_emitted(rdev, i);
803 struct list_head *ptr; 810 if (not_processed >= 3)
804 list_for_each(ptr, &rdev->fence_drv.emited) { 811 break;
805 /* count up to 3, that's enough info */
806 if (++not_processed >= 3)
807 break;
808 }
809 } 812 }
810 read_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
811 813
812 if (not_processed >= 3) { /* should upclock */ 814 if (not_processed >= 3) { /* should upclock */
813 if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_DOWNCLOCK) { 815 if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_DOWNCLOCK) {
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 49d58202202..e8bc70933d1 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -34,6 +34,7 @@
34#include "atom.h" 34#include "atom.h"
35 35
36int radeon_debugfs_ib_init(struct radeon_device *rdev); 36int radeon_debugfs_ib_init(struct radeon_device *rdev);
37int radeon_debugfs_ring_init(struct radeon_device *rdev);
37 38
38u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx) 39u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
39{ 40{
@@ -60,105 +61,106 @@ u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
60 return idx_value; 61 return idx_value;
61} 62}
62 63
63void radeon_ring_write(struct radeon_device *rdev, uint32_t v) 64void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
64{ 65{
65#if DRM_DEBUG_CODE 66#if DRM_DEBUG_CODE
66 if (rdev->cp.count_dw <= 0) { 67 if (ring->count_dw <= 0) {
67 DRM_ERROR("radeon: writting more dword to ring than expected !\n"); 68 DRM_ERROR("radeon: writting more dword to ring than expected !\n");
68 } 69 }
69#endif 70#endif
70 rdev->cp.ring[rdev->cp.wptr++] = v; 71 ring->ring[ring->wptr++] = v;
71 rdev->cp.wptr &= rdev->cp.ptr_mask; 72 ring->wptr &= ring->ptr_mask;
72 rdev->cp.count_dw--; 73 ring->count_dw--;
73 rdev->cp.ring_free_dw--; 74 ring->ring_free_dw--;
74} 75}
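The post-increment plus mask is what wraps the write pointer: ptr_mask is ring_size/4 - 1 (set in radeon_ring_init further down), so for an assumed 16-dword ring:

    /* ptr_mask = 15: writing at wptr == 15 stores to ring[15],
     * wptr++ gives 16, and 16 & 15 == 0 wraps back to the start. */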
75 76
76void radeon_ib_bogus_cleanup(struct radeon_device *rdev) 77/*
77{ 78 * IB.
78 struct radeon_ib *ib, *n; 79 */
79 80bool radeon_ib_try_free(struct radeon_device *rdev, struct radeon_ib *ib)
80 list_for_each_entry_safe(ib, n, &rdev->ib_pool.bogus_ib, list) {
81 list_del(&ib->list);
82 vfree(ib->ptr);
83 kfree(ib);
84 }
85}
86
87void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib)
88{ 81{
89 struct radeon_ib *bib; 82 bool done = false;
90 83
91 bib = kmalloc(sizeof(*bib), GFP_KERNEL); 84 /* only free an ib which has been emitted */
92 if (bib == NULL) 85 if (ib->fence && ib->fence->emitted) {
93 return; 86 if (radeon_fence_signaled(ib->fence)) {
94 bib->ptr = vmalloc(ib->length_dw * 4); 87 radeon_fence_unref(&ib->fence);
95 if (bib->ptr == NULL) { 88 radeon_sa_bo_free(rdev, &ib->sa_bo);
96 kfree(bib); 89 done = true;
97 return; 90 }
98 } 91 }
99 memcpy(bib->ptr, ib->ptr, ib->length_dw * 4); 92 return done;
100 bib->length_dw = ib->length_dw;
101 mutex_lock(&rdev->ib_pool.mutex);
102 list_add_tail(&bib->list, &rdev->ib_pool.bogus_ib);
103 mutex_unlock(&rdev->ib_pool.mutex);
104} 93}
105 94
106/* 95int radeon_ib_get(struct radeon_device *rdev, int ring,
107 * IB. 96 struct radeon_ib **ib, unsigned size)
108 */
109int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
110{ 97{
111 struct radeon_fence *fence; 98 struct radeon_fence *fence;
112 struct radeon_ib *nib; 99 unsigned cretry = 0;
113 int r = 0, i, c; 100 int r = 0, i, idx;
114 101
115 *ib = NULL; 102 *ib = NULL;
116 r = radeon_fence_create(rdev, &fence); 103 /* align size on 256 bytes */
104 size = ALIGN(size, 256);
105
106 r = radeon_fence_create(rdev, &fence, ring);
117 if (r) { 107 if (r) {
118 dev_err(rdev->dev, "failed to create fence for new IB\n"); 108 dev_err(rdev->dev, "failed to create fence for new IB\n");
119 return r; 109 return r;
120 } 110 }
111
121 mutex_lock(&rdev->ib_pool.mutex); 112 mutex_lock(&rdev->ib_pool.mutex);
122 for (i = rdev->ib_pool.head_id, c = 0, nib = NULL; c < RADEON_IB_POOL_SIZE; c++, i++) { 113 idx = rdev->ib_pool.head_id;
123 i &= (RADEON_IB_POOL_SIZE - 1); 114retry:
124 if (rdev->ib_pool.ibs[i].free) { 115 if (cretry > 5) {
125 nib = &rdev->ib_pool.ibs[i]; 116 dev_err(rdev->dev, "failed to get an ib after 5 retries\n");
126 break;
127 }
128 }
129 if (nib == NULL) {
130 /* This should never happen, it means we allocated all
131 * IB and haven't scheduled one yet, return EBUSY to
132 * userspace hoping that on ioctl recall we get better
133 * luck
134 */
135 dev_err(rdev->dev, "no free indirect buffer !\n");
136 mutex_unlock(&rdev->ib_pool.mutex); 117 mutex_unlock(&rdev->ib_pool.mutex);
137 radeon_fence_unref(&fence); 118 radeon_fence_unref(&fence);
138 return -EBUSY; 119 return -ENOMEM;
139 } 120 }
140 rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1); 121 cretry++;
141 nib->free = false; 122 for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
142 if (nib->fence) { 123 radeon_ib_try_free(rdev, &rdev->ib_pool.ibs[idx]);
143 mutex_unlock(&rdev->ib_pool.mutex); 124 if (rdev->ib_pool.ibs[idx].fence == NULL) {
144 r = radeon_fence_wait(nib->fence, false); 125 r = radeon_sa_bo_new(rdev, &rdev->ib_pool.sa_manager,
145 if (r) { 126 &rdev->ib_pool.ibs[idx].sa_bo,
146 dev_err(rdev->dev, "error waiting fence of IB(%u:0x%016lX:%u)\n", 127 size, 256);
147 nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw); 128 if (!r) {
148 mutex_lock(&rdev->ib_pool.mutex); 129 *ib = &rdev->ib_pool.ibs[idx];
149 nib->free = true; 130 (*ib)->ptr = rdev->ib_pool.sa_manager.cpu_ptr;
150 mutex_unlock(&rdev->ib_pool.mutex); 131 (*ib)->ptr += ((*ib)->sa_bo.offset >> 2);
151 radeon_fence_unref(&fence); 132 (*ib)->gpu_addr = rdev->ib_pool.sa_manager.gpu_addr;
152 return r; 133 (*ib)->gpu_addr += (*ib)->sa_bo.offset;
134 (*ib)->fence = fence;
135 (*ib)->vm_id = 0;
136 /* ibs are most likely to be allocated in a ring fashion,
137 * so rdev->ib_pool.head_id should be the id of the
138 * oldest ib
139 */
140 rdev->ib_pool.head_id = (1 + idx);
141 rdev->ib_pool.head_id &= (RADEON_IB_POOL_SIZE - 1);
142 mutex_unlock(&rdev->ib_pool.mutex);
143 return 0;
144 }
153 } 145 }
154 mutex_lock(&rdev->ib_pool.mutex); 146 idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
147 }
148 /* this should be a rare event, i.e. all ibs are scheduled but
149 * none has signaled yet. */
150 for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
151 if (rdev->ib_pool.ibs[idx].fence && rdev->ib_pool.ibs[idx].fence->emitted) {
152 r = radeon_fence_wait(rdev->ib_pool.ibs[idx].fence, false);
153 if (!r) {
154 goto retry;
155 }
156 /* an error happened */
157 break;
158 }
159 idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
155 } 160 }
156 radeon_fence_unref(&nib->fence);
157 nib->fence = fence;
158 nib->length_dw = 0;
159 mutex_unlock(&rdev->ib_pool.mutex); 161 mutex_unlock(&rdev->ib_pool.mutex);
160 *ib = nib; 162 radeon_fence_unref(&fence);
161 return 0; 163 return r;
162} 164}
163 165
164void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib) 166void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
@@ -169,247 +171,255 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
169 if (tmp == NULL) { 171 if (tmp == NULL) {
170 return; 172 return;
171 } 173 }
172 if (!tmp->fence->emited)
173 radeon_fence_unref(&tmp->fence);
174 mutex_lock(&rdev->ib_pool.mutex); 174 mutex_lock(&rdev->ib_pool.mutex);
175 tmp->free = true; 175 if (tmp->fence && !tmp->fence->emitted) {
176 radeon_sa_bo_free(rdev, &tmp->sa_bo);
177 radeon_fence_unref(&tmp->fence);
178 }
176 mutex_unlock(&rdev->ib_pool.mutex); 179 mutex_unlock(&rdev->ib_pool.mutex);
177} 180}
178 181
179int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib) 182int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
180{ 183{
184 struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
181 int r = 0; 185 int r = 0;
182 186
183 if (!ib->length_dw || !rdev->cp.ready) { 187 if (!ib->length_dw || !ring->ready) {
184 /* TODO: Nothing in the ib to report. */ 188 /* TODO: Nothing in the ib to report. */
185 DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx); 189 DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
186 return -EINVAL; 190 return -EINVAL;
187 } 191 }
188 192
189 /* 64 dwords should be enough for fence too */ 193 /* 64 dwords should be enough for fence too */
190 r = radeon_ring_lock(rdev, 64); 194 r = radeon_ring_lock(rdev, ring, 64);
191 if (r) { 195 if (r) {
192 DRM_ERROR("radeon: scheduling IB failed (%d).\n", r); 196 DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
193 return r; 197 return r;
194 } 198 }
195 radeon_ring_ib_execute(rdev, ib); 199 radeon_ring_ib_execute(rdev, ib->fence->ring, ib);
196 radeon_fence_emit(rdev, ib->fence); 200 radeon_fence_emit(rdev, ib->fence);
197 mutex_lock(&rdev->ib_pool.mutex); 201 radeon_ring_unlock_commit(rdev, ring);
198 /* once scheduled IB is considered free and protected by the fence */
199 ib->free = true;
200 mutex_unlock(&rdev->ib_pool.mutex);
201 radeon_ring_unlock_commit(rdev);
202 return 0; 202 return 0;
203} 203}
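Together with radeon_ib_get() and radeon_ib_free() above, this completes the IB round trip; a sketch of the assumed caller pattern (error paths elided):

    struct radeon_ib *ib;

    radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, 64 * 4);
    /* ... write ib->ptr[0..n-1], set ib->length_dw = n ... */
    radeon_ib_schedule(rdev, ib);   /* emits the IB and its fence        */
    radeon_ib_free(rdev, &ib);      /* slot recycles once the fence signals */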
204 204
205int radeon_ib_pool_init(struct radeon_device *rdev) 205int radeon_ib_pool_init(struct radeon_device *rdev)
206{ 206{
207 void *ptr; 207 int i, r;
208 uint64_t gpu_addr;
209 int i;
210 int r = 0;
211 208
212 if (rdev->ib_pool.robj) 209 mutex_lock(&rdev->ib_pool.mutex);
210 if (rdev->ib_pool.ready) {
211 mutex_unlock(&rdev->ib_pool.mutex);
213 return 0; 212 return 0;
214 INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib);
215 /* Allocate 1M object buffer */
216 r = radeon_bo_create(rdev, RADEON_IB_POOL_SIZE*64*1024,
217 PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT,
218 &rdev->ib_pool.robj);
219 if (r) {
220 DRM_ERROR("radeon: failed to ib pool (%d).\n", r);
221 return r;
222 } 213 }
223 r = radeon_bo_reserve(rdev->ib_pool.robj, false); 214
224 if (unlikely(r != 0)) 215 r = radeon_sa_bo_manager_init(rdev, &rdev->ib_pool.sa_manager,
225 return r; 216 RADEON_IB_POOL_SIZE*64*1024,
226 r = radeon_bo_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr); 217 RADEON_GEM_DOMAIN_GTT);
227 if (r) {
228 radeon_bo_unreserve(rdev->ib_pool.robj);
229 DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r);
230 return r;
231 }
232 r = radeon_bo_kmap(rdev->ib_pool.robj, &ptr);
233 radeon_bo_unreserve(rdev->ib_pool.robj);
234 if (r) { 218 if (r) {
235 DRM_ERROR("radeon: failed to map ib pool (%d).\n", r); 219 mutex_unlock(&rdev->ib_pool.mutex);
236 return r; 220 return r;
237 } 221 }
238 for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
239 unsigned offset;
240 222
241 offset = i * 64 * 1024; 223 for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
242 rdev->ib_pool.ibs[i].gpu_addr = gpu_addr + offset; 224 rdev->ib_pool.ibs[i].fence = NULL;
243 rdev->ib_pool.ibs[i].ptr = ptr + offset;
244 rdev->ib_pool.ibs[i].idx = i; 225 rdev->ib_pool.ibs[i].idx = i;
245 rdev->ib_pool.ibs[i].length_dw = 0; 226 rdev->ib_pool.ibs[i].length_dw = 0;
246 rdev->ib_pool.ibs[i].free = true; 227 INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].sa_bo.list);
247 } 228 }
248 rdev->ib_pool.head_id = 0; 229 rdev->ib_pool.head_id = 0;
249 rdev->ib_pool.ready = true; 230 rdev->ib_pool.ready = true;
250 DRM_INFO("radeon: ib pool ready.\n"); 231 DRM_INFO("radeon: ib pool ready.\n");
232
251 if (radeon_debugfs_ib_init(rdev)) { 233 if (radeon_debugfs_ib_init(rdev)) {
252 DRM_ERROR("Failed to register debugfs file for IB !\n"); 234 DRM_ERROR("Failed to register debugfs file for IB !\n");
253 } 235 }
254 return r; 236 if (radeon_debugfs_ring_init(rdev)) {
237 DRM_ERROR("Failed to register debugfs file for rings !\n");
238 }
239 mutex_unlock(&rdev->ib_pool.mutex);
240 return 0;
255} 241}
256 242
257void radeon_ib_pool_fini(struct radeon_device *rdev) 243void radeon_ib_pool_fini(struct radeon_device *rdev)
258{ 244{
259 int r; 245 unsigned i;
260 struct radeon_bo *robj;
261 246
262 if (!rdev->ib_pool.ready) {
263 return;
264 }
265 mutex_lock(&rdev->ib_pool.mutex); 247 mutex_lock(&rdev->ib_pool.mutex);
266 radeon_ib_bogus_cleanup(rdev); 248 if (rdev->ib_pool.ready) {
267 robj = rdev->ib_pool.robj; 249 for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
268 rdev->ib_pool.robj = NULL; 250 radeon_sa_bo_free(rdev, &rdev->ib_pool.ibs[i].sa_bo);
269 mutex_unlock(&rdev->ib_pool.mutex); 251 radeon_fence_unref(&rdev->ib_pool.ibs[i].fence);
270
271 if (robj) {
272 r = radeon_bo_reserve(robj, false);
273 if (likely(r == 0)) {
274 radeon_bo_kunmap(robj);
275 radeon_bo_unpin(robj);
276 radeon_bo_unreserve(robj);
277 } 252 }
278 radeon_bo_unref(&robj); 253 radeon_sa_bo_manager_fini(rdev, &rdev->ib_pool.sa_manager);
254 rdev->ib_pool.ready = false;
279 } 255 }
256 mutex_unlock(&rdev->ib_pool.mutex);
280} 257}
281 258
259int radeon_ib_pool_start(struct radeon_device *rdev)
260{
261 return radeon_sa_bo_manager_start(rdev, &rdev->ib_pool.sa_manager);
262}
263
264int radeon_ib_pool_suspend(struct radeon_device *rdev)
265{
266 return radeon_sa_bo_manager_suspend(rdev, &rdev->ib_pool.sa_manager);
267}
282 268
283/* 269/*
284 * Ring. 270 * Ring.
285 */ 271 */
286void radeon_ring_free_size(struct radeon_device *rdev) 272int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *ring)
287{ 273{
288 if (rdev->wb.enabled) 274 /* r1xx-r5xx only has CP ring */
289 rdev->cp.rptr = le32_to_cpu(rdev->wb.wb[RADEON_WB_CP_RPTR_OFFSET/4]); 275 if (rdev->family < CHIP_R600)
290 else { 276 return RADEON_RING_TYPE_GFX_INDEX;
291 if (rdev->family >= CHIP_R600) 277
292 rdev->cp.rptr = RREG32(R600_CP_RB_RPTR); 278 if (rdev->family >= CHIP_CAYMAN) {
293 else 279 if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX])
294 rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR); 280 return CAYMAN_RING_TYPE_CP1_INDEX;
281 else if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX])
282 return CAYMAN_RING_TYPE_CP2_INDEX;
295 } 283 }
284 return RADEON_RING_TYPE_GFX_INDEX;
285}
286
287void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
288{
289 u32 rptr;
290
291 if (rdev->wb.enabled)
292 rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
293 else
294 rptr = RREG32(ring->rptr_reg);
295 ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
296 /* This works because ring_size is a power of 2 */ 296 /* This works because ring_size is a power of 2 */
297 rdev->cp.ring_free_dw = (rdev->cp.rptr + (rdev->cp.ring_size / 4)); 297 ring->ring_free_dw = (ring->rptr + (ring->ring_size / 4));
298 rdev->cp.ring_free_dw -= rdev->cp.wptr; 298 ring->ring_free_dw -= ring->wptr;
299 rdev->cp.ring_free_dw &= rdev->cp.ptr_mask; 299 ring->ring_free_dw &= ring->ptr_mask;
300 if (!rdev->cp.ring_free_dw) { 300 if (!ring->ring_free_dw) {
301 rdev->cp.ring_free_dw = rdev->cp.ring_size / 4; 301 ring->ring_free_dw = ring->ring_size / 4;
302 } 302 }
303} 303}
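The masked arithmetic only works because ring_size is a power of two; two worked cases with assumed sizes:

    /* ring_size = 64 bytes = 16 dwords, ptr_mask = 15.
     * rptr = 2,  wptr = 10:               (2 + 16 - 10) & 15 = 8 free dwords.
     * rptr = 10, wptr = 1 (wptr wrapped): (10 + 16 - 1) & 15 = 9 free dwords.
     * A result of 0 is ambiguous (empty looks like full), hence the
     * final reset to ring_size / 4. */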
304 304
305int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw) 305
306int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
306{ 307{
307 int r; 308 int r;
308 309
309 /* Align requested size with padding so unlock_commit can 310 /* Align requested size with padding so unlock_commit can
310 * pad safely */ 311 * pad safely */
311 ndw = (ndw + rdev->cp.align_mask) & ~rdev->cp.align_mask; 312 ndw = (ndw + ring->align_mask) & ~ring->align_mask;
312 while (ndw > (rdev->cp.ring_free_dw - 1)) { 313 while (ndw > (ring->ring_free_dw - 1)) {
313 radeon_ring_free_size(rdev); 314 radeon_ring_free_size(rdev, ring);
314 if (ndw < rdev->cp.ring_free_dw) { 315 if (ndw < ring->ring_free_dw) {
315 break; 316 break;
316 } 317 }
317 r = radeon_fence_wait_next(rdev); 318 r = radeon_fence_wait_next(rdev, radeon_ring_index(rdev, ring));
318 if (r) 319 if (r)
319 return r; 320 return r;
320 } 321 }
321 rdev->cp.count_dw = ndw; 322 ring->count_dw = ndw;
322 rdev->cp.wptr_old = rdev->cp.wptr; 323 ring->wptr_old = ring->wptr;
323 return 0; 324 return 0;
324} 325}
325 326
326int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw) 327int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
327{ 328{
328 int r; 329 int r;
329 330
330 mutex_lock(&rdev->cp.mutex); 331 mutex_lock(&ring->mutex);
331 r = radeon_ring_alloc(rdev, ndw); 332 r = radeon_ring_alloc(rdev, ring, ndw);
332 if (r) { 333 if (r) {
333 mutex_unlock(&rdev->cp.mutex); 334 mutex_unlock(&ring->mutex);
334 return r; 335 return r;
335 } 336 }
336 return 0; 337 return 0;
337} 338}
338 339
339void radeon_ring_commit(struct radeon_device *rdev) 340void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
340{ 341{
341 unsigned count_dw_pad; 342 unsigned count_dw_pad;
342 unsigned i; 343 unsigned i;
343 344
344 /* We pad to match fetch size */ 345 /* We pad to match fetch size */
345 count_dw_pad = (rdev->cp.align_mask + 1) - 346 count_dw_pad = (ring->align_mask + 1) -
346 (rdev->cp.wptr & rdev->cp.align_mask); 347 (ring->wptr & ring->align_mask);
347 for (i = 0; i < count_dw_pad; i++) { 348 for (i = 0; i < count_dw_pad; i++) {
348 radeon_ring_write(rdev, 2 << 30); 349 radeon_ring_write(ring, ring->nop);
349 } 350 }
350 DRM_MEMORYBARRIER(); 351 DRM_MEMORYBARRIER();
351 radeon_cp_commit(rdev); 352 WREG32(ring->wptr_reg, (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask);
353 (void)RREG32(ring->wptr_reg);
352} 354}
353 355
354void radeon_ring_unlock_commit(struct radeon_device *rdev) 356void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring)
355{ 357{
356 radeon_ring_commit(rdev); 358 radeon_ring_commit(rdev, ring);
357 mutex_unlock(&rdev->cp.mutex); 359 mutex_unlock(&ring->mutex);
358} 360}
359 361
360void radeon_ring_unlock_undo(struct radeon_device *rdev) 362void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
361{ 363{
362 rdev->cp.wptr = rdev->cp.wptr_old; 364 ring->wptr = ring->wptr_old;
363 mutex_unlock(&rdev->cp.mutex); 365 mutex_unlock(&ring->mutex);
364} 366}
365 367
366int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size) 368int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
369 unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
370 u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop)
367{ 371{
368 int r; 372 int r;
369 373
370 rdev->cp.ring_size = ring_size; 374 ring->ring_size = ring_size;
375 ring->rptr_offs = rptr_offs;
376 ring->rptr_reg = rptr_reg;
377 ring->wptr_reg = wptr_reg;
378 ring->ptr_reg_shift = ptr_reg_shift;
379 ring->ptr_reg_mask = ptr_reg_mask;
380 ring->nop = nop;
371 /* Allocate ring buffer */ 381 /* Allocate ring buffer */
372 if (rdev->cp.ring_obj == NULL) { 382 if (ring->ring_obj == NULL) {
373 r = radeon_bo_create(rdev, rdev->cp.ring_size, PAGE_SIZE, true, 383 r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
374 RADEON_GEM_DOMAIN_GTT, 384 RADEON_GEM_DOMAIN_GTT,
375 &rdev->cp.ring_obj); 385 &ring->ring_obj);
376 if (r) { 386 if (r) {
377 dev_err(rdev->dev, "(%d) ring create failed\n", r); 387 dev_err(rdev->dev, "(%d) ring create failed\n", r);
378 return r; 388 return r;
379 } 389 }
380 r = radeon_bo_reserve(rdev->cp.ring_obj, false); 390 r = radeon_bo_reserve(ring->ring_obj, false);
381 if (unlikely(r != 0)) 391 if (unlikely(r != 0))
382 return r; 392 return r;
383 r = radeon_bo_pin(rdev->cp.ring_obj, RADEON_GEM_DOMAIN_GTT, 393 r = radeon_bo_pin(ring->ring_obj, RADEON_GEM_DOMAIN_GTT,
384 &rdev->cp.gpu_addr); 394 &ring->gpu_addr);
385 if (r) { 395 if (r) {
386 radeon_bo_unreserve(rdev->cp.ring_obj); 396 radeon_bo_unreserve(ring->ring_obj);
387 dev_err(rdev->dev, "(%d) ring pin failed\n", r); 397 dev_err(rdev->dev, "(%d) ring pin failed\n", r);
388 return r; 398 return r;
389 } 399 }
390 r = radeon_bo_kmap(rdev->cp.ring_obj, 400 r = radeon_bo_kmap(ring->ring_obj,
391 (void **)&rdev->cp.ring); 401 (void **)&ring->ring);
392 radeon_bo_unreserve(rdev->cp.ring_obj); 402 radeon_bo_unreserve(ring->ring_obj);
393 if (r) { 403 if (r) {
394 dev_err(rdev->dev, "(%d) ring map failed\n", r); 404 dev_err(rdev->dev, "(%d) ring map failed\n", r);
395 return r; 405 return r;
396 } 406 }
397 } 407 }
398 rdev->cp.ptr_mask = (rdev->cp.ring_size / 4) - 1; 408 ring->ptr_mask = (ring->ring_size / 4) - 1;
399 rdev->cp.ring_free_dw = rdev->cp.ring_size / 4; 409 ring->ring_free_dw = ring->ring_size / 4;
400 return 0; 410 return 0;
401} 411}
402 412
403void radeon_ring_fini(struct radeon_device *rdev) 413void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring)
404{ 414{
405 int r; 415 int r;
406 struct radeon_bo *ring_obj; 416 struct radeon_bo *ring_obj;
407 417
408 mutex_lock(&rdev->cp.mutex); 418 mutex_lock(&ring->mutex);
409 ring_obj = rdev->cp.ring_obj; 419 ring_obj = ring->ring_obj;
410 rdev->cp.ring = NULL; 420 ring->ring = NULL;
411 rdev->cp.ring_obj = NULL; 421 ring->ring_obj = NULL;
412 mutex_unlock(&rdev->cp.mutex); 422 mutex_unlock(&ring->mutex);
413 423
414 if (ring_obj) { 424 if (ring_obj) {
415 r = radeon_bo_reserve(ring_obj, false); 425 r = radeon_bo_reserve(ring_obj, false);
@@ -422,72 +432,83 @@ void radeon_ring_fini(struct radeon_device *rdev)
422 } 432 }
423} 433}
424 434
425
426/* 435/*
427 * Debugfs info 436 * Debugfs info
428 */ 437 */
429#if defined(CONFIG_DEBUG_FS) 438#if defined(CONFIG_DEBUG_FS)
430static int radeon_debugfs_ib_info(struct seq_file *m, void *data) 439
440static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
431{ 441{
432 struct drm_info_node *node = (struct drm_info_node *) m->private; 442 struct drm_info_node *node = (struct drm_info_node *) m->private;
433 struct radeon_ib *ib = node->info_ent->data; 443 struct drm_device *dev = node->minor->dev;
434 unsigned i; 444 struct radeon_device *rdev = dev->dev_private;
435 445 int ridx = *(int*)node->info_ent->data;
436 if (ib == NULL) { 446 struct radeon_ring *ring = &rdev->ring[ridx];
437 return 0; 447 unsigned count, i, j;
438 } 448
439 seq_printf(m, "IB %04u\n", ib->idx); 449 radeon_ring_free_size(rdev, ring);
440 seq_printf(m, "IB fence %p\n", ib->fence); 450 count = (ring->ring_size / 4) - ring->ring_free_dw;
441 seq_printf(m, "IB size %05u dwords\n", ib->length_dw); 451 seq_printf(m, "wptr(0x%04x): 0x%08x\n", ring->wptr_reg, RREG32(ring->wptr_reg));
442 for (i = 0; i < ib->length_dw; i++) { 452 seq_printf(m, "rptr(0x%04x): 0x%08x\n", ring->rptr_reg, RREG32(ring->rptr_reg));
443 seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]); 453 seq_printf(m, "driver's copy of the wptr: 0x%08x\n", ring->wptr);
454 seq_printf(m, "driver's copy of the rptr: 0x%08x\n", ring->rptr);
455 seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
456 seq_printf(m, "%u dwords in ring\n", count);
457 i = ring->rptr;
458 for (j = 0; j <= count; j++) {
459 seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
460 i = (i + 1) & ring->ptr_mask;
444 } 461 }
445 return 0; 462 return 0;
446} 463}
447 464
448static int radeon_debugfs_ib_bogus_info(struct seq_file *m, void *data) 465static int radeon_ring_type_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
466static int cayman_ring_type_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
467static int cayman_ring_type_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;
468
469static struct drm_info_list radeon_debugfs_ring_info_list[] = {
470 {"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_ring_type_gfx_index},
471 {"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp1_index},
472 {"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp2_index},
473};
474
475static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
449{ 476{
450 struct drm_info_node *node = (struct drm_info_node *) m->private; 477 struct drm_info_node *node = (struct drm_info_node *) m->private;
451 struct radeon_device *rdev = node->info_ent->data; 478 struct radeon_ib *ib = node->info_ent->data;
452 struct radeon_ib *ib;
453 unsigned i; 479 unsigned i;
454 480
455 mutex_lock(&rdev->ib_pool.mutex); 481 if (ib == NULL) {
456 if (list_empty(&rdev->ib_pool.bogus_ib)) {
457 mutex_unlock(&rdev->ib_pool.mutex);
458 seq_printf(m, "no bogus IB recorded\n");
459 return 0; 482 return 0;
460 } 483 }
461 ib = list_first_entry(&rdev->ib_pool.bogus_ib, struct radeon_ib, list); 484 seq_printf(m, "IB %04u\n", ib->idx);
462 list_del_init(&ib->list); 485 seq_printf(m, "IB fence %p\n", ib->fence);
463 mutex_unlock(&rdev->ib_pool.mutex);
464 seq_printf(m, "IB size %05u dwords\n", ib->length_dw); 486 seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
465 for (i = 0; i < ib->length_dw; i++) { 487 for (i = 0; i < ib->length_dw; i++) {
466 seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]); 488 seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
467 } 489 }
468 vfree(ib->ptr);
469 kfree(ib);
470 return 0; 490 return 0;
471} 491}
472 492
473static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE]; 493static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
474static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32]; 494static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];
495#endif
475 496
476static struct drm_info_list radeon_debugfs_ib_bogus_info_list[] = { 497int radeon_debugfs_ring_init(struct radeon_device *rdev)
477 {"radeon_ib_bogus", radeon_debugfs_ib_bogus_info, 0, NULL}, 498{
478}; 499#if defined(CONFIG_DEBUG_FS)
500 return radeon_debugfs_add_files(rdev, radeon_debugfs_ring_info_list,
501 ARRAY_SIZE(radeon_debugfs_ring_info_list));
502#else
503 return 0;
479#endif 504#endif
505}
480 506
481int radeon_debugfs_ib_init(struct radeon_device *rdev) 507int radeon_debugfs_ib_init(struct radeon_device *rdev)
482{ 508{
483#if defined(CONFIG_DEBUG_FS) 509#if defined(CONFIG_DEBUG_FS)
484 unsigned i; 510 unsigned i;
485 int r;
486 511
487 radeon_debugfs_ib_bogus_info_list[0].data = rdev;
488 r = radeon_debugfs_add_files(rdev, radeon_debugfs_ib_bogus_info_list, 1);
489 if (r)
490 return r;
491 for (i = 0; i < RADEON_IB_POOL_SIZE; i++) { 512 for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
492 sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i); 513 sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
493 radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i]; 514 radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
new file mode 100644
index 00000000000..4cce47e7dc0
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -0,0 +1,189 @@
1/*
2 * Copyright 2011 Red Hat Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
20 *
21 * The above copyright notice and this permission notice (including the
22 * next paragraph) shall be included in all copies or substantial portions
23 * of the Software.
24 *
25 */
26/*
27 * Authors:
28 * Jerome Glisse <glisse@freedesktop.org>
29 */
30#include "drmP.h"
31#include "drm.h"
32#include "radeon.h"
33
34int radeon_sa_bo_manager_init(struct radeon_device *rdev,
35 struct radeon_sa_manager *sa_manager,
36 unsigned size, u32 domain)
37{
38 int r;
39
40 sa_manager->bo = NULL;
41 sa_manager->size = size;
42 sa_manager->domain = domain;
43 INIT_LIST_HEAD(&sa_manager->sa_bo);
44
45 r = radeon_bo_create(rdev, size, RADEON_GPU_PAGE_SIZE, true,
46 RADEON_GEM_DOMAIN_CPU, &sa_manager->bo);
47 if (r) {
48 dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
49 return r;
50 }
51
52 return r;
53}
54
55void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
56 struct radeon_sa_manager *sa_manager)
57{
58 struct radeon_sa_bo *sa_bo, *tmp;
59
60 if (!list_empty(&sa_manager->sa_bo)) {
61 dev_err(rdev->dev, "sa_manager is not empty, clearing anyway\n");
62 }
63 list_for_each_entry_safe(sa_bo, tmp, &sa_manager->sa_bo, list) {
64 list_del_init(&sa_bo->list);
65 }
66 radeon_bo_unref(&sa_manager->bo);
67 sa_manager->size = 0;
68}
69
70int radeon_sa_bo_manager_start(struct radeon_device *rdev,
71 struct radeon_sa_manager *sa_manager)
72{
73 int r;
74
75 if (sa_manager->bo == NULL) {
76 dev_err(rdev->dev, "no bo for sa manager\n");
77 return -EINVAL;
78 }
79
80 /* map the buffer */
81 r = radeon_bo_reserve(sa_manager->bo, false);
82 if (r) {
83 dev_err(rdev->dev, "(%d) failed to reserve manager bo\n", r);
84 return r;
85 }
86 r = radeon_bo_pin(sa_manager->bo, sa_manager->domain, &sa_manager->gpu_addr);
87 if (r) {
88 radeon_bo_unreserve(sa_manager->bo);
89 dev_err(rdev->dev, "(%d) failed to pin manager bo\n", r);
90 return r;
91 }
92 r = radeon_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
93 radeon_bo_unreserve(sa_manager->bo);
94 return r;
95}
96
97int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
98 struct radeon_sa_manager *sa_manager)
99{
100 int r;
101
102 if (sa_manager->bo == NULL) {
103 dev_err(rdev->dev, "no bo for sa manager\n");
104 return -EINVAL;
105 }
106
107 r = radeon_bo_reserve(sa_manager->bo, false);
108 if (!r) {
109 radeon_bo_kunmap(sa_manager->bo);
110 radeon_bo_unpin(sa_manager->bo);
111 radeon_bo_unreserve(sa_manager->bo);
112 }
113 return r;
114}
115
116/*
117 * The principle is simple: we keep a list of sub allocations in
118 * offset order (first entry has offset == 0, last entry has the
119 * highest offset).
120 *
121 * When allocating a new object we first check whether there is room
122 * at the end: total_size - (last_object_offset + last_object_size)
123 * >= alloc_size. If so we allocate the new object there.
124 *
125 * When there is not enough room at the end, we start waiting on each
126 * sub object until object_offset + object_size >= alloc_size; that
127 * object then becomes the sub object we return (worked example below).
128 *
129 * Alignment can't be bigger than page size.
130 */
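To make the scan in radeon_sa_bo_new() concrete, a worked example with illustrative numbers:

    /* Manager size 1024, live objects [0,256) and [512,640).
     * radeon_sa_bo_new(size = 256, align = 256):
     *   after [0,256): offset = 256 (already aligned);
     *   the gap before [512,640) is 512 - 256 = 256 >= size, so the
     *   new object lands at offset 256, linked after the first entry.
     * Had that hole been too small, the tail check would round the end
     * offset 640 up to 768 for alignment and test 1024 - 768 >= 256,
     * which also just fits. */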
131int radeon_sa_bo_new(struct radeon_device *rdev,
132 struct radeon_sa_manager *sa_manager,
133 struct radeon_sa_bo *sa_bo,
134 unsigned size, unsigned align)
135{
136 struct radeon_sa_bo *tmp;
137 struct list_head *head;
138 unsigned offset = 0, wasted = 0;
139
140 BUG_ON(align > RADEON_GPU_PAGE_SIZE);
141 BUG_ON(size > sa_manager->size);
142
143 /* nothing allocated yet? */
144 head = sa_manager->sa_bo.prev;
145 if (list_empty(&sa_manager->sa_bo)) {
146 goto out;
147 }
148
149 /* look for a hole big enough */
150 offset = 0;
151 list_for_each_entry(tmp, &sa_manager->sa_bo, list) {
152 /* room before this object ? */
153 if ((tmp->offset - offset) >= size) {
154 head = tmp->list.prev;
155 goto out;
156 }
157 offset = tmp->offset + tmp->size;
158 wasted = offset % align;
159 if (wasted) {
160 wasted = align - wasted;
161 }
162 offset += wasted;
163 }
164 /* room at the end ? */
165 head = sa_manager->sa_bo.prev;
166 tmp = list_entry(head, struct radeon_sa_bo, list);
167 offset = tmp->offset + tmp->size;
168 wasted = offset % align;
169 if (wasted) {
170 wasted = align - wasted;
171 }
172 offset += wasted;
173 if ((sa_manager->size - offset) < size) {
174 /* failed to find something big enough */
175 return -ENOMEM;
176 }
177
178out:
179 sa_bo->manager = sa_manager;
180 sa_bo->offset = offset;
181 sa_bo->size = size;
182 list_add(&sa_bo->list, head);
183 return 0;
184}
185
186void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo *sa_bo)
187{
188 list_del_init(&sa_bo->list);
189}
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
new file mode 100644
index 00000000000..61dd4e3c920
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -0,0 +1,178 @@
1/*
2 * Copyright 2011 Christian König.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
20 *
21 * The above copyright notice and this permission notice (including the
22 * next paragraph) shall be included in all copies or substantial portions
23 * of the Software.
24 *
25 */
26/*
27 * Authors:
28 * Christian König <deathsimple@vodafone.de>
29 */
30#include "drmP.h"
31#include "drm.h"
32#include "radeon.h"
33
34static int radeon_semaphore_add_bo(struct radeon_device *rdev)
35{
36 struct radeon_semaphore_bo *bo;
37 unsigned long irq_flags;
38 uint64_t gpu_addr;
39 uint32_t *cpu_ptr;
40 int r, i;
41
42
43 bo = kmalloc(sizeof(struct radeon_semaphore_bo), GFP_KERNEL);
44 if (bo == NULL) {
45 return -ENOMEM;
46 }
47 INIT_LIST_HEAD(&bo->free);
48 INIT_LIST_HEAD(&bo->list);
49 bo->nused = 0;
50
51 r = radeon_ib_get(rdev, 0, &bo->ib, RADEON_SEMAPHORE_BO_SIZE);
52 if (r) {
53 dev_err(rdev->dev, "failed to get a bo after 5 retry\n");
54 kfree(bo);
55 return r;
56 }
57 gpu_addr = rdev->ib_pool.sa_manager.gpu_addr;
58 gpu_addr += bo->ib->sa_bo.offset;
59 cpu_ptr = rdev->ib_pool.sa_manager.cpu_ptr;
60 cpu_ptr += (bo->ib->sa_bo.offset >> 2);
61 for (i = 0; i < (RADEON_SEMAPHORE_BO_SIZE/8); i++) {
62 bo->semaphores[i].gpu_addr = gpu_addr;
63 bo->semaphores[i].cpu_ptr = cpu_ptr;
64 bo->semaphores[i].bo = bo;
65 list_add_tail(&bo->semaphores[i].list, &bo->free);
66 gpu_addr += 8;
67 cpu_ptr += 2;
68 }
69 write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
70 list_add_tail(&bo->list, &rdev->semaphore_drv.bo);
71 write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
72 return 0;
73}
74
75static void radeon_semaphore_del_bo_locked(struct radeon_device *rdev,
76 struct radeon_semaphore_bo *bo)
77{
78 radeon_sa_bo_free(rdev, &bo->ib->sa_bo);
79 radeon_fence_unref(&bo->ib->fence);
80 list_del(&bo->list);
81 kfree(bo);
82}
83
84void radeon_semaphore_shrink_locked(struct radeon_device *rdev)
85{
86 struct radeon_semaphore_bo *bo, *n;
87
88 if (list_empty(&rdev->semaphore_drv.bo)) {
89 return;
90 }
91 /* only shrink if first bo has free semaphore */
92 bo = list_first_entry(&rdev->semaphore_drv.bo, struct radeon_semaphore_bo, list);
93 if (list_empty(&bo->free)) {
94 return;
95 }
96 list_for_each_entry_safe_continue(bo, n, &rdev->semaphore_drv.bo, list) {
97 if (bo->nused)
98 continue;
99 radeon_semaphore_del_bo_locked(rdev, bo);
100 }
101}
102
103int radeon_semaphore_create(struct radeon_device *rdev,
104 struct radeon_semaphore **semaphore)
105{
106 struct radeon_semaphore_bo *bo;
107 unsigned long irq_flags;
108 bool do_retry = true;
109 int r;
110
111retry:
112 *semaphore = NULL;
113 write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
114 list_for_each_entry(bo, &rdev->semaphore_drv.bo, list) {
115 if (list_empty(&bo->free))
116 continue;
117 *semaphore = list_first_entry(&bo->free, struct radeon_semaphore, list);
118 (*semaphore)->cpu_ptr[0] = 0;
119 (*semaphore)->cpu_ptr[1] = 0;
120 list_del(&(*semaphore)->list);
121 bo->nused++;
122 break;
123 }
124 write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
125
126 if (*semaphore == NULL) {
127 if (do_retry) {
128 do_retry = false;
129 r = radeon_semaphore_add_bo(rdev);
130 if (r)
131 return r;
132 goto retry;
133 }
134 return -ENOMEM;
135 }
136
137 return 0;
138}
139
140void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
141 struct radeon_semaphore *semaphore)
142{
143 radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, false);
144}
145
146void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
147 struct radeon_semaphore *semaphore)
148{
149 radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, true);
150}
151
152void radeon_semaphore_free(struct radeon_device *rdev,
153 struct radeon_semaphore *semaphore)
154{
155 unsigned long irq_flags;
156
157 write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
158 semaphore->bo->nused--;
159 list_add_tail(&semaphore->list, &semaphore->bo->free);
160 radeon_semaphore_shrink_locked(rdev);
161 write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
162}
163
164void radeon_semaphore_driver_fini(struct radeon_device *rdev)
165{
166 struct radeon_semaphore_bo *bo, *n;
167 unsigned long irq_flags;
168
169 write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
170 /* forcibly free everything */
171 list_for_each_entry_safe(bo, n, &rdev->semaphore_drv.bo, list) {
172 if (!list_empty(&bo->free)) {
173 dev_err(rdev->dev, "still in use semaphore\n");
174 }
175 radeon_semaphore_del_bo_locked(rdev, bo);
176 }
177 write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
178}
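The create/emit/free calls above are all a caller needs to order two rings; a sketch of the assumed pattern (ring indices ridxA and ridxB are placeholders, and radeon_test.c plus radeon_ttm.c below follow the same shape):

    struct radeon_semaphore *sem;

    if (radeon_semaphore_create(rdev, &sem))
            return;

    radeon_ring_lock(rdev, &rdev->ring[ridxB], 8);
    radeon_semaphore_emit_wait(rdev, ridxB, sem);    /* B stalls here...   */
    radeon_ring_unlock_commit(rdev, &rdev->ring[ridxB]);

    radeon_ring_lock(rdev, &rdev->ring[ridxA], 8);
    radeon_semaphore_emit_signal(rdev, ridxA, sem);  /* ...until A signals */
    radeon_ring_unlock_commit(rdev, &rdev->ring[ridxA]);

    /* free only once fences show both rings have passed the semaphore */
    radeon_semaphore_free(rdev, sem);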
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 602fa3541c4..dc5dcf483aa 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -42,7 +42,9 @@ void radeon_test_moves(struct radeon_device *rdev)
42 /* Number of tests = 42 /* Number of tests =
43 * (Total GTT - IB pool - writeback page - ring buffers) / test size 43 * (Total GTT - IB pool - writeback page - ring buffers) / test size
44 */ 44 */
45 n = rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - rdev->cp.ring_size; 45 n = rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024;
46 for (i = 0; i < RADEON_NUM_RINGS; ++i)
47 n -= rdev->ring[i].ring_size;
46 if (rdev->wb.wb_obj) 48 if (rdev->wb.wb_obj)
47 n -= RADEON_GPU_PAGE_SIZE; 49 n -= RADEON_GPU_PAGE_SIZE;
48 if (rdev->ih.ring_obj) 50 if (rdev->ih.ring_obj)
@@ -104,7 +106,7 @@ void radeon_test_moves(struct radeon_device *rdev)
104 106
105 radeon_bo_kunmap(gtt_obj[i]); 107 radeon_bo_kunmap(gtt_obj[i]);
106 108
107 r = radeon_fence_create(rdev, &fence); 109 r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
108 if (r) { 110 if (r) {
109 DRM_ERROR("Failed to create GTT->VRAM fence %d\n", i); 111 DRM_ERROR("Failed to create GTT->VRAM fence %d\n", i);
110 goto out_cleanup; 112 goto out_cleanup;
@@ -153,7 +155,7 @@ void radeon_test_moves(struct radeon_device *rdev)
153 155
154 radeon_bo_kunmap(vram_obj); 156 radeon_bo_kunmap(vram_obj);
155 157
156 r = radeon_fence_create(rdev, &fence); 158 r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
157 if (r) { 159 if (r) {
158 DRM_ERROR("Failed to create VRAM->GTT fence %d\n", i); 160 DRM_ERROR("Failed to create VRAM->GTT fence %d\n", i);
159 goto out_cleanup; 161 goto out_cleanup;
@@ -232,3 +234,264 @@ out_cleanup:
232 printk(KERN_WARNING "Error while testing BO move.\n"); 234 printk(KERN_WARNING "Error while testing BO move.\n");
233 } 235 }
234} 236}
237
238void radeon_test_ring_sync(struct radeon_device *rdev,
239 struct radeon_ring *ringA,
240 struct radeon_ring *ringB)
241{
242 struct radeon_fence *fence1 = NULL, *fence2 = NULL;
243 struct radeon_semaphore *semaphore = NULL;
244 int ridxA = radeon_ring_index(rdev, ringA);
245 int ridxB = radeon_ring_index(rdev, ringB);
246 int r;
247
248 r = radeon_fence_create(rdev, &fence1, ridxA);
249 if (r) {
250 DRM_ERROR("Failed to create sync fence 1\n");
251 goto out_cleanup;
252 }
253 r = radeon_fence_create(rdev, &fence2, ridxA);
254 if (r) {
255 DRM_ERROR("Failed to create sync fence 2\n");
256 goto out_cleanup;
257 }
258
259 r = radeon_semaphore_create(rdev, &semaphore);
260 if (r) {
261 DRM_ERROR("Failed to create semaphore\n");
262 goto out_cleanup;
263 }
264
265 r = radeon_ring_lock(rdev, ringA, 64);
266 if (r) {
267 DRM_ERROR("Failed to lock ring A %d\n", ridxA);
268 goto out_cleanup;
269 }
270 radeon_semaphore_emit_wait(rdev, ridxA, semaphore);
271 radeon_fence_emit(rdev, fence1);
272 radeon_semaphore_emit_wait(rdev, ridxA, semaphore);
273 radeon_fence_emit(rdev, fence2);
274 radeon_ring_unlock_commit(rdev, ringA);
275
276 mdelay(1000);
277
278 if (radeon_fence_signaled(fence1)) {
279 DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n");
280 goto out_cleanup;
281 }
282
283 r = radeon_ring_lock(rdev, ringB, 64);
284 if (r) {
285 DRM_ERROR("Failed to lock ring B %p\n", ringB);
286 goto out_cleanup;
287 }
288 radeon_semaphore_emit_signal(rdev, ridxB, semaphore);
289 radeon_ring_unlock_commit(rdev, ringB);
290
291 r = radeon_fence_wait(fence1, false);
292 if (r) {
293 DRM_ERROR("Failed to wait for sync fence 1\n");
294 goto out_cleanup;
295 }
296
297 mdelay(1000);
298
299 if (radeon_fence_signaled(fence2)) {
300 DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n");
301 goto out_cleanup;
302 }
303
304 r = radeon_ring_lock(rdev, ringB, 64);
305 if (r) {
306 DRM_ERROR("Failed to lock ring B %p\n", ringB);
307 goto out_cleanup;
308 }
309 radeon_semaphore_emit_signal(rdev, ridxB, semaphore);
310 radeon_ring_unlock_commit(rdev, ringB);
311
312 r = radeon_fence_wait(fence2, false);
313 if (r) {
314 DRM_ERROR("Failed to wait for sync fence 1\n");
315 goto out_cleanup;
316 }
317
318out_cleanup:
319 if (semaphore)
320 radeon_semaphore_free(rdev, semaphore);
321
322 if (fence1)
323 radeon_fence_unref(&fence1);
324
325 if (fence2)
326 radeon_fence_unref(&fence2);
327
328 if (r)
329 printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
330}
331
332void radeon_test_ring_sync2(struct radeon_device *rdev,
333 struct radeon_ring *ringA,
334 struct radeon_ring *ringB,
335 struct radeon_ring *ringC)
336{
337 struct radeon_fence *fenceA = NULL, *fenceB = NULL;
338 struct radeon_semaphore *semaphore = NULL;
339 int ridxA = radeon_ring_index(rdev, ringA);
340 int ridxB = radeon_ring_index(rdev, ringB);
341 int ridxC = radeon_ring_index(rdev, ringC);
342 bool sigA, sigB;
343 int i, r;
344
345 r = radeon_fence_create(rdev, &fenceA, ridxA);
346 if (r) {
347 DRM_ERROR("Failed to create sync fence 1\n");
348 goto out_cleanup;
349 }
350 r = radeon_fence_create(rdev, &fenceB, ridxB);
351 if (r) {
352 DRM_ERROR("Failed to create sync fence 2\n");
353 goto out_cleanup;
354 }
355
356 r = radeon_semaphore_create(rdev, &semaphore);
357 if (r) {
358 DRM_ERROR("Failed to create semaphore\n");
359 goto out_cleanup;
360 }
361
362 r = radeon_ring_lock(rdev, ringA, 64);
363 if (r) {
364 DRM_ERROR("Failed to lock ring A %d\n", ridxA);
365 goto out_cleanup;
366 }
367 radeon_semaphore_emit_wait(rdev, ridxA, semaphore);
368 radeon_fence_emit(rdev, fenceA);
369 radeon_ring_unlock_commit(rdev, ringA);
370
371 r = radeon_ring_lock(rdev, ringB, 64);
372 if (r) {
373 DRM_ERROR("Failed to lock ring B %d\n", ridxB);
374 goto out_cleanup;
375 }
376 radeon_semaphore_emit_wait(rdev, ridxB, semaphore);
377 radeon_fence_emit(rdev, fenceB);
378 radeon_ring_unlock_commit(rdev, ringB);
379
380 mdelay(1000);
381
382 if (radeon_fence_signaled(fenceA)) {
383 DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
384 goto out_cleanup;
385 }
386 if (radeon_fence_signaled(fenceB)) {
387 DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
388 goto out_cleanup;
389 }
390
391 r = radeon_ring_lock(rdev, ringC, 64);
392 if (r) {
393 DRM_ERROR("Failed to lock ring B %p\n", ringC);
394 goto out_cleanup;
395 }
396 radeon_semaphore_emit_signal(rdev, ridxC, semaphore);
397 radeon_ring_unlock_commit(rdev, ringC);
398
399 for (i = 0; i < 30; ++i) {
400 mdelay(100);
401 sigA = radeon_fence_signaled(fenceA);
402 sigB = radeon_fence_signaled(fenceB);
403 if (sigA || sigB)
404 break;
405 }
406
407 if (!sigA && !sigB) {
408 DRM_ERROR("Neither fence A nor B has been signaled\n");
409 goto out_cleanup;
410 } else if (sigA && sigB) {
411 DRM_ERROR("Both fence A and B has been signaled\n");
412 goto out_cleanup;
413 }
414
415 DRM_INFO("Fence %c was first signaled\n", sigA ? 'A' : 'B');
416
417 r = radeon_ring_lock(rdev, ringC, 64);
418 if (r) {
419 DRM_ERROR("Failed to lock ring B %p\n", ringC);
420 goto out_cleanup;
421 }
422 radeon_semaphore_emit_signal(rdev, ridxC, semaphore);
423 radeon_ring_unlock_commit(rdev, ringC);
424
425 mdelay(1000);
426
427 r = radeon_fence_wait(fenceA, false);
428 if (r) {
429 DRM_ERROR("Failed to wait for sync fence A\n");
430 goto out_cleanup;
431 }
432 r = radeon_fence_wait(fenceB, false);
433 if (r) {
434 DRM_ERROR("Failed to wait for sync fence B\n");
435 goto out_cleanup;
436 }
437
438out_cleanup:
439 if (semaphore)
440 radeon_semaphore_free(rdev, semaphore);
441
442 if (fenceA)
443 radeon_fence_unref(&fenceA);
444
445 if (fenceB)
446 radeon_fence_unref(&fenceB);
447
448 if (r)
449 printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
450}
451
452void radeon_test_syncing(struct radeon_device *rdev)
453{
454 int i, j, k;
455
456 for (i = 1; i < RADEON_NUM_RINGS; ++i) {
457 struct radeon_ring *ringA = &rdev->ring[i];
458 if (!ringA->ready)
459 continue;
460
461 for (j = 0; j < i; ++j) {
462 struct radeon_ring *ringB = &rdev->ring[j];
463 if (!ringB->ready)
464 continue;
465
466 DRM_INFO("Testing syncing between rings %d and %d...\n", i, j);
467 radeon_test_ring_sync(rdev, ringA, ringB);
468
469 DRM_INFO("Testing syncing between rings %d and %d...\n", j, i);
470 radeon_test_ring_sync(rdev, ringB, ringA);
471
472 for (k = 0; k < j; ++k) {
473 struct radeon_ring *ringC = &rdev->ring[k];
474 if (!ringC->ready)
475 continue;
476
477 DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, j, k);
478 radeon_test_ring_sync2(rdev, ringA, ringB, ringC);
479
480 DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, k, j);
481 radeon_test_ring_sync2(rdev, ringA, ringC, ringB);
482
483 DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, i, k);
484 radeon_test_ring_sync2(rdev, ringB, ringA, ringC);
485
486 DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, k, i);
487 radeon_test_ring_sync2(rdev, ringB, ringC, ringA);
488
489 DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, i, j);
490 radeon_test_ring_sync2(rdev, ringC, ringA, ringB);
491
492 DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, j, i);
493 radeon_test_ring_sync2(rdev, ringC, ringB, ringA);
494 }
495 }
496 }
497}
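For example, with RADEON_NUM_RINGS == 3 and all rings ready, the loops exercise the pairs (1,0), (2,0) and (2,1) in both directions, and all six role assignments over the triple {2,1,0} (two waiting rings plus one signalling ring), so every combination is covered.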
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 0b5468bfaf5..c421e77ace7 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -114,24 +114,6 @@ static void radeon_ttm_global_fini(struct radeon_device *rdev)
114 } 114 }
115} 115}
116 116
117struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev);
118
119static struct ttm_backend*
120radeon_create_ttm_backend_entry(struct ttm_bo_device *bdev)
121{
122 struct radeon_device *rdev;
123
124 rdev = radeon_get_rdev(bdev);
125#if __OS_HAS_AGP
126 if (rdev->flags & RADEON_IS_AGP) {
127 return ttm_agp_backend_init(bdev, rdev->ddev->agp->bridge);
128 } else
129#endif
130 {
131 return radeon_ttm_backend_create(rdev);
132 }
133}
134
135static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags) 117static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
136{ 118{
137 return 0; 119 return 0;
@@ -206,7 +188,7 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
206 rbo = container_of(bo, struct radeon_bo, tbo); 188 rbo = container_of(bo, struct radeon_bo, tbo);
207 switch (bo->mem.mem_type) { 189 switch (bo->mem.mem_type) {
208 case TTM_PL_VRAM: 190 case TTM_PL_VRAM:
209 if (rbo->rdev->cp.ready == false) 191 if (rbo->rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready == false)
210 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU); 192 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
211 else 193 else
212 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT); 194 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
@@ -241,10 +223,10 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
241 struct radeon_device *rdev; 223 struct radeon_device *rdev;
242 uint64_t old_start, new_start; 224 uint64_t old_start, new_start;
243 struct radeon_fence *fence; 225 struct radeon_fence *fence;
244 int r; 226 int r, i;
245 227
246 rdev = radeon_get_rdev(bo->bdev); 228 rdev = radeon_get_rdev(bo->bdev);
247 r = radeon_fence_create(rdev, &fence); 229 r = radeon_fence_create(rdev, &fence, rdev->copy_ring);
248 if (unlikely(r)) { 230 if (unlikely(r)) {
249 return r; 231 return r;
250 } 232 }
@@ -273,13 +255,43 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
273 DRM_ERROR("Unknown placement %d\n", old_mem->mem_type); 255 DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
274 return -EINVAL; 256 return -EINVAL;
275 } 257 }
276 if (!rdev->cp.ready) { 258 if (!rdev->ring[rdev->copy_ring].ready) {
277 DRM_ERROR("Trying to move memory with CP turned off.\n"); 259 DRM_ERROR("Trying to move memory with ring turned off.\n");
278 return -EINVAL; 260 return -EINVAL;
279 } 261 }
280 262
281 BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0); 263 BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
282 264
265 /* sync other rings */
266 if (rdev->family >= CHIP_R600) {
267 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
268 /* no need to sync to our own or unused rings */
269 if (i == rdev->copy_ring || !rdev->ring[i].ready)
270 continue;
271
272 if (!fence->semaphore) {
273 r = radeon_semaphore_create(rdev, &fence->semaphore);
274 /* FIXME: handle semaphore error */
275 if (r)
276 continue;
277 }
278
279 r = radeon_ring_lock(rdev, &rdev->ring[i], 3);
280 /* FIXME: handle ring lock error */
281 if (r)
282 continue;
283 radeon_semaphore_emit_signal(rdev, i, fence->semaphore);
284 radeon_ring_unlock_commit(rdev, &rdev->ring[i]);
285
286 r = radeon_ring_lock(rdev, &rdev->ring[rdev->copy_ring], 3);
287 /* FIXME: handle ring lock error */
288 if (r)
289 continue;
290 radeon_semaphore_emit_wait(rdev, rdev->copy_ring, fence->semaphore);
291 radeon_ring_unlock_commit(rdev, &rdev->ring[rdev->copy_ring]);
292 }
293 }
294
283 r = radeon_copy(rdev, old_start, new_start, 295 r = radeon_copy(rdev, old_start, new_start,
284 new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */ 296 new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
285 fence); 297 fence);
@@ -398,7 +410,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
398 radeon_move_null(bo, new_mem); 410 radeon_move_null(bo, new_mem);
399 return 0; 411 return 0;
400 } 412 }
401 if (!rdev->cp.ready || rdev->asic->copy == NULL) { 413 if (!rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready || rdev->asic->copy == NULL) {
402 /* use memcpy */ 414 /* use memcpy */
403 goto memcpy; 415 goto memcpy;
404 } 416 }
@@ -515,8 +527,166 @@ static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg)
 	return radeon_fence_signaled((struct radeon_fence *)sync_obj);
 }
 
+/*
+ * TTM backend functions.
+ */
+struct radeon_ttm_tt {
+	struct ttm_dma_tt		ttm;
+	struct radeon_device		*rdev;
+	u64				offset;
+};
+
+static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
+				   struct ttm_mem_reg *bo_mem)
+{
+	struct radeon_ttm_tt *gtt = (void*)ttm;
+	int r;
+
+	gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
+	if (!ttm->num_pages) {
+		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
+		     ttm->num_pages, bo_mem, ttm);
+	}
+	r = radeon_gart_bind(gtt->rdev, gtt->offset,
+			     ttm->num_pages, ttm->pages, gtt->ttm.dma_address);
+	if (r) {
+		DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
+			  ttm->num_pages, (unsigned)gtt->offset);
+		return r;
+	}
+	return 0;
+}
+
+static int radeon_ttm_backend_unbind(struct ttm_tt *ttm)
+{
+	struct radeon_ttm_tt *gtt = (void *)ttm;
+
+	radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages);
+	return 0;
+}
+
+static void radeon_ttm_backend_destroy(struct ttm_tt *ttm)
+{
+	struct radeon_ttm_tt *gtt = (void *)ttm;
+
+	ttm_dma_tt_fini(&gtt->ttm);
+	kfree(gtt);
+}
+
+static struct ttm_backend_func radeon_backend_func = {
+	.bind = &radeon_ttm_backend_bind,
+	.unbind = &radeon_ttm_backend_unbind,
+	.destroy = &radeon_ttm_backend_destroy,
+};
+
+struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
+				    unsigned long size, uint32_t page_flags,
+				    struct page *dummy_read_page)
+{
+	struct radeon_device *rdev;
+	struct radeon_ttm_tt *gtt;
+
+	rdev = radeon_get_rdev(bdev);
+#if __OS_HAS_AGP
+	if (rdev->flags & RADEON_IS_AGP) {
+		return ttm_agp_tt_create(bdev, rdev->ddev->agp->bridge,
+					 size, page_flags, dummy_read_page);
+	}
+#endif
+
+	gtt = kzalloc(sizeof(struct radeon_ttm_tt), GFP_KERNEL);
+	if (gtt == NULL) {
+		return NULL;
+	}
+	gtt->ttm.ttm.func = &radeon_backend_func;
+	gtt->rdev = rdev;
+	if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
+		kfree(gtt);
+		return NULL;
+	}
+	return &gtt->ttm.ttm;
+}
+
+static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
+{
+	struct radeon_device *rdev;
+	struct radeon_ttm_tt *gtt = (void *)ttm;
+	unsigned i;
+	int r;
+
+	if (ttm->state != tt_unpopulated)
+		return 0;
+
+	rdev = radeon_get_rdev(ttm->bdev);
+#if __OS_HAS_AGP
+	if (rdev->flags & RADEON_IS_AGP) {
+		return ttm_agp_tt_populate(ttm);
+	}
+#endif
+
+#ifdef CONFIG_SWIOTLB
+	if (swiotlb_nr_tbl()) {
+		return ttm_dma_populate(&gtt->ttm, rdev->dev);
+	}
+#endif
+
+	r = ttm_pool_populate(ttm);
+	if (r) {
+		return r;
+	}
+
+	for (i = 0; i < ttm->num_pages; i++) {
+		gtt->ttm.dma_address[i] = pci_map_page(rdev->pdev, ttm->pages[i],
+						       0, PAGE_SIZE,
+						       PCI_DMA_BIDIRECTIONAL);
+		if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
+			while (--i) {
+				pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
+					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+				gtt->ttm.dma_address[i] = 0;
+			}
+			ttm_pool_unpopulate(ttm);
+			return -EFAULT;
+		}
+	}
+	return 0;
+}
+
+static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+	struct radeon_device *rdev;
+	struct radeon_ttm_tt *gtt = (void *)ttm;
+	unsigned i;
+
+	rdev = radeon_get_rdev(ttm->bdev);
+#if __OS_HAS_AGP
+	if (rdev->flags & RADEON_IS_AGP) {
+		ttm_agp_tt_unpopulate(ttm);
+		return;
+	}
+#endif
+
+#ifdef CONFIG_SWIOTLB
+	if (swiotlb_nr_tbl()) {
+		ttm_dma_unpopulate(&gtt->ttm, rdev->dev);
+		return;
+	}
+#endif
+
+	for (i = 0; i < ttm->num_pages; i++) {
+		if (gtt->ttm.dma_address[i]) {
+			pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
+				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+		}
+	}
+
+	ttm_pool_unpopulate(ttm);
+}
+
 static struct ttm_bo_driver radeon_bo_driver = {
-	.create_ttm_backend_entry = &radeon_create_ttm_backend_entry,
+	.ttm_tt_create = &radeon_ttm_tt_create,
+	.ttm_tt_populate = &radeon_ttm_tt_populate,
+	.ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
 	.invalidate_caches = &radeon_invalidate_caches,
 	.init_mem_type = &radeon_init_mem_type,
 	.evict_flags = &radeon_evict_flags,
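The new radeon_ttm_tt_populate above follows a common map-with-unwind shape: map each page, and on the first failure walk back over what was already mapped before bailing out. A small self-contained model of that shape (the map/unmap stubs are illustrative, not the PCI DMA API):

#include <stdio.h>
#include <stdbool.h>

#define NPAGES 8

static bool map_page(int i, unsigned long *addr)
{
	if (i == 5)			/* inject a failure on page 5 */
		return false;
	*addr = 0x1000ul * (i + 1);	/* fake bus address */
	return true;
}

static void unmap_page(unsigned long addr) { (void)addr; }

int main(void)
{
	unsigned long dma[NPAGES] = {0};
	unsigned i;

	for (i = 0; i < NPAGES; i++) {
		if (!map_page(i, &dma[i])) {
			while (i--) {	/* unwind everything mapped so far, page 0 included */
				unmap_page(dma[i]);
				dma[i] = 0;
			}
			puts("populate failed, unwound cleanly");
			return 1;
		}
	}
	puts("all pages mapped");
	return 0;
}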
@@ -680,124 +850,6 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
 }
 
 
-/*
- * TTM backend functions.
- */
-struct radeon_ttm_backend {
-	struct ttm_backend backend;
-	struct radeon_device *rdev;
-	unsigned long num_pages;
-	struct page **pages;
-	struct page *dummy_read_page;
-	dma_addr_t *dma_addrs;
-	bool populated;
-	bool bound;
-	unsigned offset;
-};
-
-static int radeon_ttm_backend_populate(struct ttm_backend *backend,
-				       unsigned long num_pages,
-				       struct page **pages,
-				       struct page *dummy_read_page,
-				       dma_addr_t *dma_addrs)
-{
-	struct radeon_ttm_backend *gtt;
-
-	gtt = container_of(backend, struct radeon_ttm_backend, backend);
-	gtt->pages = pages;
-	gtt->dma_addrs = dma_addrs;
-	gtt->num_pages = num_pages;
-	gtt->dummy_read_page = dummy_read_page;
-	gtt->populated = true;
-	return 0;
-}
-
-static void radeon_ttm_backend_clear(struct ttm_backend *backend)
-{
-	struct radeon_ttm_backend *gtt;
-
-	gtt = container_of(backend, struct radeon_ttm_backend, backend);
-	gtt->pages = NULL;
-	gtt->dma_addrs = NULL;
-	gtt->num_pages = 0;
-	gtt->dummy_read_page = NULL;
-	gtt->populated = false;
-	gtt->bound = false;
-}
-
-
-static int radeon_ttm_backend_bind(struct ttm_backend *backend,
-				   struct ttm_mem_reg *bo_mem)
-{
-	struct radeon_ttm_backend *gtt;
-	int r;
-
-	gtt = container_of(backend, struct radeon_ttm_backend, backend);
-	gtt->offset = bo_mem->start << PAGE_SHIFT;
-	if (!gtt->num_pages) {
-		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
-		     gtt->num_pages, bo_mem, backend);
-	}
-	r = radeon_gart_bind(gtt->rdev, gtt->offset,
-			     gtt->num_pages, gtt->pages, gtt->dma_addrs);
-	if (r) {
-		DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
-			  gtt->num_pages, gtt->offset);
-		return r;
-	}
-	gtt->bound = true;
-	return 0;
-}
-
-static int radeon_ttm_backend_unbind(struct ttm_backend *backend)
-{
-	struct radeon_ttm_backend *gtt;
-
-	gtt = container_of(backend, struct radeon_ttm_backend, backend);
-	radeon_gart_unbind(gtt->rdev, gtt->offset, gtt->num_pages);
-	gtt->bound = false;
-	return 0;
-}
-
-static void radeon_ttm_backend_destroy(struct ttm_backend *backend)
-{
-	struct radeon_ttm_backend *gtt;
-
-	gtt = container_of(backend, struct radeon_ttm_backend, backend);
-	if (gtt->bound) {
-		radeon_ttm_backend_unbind(backend);
-	}
-	kfree(gtt);
-}
-
-static struct ttm_backend_func radeon_backend_func = {
-	.populate = &radeon_ttm_backend_populate,
-	.clear = &radeon_ttm_backend_clear,
-	.bind = &radeon_ttm_backend_bind,
-	.unbind = &radeon_ttm_backend_unbind,
-	.destroy = &radeon_ttm_backend_destroy,
-};
-
-struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev)
-{
-	struct radeon_ttm_backend *gtt;
-
-	gtt = kzalloc(sizeof(struct radeon_ttm_backend), GFP_KERNEL);
-	if (gtt == NULL) {
-		return NULL;
-	}
-	gtt->backend.bdev = &rdev->mman.bdev;
-	gtt->backend.flags = 0;
-	gtt->backend.func = &radeon_backend_func;
-	gtt->rdev = rdev;
-	gtt->pages = NULL;
-	gtt->num_pages = 0;
-	gtt->dummy_read_page = NULL;
-	gtt->populated = false;
-	gtt->bound = false;
-	return &gtt->backend;
-}
-
 #define RADEON_DEBUGFS_MEM_TYPES 2
 
 #if defined(CONFIG_DEBUG_FS)
@@ -820,8 +872,8 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data)
 static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
 {
 #if defined(CONFIG_DEBUG_FS)
-	static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+1];
-	static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+1][32];
+	static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2];
+	static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+2][32];
 	unsigned i;
 
 	for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
@@ -843,8 +895,17 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
 	radeon_mem_types_list[i].name = radeon_mem_types_names[i];
 	radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
 	radeon_mem_types_list[i].driver_features = 0;
-	radeon_mem_types_list[i].data = NULL;
-	return radeon_debugfs_add_files(rdev, radeon_mem_types_list, RADEON_DEBUGFS_MEM_TYPES+1);
+	radeon_mem_types_list[i++].data = NULL;
+#ifdef CONFIG_SWIOTLB
+	if (swiotlb_nr_tbl()) {
+		sprintf(radeon_mem_types_names[i], "ttm_dma_page_pool");
+		radeon_mem_types_list[i].name = radeon_mem_types_names[i];
+		radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs;
+		radeon_mem_types_list[i].driver_features = 0;
+		radeon_mem_types_list[i++].data = NULL;
+	}
+#endif
+	return radeon_debugfs_add_files(rdev, radeon_mem_types_list, i);
 
 #endif
 	return 0;
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 06b90c87f8f..b0ce84a20a6 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -410,6 +410,12 @@ static int rs400_startup(struct radeon_device *rdev)
 	if (r)
 		return r;
 
+	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
 	/* Enable IRQ */
 	r100_irq_set(rdev);
 	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -419,11 +425,18 @@ static int rs400_startup(struct radeon_device *rdev)
 		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
 		return r;
 	}
-	r = r100_ib_init(rdev);
+
+	r = radeon_ib_pool_start(rdev);
+	if (r)
+		return r;
+
+	r = r100_ib_test(rdev);
 	if (r) {
-		dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
+		dev_err(rdev->dev, "failed testing IB (%d).\n", r);
+		rdev->accel_working = false;
 		return r;
 	}
+
 	return 0;
 }
 
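This rs400 hunk establishes the startup ordering that the rs600, rs690, rv515 and rv770 hunks below repeat: start the fence ring, bring up the CP, start the IB pool, then run an IB test, clearing accel_working if the test fails. A compressed self-contained model of that contract (the stub functions merely stand in for the radeon calls):

#include <stdio.h>
#include <stdbool.h>

static bool accel_working;

static int fence_start(void)   { return 0; }
static int cp_init(void)       { return 0; }
static int ib_pool_start(void) { return 0; }
static int ib_test(void)       { return 0; }

static int startup(void)
{
	if (fence_start())	/* fences must exist before the CP can signal them */
		return -1;
	if (cp_init())
		return -1;
	if (ib_pool_start())	/* the pool must be live before any IB is submitted */
		return -1;
	if (ib_test()) {	/* a failed test disables acceleration, not the driver */
		accel_working = false;
		return -1;
	}
	return 0;
}

int main(void)
{
	accel_working = true;	/* set optimistically before startup, as the hunks do */
	printf("startup %s, accel_working=%d\n",
	       startup() ? "failed" : "ok", accel_working);
	return 0;
}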
@@ -447,11 +460,14 @@ int rs400_resume(struct radeon_device *rdev)
 	r300_clock_startup(rdev);
 	/* Initialize surface registers */
 	radeon_surface_init(rdev);
+
+	rdev->accel_working = true;
 	return rs400_startup(rdev);
 }
 
 int rs400_suspend(struct radeon_device *rdev)
 {
+	radeon_ib_pool_suspend(rdev);
 	r100_cp_disable(rdev);
 	radeon_wb_disable(rdev);
 	r100_irq_disable(rdev);
@@ -530,7 +546,14 @@ int rs400_init(struct radeon_device *rdev)
 	if (r)
 		return r;
 	r300_set_reg_safe(rdev);
+
+	r = radeon_ib_pool_init(rdev);
 	rdev->accel_working = true;
+	if (r) {
+		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+		rdev->accel_working = false;
+	}
+
 	r = rs400_startup(rdev);
 	if (r) {
 		/* Somethings want wront with the accel init stop accel */
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index b1053d64042..803e0d3c177 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -324,10 +324,10 @@ void rs600_hpd_fini(struct radeon_device *rdev)
 
 void rs600_bm_disable(struct radeon_device *rdev)
 {
-	u32 tmp;
+	u16 tmp;
 
 	/* disable bus mastering */
-	pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
+	pci_read_config_word(rdev->pdev, 0x4, &tmp);
 	pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
 	mdelay(1);
 }
@@ -549,7 +549,7 @@ int rs600_irq_set(struct radeon_device *rdev)
 		WREG32(R_000040_GEN_INT_CNTL, 0);
 		return -EINVAL;
 	}
-	if (rdev->irq.sw_int) {
+	if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
 		tmp |= S_000040_SW_INT_EN(1);
 	}
 	if (rdev->irq.gui_idle) {
@@ -642,7 +642,7 @@ int rs600_irq_process(struct radeon_device *rdev)
 	while (status || rdev->irq.stat_regs.r500.disp_int) {
 		/* SW interrupt */
 		if (G_000044_SW_INT(status)) {
-			radeon_fence_process(rdev);
+			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
 		}
 		/* GUI idle */
 		if (G_000040_GUI_IDLE(status)) {
@@ -849,6 +849,12 @@ static int rs600_startup(struct radeon_device *rdev)
 	if (r)
 		return r;
 
+	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
 	/* Enable IRQ */
 	rs600_irq_set(rdev);
 	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -858,15 +864,21 @@ static int rs600_startup(struct radeon_device *rdev)
 		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
 		return r;
 	}
-	r = r100_ib_init(rdev);
+
+	r = r600_audio_init(rdev);
 	if (r) {
-		dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
+		dev_err(rdev->dev, "failed initializing audio\n");
 		return r;
 	}
 
-	r = r600_audio_init(rdev);
+	r = radeon_ib_pool_start(rdev);
+	if (r)
+		return r;
+
+	r = r100_ib_test(rdev);
 	if (r) {
-		dev_err(rdev->dev, "failed initializing audio\n");
+		dev_err(rdev->dev, "failed testing IB (%d).\n", r);
+		rdev->accel_working = false;
 		return r;
 	}
 
@@ -891,11 +903,14 @@ int rs600_resume(struct radeon_device *rdev)
 	rv515_clock_startup(rdev);
 	/* Initialize surface registers */
 	radeon_surface_init(rdev);
+
+	rdev->accel_working = true;
 	return rs600_startup(rdev);
 }
 
 int rs600_suspend(struct radeon_device *rdev)
 {
+	radeon_ib_pool_suspend(rdev);
 	r600_audio_fini(rdev);
 	r100_cp_disable(rdev);
 	radeon_wb_disable(rdev);
@@ -976,7 +991,14 @@ int rs600_init(struct radeon_device *rdev)
 	if (r)
 		return r;
 	rs600_set_safe_registers(rdev);
+
+	r = radeon_ib_pool_init(rdev);
 	rdev->accel_working = true;
+	if (r) {
+		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+		rdev->accel_working = false;
+	}
+
 	r = rs600_startup(rdev);
 	if (r) {
 		/* Somethings want wront with the accel init stop accel */
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index a9049ed1a51..4f24a0fa8c8 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -621,6 +621,12 @@ static int rs690_startup(struct radeon_device *rdev)
 	if (r)
 		return r;
 
+	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
 	/* Enable IRQ */
 	rs600_irq_set(rdev);
 	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -630,15 +636,21 @@ static int rs690_startup(struct radeon_device *rdev)
 		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
 		return r;
 	}
-	r = r100_ib_init(rdev);
+
+	r = r600_audio_init(rdev);
 	if (r) {
-		dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
+		dev_err(rdev->dev, "failed initializing audio\n");
 		return r;
 	}
 
-	r = r600_audio_init(rdev);
+	r = radeon_ib_pool_start(rdev);
+	if (r)
+		return r;
+
+	r = r100_ib_test(rdev);
 	if (r) {
-		dev_err(rdev->dev, "failed initializing audio\n");
+		dev_err(rdev->dev, "failed testing IB (%d).\n", r);
+		rdev->accel_working = false;
 		return r;
 	}
 
@@ -663,11 +675,14 @@ int rs690_resume(struct radeon_device *rdev)
 	rv515_clock_startup(rdev);
 	/* Initialize surface registers */
 	radeon_surface_init(rdev);
+
+	rdev->accel_working = true;
 	return rs690_startup(rdev);
 }
 
 int rs690_suspend(struct radeon_device *rdev)
 {
+	radeon_ib_pool_suspend(rdev);
 	r600_audio_fini(rdev);
 	r100_cp_disable(rdev);
 	radeon_wb_disable(rdev);
@@ -749,7 +764,14 @@ int rs690_init(struct radeon_device *rdev)
 	if (r)
 		return r;
 	rs600_set_safe_registers(rdev);
+
+	r = radeon_ib_pool_init(rdev);
 	rdev->accel_working = true;
+	if (r) {
+		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+		rdev->accel_working = false;
+	}
+
 	r = rs690_startup(rdev);
 	if (r) {
 		/* Somethings want wront with the accel init stop accel */
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 6613ee9ecca..880637fd194 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -55,44 +55,45 @@ void rv515_debugfs(struct radeon_device *rdev)
 
 void rv515_ring_start(struct radeon_device *rdev)
 {
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	int r;
 
-	r = radeon_ring_lock(rdev, 64);
+	r = radeon_ring_lock(rdev, ring, 64);
 	if (r) {
 		return;
 	}
-	radeon_ring_write(rdev, PACKET0(ISYNC_CNTL, 0));
-	radeon_ring_write(rdev,
+	radeon_ring_write(ring, PACKET0(ISYNC_CNTL, 0));
+	radeon_ring_write(ring,
 			  ISYNC_ANY2D_IDLE3D |
 			  ISYNC_ANY3D_IDLE2D |
 			  ISYNC_WAIT_IDLEGUI |
 			  ISYNC_CPSCRATCH_IDLEGUI);
-	radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0));
-	radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
-	radeon_ring_write(rdev, PACKET0(R300_DST_PIPE_CONFIG, 0));
-	radeon_ring_write(rdev, R300_PIPE_AUTO_CONFIG);
-	radeon_ring_write(rdev, PACKET0(GB_SELECT, 0));
-	radeon_ring_write(rdev, 0);
-	radeon_ring_write(rdev, PACKET0(GB_ENABLE, 0));
-	radeon_ring_write(rdev, 0);
-	radeon_ring_write(rdev, PACKET0(R500_SU_REG_DEST, 0));
-	radeon_ring_write(rdev, (1 << rdev->num_gb_pipes) - 1);
-	radeon_ring_write(rdev, PACKET0(VAP_INDEX_OFFSET, 0));
-	radeon_ring_write(rdev, 0);
-	radeon_ring_write(rdev, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
-	radeon_ring_write(rdev, RB3D_DC_FLUSH | RB3D_DC_FREE);
-	radeon_ring_write(rdev, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
-	radeon_ring_write(rdev, ZC_FLUSH | ZC_FREE);
-	radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0));
-	radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
-	radeon_ring_write(rdev, PACKET0(GB_AA_CONFIG, 0));
-	radeon_ring_write(rdev, 0);
-	radeon_ring_write(rdev, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
-	radeon_ring_write(rdev, RB3D_DC_FLUSH | RB3D_DC_FREE);
-	radeon_ring_write(rdev, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
-	radeon_ring_write(rdev, ZC_FLUSH | ZC_FREE);
-	radeon_ring_write(rdev, PACKET0(GB_MSPOS0, 0));
-	radeon_ring_write(rdev,
+	radeon_ring_write(ring, PACKET0(WAIT_UNTIL, 0));
+	radeon_ring_write(ring, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
+	radeon_ring_write(ring, PACKET0(R300_DST_PIPE_CONFIG, 0));
+	radeon_ring_write(ring, R300_PIPE_AUTO_CONFIG);
+	radeon_ring_write(ring, PACKET0(GB_SELECT, 0));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, PACKET0(GB_ENABLE, 0));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, PACKET0(R500_SU_REG_DEST, 0));
+	radeon_ring_write(ring, (1 << rdev->num_gb_pipes) - 1);
+	radeon_ring_write(ring, PACKET0(VAP_INDEX_OFFSET, 0));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
+	radeon_ring_write(ring, RB3D_DC_FLUSH | RB3D_DC_FREE);
+	radeon_ring_write(ring, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
+	radeon_ring_write(ring, ZC_FLUSH | ZC_FREE);
+	radeon_ring_write(ring, PACKET0(WAIT_UNTIL, 0));
+	radeon_ring_write(ring, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
+	radeon_ring_write(ring, PACKET0(GB_AA_CONFIG, 0));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
+	radeon_ring_write(ring, RB3D_DC_FLUSH | RB3D_DC_FREE);
+	radeon_ring_write(ring, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
+	radeon_ring_write(ring, ZC_FLUSH | ZC_FREE);
+	radeon_ring_write(ring, PACKET0(GB_MSPOS0, 0));
+	radeon_ring_write(ring,
 			  ((6 << MS_X0_SHIFT) |
 			   (6 << MS_Y0_SHIFT) |
 			   (6 << MS_X1_SHIFT) |
@@ -101,8 +102,8 @@ void rv515_ring_start(struct radeon_device *rdev)
 			   (6 << MS_Y2_SHIFT) |
 			   (6 << MSBD0_Y_SHIFT) |
 			   (6 << MSBD0_X_SHIFT)));
-	radeon_ring_write(rdev, PACKET0(GB_MSPOS1, 0));
-	radeon_ring_write(rdev,
+	radeon_ring_write(ring, PACKET0(GB_MSPOS1, 0));
+	radeon_ring_write(ring,
 			  ((6 << MS_X3_SHIFT) |
 			   (6 << MS_Y3_SHIFT) |
 			   (6 << MS_X4_SHIFT) |
@@ -110,15 +111,15 @@ void rv515_ring_start(struct radeon_device *rdev)
 			   (6 << MS_X5_SHIFT) |
 			   (6 << MS_Y5_SHIFT) |
 			   (6 << MSBD1_SHIFT)));
-	radeon_ring_write(rdev, PACKET0(GA_ENHANCE, 0));
-	radeon_ring_write(rdev, GA_DEADLOCK_CNTL | GA_FASTSYNC_CNTL);
-	radeon_ring_write(rdev, PACKET0(GA_POLY_MODE, 0));
-	radeon_ring_write(rdev, FRONT_PTYPE_TRIANGE | BACK_PTYPE_TRIANGE);
-	radeon_ring_write(rdev, PACKET0(GA_ROUND_MODE, 0));
-	radeon_ring_write(rdev, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST);
-	radeon_ring_write(rdev, PACKET0(0x20C8, 0));
-	radeon_ring_write(rdev, 0);
-	radeon_ring_unlock_commit(rdev);
+	radeon_ring_write(ring, PACKET0(GA_ENHANCE, 0));
+	radeon_ring_write(ring, GA_DEADLOCK_CNTL | GA_FASTSYNC_CNTL);
+	radeon_ring_write(ring, PACKET0(GA_POLY_MODE, 0));
+	radeon_ring_write(ring, FRONT_PTYPE_TRIANGE | BACK_PTYPE_TRIANGE);
+	radeon_ring_write(ring, PACKET0(GA_ROUND_MODE, 0));
+	radeon_ring_write(ring, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST);
+	radeon_ring_write(ring, PACKET0(0x20C8, 0));
+	radeon_ring_write(ring, 0);
+	radeon_ring_unlock_commit(rdev, ring);
 }
 
 int rv515_mc_wait_for_idle(struct radeon_device *rdev)
@@ -392,6 +393,12 @@ static int rv515_startup(struct radeon_device *rdev)
 	if (r)
 		return r;
 
+	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
 	/* Enable IRQ */
 	rs600_irq_set(rdev);
 	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -401,9 +408,15 @@ static int rv515_startup(struct radeon_device *rdev)
 		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
 		return r;
 	}
-	r = r100_ib_init(rdev);
+
+	r = radeon_ib_pool_start(rdev);
+	if (r)
+		return r;
+
+	r = r100_ib_test(rdev);
 	if (r) {
-		dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
+		dev_err(rdev->dev, "failed testing IB (%d).\n", r);
+		rdev->accel_working = false;
 		return r;
 	}
 	return 0;
@@ -428,6 +441,8 @@ int rv515_resume(struct radeon_device *rdev)
 	rv515_clock_startup(rdev);
 	/* Initialize surface registers */
 	radeon_surface_init(rdev);
+
+	rdev->accel_working = true;
 	return rv515_startup(rdev);
 }
 
@@ -524,7 +539,14 @@ int rv515_init(struct radeon_device *rdev)
 	if (r)
 		return r;
 	rv515_set_safe_registers(rdev);
+
+	r = radeon_ib_pool_init(rdev);
 	rdev->accel_working = true;
+	if (r) {
+		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+		rdev->accel_working = false;
+	}
+
 	r = rv515_startup(rdev);
 	if (r) {
 		/* Somethings want wront with the accel init stop accel */
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 23ae1c60ab3..a1668b659dd 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -357,7 +357,7 @@ static int rv770_cp_load_microcode(struct radeon_device *rdev)
 void r700_cp_fini(struct radeon_device *rdev)
 {
 	r700_cp_stop(rdev);
-	radeon_ring_fini(rdev);
+	radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
 }
 
 /*
@@ -1043,6 +1043,7 @@ int rv770_mc_init(struct radeon_device *rdev)
 
 static int rv770_startup(struct radeon_device *rdev)
 {
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	int r;
 
 	/* enable pcie gen2 link */
@@ -1082,6 +1083,12 @@ static int rv770_startup(struct radeon_device *rdev)
 	if (r)
 		return r;
 
+	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
 	/* Enable IRQ */
 	r = r600_irq_init(rdev);
 	if (r) {
@@ -1091,7 +1098,9 @@ static int rv770_startup(struct radeon_device *rdev)
 	}
 	r600_irq_set(rdev);
 
-	r = radeon_ring_init(rdev, rdev->cp.ring_size);
+	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
+			     R600_CP_RB_RPTR, R600_CP_RB_WPTR,
+			     0, 0xfffff, RADEON_CP_PACKET2);
 	if (r)
 		return r;
 	r = rv770_cp_load_microcode(rdev);
@@ -1101,6 +1110,17 @@ static int rv770_startup(struct radeon_device *rdev)
 	if (r)
 		return r;
 
+	r = radeon_ib_pool_start(rdev);
+	if (r)
+		return r;
+
+	r = r600_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "IB test failed (%d).\n", r);
+		rdev->accel_working = false;
+		return r;
+	}
+
 	return 0;
 }
 
@@ -1115,18 +1135,13 @@ int rv770_resume(struct radeon_device *rdev)
 	/* post card */
 	atom_asic_init(rdev->mode_info.atom_context);
 
+	rdev->accel_working = true;
 	r = rv770_startup(rdev);
 	if (r) {
 		DRM_ERROR("r600 startup failed on resume\n");
 		return r;
 	}
 
-	r = r600_ib_test(rdev);
-	if (r) {
-		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
-		return r;
-	}
-
 	r = r600_audio_init(rdev);
 	if (r) {
 		dev_err(rdev->dev, "radeon: audio init failed\n");
@@ -1140,13 +1155,14 @@ int rv770_resume(struct radeon_device *rdev)
 int rv770_suspend(struct radeon_device *rdev)
 {
 	r600_audio_fini(rdev);
+	radeon_ib_pool_suspend(rdev);
+	r600_blit_suspend(rdev);
 	/* FIXME: we should wait for ring to be empty */
 	r700_cp_stop(rdev);
-	rdev->cp.ready = false;
+	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
 	r600_irq_suspend(rdev);
 	radeon_wb_disable(rdev);
 	rv770_pcie_gart_disable(rdev);
-	r600_blit_suspend(rdev);
 
 	return 0;
 }
@@ -1215,8 +1231,8 @@ int rv770_init(struct radeon_device *rdev)
 	if (r)
 		return r;
 
-	rdev->cp.ring_obj = NULL;
-	r600_ring_init(rdev, 1024 * 1024);
+	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
 
 	rdev->ih.ring_obj = NULL;
 	r600_ih_ring_init(rdev, 64 * 1024);
@@ -1225,30 +1241,24 @@ int rv770_init(struct radeon_device *rdev)
 	if (r)
 		return r;
 
+	r = radeon_ib_pool_init(rdev);
 	rdev->accel_working = true;
+	if (r) {
+		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+		rdev->accel_working = false;
+	}
+
 	r = rv770_startup(rdev);
 	if (r) {
 		dev_err(rdev->dev, "disabling GPU acceleration\n");
 		r700_cp_fini(rdev);
 		r600_irq_fini(rdev);
 		radeon_wb_fini(rdev);
+		r100_ib_fini(rdev);
 		radeon_irq_kms_fini(rdev);
 		rv770_pcie_gart_fini(rdev);
 		rdev->accel_working = false;
 	}
-	if (rdev->accel_working) {
-		r = radeon_ib_pool_init(rdev);
-		if (r) {
-			dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
-			rdev->accel_working = false;
-		} else {
-			r = r600_ib_test(rdev);
-			if (r) {
-				dev_err(rdev->dev, "IB test failed (%d).\n", r);
-				rdev->accel_working = false;
-			}
-		}
-	}
 
 	r = r600_audio_init(rdev);
 	if (r) {
@@ -1265,11 +1275,12 @@ void rv770_fini(struct radeon_device *rdev)
 	r700_cp_fini(rdev);
 	r600_irq_fini(rdev);
 	radeon_wb_fini(rdev);
-	radeon_ib_pool_fini(rdev);
+	r100_ib_fini(rdev);
 	radeon_irq_kms_fini(rdev);
 	rv770_pcie_gart_fini(rdev);
 	r600_vram_scratch_fini(rdev);
 	radeon_gem_fini(rdev);
+	radeon_semaphore_driver_fini(rdev);
 	radeon_fence_driver_fini(rdev);
 	radeon_agp_fini(rdev);
 	radeon_bo_fini(rdev);
diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c
index 5468d1cd329..89afe0b8364 100644
--- a/drivers/gpu/drm/savage/savage_drv.c
+++ b/drivers/gpu/drm/savage/savage_drv.c
@@ -35,6 +35,17 @@ static struct pci_device_id pciidlist[] = {
 	savage_PCI_IDS
 };
 
+static const struct file_operations savage_driver_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_open,
+	.release = drm_release,
+	.unlocked_ioctl = drm_ioctl,
+	.mmap = drm_mmap,
+	.poll = drm_poll,
+	.fasync = drm_fasync,
+	.llseek = noop_llseek,
+};
+
 static struct drm_driver driver = {
 	.driver_features =
 	    DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_DMA | DRIVER_PCI_DMA,
@@ -46,17 +57,7 @@ static struct drm_driver driver = {
 	.reclaim_buffers = savage_reclaim_buffers,
 	.ioctls = savage_ioctls,
 	.dma_ioctl = savage_bci_buffers,
-	.fops = {
-		.owner = THIS_MODULE,
-		.open = drm_open,
-		.release = drm_release,
-		.unlocked_ioctl = drm_ioctl,
-		.mmap = drm_mmap,
-		.poll = drm_poll,
-		.fasync = drm_fasync,
-		.llseek = noop_llseek,
-	},
-
+	.fops = &savage_driver_fops,
 	.name = DRIVER_NAME,
 	.desc = DRIVER_DESC,
 	.date = DRIVER_DATE,
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index a9c5716bea4..06da063ece2 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -48,9 +48,7 @@ static int sis_driver_load(struct drm_device *dev, unsigned long chipset)
 
 	dev->dev_private = (void *)dev_priv;
 	dev_priv->chipset = chipset;
-	ret = drm_sman_init(&dev_priv->sman, 2, 12, 8);
-	if (ret)
-		kfree(dev_priv);
+	idr_init(&dev->object_name_idr);
 
 	return ret;
 }
@@ -59,32 +57,60 @@ static int sis_driver_unload(struct drm_device *dev)
 {
 	drm_sis_private_t *dev_priv = dev->dev_private;
 
-	drm_sman_takedown(&dev_priv->sman);
+	idr_remove_all(&dev_priv->object_idr);
+	idr_destroy(&dev_priv->object_idr);
+
 	kfree(dev_priv);
 
 	return 0;
 }
 
+static const struct file_operations sis_driver_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_open,
+	.release = drm_release,
+	.unlocked_ioctl = drm_ioctl,
+	.mmap = drm_mmap,
+	.poll = drm_poll,
+	.fasync = drm_fasync,
+	.llseek = noop_llseek,
+};
+
+static int sis_driver_open(struct drm_device *dev, struct drm_file *file)
+{
+	struct sis_file_private *file_priv;
+
+	DRM_DEBUG_DRIVER("\n");
+	file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
+	if (!file_priv)
+		return -ENOMEM;
+
+	file->driver_priv = file_priv;
+
+	INIT_LIST_HEAD(&file_priv->obj_list);
+
+	return 0;
+}
+
+void sis_driver_postclose(struct drm_device *dev, struct drm_file *file)
+{
+	struct sis_file_private *file_priv = file->driver_priv;
+
+	kfree(file_priv);
+}
+
 static struct drm_driver driver = {
 	.driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR,
 	.load = sis_driver_load,
 	.unload = sis_driver_unload,
+	.open = sis_driver_open,
+	.postclose = sis_driver_postclose,
 	.dma_quiescent = sis_idle,
 	.reclaim_buffers = NULL,
 	.reclaim_buffers_idlelocked = sis_reclaim_buffers_locked,
 	.lastclose = sis_lastclose,
 	.ioctls = sis_ioctls,
-	.fops = {
-		.owner = THIS_MODULE,
-		.open = drm_open,
-		.release = drm_release,
-		.unlocked_ioctl = drm_ioctl,
-		.mmap = drm_mmap,
-		.poll = drm_poll,
-		.fasync = drm_fasync,
-		.llseek = noop_llseek,
-	},
-
+	.fops = &sis_driver_fops,
 	.name = DRIVER_NAME,
 	.desc = DRIVER_DESC,
 	.date = DRIVER_DATE,
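savage, sis and tdfx all make the same mechanical change here: the file_operations table moves out of the embedded initializer in struct drm_driver into one shared static const object that the driver points at. A minimal model of why the pointer form is preferable (plain C stand-ins, not the real DRM structs):

#include <stdio.h>

struct file_ops {			/* stand-in for struct file_operations */
	int (*open)(void);
};

static int generic_open(void) { return 0; }

/* one shared, read-only table instead of a mutable copy embedded per driver */
static const struct file_ops driver_fops = {
	.open = generic_open,
};

struct driver {				/* stand-in for struct drm_driver */
	const char *name;
	const struct file_ops *fops;	/* a pointer, so the table can live in rodata */
};

int main(void)
{
	struct driver a = { "savage", &driver_fops };
	struct driver b = { "sis",    &driver_fops };

	printf("%s and %s share fops at %p\n", a.name, b.name, (const void *)a.fops);
	return a.fops->open() + b.fops->open();
}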
diff --git a/drivers/gpu/drm/sis/sis_drv.h b/drivers/gpu/drm/sis/sis_drv.h
index 194303c177a..573758b2d2d 100644
--- a/drivers/gpu/drm/sis/sis_drv.h
+++ b/drivers/gpu/drm/sis/sis_drv.h
@@ -44,7 +44,7 @@ enum sis_family {
 	SIS_CHIP_315 = 1,
 };
 
-#include "drm_sman.h"
+#include "drm_mm.h"
 
 
 #define SIS_BASE (dev_priv->mmio)
@@ -54,12 +54,15 @@ enum sis_family {
 typedef struct drm_sis_private {
 	drm_local_map_t *mmio;
 	unsigned int idle_fault;
-	struct drm_sman sman;
 	unsigned int chipset;
 	int vram_initialized;
 	int agp_initialized;
 	unsigned long vram_offset;
 	unsigned long agp_offset;
+	struct drm_mm vram_mm;
+	struct drm_mm agp_mm;
+	/** Mapping of userspace keys to mm objects */
+	struct idr object_idr;
 } drm_sis_private_t;
 
 extern int sis_idle(struct drm_device *dev);
diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c
index 7fe2b63412c..dd4a316c3d7 100644
--- a/drivers/gpu/drm/sis/sis_mm.c
+++ b/drivers/gpu/drm/sis/sis_mm.c
@@ -41,40 +41,18 @@
 #define AGP_TYPE 1
 
 
+struct sis_memblock {
+	struct drm_mm_node mm_node;
+	struct sis_memreq req;
+	struct list_head owner_list;
+};
+
 #if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
 /* fb management via fb device */
 
 #define SIS_MM_ALIGN_SHIFT 0
 #define SIS_MM_ALIGN_MASK 0
 
-static void *sis_sman_mm_allocate(void *private, unsigned long size,
-				  unsigned alignment)
-{
-	struct sis_memreq req;
-
-	req.size = size;
-	sis_malloc(&req);
-	if (req.size == 0)
-		return NULL;
-	else
-		return (void *)(unsigned long)~req.offset;
-}
-
-static void sis_sman_mm_free(void *private, void *ref)
-{
-	sis_free(~((unsigned long)ref));
-}
-
-static void sis_sman_mm_destroy(void *private)
-{
-	;
-}
-
-static unsigned long sis_sman_mm_offset(void *private, void *ref)
-{
-	return ~((unsigned long)ref);
-}
-
 #else /* CONFIG_FB_SIS[_MODULE] */
 
 #define SIS_MM_ALIGN_SHIFT 4
@@ -86,30 +64,11 @@ static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file
 {
 	drm_sis_private_t *dev_priv = dev->dev_private;
 	drm_sis_fb_t *fb = data;
-	int ret;
 
 	mutex_lock(&dev->struct_mutex);
-#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
-	{
-		struct drm_sman_mm sman_mm;
-		sman_mm.private = (void *)0xFFFFFFFF;
-		sman_mm.allocate = sis_sman_mm_allocate;
-		sman_mm.free = sis_sman_mm_free;
-		sman_mm.destroy = sis_sman_mm_destroy;
-		sman_mm.offset = sis_sman_mm_offset;
-		ret =
-		    drm_sman_set_manager(&dev_priv->sman, VIDEO_TYPE, &sman_mm);
-	}
-#else
-	ret = drm_sman_set_range(&dev_priv->sman, VIDEO_TYPE, 0,
-				 fb->size >> SIS_MM_ALIGN_SHIFT);
-#endif
-
-	if (ret) {
-		DRM_ERROR("VRAM memory manager initialisation error\n");
-		mutex_unlock(&dev->struct_mutex);
-		return ret;
-	}
+	/* Unconditionally init the drm_mm, even though we don't use it when the
+	 * fb sis driver is available - make cleanup easier. */
+	drm_mm_init(&dev_priv->vram_mm, 0, fb->size >> SIS_MM_ALIGN_SHIFT);
 
 	dev_priv->vram_initialized = 1;
 	dev_priv->vram_offset = fb->offset;
@@ -120,13 +79,15 @@ static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file
 	return 0;
 }
 
-static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file_priv,
+static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file,
 			 void *data, int pool)
 {
 	drm_sis_private_t *dev_priv = dev->dev_private;
 	drm_sis_mem_t *mem = data;
-	int retval = 0;
-	struct drm_memblock_item *item;
+	int retval = 0, user_key;
+	struct sis_memblock *item;
+	struct sis_file_private *file_priv = file->driver_priv;
+	unsigned long offset;
 
 	mutex_lock(&dev->struct_mutex);
 
@@ -138,25 +99,68 @@ static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file_priv,
 		return -EINVAL;
 	}
 
-	mem->size = (mem->size + SIS_MM_ALIGN_MASK) >> SIS_MM_ALIGN_SHIFT;
-	item = drm_sman_alloc(&dev_priv->sman, pool, mem->size, 0,
-			      (unsigned long)file_priv);
+	item = kzalloc(sizeof(*item), GFP_KERNEL);
+	if (!item) {
+		retval = -ENOMEM;
+		goto fail_alloc;
+	}
 
-	mutex_unlock(&dev->struct_mutex);
-	if (item) {
-		mem->offset = ((pool == 0) ?
-			      dev_priv->vram_offset : dev_priv->agp_offset) +
-		    (item->mm->
-		     offset(item->mm, item->mm_info) << SIS_MM_ALIGN_SHIFT);
-		mem->free = item->user_hash.key;
-		mem->size = mem->size << SIS_MM_ALIGN_SHIFT;
+	mem->size = (mem->size + SIS_MM_ALIGN_MASK) >> SIS_MM_ALIGN_SHIFT;
+	if (pool == AGP_TYPE) {
+		retval = drm_mm_insert_node(&dev_priv->agp_mm,
+					    &item->mm_node,
+					    mem->size, 0);
+		offset = item->mm_node.start;
 	} else {
-		mem->offset = 0;
-		mem->size = 0;
-		mem->free = 0;
+#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
+		item->req.size = mem->size;
+		sis_malloc(&item->req);
+		if (item->req.size == 0)
+			retval = -ENOMEM;
+		offset = item->req.offset;
+#else
+		retval = drm_mm_insert_node(&dev_priv->vram_mm,
+					    &item->mm_node,
+					    mem->size, 0);
+		offset = item->mm_node.start;
+#endif
+	}
+	if (retval)
+		goto fail_alloc;
+
+again:
+	if (idr_pre_get(&dev_priv->object_idr, GFP_KERNEL) == 0) {
 		retval = -ENOMEM;
+		goto fail_idr;
 	}
 
+	retval = idr_get_new_above(&dev_priv->object_idr, item, 1, &user_key);
+	if (retval == -EAGAIN)
+		goto again;
+	if (retval)
+		goto fail_idr;
+
+	list_add(&item->owner_list, &file_priv->obj_list);
+	mutex_unlock(&dev->struct_mutex);
+
+	mem->offset = ((pool == 0) ?
+		      dev_priv->vram_offset : dev_priv->agp_offset) +
+	    (offset << SIS_MM_ALIGN_SHIFT);
+	mem->free = user_key;
+	mem->size = mem->size << SIS_MM_ALIGN_SHIFT;
+
+	return 0;
+
+fail_idr:
+	drm_mm_remove_node(&item->mm_node);
+fail_alloc:
+	kfree(item);
+	mutex_unlock(&dev->struct_mutex);
+
+	mem->offset = 0;
+	mem->size = 0;
+	mem->free = 0;
+
 	DRM_DEBUG("alloc %d, size = %d, offset = %d\n", pool, mem->size,
 		  mem->offset);
 
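The allocation path above uses the old two-step idr API: reserve memory with idr_pre_get(), then attempt idr_get_new_above() and loop back on -EAGAIN if another thread consumed the preallocation in between. A self-contained model of that retry shape (the fake_idr type is purely illustrative; the kernel later replaced this dance with idr_alloc()):

#include <stdio.h>
#include <errno.h>
#include <stdbool.h>

struct fake_idr { int next_id; bool prealloc; int races_left; };

/* stand-in for idr_pre_get(): returns 0 on allocation failure */
static int pre_get(struct fake_idr *idr) { idr->prealloc = true; return 1; }

/* stand-in for idr_get_new_above(): -EAGAIN when the preallocation was stolen */
static int get_new_above(struct fake_idr *idr, int starting_id, int *id)
{
	if (idr->races_left > 0) {
		idr->races_left--;
		idr->prealloc = false;
		return -EAGAIN;
	}
	*id = idr->next_id >= starting_id ? idr->next_id : starting_id;
	idr->next_id = *id + 1;
	return 0;
}

int main(void)
{
	struct fake_idr idr = { .next_id = 1, .races_left = 2 };
	int user_key, ret;

again:
	if (pre_get(&idr) == 0)
		return -ENOMEM;
	ret = get_new_above(&idr, 1, &user_key);
	if (ret == -EAGAIN)
		goto again;	/* someone else used our preallocation; retry */
	if (ret)
		return ret;

	printf("got user key %d after retries\n", user_key);
	return 0;
}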
@@ -167,14 +171,28 @@ static int sis_drm_free(struct drm_device *dev, void *data, struct drm_file *fil
 {
 	drm_sis_private_t *dev_priv = dev->dev_private;
 	drm_sis_mem_t *mem = data;
-	int ret;
+	struct sis_memblock *obj;
 
 	mutex_lock(&dev->struct_mutex);
-	ret = drm_sman_free_key(&dev_priv->sman, mem->free);
+	obj = idr_find(&dev_priv->object_idr, mem->free);
+	if (obj == NULL) {
+		mutex_unlock(&dev->struct_mutex);
+		return -EINVAL;
+	}
+
+	idr_remove(&dev_priv->object_idr, mem->free);
+	list_del(&obj->owner_list);
+	if (drm_mm_node_allocated(&obj->mm_node))
+		drm_mm_remove_node(&obj->mm_node);
+#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
+	else
+		sis_free(obj->req.offset);
+#endif
+	kfree(obj);
 	mutex_unlock(&dev->struct_mutex);
 	DRM_DEBUG("free = 0x%lx\n", mem->free);
 
-	return ret;
+	return 0;
 }
 
 static int sis_fb_alloc(struct drm_device *dev, void *data,
@@ -188,18 +206,10 @@ static int sis_ioctl_agp_init(struct drm_device *dev, void *data,
 {
 	drm_sis_private_t *dev_priv = dev->dev_private;
 	drm_sis_agp_t *agp = data;
-	int ret;
 	dev_priv = dev->dev_private;
 
 	mutex_lock(&dev->struct_mutex);
-	ret = drm_sman_set_range(&dev_priv->sman, AGP_TYPE, 0,
-				 agp->size >> SIS_MM_ALIGN_SHIFT);
-
-	if (ret) {
-		DRM_ERROR("AGP memory manager initialisation error\n");
-		mutex_unlock(&dev->struct_mutex);
-		return ret;
-	}
+	drm_mm_init(&dev_priv->agp_mm, 0, agp->size >> SIS_MM_ALIGN_SHIFT);
 
 	dev_priv->agp_initialized = 1;
 	dev_priv->agp_offset = agp->offset;
@@ -293,20 +303,26 @@ void sis_lastclose(struct drm_device *dev)
 		return;
 
 	mutex_lock(&dev->struct_mutex);
-	drm_sman_cleanup(&dev_priv->sman);
-	dev_priv->vram_initialized = 0;
-	dev_priv->agp_initialized = 0;
+	if (dev_priv->vram_initialized) {
+		drm_mm_takedown(&dev_priv->vram_mm);
+		dev_priv->vram_initialized = 0;
+	}
+	if (dev_priv->agp_initialized) {
+		drm_mm_takedown(&dev_priv->agp_mm);
+		dev_priv->agp_initialized = 0;
+	}
 	dev_priv->mmio = NULL;
 	mutex_unlock(&dev->struct_mutex);
 }
 
 void sis_reclaim_buffers_locked(struct drm_device *dev,
-				struct drm_file *file_priv)
+				struct drm_file *file)
 {
-	drm_sis_private_t *dev_priv = dev->dev_private;
+	struct sis_file_private *file_priv = file->driver_priv;
+	struct sis_memblock *entry, *next;
 
 	mutex_lock(&dev->struct_mutex);
-	if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)file_priv)) {
+	if (list_empty(&file_priv->obj_list)) {
 		mutex_unlock(&dev->struct_mutex);
 		return;
 	}
@@ -314,7 +330,18 @@ void sis_reclaim_buffers_locked(struct drm_device *dev,
 	if (dev->driver->dma_quiescent)
 		dev->driver->dma_quiescent(dev);
 
-	drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)file_priv);
+
+	list_for_each_entry_safe(entry, next, &file_priv->obj_list,
+				 owner_list) {
+		list_del(&entry->owner_list);
+		if (drm_mm_node_allocated(&entry->mm_node))
+			drm_mm_remove_node(&entry->mm_node);
+#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
+		else
+			sis_free(entry->req.offset);
+#endif
+		kfree(entry);
+	}
 	mutex_unlock(&dev->struct_mutex);
 	return;
 }
diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c
index cda29911e33..1613c78544c 100644
--- a/drivers/gpu/drm/tdfx/tdfx_drv.c
+++ b/drivers/gpu/drm/tdfx/tdfx_drv.c
@@ -41,20 +41,21 @@ static struct pci_device_id pciidlist[] = {
 	tdfx_PCI_IDS
 };
 
+static const struct file_operations tdfx_driver_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_open,
+	.release = drm_release,
+	.unlocked_ioctl = drm_ioctl,
+	.mmap = drm_mmap,
+	.poll = drm_poll,
+	.fasync = drm_fasync,
+	.llseek = noop_llseek,
+};
+
 static struct drm_driver driver = {
 	.driver_features = DRIVER_USE_MTRR,
 	.reclaim_buffers = drm_core_reclaim_buffers,
-	.fops = {
-		.owner = THIS_MODULE,
-		.open = drm_open,
-		.release = drm_release,
-		.unlocked_ioctl = drm_ioctl,
-		.mmap = drm_mmap,
-		.poll = drm_poll,
-		.fasync = drm_fasync,
-		.llseek = noop_llseek,
-	},
-
+	.fops = &tdfx_driver_fops,
 	.name = DRIVER_NAME,
 	.desc = DRIVER_DESC,
 	.date = DRIVER_DATE,
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index f3cf6f02c99..b2b33dde2af 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -7,4 +7,8 @@ ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
 	ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o \
 	ttm_bo_manager.o
 
+ifeq ($(CONFIG_SWIOTLB),y)
+ttm-y += ttm_page_alloc_dma.o
+endif
+
 obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index 1c4a72f681c..747c1413fc9 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -31,6 +31,7 @@
 
 #include "ttm/ttm_module.h"
 #include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_page_alloc.h"
 #ifdef TTM_HAS_AGP
 #include "ttm/ttm_placement.h"
 #include <linux/agp_backend.h>
@@ -40,45 +41,33 @@
 #include <asm/agp.h>
 
 struct ttm_agp_backend {
-	struct ttm_backend backend;
+	struct ttm_tt ttm;
 	struct agp_memory *mem;
 	struct agp_bridge_data *bridge;
 };
 
-static int ttm_agp_populate(struct ttm_backend *backend,
-			    unsigned long num_pages, struct page **pages,
-			    struct page *dummy_read_page,
-			    dma_addr_t *dma_addrs)
+static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
 {
-	struct ttm_agp_backend *agp_be =
-	    container_of(backend, struct ttm_agp_backend, backend);
-	struct page **cur_page, **last_page = pages + num_pages;
+	struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
+	struct drm_mm_node *node = bo_mem->mm_node;
 	struct agp_memory *mem;
+	int ret, cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
+	unsigned i;
 
-	mem = agp_allocate_memory(agp_be->bridge, num_pages, AGP_USER_MEMORY);
+	mem = agp_allocate_memory(agp_be->bridge, ttm->num_pages, AGP_USER_MEMORY);
 	if (unlikely(mem == NULL))
 		return -ENOMEM;
 
 	mem->page_count = 0;
-	for (cur_page = pages; cur_page < last_page; ++cur_page) {
-		struct page *page = *cur_page;
+	for (i = 0; i < ttm->num_pages; i++) {
+		struct page *page = ttm->pages[i];
+
 		if (!page)
-			page = dummy_read_page;
+			page = ttm->dummy_read_page;
 
 		mem->pages[mem->page_count++] = page;
 	}
 	agp_be->mem = mem;
-	return 0;
-}
-
-static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
-{
-	struct ttm_agp_backend *agp_be =
-	    container_of(backend, struct ttm_agp_backend, backend);
-	struct drm_mm_node *node = bo_mem->mm_node;
-	struct agp_memory *mem = agp_be->mem;
-	int cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
-	int ret;
 
 	mem->is_flushed = 1;
 	mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY;
@@ -90,50 +79,39 @@ static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
 	return ret;
 }
 
-static int ttm_agp_unbind(struct ttm_backend *backend)
+static int ttm_agp_unbind(struct ttm_tt *ttm)
 {
-	struct ttm_agp_backend *agp_be =
-	    container_of(backend, struct ttm_agp_backend, backend);
+	struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
 
-	if (agp_be->mem->is_bound)
-		return agp_unbind_memory(agp_be->mem);
-	else
-		return 0;
-}
-
-static void ttm_agp_clear(struct ttm_backend *backend)
-{
-	struct ttm_agp_backend *agp_be =
-	    container_of(backend, struct ttm_agp_backend, backend);
-	struct agp_memory *mem = agp_be->mem;
-
-	if (mem) {
-		ttm_agp_unbind(backend);
-		agp_free_memory(mem);
+	if (agp_be->mem) {
+		if (agp_be->mem->is_bound)
+			return agp_unbind_memory(agp_be->mem);
+		agp_free_memory(agp_be->mem);
+		agp_be->mem = NULL;
 	}
-	agp_be->mem = NULL;
+	return 0;
 }
 
-static void ttm_agp_destroy(struct ttm_backend *backend)
+static void ttm_agp_destroy(struct ttm_tt *ttm)
 {
-	struct ttm_agp_backend *agp_be =
-	    container_of(backend, struct ttm_agp_backend, backend);
+	struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
 
 	if (agp_be->mem)
-		ttm_agp_clear(backend);
+		ttm_agp_unbind(ttm);
+	ttm_tt_fini(ttm);
 	kfree(agp_be);
 }
 
 static struct ttm_backend_func ttm_agp_func = {
-	.populate = ttm_agp_populate,
-	.clear = ttm_agp_clear,
 	.bind = ttm_agp_bind,
 	.unbind = ttm_agp_unbind,
 	.destroy = ttm_agp_destroy,
 };
 
-struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
-					 struct agp_bridge_data *bridge)
+struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
+				 struct agp_bridge_data *bridge,
+				 unsigned long size, uint32_t page_flags,
+				 struct page *dummy_read_page)
 {
 	struct ttm_agp_backend *agp_be;
 
@@ -143,10 +121,29 @@ struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
143 121
144 agp_be->mem = NULL; 122 agp_be->mem = NULL;
145 agp_be->bridge = bridge; 123 agp_be->bridge = bridge;
146 agp_be->backend.func = &ttm_agp_func; 124 agp_be->ttm.func = &ttm_agp_func;
147 agp_be->backend.bdev = bdev; 125
148 return &agp_be->backend; 126 if (ttm_tt_init(&agp_be->ttm, bdev, size, page_flags, dummy_read_page)) {
127 return NULL;
128 }
129
130 return &agp_be->ttm;
131}
132EXPORT_SYMBOL(ttm_agp_tt_create);
133
134int ttm_agp_tt_populate(struct ttm_tt *ttm)
135{
136 if (ttm->state != tt_unpopulated)
137 return 0;
138
139 return ttm_pool_populate(ttm);
140}
141EXPORT_SYMBOL(ttm_agp_tt_populate);
142
143void ttm_agp_tt_unpopulate(struct ttm_tt *ttm)
144{
145 ttm_pool_unpopulate(ttm);
149} 146}
150EXPORT_SYMBOL(ttm_agp_backend_init); 147EXPORT_SYMBOL(ttm_agp_tt_unpopulate);
151 148
152#endif 149#endif
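With ttm_backend folded into ttm_tt, an AGP driver now supplies the callbacks above instead of instantiating a separate backend object. A minimal sketch of the hookup (the mydrv_* names and the bridge pointer are hypothetical; the callback fields are the ones this patch dereferences through bdev->driver):

	static struct ttm_tt *mydrv_ttm_tt_create(struct ttm_bo_device *bdev,
						  unsigned long size,
						  uint32_t page_flags,
						  struct page *dummy_read_page)
	{
		/* mydrv_agp_bridge stands in for the driver's AGP bridge */
		return ttm_agp_tt_create(bdev, mydrv_agp_bridge, size,
					 page_flags, dummy_read_page);
	}

	static struct ttm_bo_driver mydrv_bo_driver = {
		.ttm_tt_create = mydrv_ttm_tt_create,
		.ttm_tt_populate = ttm_agp_tt_populate,
		.ttm_tt_unpopulate = ttm_agp_tt_unpopulate,
		/* ... remaining callbacks ... */
	};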
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 0bb0f5f713e..2f0eab66ece 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -137,6 +137,7 @@ static void ttm_bo_release_list(struct kref *list_kref)
137 struct ttm_buffer_object *bo = 137 struct ttm_buffer_object *bo =
138 container_of(list_kref, struct ttm_buffer_object, list_kref); 138 container_of(list_kref, struct ttm_buffer_object, list_kref);
139 struct ttm_bo_device *bdev = bo->bdev; 139 struct ttm_bo_device *bdev = bo->bdev;
140 size_t acc_size = bo->acc_size;
140 141
141 BUG_ON(atomic_read(&bo->list_kref.refcount)); 142 BUG_ON(atomic_read(&bo->list_kref.refcount));
142 BUG_ON(atomic_read(&bo->kref.refcount)); 143 BUG_ON(atomic_read(&bo->kref.refcount));
@@ -152,9 +153,9 @@ static void ttm_bo_release_list(struct kref *list_kref)
152 if (bo->destroy) 153 if (bo->destroy)
153 bo->destroy(bo); 154 bo->destroy(bo);
154 else { 155 else {
155 ttm_mem_global_free(bdev->glob->mem_glob, bo->acc_size);
156 kfree(bo); 156 kfree(bo);
157 } 157 }
158 ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
158} 159}
159 160
160int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible) 161int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
@@ -337,27 +338,11 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
337 if (zero_alloc) 338 if (zero_alloc)
338 page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC; 339 page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
339 case ttm_bo_type_kernel: 340 case ttm_bo_type_kernel:
340 bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT, 341 bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
341 page_flags, glob->dummy_read_page); 342 page_flags, glob->dummy_read_page);
342 if (unlikely(bo->ttm == NULL)) 343 if (unlikely(bo->ttm == NULL))
343 ret = -ENOMEM; 344 ret = -ENOMEM;
344 break; 345 break;
345 case ttm_bo_type_user:
346 bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
347 page_flags | TTM_PAGE_FLAG_USER,
348 glob->dummy_read_page);
349 if (unlikely(bo->ttm == NULL)) {
350 ret = -ENOMEM;
351 break;
352 }
353
354 ret = ttm_tt_set_user(bo->ttm, current,
355 bo->buffer_start, bo->num_pages);
356 if (unlikely(ret != 0)) {
357 ttm_tt_destroy(bo->ttm);
358 bo->ttm = NULL;
359 }
360 break;
361 default: 346 default:
362 printk(KERN_ERR TTM_PFX "Illegal buffer object type\n"); 347 printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
363 ret = -EINVAL; 348 ret = -EINVAL;
@@ -419,9 +404,6 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
419 } 404 }
420 } 405 }
421 406
422 if (bdev->driver->move_notify)
423 bdev->driver->move_notify(bo, mem);
424
425 if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) && 407 if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
426 !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) 408 !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
427 ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem); 409 ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem);
@@ -434,6 +416,9 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
434 if (ret) 416 if (ret)
435 goto out_err; 417 goto out_err;
436 418
419 if (bdev->driver->move_notify)
420 bdev->driver->move_notify(bo, mem);
421
437moved: 422moved:
438 if (bo->evicted) { 423 if (bo->evicted) {
439 ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement); 424 ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
@@ -472,6 +457,9 @@ out_err:
472 457
473static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo) 458static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
474{ 459{
460 if (bo->bdev->driver->move_notify)
461 bo->bdev->driver->move_notify(bo, NULL);
462
475 if (bo->ttm) { 463 if (bo->ttm) {
476 ttm_tt_unbind(bo->ttm); 464 ttm_tt_unbind(bo->ttm);
477 ttm_tt_destroy(bo->ttm); 465 ttm_tt_destroy(bo->ttm);
@@ -913,16 +901,12 @@ static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
913} 901}
914 902
915static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man, 903static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
916 bool disallow_fixed,
917 uint32_t mem_type, 904 uint32_t mem_type,
918 uint32_t proposed_placement, 905 uint32_t proposed_placement,
919 uint32_t *masked_placement) 906 uint32_t *masked_placement)
920{ 907{
921 uint32_t cur_flags = ttm_bo_type_flags(mem_type); 908 uint32_t cur_flags = ttm_bo_type_flags(mem_type);
922 909
923 if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
924 return false;
925
926 if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0) 910 if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
927 return false; 911 return false;
928 912
@@ -967,7 +951,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
967 man = &bdev->man[mem_type]; 951 man = &bdev->man[mem_type];
968 952
969 type_ok = ttm_bo_mt_compatible(man, 953 type_ok = ttm_bo_mt_compatible(man,
970 bo->type == ttm_bo_type_user,
971 mem_type, 954 mem_type,
972 placement->placement[i], 955 placement->placement[i],
973 &cur_flags); 956 &cur_flags);
@@ -1015,7 +998,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
1015 if (!man->has_type) 998 if (!man->has_type)
1016 continue; 999 continue;
1017 if (!ttm_bo_mt_compatible(man, 1000 if (!ttm_bo_mt_compatible(man,
1018 bo->type == ttm_bo_type_user,
1019 mem_type, 1001 mem_type,
1020 placement->busy_placement[i], 1002 placement->busy_placement[i],
1021 &cur_flags)) 1003 &cur_flags))
@@ -1185,6 +1167,17 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
1185{ 1167{
1186 int ret = 0; 1168 int ret = 0;
1187 unsigned long num_pages; 1169 unsigned long num_pages;
1170 struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
1171
1172 ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
1173 if (ret) {
1174 printk(KERN_ERR TTM_PFX "Out of kernel memory.\n");
1175 if (destroy)
1176 (*destroy)(bo);
1177 else
1178 kfree(bo);
1179 return -ENOMEM;
1180 }
1188 1181
1189 size += buffer_start & ~PAGE_MASK; 1182 size += buffer_start & ~PAGE_MASK;
1190 num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; 1183 num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -1255,14 +1248,34 @@ out_err:
1255} 1248}
1256EXPORT_SYMBOL(ttm_bo_init); 1249EXPORT_SYMBOL(ttm_bo_init);
1257 1250
1258static inline size_t ttm_bo_size(struct ttm_bo_global *glob, 1251size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
1259 unsigned long num_pages) 1252 unsigned long bo_size,
1253 unsigned struct_size)
1260{ 1254{
1261 size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) & 1255 unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
1262 PAGE_MASK; 1256 size_t size = 0;
1263 1257
1264 return glob->ttm_bo_size + 2 * page_array_size; 1258 size += ttm_round_pot(struct_size);
1259 size += PAGE_ALIGN(npages * sizeof(void *));
1260 size += ttm_round_pot(sizeof(struct ttm_tt));
1261 return size;
1265} 1262}
1263EXPORT_SYMBOL(ttm_bo_acc_size);
1264
1265size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
1266 unsigned long bo_size,
1267 unsigned struct_size)
1268{
1269 unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
1270 size_t size = 0;
1271
1272 size += ttm_round_pot(struct_size);
1273 size += PAGE_ALIGN(npages * sizeof(void *));
1274 size += PAGE_ALIGN(npages * sizeof(dma_addr_t));
1275 size += ttm_round_pot(sizeof(struct ttm_dma_tt));
1276 return size;
1277}
1278EXPORT_SYMBOL(ttm_bo_dma_acc_size);
1266 1279
1267int ttm_bo_create(struct ttm_bo_device *bdev, 1280int ttm_bo_create(struct ttm_bo_device *bdev,
1268 unsigned long size, 1281 unsigned long size,
@@ -1276,10 +1289,10 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
1276{ 1289{
1277 struct ttm_buffer_object *bo; 1290 struct ttm_buffer_object *bo;
1278 struct ttm_mem_global *mem_glob = bdev->glob->mem_glob; 1291 struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
1292 size_t acc_size;
1279 int ret; 1293 int ret;
1280 1294
1281 size_t acc_size = 1295 acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
1282 ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
1283 ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false); 1296 ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
1284 if (unlikely(ret != 0)) 1297 if (unlikely(ret != 0))
1285 return ret; 1298 return ret;
@@ -1465,13 +1478,6 @@ int ttm_bo_global_init(struct drm_global_reference *ref)
1465 goto out_no_shrink; 1478 goto out_no_shrink;
1466 } 1479 }
1467 1480
1468 glob->ttm_bo_extra_size =
1469 ttm_round_pot(sizeof(struct ttm_tt)) +
1470 ttm_round_pot(sizeof(struct ttm_backend));
1471
1472 glob->ttm_bo_size = glob->ttm_bo_extra_size +
1473 ttm_round_pot(sizeof(struct ttm_buffer_object));
1474
1475 atomic_set(&glob->bo_count, 0); 1481 atomic_set(&glob->bo_count, 0);
1476 1482
1477 ret = kobject_init_and_add( 1483 ret = kobject_init_and_add(
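The two helpers exported above replace the ttm_bo_size/ttm_bo_extra_size bookkeeping deleted from ttm_bo_global_init(). The resulting call pattern, as a sketch mirroring ttm_bo_create():

	size_t acc_size;

	/* account for the object itself, its page directory and its ttm_tt */
	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));

	/* a driver that embeds the BO would pass the size of its own wrapper
	 * struct instead, and would use ttm_bo_dma_acc_size() when the object
	 * is backed by the DMA-aware page pool */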
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 082fcaea583..f8187ead7b3 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -244,7 +244,7 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
244 unsigned long page, 244 unsigned long page,
245 pgprot_t prot) 245 pgprot_t prot)
246{ 246{
247 struct page *d = ttm_tt_get_page(ttm, page); 247 struct page *d = ttm->pages[page];
248 void *dst; 248 void *dst;
249 249
250 if (!d) 250 if (!d)
@@ -281,7 +281,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
281 unsigned long page, 281 unsigned long page,
282 pgprot_t prot) 282 pgprot_t prot)
283{ 283{
284 struct page *s = ttm_tt_get_page(ttm, page); 284 struct page *s = ttm->pages[page];
285 void *src; 285 void *src;
286 286
287 if (!s) 287 if (!s)
@@ -342,6 +342,12 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
342 if (old_iomap == NULL && ttm == NULL) 342 if (old_iomap == NULL && ttm == NULL)
343 goto out2; 343 goto out2;
344 344
345 if (ttm->state == tt_unpopulated) {
346 ret = ttm->bdev->driver->ttm_tt_populate(ttm);
347 if (ret)
348 goto out1;
349 }
350
345 add = 0; 351 add = 0;
346 dir = 1; 352 dir = 1;
347 353
@@ -439,6 +445,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
439 kref_init(&fbo->list_kref); 445 kref_init(&fbo->list_kref);
440 kref_init(&fbo->kref); 446 kref_init(&fbo->kref);
441 fbo->destroy = &ttm_transfered_destroy; 447 fbo->destroy = &ttm_transfered_destroy;
448 fbo->acc_size = 0;
442 449
443 *new_obj = fbo; 450 *new_obj = fbo;
444 return 0; 451 return 0;
@@ -502,10 +509,16 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
502{ 509{
503 struct ttm_mem_reg *mem = &bo->mem; pgprot_t prot; 510 struct ttm_mem_reg *mem = &bo->mem; pgprot_t prot;
504 struct ttm_tt *ttm = bo->ttm; 511 struct ttm_tt *ttm = bo->ttm;
505 struct page *d; 512 int ret;
506 int i;
507 513
508 BUG_ON(!ttm); 514 BUG_ON(!ttm);
515
516 if (ttm->state == tt_unpopulated) {
517 ret = ttm->bdev->driver->ttm_tt_populate(ttm);
518 if (ret)
519 return ret;
520 }
521
509 if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) { 522 if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
510 /* 523 /*
511 * We're mapping a single page, and the desired 524 * We're mapping a single page, and the desired
@@ -513,18 +526,9 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
513 */ 526 */
514 527
515 map->bo_kmap_type = ttm_bo_map_kmap; 528 map->bo_kmap_type = ttm_bo_map_kmap;
516 map->page = ttm_tt_get_page(ttm, start_page); 529 map->page = ttm->pages[start_page];
517 map->virtual = kmap(map->page); 530 map->virtual = kmap(map->page);
518 } else { 531 } else {
519 /*
520 * Populate the part we're mapping;
521 */
522 for (i = start_page; i < start_page + num_pages; ++i) {
523 d = ttm_tt_get_page(ttm, i);
524 if (!d)
525 return -ENOMEM;
526 }
527
528 /* 532 /*
529 * We need to use vmap to get the desired page protection 533 * We need to use vmap to get the desired page protection
530 * or to make the buffer object look contiguous. 534 * or to make the buffer object look contiguous.
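The guard added here (and in ttm_bo_move_memcpy() above) is the new contract in one line: ttm->pages is only valid once the driver's ttm_tt_populate callback has run, so every path about to dereference the page array repeats the same check. The idiom, as a sketch:

	if (ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			return ret;
	}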
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 221b924aceb..54412848de8 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -174,18 +174,23 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
174 vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ? 174 vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
175 vm_get_page_prot(vma->vm_flags) : 175 vm_get_page_prot(vma->vm_flags) :
176 ttm_io_prot(bo->mem.placement, vma->vm_page_prot); 176 ttm_io_prot(bo->mem.placement, vma->vm_page_prot);
177
178 /* Allocate all pages at once, most common usage */
179 if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
180 retval = VM_FAULT_OOM;
181 goto out_io_unlock;
182 }
177 } 183 }
178 184
179 /* 185 /*
180 * Speculatively prefault a number of pages. Only error on 186 * Speculatively prefault a number of pages. Only error on
181 * first page. 187 * first page.
182 */ 188 */
183
184 for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) { 189 for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
185 if (bo->mem.bus.is_iomem) 190 if (bo->mem.bus.is_iomem)
186 pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset; 191 pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
187 else { 192 else {
188 page = ttm_tt_get_page(ttm, page_offset); 193 page = ttm->pages[page_offset];
189 if (unlikely(!page && i == 0)) { 194 if (unlikely(!page && i == 0)) {
190 retval = VM_FAULT_OOM; 195 retval = VM_FAULT_OOM;
191 goto out_io_unlock; 196 goto out_io_unlock;
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index e70ddd82dc0..9eba8e9a4e9 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -395,6 +395,7 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
395 zone->name, (unsigned long long) zone->max_mem >> 10); 395 zone->name, (unsigned long long) zone->max_mem >> 10);
396 } 396 }
397 ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE)); 397 ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
398 ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
398 return 0; 399 return 0;
399out_no_zone: 400out_no_zone:
400 ttm_mem_global_release(glob); 401 ttm_mem_global_release(glob);
@@ -409,6 +410,7 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
409 410
410 /* let the page allocator first stop the shrink work. */ 411 /* let the page allocator first stop the shrink work. */
411 ttm_page_alloc_fini(); 412 ttm_page_alloc_fini();
413 ttm_dma_page_alloc_fini();
412 414
413 flush_workqueue(glob->swap_queue); 415 flush_workqueue(glob->swap_queue);
414 destroy_workqueue(glob->swap_queue); 416 destroy_workqueue(glob->swap_queue);
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 727e93daac3..499debda791 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -619,8 +619,10 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
619 * @return count of pages still required to fulfill the request. 619 * @return count of pages still required to fulfill the request.
620 */ 620 */
621static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool, 621static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
622 struct list_head *pages, int ttm_flags, 622 struct list_head *pages,
623 enum ttm_caching_state cstate, unsigned count) 623 int ttm_flags,
624 enum ttm_caching_state cstate,
625 unsigned count)
624{ 626{
625 unsigned long irq_flags; 627 unsigned long irq_flags;
626 struct list_head *p; 628 struct list_head *p;
@@ -660,17 +662,67 @@ out:
660 return count; 662 return count;
661} 663}
662 664
665/* Put all pages in the pages array back into the correct pool to wait for reuse */
666static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
667 enum ttm_caching_state cstate)
668{
669 unsigned long irq_flags;
670 struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
671 unsigned i;
672
673 if (pool == NULL) {
674 /* No pool for this memory type so free the pages */
675 for (i = 0; i < npages; i++) {
676 if (pages[i]) {
677 if (page_count(pages[i]) != 1)
678 printk(KERN_ERR TTM_PFX
679 "Erroneous page count. "
680 "Leaking pages.\n");
681 __free_page(pages[i]);
682 pages[i] = NULL;
683 }
684 }
685 return;
686 }
687
688 spin_lock_irqsave(&pool->lock, irq_flags);
689 for (i = 0; i < npages; i++) {
690 if (pages[i]) {
691 if (page_count(pages[i]) != 1)
692 printk(KERN_ERR TTM_PFX
693 "Erroneous page count. "
694 "Leaking pages.\n");
695 list_add_tail(&pages[i]->lru, &pool->list);
696 pages[i] = NULL;
697 pool->npages++;
698 }
699 }
700 /* Check that we don't go over the pool limit */
701 npages = 0;
702 if (pool->npages > _manager->options.max_size) {
703 npages = pool->npages - _manager->options.max_size;
704 /* free at least NUM_PAGES_TO_ALLOC number of pages
705 * to reduce calls to set_memory_wb */
706 if (npages < NUM_PAGES_TO_ALLOC)
707 npages = NUM_PAGES_TO_ALLOC;
708 }
709 spin_unlock_irqrestore(&pool->lock, irq_flags);
710 if (npages)
711 ttm_page_pool_free(pool, npages);
712}
713
663/* 714/*
664 * On success the pages list will hold 'count' correctly 715
665 * cached pages. 716
666 */ 717 */
667int ttm_get_pages(struct list_head *pages, int flags, 718static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
668 enum ttm_caching_state cstate, unsigned count, 719 enum ttm_caching_state cstate)
669 dma_addr_t *dma_address)
670{ 720{
671 struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); 721 struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
722 struct list_head plist;
672 struct page *p = NULL; 723 struct page *p = NULL;
673 gfp_t gfp_flags = GFP_USER; 724 gfp_t gfp_flags = GFP_USER;
725 unsigned count;
674 int r; 726 int r;
675 727
676 /* set zero flag for page allocation if required */ 728 /* set zero flag for page allocation if required */
@@ -684,7 +736,7 @@ int ttm_get_pages(struct list_head *pages, int flags,
684 else 736 else
685 gfp_flags |= GFP_HIGHUSER; 737 gfp_flags |= GFP_HIGHUSER;
686 738
687 for (r = 0; r < count; ++r) { 739 for (r = 0; r < npages; ++r) {
688 p = alloc_page(gfp_flags); 740 p = alloc_page(gfp_flags);
689 if (!p) { 741 if (!p) {
690 742
@@ -693,87 +745,53 @@ int ttm_get_pages(struct list_head *pages, int flags,
693 return -ENOMEM; 745 return -ENOMEM;
694 } 746 }
695 747
696 list_add(&p->lru, pages); 748 pages[r] = p;
697 } 749 }
698 return 0; 750 return 0;
699 } 751 }
700 752
701
702 /* combine zero flag to pool flags */ 753 /* combine zero flag to pool flags */
703 gfp_flags |= pool->gfp_flags; 754 gfp_flags |= pool->gfp_flags;
704 755
705 /* First we take pages from the pool */ 756 /* First we take pages from the pool */
706 count = ttm_page_pool_get_pages(pool, pages, flags, cstate, count); 757 INIT_LIST_HEAD(&plist);
758 npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
759 count = 0;
760 list_for_each_entry(p, &plist, lru) {
761 pages[count++] = p;
762 }
707 763
708 /* clear the pages coming from the pool if requested */ 764 /* clear the pages coming from the pool if requested */
709 if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) { 765 if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
710 list_for_each_entry(p, pages, lru) { 766 list_for_each_entry(p, &plist, lru) {
711 clear_page(page_address(p)); 767 clear_page(page_address(p));
712 } 768 }
713 } 769 }
714 770
715 /* If pool didn't have enough pages allocate new one. */ 771 /* If pool didn't have enough pages allocate new one. */
716 if (count > 0) { 772 if (npages > 0) {
717 /* ttm_alloc_new_pages doesn't reference pool so we can run 773 /* ttm_alloc_new_pages doesn't reference pool so we can run
718 * multiple requests in parallel. 774 * multiple requests in parallel.
719 **/ 775 **/
720 r = ttm_alloc_new_pages(pages, gfp_flags, flags, cstate, count); 776 INIT_LIST_HEAD(&plist);
777 r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate, npages);
778 list_for_each_entry(p, &plist, lru) {
779 pages[count++] = p;
780 }
721 if (r) { 781 if (r) {
722 /* If there are any pages in the list put them back to 782
723 * the pool. */ 783
724 printk(KERN_ERR TTM_PFX 784 printk(KERN_ERR TTM_PFX
725 "Failed to allocate extra pages " 785 "Failed to allocate extra pages "
726 "for large request."); 786 "for large request.");
727 ttm_put_pages(pages, 0, flags, cstate, NULL); 787 ttm_put_pages(pages, count, flags, cstate);
728 return r; 788 return r;
729 } 789 }
730 } 790 }
731 791
732
733 return 0; 792 return 0;
734} 793}
735 794
736/* Put all pages in pages list to correct pool to wait for reuse */
737void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
738 enum ttm_caching_state cstate, dma_addr_t *dma_address)
739{
740 unsigned long irq_flags;
741 struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
742 struct page *p, *tmp;
743
744 if (pool == NULL) {
745 /* No pool for this memory type so free the pages */
746
747 list_for_each_entry_safe(p, tmp, pages, lru) {
748 __free_page(p);
749 }
750 /* Make the pages list empty */
751 INIT_LIST_HEAD(pages);
752 return;
753 }
754 if (page_count == 0) {
755 list_for_each_entry_safe(p, tmp, pages, lru) {
756 ++page_count;
757 }
758 }
759
760 spin_lock_irqsave(&pool->lock, irq_flags);
761 list_splice_init(pages, &pool->list);
762 pool->npages += page_count;
763 /* Check that we don't go over the pool limit */
764 page_count = 0;
765 if (pool->npages > _manager->options.max_size) {
766 page_count = pool->npages - _manager->options.max_size;
767 /* free at least NUM_PAGES_TO_ALLOC number of pages
768 * to reduce calls to set_memory_wb */
769 if (page_count < NUM_PAGES_TO_ALLOC)
770 page_count = NUM_PAGES_TO_ALLOC;
771 }
772 spin_unlock_irqrestore(&pool->lock, irq_flags);
773 if (page_count)
774 ttm_page_pool_free(pool, page_count);
775}
776
777static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags, 795static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
778 char *name) 796 char *name)
779{ 797{
@@ -836,6 +854,62 @@ void ttm_page_alloc_fini(void)
836 _manager = NULL; 854 _manager = NULL;
837} 855}
838 856
857int ttm_pool_populate(struct ttm_tt *ttm)
858{
859 struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
860 unsigned i;
861 int ret;
862
863 if (ttm->state != tt_unpopulated)
864 return 0;
865
866 for (i = 0; i < ttm->num_pages; ++i) {
867 ret = ttm_get_pages(&ttm->pages[i], 1,
868 ttm->page_flags,
869 ttm->caching_state);
870 if (ret != 0) {
871 ttm_pool_unpopulate(ttm);
872 return -ENOMEM;
873 }
874
875 ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
876 false, false);
877 if (unlikely(ret != 0)) {
878 ttm_pool_unpopulate(ttm);
879 return -ENOMEM;
880 }
881 }
882
883 if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
884 ret = ttm_tt_swapin(ttm);
885 if (unlikely(ret != 0)) {
886 ttm_pool_unpopulate(ttm);
887 return ret;
888 }
889 }
890
891 ttm->state = tt_unbound;
892 return 0;
893}
894EXPORT_SYMBOL(ttm_pool_populate);
895
896void ttm_pool_unpopulate(struct ttm_tt *ttm)
897{
898 unsigned i;
899
900 for (i = 0; i < ttm->num_pages; ++i) {
901 if (ttm->pages[i]) {
902 ttm_mem_global_free_page(ttm->glob->mem_glob,
903 ttm->pages[i]);
904 ttm_put_pages(&ttm->pages[i], 1,
905 ttm->page_flags,
906 ttm->caching_state);
907 }
908 }
909 ttm->state = tt_unpopulated;
910}
911EXPORT_SYMBOL(ttm_pool_unpopulate);
912
839int ttm_page_alloc_debugfs(struct seq_file *m, void *data) 913int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
840{ 914{
841 struct ttm_page_pool *p; 915 struct ttm_page_pool *p;
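Since ttm_get_pages()/ttm_put_pages() are now static, drivers reach the pool through the two exported wrappers, typically by plugging them into the new ttm_bo_driver callbacks unchanged (a sketch, as with the AGP wrappers earlier; drivers needing extra per-page work wrap them instead):

	static struct ttm_bo_driver mydrv_bo_driver = {
		/* ... */
		.ttm_tt_populate = ttm_pool_populate,
		.ttm_tt_unpopulate = ttm_pool_unpopulate,
	};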
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
new file mode 100644
index 00000000000..37ead6995c8
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -0,0 +1,1143 @@
1/*
2 * Copyright 2011 (c) Oracle Corp.
3
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sub license,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the
12 * next paragraph) shall be included in all copies or substantial portions
13 * of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
24 */
25
26/*
27 * A simple DMA pool loosely based on dmapool.c. It has certain advantages
28 * over the DMA pools:
29 * - Pool collects recently freed pages for reuse (and hooks up to
30 * the shrinker).
31 * - Tracks currently in use pages
32 * - Tracks whether the page is UC, WB or cached (and reverts to WB
33 * when freed).
34 */
35
36#include <linux/dma-mapping.h>
37#include <linux/list.h>
38#include <linux/seq_file.h> /* for seq_printf */
39#include <linux/slab.h>
40#include <linux/spinlock.h>
41#include <linux/highmem.h>
42#include <linux/mm_types.h>
43#include <linux/module.h>
44#include <linux/mm.h>
45#include <linux/atomic.h>
46#include <linux/device.h>
47#include <linux/kthread.h>
48#include "ttm/ttm_bo_driver.h"
49#include "ttm/ttm_page_alloc.h"
50#ifdef TTM_HAS_AGP
51#include <asm/agp.h>
52#endif
53
54#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
55#define SMALL_ALLOCATION 4
56#define FREE_ALL_PAGES (~0U)
57/* times are in msecs */
58#define IS_UNDEFINED (0)
59#define IS_WC (1<<1)
60#define IS_UC (1<<2)
61#define IS_CACHED (1<<3)
62#define IS_DMA32 (1<<4)
63
64enum pool_type {
65 POOL_IS_UNDEFINED,
66 POOL_IS_WC = IS_WC,
67 POOL_IS_UC = IS_UC,
68 POOL_IS_CACHED = IS_CACHED,
69 POOL_IS_WC_DMA32 = IS_WC | IS_DMA32,
70 POOL_IS_UC_DMA32 = IS_UC | IS_DMA32,
71 POOL_IS_CACHED_DMA32 = IS_CACHED | IS_DMA32,
72};
73/*
74 * The pool structure. There are usually six pools:
75 * - generic (not restricted to DMA32):
76 * - write combined, uncached, cached.
77 * - dma32 (up to 2^32 - so up to 4GB):
78 * - write combined, uncached, cached.
79 * for each 'struct device'. The 'cached' is for pages that are actively used.
80 * The other ones can be shrunk by the shrinker API if necessary.
81 * @pools: The 'struct device->dma_pools' link.
82 * @type: Type of the pool
83 * @lock: Protects the inuse_list and free_list from concurrent access. Must be
84 * used with irqsave/irqrestore variants because the pool allocator may be called
85 * from delayed work.
86 * @inuse_list: Pool of pages that are in use. The order is very important and
87 * it is in the order that the TTM pages that are put back are in.
88 * @free_list: Pool of pages that are free to be used. No order requirements.
89 * @dev: The device that is associated with these pools.
90 * @size: Size used during DMA allocation.
91 * @npages_free: Count of available pages for re-use.
92 * @npages_in_use: Count of pages that are in use.
93 * @nfrees: Stats when pool is shrinking.
94 * @nrefills: Stats when the pool is grown.
95 * @gfp_flags: Flags to pass for alloc_page.
96 * @name: Name of the pool.
97 * @dev_name: Name derived from dev - similar to how dev_info works.
98 * Used during shutdown as the dev_info during release is unavailable.
99 */
100struct dma_pool {
101 struct list_head pools; /* The 'struct device->dma_pools link */
102 enum pool_type type;
103 spinlock_t lock;
104 struct list_head inuse_list;
105 struct list_head free_list;
106 struct device *dev;
107 unsigned size;
108 unsigned npages_free;
109 unsigned npages_in_use;
110 unsigned long nfrees; /* Stats when shrunk. */
111 unsigned long nrefills; /* Stats when grown. */
112 gfp_t gfp_flags;
113 char name[13]; /* "cached dma32" */
114 char dev_name[64]; /* Constructed from dev */
115};
116
117/*
118 * The accounting page keeping track of the allocated page along with
119 * the DMA address.
120 * @page_list: The link to the 'page_list' in 'struct dma_pool'.
121 * @vaddr: The virtual address of the page
122 * @dma: The bus address of the page. If the page is not allocated
123 * via the DMA API, it will be -1.
124 */
125struct dma_page {
126 struct list_head page_list;
127 void *vaddr;
128 struct page *p;
129 dma_addr_t dma;
130};
131
132/*
133 * Limits for the pool. They are handled without locks because the only place
134 * where they may change is in sysfs store. They won't have an immediate effect
135 * anyway, so forcing serialization to access them is pointless.
136 */
137
138struct ttm_pool_opts {
139 unsigned alloc_size;
140 unsigned max_size;
141 unsigned small;
142};
143
144/*
145 * Contains the list of all of the 'struct device' and their corresponding
146 * DMA pools. Guarded by _mutex->lock.
147 * @pools: The link to 'struct ttm_pool_manager->pools'
148 * @dev: The 'struct device' associated with the 'pool'
149 * @pool: The 'struct dma_pool' associated with the 'dev'
150 */
151struct device_pools {
152 struct list_head pools;
153 struct device *dev;
154 struct dma_pool *pool;
155};
156
157/*
158 * struct ttm_pool_manager - Holds memory pools for fast allocation
159 *
160 * @lock: Lock used when adding/removing from pools
161 * @pools: List of 'struct device' and 'struct dma_pool' tuples.
162 * @options: Limits for the pool.
163 * @npools: Total amount of pools in existence.
164 * @shrinker: The structure used by [un|]register_shrinker
165 */
166struct ttm_pool_manager {
167 struct mutex lock;
168 struct list_head pools;
169 struct ttm_pool_opts options;
170 unsigned npools;
171 struct shrinker mm_shrink;
172 struct kobject kobj;
173};
174
175static struct ttm_pool_manager *_manager;
176
177static struct attribute ttm_page_pool_max = {
178 .name = "pool_max_size",
179 .mode = S_IRUGO | S_IWUSR
180};
181static struct attribute ttm_page_pool_small = {
182 .name = "pool_small_allocation",
183 .mode = S_IRUGO | S_IWUSR
184};
185static struct attribute ttm_page_pool_alloc_size = {
186 .name = "pool_allocation_size",
187 .mode = S_IRUGO | S_IWUSR
188};
189
190static struct attribute *ttm_pool_attrs[] = {
191 &ttm_page_pool_max,
192 &ttm_page_pool_small,
193 &ttm_page_pool_alloc_size,
194 NULL
195};
196
197static void ttm_pool_kobj_release(struct kobject *kobj)
198{
199 struct ttm_pool_manager *m =
200 container_of(kobj, struct ttm_pool_manager, kobj);
201 kfree(m);
202}
203
204static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr,
205 const char *buffer, size_t size)
206{
207 struct ttm_pool_manager *m =
208 container_of(kobj, struct ttm_pool_manager, kobj);
209 int chars;
210 unsigned val;
211 chars = sscanf(buffer, "%u", &val);
212 if (chars == 0)
213 return size;
214
215 /* Convert kb to number of pages */
216 val = val / (PAGE_SIZE >> 10);
217
218 if (attr == &ttm_page_pool_max)
219 m->options.max_size = val;
220 else if (attr == &ttm_page_pool_small)
221 m->options.small = val;
222 else if (attr == &ttm_page_pool_alloc_size) {
223 if (val > NUM_PAGES_TO_ALLOC*8) {
224 printk(KERN_ERR TTM_PFX
225 "Setting allocation size to %lu "
226 "is not allowed. Recommended size is "
227 "%lu\n",
228 NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
229 NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
230 return size;
231 } else if (val > NUM_PAGES_TO_ALLOC) {
232 printk(KERN_WARNING TTM_PFX
233 "Setting allocation size to "
234 "larger than %lu is not recommended.\n",
235 NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
236 }
237 m->options.alloc_size = val;
238 }
239
240 return size;
241}
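The sysfs files exchange kilobytes while the pool options are stored as page counts, hence the division by PAGE_SIZE >> 10 above and the matching multiplication in ttm_pool_show() below. The arithmetic, as a sketch assuming 4 KiB pages:

	unsigned val_kb = 65536;			/* "65536" written to pool_max_size */
	unsigned pages = val_kb / (PAGE_SIZE >> 10);	/* 65536 / 4 == 16384 pages */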
242
243static ssize_t ttm_pool_show(struct kobject *kobj, struct attribute *attr,
244 char *buffer)
245{
246 struct ttm_pool_manager *m =
247 container_of(kobj, struct ttm_pool_manager, kobj);
248 unsigned val = 0;
249
250 if (attr == &ttm_page_pool_max)
251 val = m->options.max_size;
252 else if (attr == &ttm_page_pool_small)
253 val = m->options.small;
254 else if (attr == &ttm_page_pool_alloc_size)
255 val = m->options.alloc_size;
256
257 val = val * (PAGE_SIZE >> 10);
258
259 return snprintf(buffer, PAGE_SIZE, "%u\n", val);
260}
261
262static const struct sysfs_ops ttm_pool_sysfs_ops = {
263 .show = &ttm_pool_show,
264 .store = &ttm_pool_store,
265};
266
267static struct kobj_type ttm_pool_kobj_type = {
268 .release = &ttm_pool_kobj_release,
269 .sysfs_ops = &ttm_pool_sysfs_ops,
270 .default_attrs = ttm_pool_attrs,
271};
272
273#ifndef CONFIG_X86
274static int set_pages_array_wb(struct page **pages, int addrinarray)
275{
276#ifdef TTM_HAS_AGP
277 int i;
278
279 for (i = 0; i < addrinarray; i++)
280 unmap_page_from_agp(pages[i]);
281#endif
282 return 0;
283}
284
285static int set_pages_array_wc(struct page **pages, int addrinarray)
286{
287#ifdef TTM_HAS_AGP
288 int i;
289
290 for (i = 0; i < addrinarray; i++)
291 map_page_into_agp(pages[i]);
292#endif
293 return 0;
294}
295
296static int set_pages_array_uc(struct page **pages, int addrinarray)
297{
298#ifdef TTM_HAS_AGP
299 int i;
300
301 for (i = 0; i < addrinarray; i++)
302 map_page_into_agp(pages[i]);
303#endif
304 return 0;
305}
306#endif /* for !CONFIG_X86 */
307
308static int ttm_set_pages_caching(struct dma_pool *pool,
309 struct page **pages, unsigned cpages)
310{
311 int r = 0;
312 /* Set page caching */
313 if (pool->type & IS_UC) {
314 r = set_pages_array_uc(pages, cpages);
315 if (r)
316 pr_err(TTM_PFX
317 "%s: Failed to set %d pages to uc!\n",
318 pool->dev_name, cpages);
319 }
320 if (pool->type & IS_WC) {
321 r = set_pages_array_wc(pages, cpages);
322 if (r)
323 pr_err(TTM_PFX
324 "%s: Failed to set %d pages to wc!\n",
325 pool->dev_name, cpages);
326 }
327 return r;
328}
329
330static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
331{
332 dma_addr_t dma = d_page->dma;
333 dma_free_coherent(pool->dev, pool->size, d_page->vaddr, dma);
334
335 kfree(d_page);
336 d_page = NULL;
337}
338static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool)
339{
340 struct dma_page *d_page;
341
342 d_page = kmalloc(sizeof(struct dma_page), GFP_KERNEL);
343 if (!d_page)
344 return NULL;
345
346 d_page->vaddr = dma_alloc_coherent(pool->dev, pool->size,
347 &d_page->dma,
348 pool->gfp_flags);
349 if (d_page->vaddr)
350 d_page->p = virt_to_page(d_page->vaddr);
351 else {
352 kfree(d_page);
353 d_page = NULL;
354 }
355 return d_page;
356}
357static enum pool_type ttm_to_type(int flags, enum ttm_caching_state cstate)
358{
359 enum pool_type type = IS_UNDEFINED;
360
361 if (flags & TTM_PAGE_FLAG_DMA32)
362 type |= IS_DMA32;
363 if (cstate == tt_cached)
364 type |= IS_CACHED;
365 else if (cstate == tt_uncached)
366 type |= IS_UC;
367 else
368 type |= IS_WC;
369
370 return type;
371}
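ttm_to_type() folds the page flags and the caching state into one of the pool_type values defined at the top of the file. For example (a sketch):

	/* an uncached DMA32 buffer selects the "uc dma32" pool */
	enum pool_type t = ttm_to_type(TTM_PAGE_FLAG_DMA32, tt_uncached);
	/* t == (IS_UC | IS_DMA32) == POOL_IS_UC_DMA32 */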
372
373static void ttm_pool_update_free_locked(struct dma_pool *pool,
374 unsigned freed_pages)
375{
376 pool->npages_free -= freed_pages;
377 pool->nfrees += freed_pages;
378
379}
380
381/* set memory back to wb and free the pages. */
382static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
383 struct page *pages[], unsigned npages)
384{
385 struct dma_page *d_page, *tmp;
386
387 /* Don't set WB on WB page pool. */
388 if (npages && !(pool->type & IS_CACHED) &&
389 set_pages_array_wb(pages, npages))
390 pr_err(TTM_PFX "%s: Failed to set %d pages to wb!\n",
391 pool->dev_name, npages);
392
393 list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
394 list_del(&d_page->page_list);
395 __ttm_dma_free_page(pool, d_page);
396 }
397}
398
399static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
400{
401 /* Don't set WB on WB page pool. */
402 if (!(pool->type & IS_CACHED) && set_pages_array_wb(&d_page->p, 1))
403 pr_err(TTM_PFX "%s: Failed to set %d pages to wb!\n",
404 pool->dev_name, 1);
405
406 list_del(&d_page->page_list);
407 __ttm_dma_free_page(pool, d_page);
408}
409
410/*
411 * Free pages from pool.
412 *
413 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
414 * number of pages in one go.
415 *
416 * @pool: to free the pages from
417 * @nr_free: number of pages to free; FREE_ALL_PAGES frees every page in the pool
418 **/
419static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
420{
421 unsigned long irq_flags;
422 struct dma_page *dma_p, *tmp;
423 struct page **pages_to_free;
424 struct list_head d_pages;
425 unsigned freed_pages = 0,
426 npages_to_free = nr_free;
427
428 if (NUM_PAGES_TO_ALLOC < nr_free)
429 npages_to_free = NUM_PAGES_TO_ALLOC;
430#if 0
431 if (nr_free > 1) {
432 pr_debug("%s: (%s:%d) Attempting to free %d (%d) pages\n",
433 pool->dev_name, pool->name, current->pid,
434 npages_to_free, nr_free);
435 }
436#endif
437 pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
438 GFP_KERNEL);
439
440 if (!pages_to_free) {
441 pr_err(TTM_PFX
442 "%s: Failed to allocate memory for pool free operation.\n",
443 pool->dev_name);
444 return 0;
445 }
446 INIT_LIST_HEAD(&d_pages);
447restart:
448 spin_lock_irqsave(&pool->lock, irq_flags);
449
450 /* We're picking the oldest ones off the list */
451 list_for_each_entry_safe_reverse(dma_p, tmp, &pool->free_list,
452 page_list) {
453 if (freed_pages >= npages_to_free)
454 break;
455
456 /* Move the dma_page from one list to another. */
457 list_move(&dma_p->page_list, &d_pages);
458
459 pages_to_free[freed_pages++] = dma_p->p;
460 /* We can only remove NUM_PAGES_TO_ALLOC at a time. */
461 if (freed_pages >= NUM_PAGES_TO_ALLOC) {
462
463 ttm_pool_update_free_locked(pool, freed_pages);
464 /**
465 * Because changing page caching is costly
466 * we unlock the pool to prevent stalling.
467 */
468 spin_unlock_irqrestore(&pool->lock, irq_flags);
469
470 ttm_dma_pages_put(pool, &d_pages, pages_to_free,
471 freed_pages);
472
473 INIT_LIST_HEAD(&d_pages);
474
475 if (likely(nr_free != FREE_ALL_PAGES))
476 nr_free -= freed_pages;
477
478 if (NUM_PAGES_TO_ALLOC >= nr_free)
479 npages_to_free = nr_free;
480 else
481 npages_to_free = NUM_PAGES_TO_ALLOC;
482
483 freed_pages = 0;
484
485 /* free all so restart the processing */
486 if (nr_free)
487 goto restart;
488
489 /* Not allowed to fall through or break, because the
490 * code following this loop runs under the spinlock while
491 * we are outside it here.
492 */
493 goto out;
494
495 }
496 }
497
498 /* remove range of pages from the pool */
499 if (freed_pages) {
500 ttm_pool_update_free_locked(pool, freed_pages);
501 nr_free -= freed_pages;
502 }
503
504 spin_unlock_irqrestore(&pool->lock, irq_flags);
505
506 if (freed_pages)
507 ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);
508out:
509 kfree(pages_to_free);
510 return nr_free;
511}
512
513static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
514{
515 struct device_pools *p;
516 struct dma_pool *pool;
517
518 if (!dev)
519 return;
520
521 mutex_lock(&_manager->lock);
522 list_for_each_entry_reverse(p, &_manager->pools, pools) {
523 if (p->dev != dev)
524 continue;
525 pool = p->pool;
526 if (pool->type != type)
527 continue;
528
529 list_del(&p->pools);
530 kfree(p);
531 _manager->npools--;
532 break;
533 }
534 list_for_each_entry_reverse(pool, &dev->dma_pools, pools) {
535 if (pool->type != type)
536 continue;
537 /* Takes a spinlock.. */
538 ttm_dma_page_pool_free(pool, FREE_ALL_PAGES);
539 WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
540 /* This code path is called after _all_ references to the
541 * struct device have been dropped - so nobody should be
542 * touching it. In case somebody is trying to _add_, we are
543 * guarded by the mutex. */
544 list_del(&pool->pools);
545 kfree(pool);
546 break;
547 }
548 mutex_unlock(&_manager->lock);
549}
550
551/*
552 * When the 'struct device' is freed this destructor is run,
553 * although the pool might already have been freed earlier.
554 */
555static void ttm_dma_pool_release(struct device *dev, void *res)
556{
557 struct dma_pool *pool = *(struct dma_pool **)res;
558
559 if (pool)
560 ttm_dma_free_pool(dev, pool->type);
561}
562
563static int ttm_dma_pool_match(struct device *dev, void *res, void *match_data)
564{
565 return *(struct dma_pool **)res == match_data;
566}
567
568static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
569 enum pool_type type)
570{
571 char *n[] = {"wc", "uc", "cached", " dma32", "unknown",};
572 enum pool_type t[] = {IS_WC, IS_UC, IS_CACHED, IS_DMA32, IS_UNDEFINED};
573 struct device_pools *sec_pool = NULL;
574 struct dma_pool *pool = NULL, **ptr;
575 unsigned i;
576 int ret = -ENODEV;
577 char *p;
578
579 if (!dev)
580 return NULL;
581
582 ptr = devres_alloc(ttm_dma_pool_release, sizeof(*ptr), GFP_KERNEL);
583 if (!ptr)
584 return NULL;
585
586 ret = -ENOMEM;
587
588 pool = kmalloc_node(sizeof(struct dma_pool), GFP_KERNEL,
589 dev_to_node(dev));
590 if (!pool)
591 goto err_mem;
592
593 sec_pool = kmalloc_node(sizeof(struct device_pools), GFP_KERNEL,
594 dev_to_node(dev));
595 if (!sec_pool)
596 goto err_mem;
597
598 INIT_LIST_HEAD(&sec_pool->pools);
599 sec_pool->dev = dev;
600 sec_pool->pool = pool;
601
602 INIT_LIST_HEAD(&pool->free_list);
603 INIT_LIST_HEAD(&pool->inuse_list);
604 INIT_LIST_HEAD(&pool->pools);
605 spin_lock_init(&pool->lock);
606 pool->dev = dev;
607 pool->npages_free = pool->npages_in_use = 0;
608 pool->nfrees = 0;
609 pool->gfp_flags = flags;
610 pool->size = PAGE_SIZE;
611 pool->type = type;
612 pool->nrefills = 0;
613 p = pool->name;
614 for (i = 0; i < 5; i++) {
615 if (type & t[i]) {
616 p += snprintf(p, sizeof(pool->name) - (p - pool->name),
617 "%s", n[i]);
618 }
619 }
620 *p = 0;
621 /* We copy the name for pr_ calls because when dma_pool_destroy is
622 * called, the kobj->name has already been deallocated. */
623 snprintf(pool->dev_name, sizeof(pool->dev_name), "%s %s",
624 dev_driver_string(dev), dev_name(dev));
625 mutex_lock(&_manager->lock);
626 /* You can get the dma_pool from either the global: */
627 list_add(&sec_pool->pools, &_manager->pools);
628 _manager->npools++;
629 /* or from 'struct device': */
630 list_add(&pool->pools, &dev->dma_pools);
631 mutex_unlock(&_manager->lock);
632
633 *ptr = pool;
634 devres_add(dev, ptr);
635
636 return pool;
637err_mem:
638 devres_free(ptr);
639 kfree(sec_pool);
640 kfree(pool);
641 return ERR_PTR(ret);
642}
643
644static struct dma_pool *ttm_dma_find_pool(struct device *dev,
645 enum pool_type type)
646{
647 struct dma_pool *pool, *tmp, *found = NULL;
648
649 if (type == IS_UNDEFINED)
650 return found;
651
652 /* NB: We iterate on the 'struct dev' which has no spinlock, but
653 * it does have a kref which we have taken. The kref is taken during
654 * graphic driver loading - in the drm_pci_init it calls either
655 * pci_dev_get or pci_register_driver which both end up taking a kref
656 * on 'struct device'.
657 *
658 * On teardown, the graphic drivers end up quiescing the TTM (put_pages)
659 * and call the devres destructors: ttm_dma_pool_release. The nice
660 * thing is that at that point there are no pages associated with the
661 * driver, so this function will not be called.
662 */
663 list_for_each_entry_safe(pool, tmp, &dev->dma_pools, pools) {
664 if (pool->type != type)
665 continue;
666 found = pool;
667 break;
668 }
669 return found;
670}
671
672/*
673 * Free the pages that failed to change their caching state. If there
674 * are pages that have already changed their caching state, put them back
675 * into the pool.
676 */
677static void ttm_dma_handle_caching_state_failure(struct dma_pool *pool,
678 struct list_head *d_pages,
679 struct page **failed_pages,
680 unsigned cpages)
681{
682 struct dma_page *d_page, *tmp;
683 struct page *p;
684 unsigned i = 0;
685
686 p = failed_pages[0];
687 if (!p)
688 return;
689 /* Find the failed page. */
690 list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
691 if (d_page->p != p)
692 continue;
693 /* .. and then progress over the full list. */
694 list_del(&d_page->page_list);
695 __ttm_dma_free_page(pool, d_page);
696 if (++i < cpages)
697 p = failed_pages[i];
698 else
699 break;
700 }
701
702}
703
704/*
705 * Allocate 'count' pages and set their caching state. The full list of
706 * allocated pages is put on 'd_pages'; the caller splices them into the
707 * pool's free list or hands them out directly.
708 * We return zero for success, and negative numbers as errors.
709 */
710static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
711 struct list_head *d_pages,
712 unsigned count)
713{
714 struct page **caching_array;
715 struct dma_page *dma_p;
716 struct page *p;
717 int r = 0;
718 unsigned i, cpages;
719 unsigned max_cpages = min(count,
720 (unsigned)(PAGE_SIZE/sizeof(struct page *)));
721
722 /* allocate array for page caching change */
723 caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
724
725 if (!caching_array) {
726 pr_err(TTM_PFX
727 "%s: Unable to allocate table for new pages.",
728 pool->dev_name);
729 return -ENOMEM;
730 }
731
732 if (count > 1) {
733 pr_debug("%s: (%s:%d) Getting %d pages\n",
734 pool->dev_name, pool->name, current->pid,
735 count);
736 }
737
738 for (i = 0, cpages = 0; i < count; ++i) {
739 dma_p = __ttm_dma_alloc_page(pool);
740 if (!dma_p) {
741 pr_err(TTM_PFX "%s: Unable to get page %u.\n",
742 pool->dev_name, i);
743
744 /* store already allocated pages in the pool after
745 * setting the caching state */
746 if (cpages) {
747 r = ttm_set_pages_caching(pool, caching_array,
748 cpages);
749 if (r)
750 ttm_dma_handle_caching_state_failure(
751 pool, d_pages, caching_array,
752 cpages);
753 }
754 r = -ENOMEM;
755 goto out;
756 }
757 p = dma_p->p;
758#ifdef CONFIG_HIGHMEM
759 /* gfp flags of a highmem page should never be dma32, so
760 * we should be fine in that case
761 */
762 if (!PageHighMem(p))
763#endif
764 {
765 caching_array[cpages++] = p;
766 if (cpages == max_cpages) {
767 /* Note: Cannot hold the spinlock */
768 r = ttm_set_pages_caching(pool, caching_array,
769 cpages);
770 if (r) {
771 ttm_dma_handle_caching_state_failure(
772 pool, d_pages, caching_array,
773 cpages);
774 goto out;
775 }
776 cpages = 0;
777 }
778 }
779 list_add(&dma_p->page_list, d_pages);
780 }
781
782 if (cpages) {
783 r = ttm_set_pages_caching(pool, caching_array, cpages);
784 if (r)
785 ttm_dma_handle_caching_state_failure(pool, d_pages,
786 caching_array, cpages);
787 }
788out:
789 kfree(caching_array);
790 return r;
791}
792
793/*
794 * @return count of pages still required to fulfill the request.
795 */
796static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
797 unsigned long *irq_flags)
798{
799 unsigned count = _manager->options.small;
800 int r = pool->npages_free;
801
802 if (count > pool->npages_free) {
803 struct list_head d_pages;
804
805 INIT_LIST_HEAD(&d_pages);
806
807 spin_unlock_irqrestore(&pool->lock, *irq_flags);
808
809 /* Returns how many more are necessary to fulfill the
810 * request. */
811 r = ttm_dma_pool_alloc_new_pages(pool, &d_pages, count);
812
813 spin_lock_irqsave(&pool->lock, *irq_flags);
814 if (!r) {
815 /* Add the fresh to the end.. */
816 list_splice(&d_pages, &pool->free_list);
817 ++pool->nrefills;
818 pool->npages_free += count;
819 r = count;
820 } else {
821 struct dma_page *d_page;
822 unsigned cpages = 0;
823
824 pr_err(TTM_PFX "%s: Failed to fill %s pool (r:%d)!\n",
825 pool->dev_name, pool->name, r);
826
827 list_for_each_entry(d_page, &d_pages, page_list) {
828 cpages++;
829 }
830 list_splice_tail(&d_pages, &pool->free_list);
831 pool->npages_free += cpages;
832 r = cpages;
833 }
834 }
835 return r;
836}
837
838/*
839 * @return zero on success, or a negative error if no page could be obtained.
840 * The populate list is actually a stack (not that it matters, as TTM
841 * allocates one page at a time).
842 */
843static int ttm_dma_pool_get_pages(struct dma_pool *pool,
844 struct ttm_dma_tt *ttm_dma,
845 unsigned index)
846{
847 struct dma_page *d_page;
848 struct ttm_tt *ttm = &ttm_dma->ttm;
849 unsigned long irq_flags;
850 int count, r = -ENOMEM;
851
852 spin_lock_irqsave(&pool->lock, irq_flags);
853 count = ttm_dma_page_pool_fill_locked(pool, &irq_flags);
854 if (count) {
855 d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
856 ttm->pages[index] = d_page->p;
857 ttm_dma->dma_address[index] = d_page->dma;
858 list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
859 r = 0;
860 pool->npages_in_use += 1;
861 pool->npages_free -= 1;
862 }
863 spin_unlock_irqrestore(&pool->lock, irq_flags);
864 return r;
865}
866
867/*
868 * On success the pages list will hold the requested number of correctly
869 * cached pages. On failure a negative error value (-ENOMEM, etc.) is returned.
870 */
871int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
872{
873 struct ttm_tt *ttm = &ttm_dma->ttm;
874 struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
875 struct dma_pool *pool;
876 enum pool_type type;
877 unsigned i;
878 gfp_t gfp_flags;
879 int ret;
880
881 if (ttm->state != tt_unpopulated)
882 return 0;
883
884 type = ttm_to_type(ttm->page_flags, ttm->caching_state);
885 if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
886 gfp_flags = GFP_USER | GFP_DMA32;
887 else
888 gfp_flags = GFP_HIGHUSER;
889 if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
890 gfp_flags |= __GFP_ZERO;
891
892 pool = ttm_dma_find_pool(dev, type);
893 if (!pool) {
894 pool = ttm_dma_pool_init(dev, gfp_flags, type);
895 if (IS_ERR_OR_NULL(pool)) {
896 return -ENOMEM;
897 }
898 }
899
900 INIT_LIST_HEAD(&ttm_dma->pages_list);
901 for (i = 0; i < ttm->num_pages; ++i) {
902 ret = ttm_dma_pool_get_pages(pool, ttm_dma, i);
903 if (ret != 0) {
904 ttm_dma_unpopulate(ttm_dma, dev);
905 return -ENOMEM;
906 }
907
908 ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
909 false, false);
910 if (unlikely(ret != 0)) {
911 ttm_dma_unpopulate(ttm_dma, dev);
912 return -ENOMEM;
913 }
914 }
915
916 if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
917 ret = ttm_tt_swapin(ttm);
918 if (unlikely(ret != 0)) {
919 ttm_dma_unpopulate(ttm_dma, dev);
920 return ret;
921 }
922 }
923
924 ttm->state = tt_unbound;
925 return 0;
926}
927EXPORT_SYMBOL_GPL(ttm_dma_populate);
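A driver opting into the DMA-aware pool embeds a struct ttm_dma_tt (so the dma_address array is allocated next to the page array) and forwards the callbacks together with the struct device it uses for dma_alloc_coherent(). A hedged sketch, with hypothetical mydrv names:

	static int mydrv_ttm_tt_populate(struct ttm_tt *ttm)
	{
		struct ttm_dma_tt *dma_ttm = container_of(ttm, struct ttm_dma_tt, ttm);

		/* mydrv_dev stands in for the driver's struct device */
		return ttm_dma_populate(dma_ttm, mydrv_dev);
	}

	static void mydrv_ttm_tt_unpopulate(struct ttm_tt *ttm)
	{
		ttm_dma_unpopulate(container_of(ttm, struct ttm_dma_tt, ttm),
				   mydrv_dev);
	}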
928
929/* Get a good estimate of how many pages are free in the pools */
930static int ttm_dma_pool_get_num_unused_pages(void)
931{
932 struct device_pools *p;
933 unsigned total = 0;
934
935 mutex_lock(&_manager->lock);
936 list_for_each_entry(p, &_manager->pools, pools)
937 total += p->pool->npages_free;
938 mutex_unlock(&_manager->lock);
939 return total;
940}
941
942/* Put all pages in the pages list back into the correct pool to wait for reuse */
943void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
944{
945 struct ttm_tt *ttm = &ttm_dma->ttm;
946 struct dma_pool *pool;
947 struct dma_page *d_page, *next;
948 enum pool_type type;
949 bool is_cached = false;
950 unsigned count = 0, i, npages = 0;
951 unsigned long irq_flags;
952
953 type = ttm_to_type(ttm->page_flags, ttm->caching_state);
954 pool = ttm_dma_find_pool(dev, type);
955 if (!pool) {
956 WARN_ON(!pool);
957 return;
958 }
959 is_cached = (ttm_dma_find_pool(pool->dev,
960 ttm_to_type(ttm->page_flags, tt_cached)) == pool);
961
962 /* make sure the pages array matches the list, and count the pages */
963 list_for_each_entry(d_page, &ttm_dma->pages_list, page_list) {
964 ttm->pages[count] = d_page->p;
965 count++;
966 }
967
968 spin_lock_irqsave(&pool->lock, irq_flags);
969 pool->npages_in_use -= count;
970 if (is_cached) {
971 pool->nfrees += count;
972 } else {
973 pool->npages_free += count;
974 list_splice(&ttm_dma->pages_list, &pool->free_list);
975 npages = count;
976 if (pool->npages_free > _manager->options.max_size) {
977 npages = pool->npages_free - _manager->options.max_size;
978 /* free at least NUM_PAGES_TO_ALLOC number of pages
979 * to reduce calls to set_memory_wb */
980 if (npages < NUM_PAGES_TO_ALLOC)
981 npages = NUM_PAGES_TO_ALLOC;
982 }
983 }
984 spin_unlock_irqrestore(&pool->lock, irq_flags);
985
986 if (is_cached) {
987 list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list, page_list) {
988 ttm_mem_global_free_page(ttm->glob->mem_glob,
989 d_page->p);
990 ttm_dma_page_put(pool, d_page);
991 }
992 } else {
993 for (i = 0; i < count; i++) {
994 ttm_mem_global_free_page(ttm->glob->mem_glob,
995 ttm->pages[i]);
996 }
997 }
998
999 INIT_LIST_HEAD(&ttm_dma->pages_list);
1000 for (i = 0; i < ttm->num_pages; i++) {
1001 ttm->pages[i] = NULL;
1002 ttm_dma->dma_address[i] = 0;
1003 }
1004
1005 /* shrink the pool if necessary (only on !is_cached pools) */
1006 if (npages)
1007 ttm_dma_page_pool_free(pool, npages);
1008 ttm->state = tt_unpopulated;
1009}
1010EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
1011
1012/**
1013 * Callback for mm to request the pool to reduce the number of pages held.
1014 */
1015static int ttm_dma_pool_mm_shrink(struct shrinker *shrink,
1016 struct shrink_control *sc)
1017{
1018 static atomic_t start_pool = ATOMIC_INIT(0);
1019 unsigned idx = 0;
1020 unsigned pool_offset = atomic_add_return(1, &start_pool);
1021 unsigned shrink_pages = sc->nr_to_scan;
1022 struct device_pools *p;
1023
1024 if (list_empty(&_manager->pools))
1025 return 0;
1026
1027 mutex_lock(&_manager->lock);
1028 pool_offset = pool_offset % _manager->npools;
1029 list_for_each_entry(p, &_manager->pools, pools) {
1030 unsigned nr_free;
1031
1032 if (!p->dev)
1033 continue;
1034 if (shrink_pages == 0)
1035 break;
1036 /* Do it in round-robin fashion. */
1037 if (++idx < pool_offset)
1038 continue;
1039 nr_free = shrink_pages;
1040 shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free);
1041 pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
1042 p->pool->dev_name, p->pool->name, current->pid, nr_free,
1043 shrink_pages);
1044 }
1045 mutex_unlock(&_manager->lock);
1046 /* return estimated number of unused pages in pool */
1047 return ttm_dma_pool_get_num_unused_pages();
1048}
1049
1050static void ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
1051{
1052 manager->mm_shrink.shrink = &ttm_dma_pool_mm_shrink;
1053 manager->mm_shrink.seeks = 1;
1054 register_shrinker(&manager->mm_shrink);
1055}
1056
1057static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
1058{
1059 unregister_shrinker(&manager->mm_shrink);
1060}
1061
1062int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
1063{
1064 int ret = -ENOMEM;
1065
1066 WARN_ON(_manager);
1067
1068 printk(KERN_INFO TTM_PFX "Initializing DMA pool allocator.\n");
1069
1070 _manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
1071 if (!_manager)
1072 goto err_manager;
1073
1074 mutex_init(&_manager->lock);
1075 INIT_LIST_HEAD(&_manager->pools);
1076
1077 _manager->options.max_size = max_pages;
1078 _manager->options.small = SMALL_ALLOCATION;
1079 _manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
1080
1081 /* This takes care of auto-freeing the _manager */
1082 ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
1083 &glob->kobj, "dma_pool");
1084 if (unlikely(ret != 0)) {
1085 kobject_put(&_manager->kobj);
1086 goto err;
1087 }
1088 ttm_dma_pool_mm_shrink_init(_manager);
1089 return 0;
1090err_manager:
1091 kfree(_manager);
1092 _manager = NULL;
1093err:
1094 return ret;
1095}
1096
1097void ttm_dma_page_alloc_fini(void)
1098{
1099 struct device_pools *p, *t;
1100
1101 printk(KERN_INFO TTM_PFX "Finalizing DMA pool allocator.\n");
1102 ttm_dma_pool_mm_shrink_fini(_manager);
1103
1104 list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) {
1105 dev_dbg(p->dev, "(%s:%d) Freeing.\n", p->pool->name,
1106 current->pid);
1107 WARN_ON(devres_destroy(p->dev, ttm_dma_pool_release,
1108 ttm_dma_pool_match, p->pool));
1109 ttm_dma_free_pool(p->dev, p->pool->type);
1110 }
1111 kobject_put(&_manager->kobj);
1112 _manager = NULL;
1113}
1114
1115int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
1116{
1117 struct device_pools *p;
1118 struct dma_pool *pool = NULL;
1119 char *h[] = {"pool", "refills", "pages freed", "inuse", "available",
1120 "name", "virt", "busaddr"};
1121
1122 if (!_manager) {
1123 seq_printf(m, "No pool allocator running.\n");
1124 return 0;
1125 }
1126 seq_printf(m, "%13s %12s %13s %8s %8s %8s\n",
1127 h[0], h[1], h[2], h[3], h[4], h[5]);
1128 mutex_lock(&_manager->lock);
1129 list_for_each_entry(p, &_manager->pools, pools) {
1130 struct device *dev = p->dev;
1131 if (!dev)
1132 continue;
1133 pool = p->pool;
1134 seq_printf(m, "%13s %12ld %13ld %8d %8d %8s\n",
1135 pool->name, pool->nrefills,
1136 pool->nfrees, pool->npages_in_use,
1137 pool->npages_free,
1138 pool->dev_name);
1139 }
1140 mutex_unlock(&_manager->lock);
1141 return 0;
1142}
1143EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs);
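
The shrinker callback earlier in this file walks the device pools starting at a rotating offset, so successive shrink requests from the VM spread their cost across pools instead of always draining the first one. Below is a minimal userspace sketch of that round-robin walk; the pool count, pool_free() and the page numbers are invented stand-ins for the real ttm_dma_page_pool_free() machinery, not kernel API.

#include <stdio.h>

#define NPOOLS 4

/* Hypothetical stand-in: "free" up to nr pages, return how many remain. */
static unsigned pool_free(int pool, unsigned nr)
{
	printf("pool %d asked to free %u pages\n", pool, nr);
	return nr > 2 ? nr - 2 : 0;	/* pretend each pool frees two pages */
}

int main(void)
{
	static unsigned start_pool;	/* plays the role of the atomic counter */
	unsigned pool_offset = ++start_pool % NPOOLS;
	unsigned shrink_pages = 8;	/* sc->nr_to_scan in the real callback */
	unsigned idx = 0;
	int i;

	for (i = 0; i < NPOOLS && shrink_pages; i++) {
		/* Skip pools until we reach this invocation's starting offset. */
		if (++idx < pool_offset)
			continue;
		shrink_pages = pool_free(i, shrink_pages);
	}
	return 0;
}

Each invocation starts one pool further along, which is what the static start_pool counter achieves in the driver.
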
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index f9cc548d6d9..2f75d203a2b 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -43,139 +43,20 @@
43#include "ttm/ttm_placement.h" 43#include "ttm/ttm_placement.h"
44#include "ttm/ttm_page_alloc.h" 44#include "ttm/ttm_page_alloc.h"
45 45
46static int ttm_tt_swapin(struct ttm_tt *ttm);
47
48/** 46/**
49 * Allocates storage for pointers to the pages that back the ttm. 47 * Allocates storage for pointers to the pages that back the ttm.
50 */ 48 */
51static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm) 49static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
52{ 50{
53 ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(*ttm->pages)); 51 ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(void*));
54 ttm->dma_address = drm_calloc_large(ttm->num_pages,
55 sizeof(*ttm->dma_address));
56}
57
58static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
59{
60 drm_free_large(ttm->pages);
61 ttm->pages = NULL;
62 drm_free_large(ttm->dma_address);
63 ttm->dma_address = NULL;
64}
65
66static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
67{
68 int write;
69 int dirty;
70 struct page *page;
71 int i;
72 struct ttm_backend *be = ttm->be;
73
74 BUG_ON(!(ttm->page_flags & TTM_PAGE_FLAG_USER));
75 write = ((ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0);
76 dirty = ((ttm->page_flags & TTM_PAGE_FLAG_USER_DIRTY) != 0);
77
78 if (be)
79 be->func->clear(be);
80
81 for (i = 0; i < ttm->num_pages; ++i) {
82 page = ttm->pages[i];
83 if (page == NULL)
84 continue;
85
86 if (page == ttm->dummy_read_page) {
87 BUG_ON(write);
88 continue;
89 }
90
91 if (write && dirty && !PageReserved(page))
92 set_page_dirty_lock(page);
93
94 ttm->pages[i] = NULL;
95 ttm_mem_global_free(ttm->glob->mem_glob, PAGE_SIZE);
96 put_page(page);
97 }
98 ttm->state = tt_unpopulated;
99 ttm->first_himem_page = ttm->num_pages;
100 ttm->last_lomem_page = -1;
101} 52}
102 53
103static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index) 54static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
104{ 55{
105 struct page *p; 56 ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages, sizeof(void*));
106 struct list_head h; 57 ttm->dma_address = drm_calloc_large(ttm->ttm.num_pages,
107 struct ttm_mem_global *mem_glob = ttm->glob->mem_glob; 58 sizeof(*ttm->dma_address));
108 int ret;
109
110 while (NULL == (p = ttm->pages[index])) {
111
112 INIT_LIST_HEAD(&h);
113
114 ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1,
115 &ttm->dma_address[index]);
116
117 if (ret != 0)
118 return NULL;
119
120 p = list_first_entry(&h, struct page, lru);
121
122 ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
123 if (unlikely(ret != 0))
124 goto out_err;
125
126 if (PageHighMem(p))
127 ttm->pages[--ttm->first_himem_page] = p;
128 else
129 ttm->pages[++ttm->last_lomem_page] = p;
130 }
131 return p;
132out_err:
133 put_page(p);
134 return NULL;
135}
136
137struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
138{
139 int ret;
140
141 if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
142 ret = ttm_tt_swapin(ttm);
143 if (unlikely(ret != 0))
144 return NULL;
145 }
146 return __ttm_tt_get_page(ttm, index);
147}
148
149int ttm_tt_populate(struct ttm_tt *ttm)
150{
151 struct page *page;
152 unsigned long i;
153 struct ttm_backend *be;
154 int ret;
155
156 if (ttm->state != tt_unpopulated)
157 return 0;
158
159 if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
160 ret = ttm_tt_swapin(ttm);
161 if (unlikely(ret != 0))
162 return ret;
163 }
164
165 be = ttm->be;
166
167 for (i = 0; i < ttm->num_pages; ++i) {
168 page = __ttm_tt_get_page(ttm, i);
169 if (!page)
170 return -ENOMEM;
171 }
172
173 be->func->populate(be, ttm->num_pages, ttm->pages,
174 ttm->dummy_read_page, ttm->dma_address);
175 ttm->state = tt_unbound;
176 return 0;
177} 59}
178EXPORT_SYMBOL(ttm_tt_populate);
179 60
180#ifdef CONFIG_X86 61#ifdef CONFIG_X86
181static inline int ttm_tt_set_page_caching(struct page *p, 62static inline int ttm_tt_set_page_caching(struct page *p,
@@ -278,153 +159,100 @@ int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
278} 159}
279EXPORT_SYMBOL(ttm_tt_set_placement_caching); 160EXPORT_SYMBOL(ttm_tt_set_placement_caching);
280 161
281static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
282{
283 int i;
284 unsigned count = 0;
285 struct list_head h;
286 struct page *cur_page;
287 struct ttm_backend *be = ttm->be;
288
289 INIT_LIST_HEAD(&h);
290
291 if (be)
292 be->func->clear(be);
293 for (i = 0; i < ttm->num_pages; ++i) {
294
295 cur_page = ttm->pages[i];
296 ttm->pages[i] = NULL;
297 if (cur_page) {
298 if (page_count(cur_page) != 1)
299 printk(KERN_ERR TTM_PFX
300 "Erroneous page count. "
301 "Leaking pages.\n");
302 ttm_mem_global_free_page(ttm->glob->mem_glob,
303 cur_page);
304 list_add(&cur_page->lru, &h);
305 count++;
306 }
307 }
308 ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state,
309 ttm->dma_address);
310 ttm->state = tt_unpopulated;
311 ttm->first_himem_page = ttm->num_pages;
312 ttm->last_lomem_page = -1;
313}
314
315void ttm_tt_destroy(struct ttm_tt *ttm) 162void ttm_tt_destroy(struct ttm_tt *ttm)
316{ 163{
317 struct ttm_backend *be;
318
319 if (unlikely(ttm == NULL)) 164 if (unlikely(ttm == NULL))
320 return; 165 return;
321 166
322 be = ttm->be; 167 if (ttm->state == tt_bound) {
323 if (likely(be != NULL)) { 168 ttm_tt_unbind(ttm);
324 be->func->destroy(be);
325 ttm->be = NULL;
326 } 169 }
327 170
328 if (likely(ttm->pages != NULL)) { 171 if (likely(ttm->pages != NULL)) {
329 if (ttm->page_flags & TTM_PAGE_FLAG_USER) 172 ttm->bdev->driver->ttm_tt_unpopulate(ttm);
330 ttm_tt_free_user_pages(ttm);
331 else
332 ttm_tt_free_alloced_pages(ttm);
333
334 ttm_tt_free_page_directory(ttm);
335 } 173 }
336 174
337 if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) && 175 if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
338 ttm->swap_storage) 176 ttm->swap_storage)
339 fput(ttm->swap_storage); 177 fput(ttm->swap_storage);
340 178
341 kfree(ttm); 179 ttm->swap_storage = NULL;
180 ttm->func->destroy(ttm);
342} 181}
343 182
344int ttm_tt_set_user(struct ttm_tt *ttm, 183int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
345 struct task_struct *tsk, 184 unsigned long size, uint32_t page_flags,
346 unsigned long start, unsigned long num_pages) 185 struct page *dummy_read_page)
347{ 186{
348 struct mm_struct *mm = tsk->mm; 187 ttm->bdev = bdev;
349 int ret; 188 ttm->glob = bdev->glob;
350 int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0; 189 ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
351 struct ttm_mem_global *mem_glob = ttm->glob->mem_glob; 190 ttm->caching_state = tt_cached;
352 191 ttm->page_flags = page_flags;
353 BUG_ON(num_pages != ttm->num_pages); 192 ttm->dummy_read_page = dummy_read_page;
354 BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0); 193 ttm->state = tt_unpopulated;
355 194 ttm->swap_storage = NULL;
356 /**
357 * Account user pages as lowmem pages for now.
358 */
359
360 ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE,
361 false, false);
362 if (unlikely(ret != 0))
363 return ret;
364
365 down_read(&mm->mmap_sem);
366 ret = get_user_pages(tsk, mm, start, num_pages,
367 write, 0, ttm->pages, NULL);
368 up_read(&mm->mmap_sem);
369 195
370 if (ret != num_pages && write) { 196 ttm_tt_alloc_page_directory(ttm);
371 ttm_tt_free_user_pages(ttm); 197 if (!ttm->pages) {
372 ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE); 198 ttm_tt_destroy(ttm);
199 printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
373 return -ENOMEM; 200 return -ENOMEM;
374 } 201 }
375
376 ttm->tsk = tsk;
377 ttm->start = start;
378 ttm->state = tt_unbound;
379
380 return 0; 202 return 0;
381} 203}
204EXPORT_SYMBOL(ttm_tt_init);
382 205
383struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size, 206void ttm_tt_fini(struct ttm_tt *ttm)
384 uint32_t page_flags, struct page *dummy_read_page)
385{ 207{
386 struct ttm_bo_driver *bo_driver = bdev->driver; 208 drm_free_large(ttm->pages);
387 struct ttm_tt *ttm; 209 ttm->pages = NULL;
388 210}
389 if (!bo_driver) 211EXPORT_SYMBOL(ttm_tt_fini);
390 return NULL;
391 212
392 ttm = kzalloc(sizeof(*ttm), GFP_KERNEL); 213int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
393 if (!ttm) 214 unsigned long size, uint32_t page_flags,
394 return NULL; 215 struct page *dummy_read_page)
216{
217 struct ttm_tt *ttm = &ttm_dma->ttm;
395 218
219 ttm->bdev = bdev;
396 ttm->glob = bdev->glob; 220 ttm->glob = bdev->glob;
397 ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; 221 ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
398 ttm->first_himem_page = ttm->num_pages;
399 ttm->last_lomem_page = -1;
400 ttm->caching_state = tt_cached; 222 ttm->caching_state = tt_cached;
401 ttm->page_flags = page_flags; 223 ttm->page_flags = page_flags;
402
403 ttm->dummy_read_page = dummy_read_page; 224 ttm->dummy_read_page = dummy_read_page;
225 ttm->state = tt_unpopulated;
226 ttm->swap_storage = NULL;
404 227
405 ttm_tt_alloc_page_directory(ttm); 228 INIT_LIST_HEAD(&ttm_dma->pages_list);
406 if (!ttm->pages) { 229 ttm_dma_tt_alloc_page_directory(ttm_dma);
230 if (!ttm->pages || !ttm_dma->dma_address) {
407 ttm_tt_destroy(ttm); 231 ttm_tt_destroy(ttm);
408 printk(KERN_ERR TTM_PFX "Failed allocating page table\n"); 232 printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
409 return NULL; 233 return -ENOMEM;
410 }
411 ttm->be = bo_driver->create_ttm_backend_entry(bdev);
412 if (!ttm->be) {
413 ttm_tt_destroy(ttm);
414 printk(KERN_ERR TTM_PFX "Failed creating ttm backend entry\n");
415 return NULL;
416 } 234 }
417 ttm->state = tt_unpopulated; 235 return 0;
418 return ttm;
419} 236}
237EXPORT_SYMBOL(ttm_dma_tt_init);
238
239void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
240{
241 struct ttm_tt *ttm = &ttm_dma->ttm;
242
243 drm_free_large(ttm->pages);
244 ttm->pages = NULL;
245 drm_free_large(ttm_dma->dma_address);
246 ttm_dma->dma_address = NULL;
247}
248EXPORT_SYMBOL(ttm_dma_tt_fini);
420 249
421void ttm_tt_unbind(struct ttm_tt *ttm) 250void ttm_tt_unbind(struct ttm_tt *ttm)
422{ 251{
423 int ret; 252 int ret;
424 struct ttm_backend *be = ttm->be;
425 253
426 if (ttm->state == tt_bound) { 254 if (ttm->state == tt_bound) {
427 ret = be->func->unbind(be); 255 ret = ttm->func->unbind(ttm);
428 BUG_ON(ret); 256 BUG_ON(ret);
429 ttm->state = tt_unbound; 257 ttm->state = tt_unbound;
430 } 258 }
@@ -433,7 +261,6 @@ void ttm_tt_unbind(struct ttm_tt *ttm)
433int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) 261int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
434{ 262{
435 int ret = 0; 263 int ret = 0;
436 struct ttm_backend *be;
437 264
438 if (!ttm) 265 if (!ttm)
439 return -EINVAL; 266 return -EINVAL;
@@ -441,25 +268,21 @@ int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
441 if (ttm->state == tt_bound) 268 if (ttm->state == tt_bound)
442 return 0; 269 return 0;
443 270
444 be = ttm->be; 271 ret = ttm->bdev->driver->ttm_tt_populate(ttm);
445
446 ret = ttm_tt_populate(ttm);
447 if (ret) 272 if (ret)
448 return ret; 273 return ret;
449 274
450 ret = be->func->bind(be, bo_mem); 275 ret = ttm->func->bind(ttm, bo_mem);
451 if (unlikely(ret != 0)) 276 if (unlikely(ret != 0))
452 return ret; 277 return ret;
453 278
454 ttm->state = tt_bound; 279 ttm->state = tt_bound;
455 280
456 if (ttm->page_flags & TTM_PAGE_FLAG_USER)
457 ttm->page_flags |= TTM_PAGE_FLAG_USER_DIRTY;
458 return 0; 281 return 0;
459} 282}
460EXPORT_SYMBOL(ttm_tt_bind); 283EXPORT_SYMBOL(ttm_tt_bind);
461 284
462static int ttm_tt_swapin(struct ttm_tt *ttm) 285int ttm_tt_swapin(struct ttm_tt *ttm)
463{ 286{
464 struct address_space *swap_space; 287 struct address_space *swap_space;
465 struct file *swap_storage; 288 struct file *swap_storage;
@@ -470,16 +293,6 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
470 int i; 293 int i;
471 int ret = -ENOMEM; 294 int ret = -ENOMEM;
472 295
473 if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
474 ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
475 ttm->num_pages);
476 if (unlikely(ret != 0))
477 return ret;
478
479 ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
480 return 0;
481 }
482
483 swap_storage = ttm->swap_storage; 296 swap_storage = ttm->swap_storage;
484 BUG_ON(swap_storage == NULL); 297 BUG_ON(swap_storage == NULL);
485 298
@@ -491,7 +304,7 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
491 ret = PTR_ERR(from_page); 304 ret = PTR_ERR(from_page);
492 goto out_err; 305 goto out_err;
493 } 306 }
494 to_page = __ttm_tt_get_page(ttm, i); 307 to_page = ttm->pages[i];
495 if (unlikely(to_page == NULL)) 308 if (unlikely(to_page == NULL))
496 goto out_err; 309 goto out_err;
497 310
@@ -512,7 +325,6 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
512 325
513 return 0; 326 return 0;
514out_err: 327out_err:
515 ttm_tt_free_alloced_pages(ttm);
516 return ret; 328 return ret;
517} 329}
518 330
@@ -530,18 +342,6 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
530 BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated); 342 BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
531 BUG_ON(ttm->caching_state != tt_cached); 343 BUG_ON(ttm->caching_state != tt_cached);
532 344
533 /*
534 * For user buffers, just unpin the pages, as there should be
535 * vma references.
536 */
537
538 if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
539 ttm_tt_free_user_pages(ttm);
540 ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
541 ttm->swap_storage = NULL;
542 return 0;
543 }
544
545 if (!persistent_swap_storage) { 345 if (!persistent_swap_storage) {
546 swap_storage = shmem_file_setup("ttm swap", 346 swap_storage = shmem_file_setup("ttm swap",
547 ttm->num_pages << PAGE_SHIFT, 347 ttm->num_pages << PAGE_SHIFT,
@@ -576,7 +376,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
576 page_cache_release(to_page); 376 page_cache_release(to_page);
577 } 377 }
578 378
579 ttm_tt_free_alloced_pages(ttm); 379 ttm->bdev->driver->ttm_tt_unpopulate(ttm);
580 ttm->swap_storage = swap_storage; 380 ttm->swap_storage = swap_storage;
581 ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED; 381 ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
582 if (persistent_swap_storage) 382 if (persistent_swap_storage)
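
The ttm_tt.c rewrite above removes the separate ttm_backend object: the driver now embeds struct ttm_tt, supplies a ttm_backend_func table for bind/unbind/destroy, and exposes populate/unpopulate hooks that ttm_tt_bind() invokes before handing the pages to func->bind(). The following compilable sketch shows that two-step dispatch; every structure and function name here is invented for illustration.

#include <stdio.h>

struct tt;

struct tt_func {
	int (*bind)(struct tt *tt);
};

struct driver {
	int (*populate)(struct tt *tt);	/* plays ttm_tt_populate */
};

struct tt {
	const struct tt_func *func;
	const struct driver *drv;
	int populated;
	int bound;
};

static int my_populate(struct tt *tt)
{
	tt->populated = 1;		/* allocate backing pages here */
	puts("populated");
	return 0;
}

static int my_bind(struct tt *tt)
{
	tt->bound = 1;			/* program the GPU aperture here */
	puts("bound");
	return 0;
}

/* Mirrors the new ttm_tt_bind(): populate through the driver, then bind. */
static int tt_bind(struct tt *tt)
{
	int ret = tt->drv->populate(tt);
	if (ret)
		return ret;
	return tt->func->bind(tt);
}

int main(void)
{
	static const struct tt_func funcs = { .bind = my_bind };
	static const struct driver drv = { .populate = my_populate };
	struct tt tt = { .func = &funcs, .drv = &drv };

	return tt_bind(&tt);
}

The point of the split is that page allocation policy (pool, DMA pool, driver-specific) lives behind the driver hook while the core only sequences populate-then-bind.
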
diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c
index a83e86d3956..02661f35f7a 100644
--- a/drivers/gpu/drm/via/via_drv.c
+++ b/drivers/gpu/drm/via/via_drv.c
@@ -30,16 +30,52 @@
30 30
31#include "drm_pciids.h" 31#include "drm_pciids.h"
32 32
33static int via_driver_open(struct drm_device *dev, struct drm_file *file)
34{
35 struct via_file_private *file_priv;
36
37 DRM_DEBUG_DRIVER("\n");
38 file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
39 if (!file_priv)
40 return -ENOMEM;
41
42 file->driver_priv = file_priv;
43
44 INIT_LIST_HEAD(&file_priv->obj_list);
45
46 return 0;
47}
48
49void via_driver_postclose(struct drm_device *dev, struct drm_file *file)
50{
51 struct via_file_private *file_priv = file->driver_priv;
52
53 kfree(file_priv);
54}
55
33static struct pci_device_id pciidlist[] = { 56static struct pci_device_id pciidlist[] = {
34 viadrv_PCI_IDS 57 viadrv_PCI_IDS
35}; 58};
36 59
60static const struct file_operations via_driver_fops = {
61 .owner = THIS_MODULE,
62 .open = drm_open,
63 .release = drm_release,
64 .unlocked_ioctl = drm_ioctl,
65 .mmap = drm_mmap,
66 .poll = drm_poll,
67 .fasync = drm_fasync,
68 .llseek = noop_llseek,
69};
70
37static struct drm_driver driver = { 71static struct drm_driver driver = {
38 .driver_features = 72 .driver_features =
39 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_IRQ | 73 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_IRQ |
40 DRIVER_IRQ_SHARED, 74 DRIVER_IRQ_SHARED,
41 .load = via_driver_load, 75 .load = via_driver_load,
42 .unload = via_driver_unload, 76 .unload = via_driver_unload,
77 .open = via_driver_open,
78 .postclose = via_driver_postclose,
43 .context_dtor = via_final_context, 79 .context_dtor = via_final_context,
44 .get_vblank_counter = via_get_vblank_counter, 80 .get_vblank_counter = via_get_vblank_counter,
45 .enable_vblank = via_enable_vblank, 81 .enable_vblank = via_enable_vblank,
@@ -54,17 +90,7 @@ static struct drm_driver driver = {
54 .reclaim_buffers_idlelocked = via_reclaim_buffers_locked, 90 .reclaim_buffers_idlelocked = via_reclaim_buffers_locked,
55 .lastclose = via_lastclose, 91 .lastclose = via_lastclose,
56 .ioctls = via_ioctls, 92 .ioctls = via_ioctls,
57 .fops = { 93 .fops = &via_driver_fops,
58 .owner = THIS_MODULE,
59 .open = drm_open,
60 .release = drm_release,
61 .unlocked_ioctl = drm_ioctl,
62 .mmap = drm_mmap,
63 .poll = drm_poll,
64 .fasync = drm_fasync,
65 .llseek = noop_llseek,
66 },
67
68 .name = DRIVER_NAME, 94 .name = DRIVER_NAME,
69 .desc = DRIVER_DESC, 95 .desc = DRIVER_DESC,
70 .date = DRIVER_DATE, 96 .date = DRIVER_DATE,
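
Hoisting the file_operations out of struct drm_driver, as via_driver_fops does above, turns a per-driver writable copy into one shared const table the compiler can place in read-only data. A trivial sketch of the resulting shape, with invented names:

#include <stdio.h>

struct fops { int (*open)(void); };

static int my_open(void) { puts("open"); return 0; }

/* Shared, immutable table -- the shape via_driver_fops now takes. */
static const struct fops driver_fops = { .open = my_open };

struct drv { const struct fops *fops; };	/* a pointer, not an embedded copy */

int main(void)
{
	struct drv d = { .fops = &driver_fops };
	return d.fops->open();
}
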
diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
index 9cf87d91232..88edacc9300 100644
--- a/drivers/gpu/drm/via/via_drv.h
+++ b/drivers/gpu/drm/via/via_drv.h
@@ -24,7 +24,7 @@
24#ifndef _VIA_DRV_H_ 24#ifndef _VIA_DRV_H_
25#define _VIA_DRV_H_ 25#define _VIA_DRV_H_
26 26
27#include "drm_sman.h" 27#include "drm_mm.h"
28#define DRIVER_AUTHOR "Various" 28#define DRIVER_AUTHOR "Various"
29 29
30#define DRIVER_NAME "via" 30#define DRIVER_NAME "via"
@@ -88,9 +88,12 @@ typedef struct drm_via_private {
88 uint32_t irq_pending_mask; 88 uint32_t irq_pending_mask;
89 int *irq_map; 89 int *irq_map;
90 unsigned int idle_fault; 90 unsigned int idle_fault;
91 struct drm_sman sman;
92 int vram_initialized; 91 int vram_initialized;
92 struct drm_mm vram_mm;
93 int agp_initialized; 93 int agp_initialized;
94 struct drm_mm agp_mm;
95 /** Mapping of userspace keys to mm objects */
96 struct idr object_idr;
94 unsigned long vram_offset; 97 unsigned long vram_offset;
95 unsigned long agp_offset; 98 unsigned long agp_offset;
96 drm_via_blitq_t blit_queues[VIA_NUM_BLIT_ENGINES]; 99 drm_via_blitq_t blit_queues[VIA_NUM_BLIT_ENGINES];
diff --git a/drivers/gpu/drm/via/via_map.c b/drivers/gpu/drm/via/via_map.c
index 6cca9a709f7..a2ab3436515 100644
--- a/drivers/gpu/drm/via/via_map.c
+++ b/drivers/gpu/drm/via/via_map.c
@@ -104,15 +104,10 @@ int via_driver_load(struct drm_device *dev, unsigned long chipset)
104 104
105 dev_priv->chipset = chipset; 105 dev_priv->chipset = chipset;
106 106
107 ret = drm_sman_init(&dev_priv->sman, 2, 12, 8); 107 idr_init(&dev_priv->object_idr);
108 if (ret) {
109 kfree(dev_priv);
110 return ret;
111 }
112 108
113 ret = drm_vblank_init(dev, 1); 109 ret = drm_vblank_init(dev, 1);
114 if (ret) { 110 if (ret) {
115 drm_sman_takedown(&dev_priv->sman);
116 kfree(dev_priv); 111 kfree(dev_priv);
117 return ret; 112 return ret;
118 } 113 }
@@ -124,7 +119,8 @@ int via_driver_unload(struct drm_device *dev)
124{ 119{
125 drm_via_private_t *dev_priv = dev->dev_private; 120 drm_via_private_t *dev_priv = dev->dev_private;
126 121
127 drm_sman_takedown(&dev_priv->sman); 122 idr_remove_all(&dev_priv->object_idr);
123 idr_destroy(&dev_priv->object_idr);
128 124
129 kfree(dev_priv); 125 kfree(dev_priv);
130 126
diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c
index 6cc2dadae3e..a3574d09a07 100644
--- a/drivers/gpu/drm/via/via_mm.c
+++ b/drivers/gpu/drm/via/via_mm.c
@@ -28,26 +28,22 @@
28#include "drmP.h" 28#include "drmP.h"
29#include "via_drm.h" 29#include "via_drm.h"
30#include "via_drv.h" 30#include "via_drv.h"
31#include "drm_sman.h"
32 31
33#define VIA_MM_ALIGN_SHIFT 4 32#define VIA_MM_ALIGN_SHIFT 4
34#define VIA_MM_ALIGN_MASK ((1 << VIA_MM_ALIGN_SHIFT) - 1) 33#define VIA_MM_ALIGN_MASK ((1 << VIA_MM_ALIGN_SHIFT) - 1)
35 34
35struct via_memblock {
36 struct drm_mm_node mm_node;
37 struct list_head owner_list;
38};
39
36int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv) 40int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
37{ 41{
38 drm_via_agp_t *agp = data; 42 drm_via_agp_t *agp = data;
39 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; 43 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
40 int ret;
41 44
42 mutex_lock(&dev->struct_mutex); 45 mutex_lock(&dev->struct_mutex);
43 ret = drm_sman_set_range(&dev_priv->sman, VIA_MEM_AGP, 0, 46 drm_mm_init(&dev_priv->agp_mm, 0, agp->size >> VIA_MM_ALIGN_SHIFT);
44 agp->size >> VIA_MM_ALIGN_SHIFT);
45
46 if (ret) {
47 DRM_ERROR("AGP memory manager initialisation error\n");
48 mutex_unlock(&dev->struct_mutex);
49 return ret;
50 }
51 47
52 dev_priv->agp_initialized = 1; 48 dev_priv->agp_initialized = 1;
53 dev_priv->agp_offset = agp->offset; 49 dev_priv->agp_offset = agp->offset;
@@ -61,17 +57,9 @@ int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
61{ 57{
62 drm_via_fb_t *fb = data; 58 drm_via_fb_t *fb = data;
63 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; 59 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
64 int ret;
65 60
66 mutex_lock(&dev->struct_mutex); 61 mutex_lock(&dev->struct_mutex);
67 ret = drm_sman_set_range(&dev_priv->sman, VIA_MEM_VIDEO, 0, 62 drm_mm_init(&dev_priv->vram_mm, 0, fb->size >> VIA_MM_ALIGN_SHIFT);
68 fb->size >> VIA_MM_ALIGN_SHIFT);
69
70 if (ret) {
71 DRM_ERROR("VRAM memory manager initialisation error\n");
72 mutex_unlock(&dev->struct_mutex);
73 return ret;
74 }
75 63
76 dev_priv->vram_initialized = 1; 64 dev_priv->vram_initialized = 1;
77 dev_priv->vram_offset = fb->offset; 65 dev_priv->vram_offset = fb->offset;
@@ -108,19 +96,25 @@ void via_lastclose(struct drm_device *dev)
108 return; 96 return;
109 97
110 mutex_lock(&dev->struct_mutex); 98 mutex_lock(&dev->struct_mutex);
111 drm_sman_cleanup(&dev_priv->sman); 99 if (dev_priv->vram_initialized) {
112 dev_priv->vram_initialized = 0; 100 drm_mm_takedown(&dev_priv->vram_mm);
113 dev_priv->agp_initialized = 0; 101 dev_priv->vram_initialized = 0;
102 }
103 if (dev_priv->agp_initialized) {
104 drm_mm_takedown(&dev_priv->agp_mm);
105 dev_priv->agp_initialized = 0;
106 }
114 mutex_unlock(&dev->struct_mutex); 107 mutex_unlock(&dev->struct_mutex);
115} 108}
116 109
117int via_mem_alloc(struct drm_device *dev, void *data, 110int via_mem_alloc(struct drm_device *dev, void *data,
118 struct drm_file *file_priv) 111 struct drm_file *file)
119{ 112{
120 drm_via_mem_t *mem = data; 113 drm_via_mem_t *mem = data;
121 int retval = 0; 114 int retval = 0, user_key;
122 struct drm_memblock_item *item; 115 struct via_memblock *item;
123 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; 116 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
117 struct via_file_private *file_priv = file->driver_priv;
124 unsigned long tmpSize; 118 unsigned long tmpSize;
125 119
126 if (mem->type > VIA_MEM_AGP) { 120 if (mem->type > VIA_MEM_AGP) {
@@ -136,24 +130,57 @@ int via_mem_alloc(struct drm_device *dev, void *data,
136 return -EINVAL; 130 return -EINVAL;
137 } 131 }
138 132
133 item = kzalloc(sizeof(*item), GFP_KERNEL);
134 if (!item) {
135 retval = -ENOMEM;
136 goto fail_alloc;
137 }
138
139 tmpSize = (mem->size + VIA_MM_ALIGN_MASK) >> VIA_MM_ALIGN_SHIFT; 139 tmpSize = (mem->size + VIA_MM_ALIGN_MASK) >> VIA_MM_ALIGN_SHIFT;
140 item = drm_sman_alloc(&dev_priv->sman, mem->type, tmpSize, 0, 140 if (mem->type == VIA_MEM_AGP)
141 (unsigned long)file_priv); 141 retval = drm_mm_insert_node(&dev_priv->agp_mm,
142 mutex_unlock(&dev->struct_mutex); 142 &item->mm_node,
143 if (item) { 143 tmpSize, 0);
144 mem->offset = ((mem->type == VIA_MEM_VIDEO) ? 144 else
145 dev_priv->vram_offset : dev_priv->agp_offset) + 145 retval = drm_mm_insert_node(&dev_priv->vram_mm,
146 (item->mm-> 146 &item->mm_node,
147 offset(item->mm, item->mm_info) << VIA_MM_ALIGN_SHIFT); 147 tmpSize, 0);
148 mem->index = item->user_hash.key; 148 if (retval)
149 } else { 149 goto fail_alloc;
150 mem->offset = 0; 150
151 mem->size = 0; 151again:
152 mem->index = 0; 152 if (idr_pre_get(&dev_priv->object_idr, GFP_KERNEL) == 0) {
153 DRM_DEBUG("Video memory allocation failed\n");
154 retval = -ENOMEM; 153 retval = -ENOMEM;
154 goto fail_idr;
155 } 155 }
156 156
157 retval = idr_get_new_above(&dev_priv->object_idr, item, 1, &user_key);
158 if (retval == -EAGAIN)
159 goto again;
160 if (retval)
161 goto fail_idr;
162
163 list_add(&item->owner_list, &file_priv->obj_list);
164 mutex_unlock(&dev->struct_mutex);
165
166 mem->offset = ((mem->type == VIA_MEM_VIDEO) ?
167 dev_priv->vram_offset : dev_priv->agp_offset) +
168 ((item->mm_node.start) << VIA_MM_ALIGN_SHIFT);
169 mem->index = user_key;
170
171 return 0;
172
173fail_idr:
174 drm_mm_remove_node(&item->mm_node);
175fail_alloc:
176 kfree(item);
177 mutex_unlock(&dev->struct_mutex);
178
179 mem->offset = 0;
180 mem->size = 0;
181 mem->index = 0;
182 DRM_DEBUG("Video memory allocation failed\n");
183
157 return retval; 184 return retval;
158} 185}
159 186
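
The allocation path above uses the classic two-phase idr pattern: preallocate with idr_pre_get(), attempt idr_get_new_above(), and loop back on -EAGAIN when a racing thread consumed the preallocated slot. Below is a toy userspace sketch of just the retry shape; the allocator is fake and fails exactly once so the loop has something to do.

#include <stdio.h>

#define RETRY 1	/* stands in for -EAGAIN */

static int attempts;

/* Toy allocator: fails once to exercise the retry loop. */
static int get_new_id(int *id)
{
	if (attempts++ == 0)
		return RETRY;	/* preallocated slot raced away; try again */
	*id = 42;		/* user-visible key handed back via mem->index */
	return 0;
}

int main(void)
{
	int key, ret;

again:
	/* idr_pre_get() would refill the preallocation pool here. */
	ret = get_new_id(&key);
	if (ret == RETRY)
		goto again;
	if (ret)
		return 1;
	printf("object stored under key %d\n", key);
	return 0;
}
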
@@ -161,24 +188,35 @@ int via_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
161{ 188{
162 drm_via_private_t *dev_priv = dev->dev_private; 189 drm_via_private_t *dev_priv = dev->dev_private;
163 drm_via_mem_t *mem = data; 190 drm_via_mem_t *mem = data;
164 int ret; 191 struct via_memblock *obj;
165 192
166 mutex_lock(&dev->struct_mutex); 193 mutex_lock(&dev->struct_mutex);
167 ret = drm_sman_free_key(&dev_priv->sman, mem->index); 194 obj = idr_find(&dev_priv->object_idr, mem->index);
195 if (obj == NULL) {
196 mutex_unlock(&dev->struct_mutex);
197 return -EINVAL;
198 }
199
200 idr_remove(&dev_priv->object_idr, mem->index);
201 list_del(&obj->owner_list);
202 drm_mm_remove_node(&obj->mm_node);
203 kfree(obj);
168 mutex_unlock(&dev->struct_mutex); 204 mutex_unlock(&dev->struct_mutex);
205
169 DRM_DEBUG("free = 0x%lx\n", mem->index); 206 DRM_DEBUG("free = 0x%lx\n", mem->index);
170 207
171 return ret; 208 return 0;
172} 209}
173 210
174 211
175void via_reclaim_buffers_locked(struct drm_device *dev, 212void via_reclaim_buffers_locked(struct drm_device *dev,
176 struct drm_file *file_priv) 213 struct drm_file *file)
177{ 214{
178 drm_via_private_t *dev_priv = dev->dev_private; 215 struct via_file_private *file_priv = file->driver_priv;
216 struct via_memblock *entry, *next;
179 217
180 mutex_lock(&dev->struct_mutex); 218 mutex_lock(&dev->struct_mutex);
181 if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)file_priv)) { 219 if (list_empty(&file_priv->obj_list)) {
182 mutex_unlock(&dev->struct_mutex); 220 mutex_unlock(&dev->struct_mutex);
183 return; 221 return;
184 } 222 }
@@ -186,7 +224,12 @@ void via_reclaim_buffers_locked(struct drm_device *dev,
186 if (dev->driver->dma_quiescent) 224 if (dev->driver->dma_quiescent)
187 dev->driver->dma_quiescent(dev); 225 dev->driver->dma_quiescent(dev);
188 226
189 drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)file_priv); 227 list_for_each_entry_safe(entry, next, &file_priv->obj_list,
228 owner_list) {
229 list_del(&entry->owner_list);
230 drm_mm_remove_node(&entry->mm_node);
231 kfree(entry);
232 }
190 mutex_unlock(&dev->struct_mutex); 233 mutex_unlock(&dev->struct_mutex);
191 return; 234 return;
192} 235}
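
Beyond the idr, each allocation is also linked onto the owning file's obj_list, so via_reclaim_buffers_locked() can free everything a client left behind when it closes the device. Here is a self-contained sketch of that owner-list reclaim, using a minimal intrusive list in place of the kernel's list_head; all names are invented.

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

/* Minimal intrusive doubly-linked list. */
struct list { struct list *prev, *next; };

static void list_init(struct list *l) { l->prev = l->next = l; }
static void list_add(struct list *n, struct list *h)
{
	n->next = h->next; n->prev = h;
	h->next->prev = n; h->next = n;
}
static void list_del(struct list *n)
{
	n->prev->next = n->next; n->next->prev = n->prev;
}

struct memblock {
	struct list owner_list;		/* links the block to its owner */
	int id;
};

#define to_block(p) \
	((struct memblock *)((char *)(p) - offsetof(struct memblock, owner_list)))

int main(void)
{
	struct list obj_list;		/* plays file_priv->obj_list */
	int i;

	list_init(&obj_list);
	for (i = 0; i < 3; i++) {
		struct memblock *b = malloc(sizeof(*b));
		b->id = i;
		list_add(&b->owner_list, &obj_list);
	}

	/* Reclaim pass: unlink and free every block the owner still holds. */
	while (obj_list.next != &obj_list) {
		struct memblock *b = to_block(obj_list.next);
		list_del(&b->owner_list);
		printf("reclaiming block %d\n", b->id);
		free(b);
	}
	return 0;
}
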
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 5a72ed90823..1e2c0fb7f78 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -28,6 +28,7 @@
28#include "vmwgfx_drv.h" 28#include "vmwgfx_drv.h"
29#include "ttm/ttm_bo_driver.h" 29#include "ttm/ttm_bo_driver.h"
30#include "ttm/ttm_placement.h" 30#include "ttm/ttm_placement.h"
31#include "ttm/ttm_page_alloc.h"
31 32
32static uint32_t vram_placement_flags = TTM_PL_FLAG_VRAM | 33static uint32_t vram_placement_flags = TTM_PL_FLAG_VRAM |
33 TTM_PL_FLAG_CACHED; 34 TTM_PL_FLAG_CACHED;
@@ -139,85 +140,63 @@ struct ttm_placement vmw_srf_placement = {
139 .busy_placement = gmr_vram_placement_flags 140 .busy_placement = gmr_vram_placement_flags
140}; 141};
141 142
142struct vmw_ttm_backend { 143struct vmw_ttm_tt {
143 struct ttm_backend backend; 144 struct ttm_tt ttm;
144 struct page **pages;
145 unsigned long num_pages;
146 struct vmw_private *dev_priv; 145 struct vmw_private *dev_priv;
147 int gmr_id; 146 int gmr_id;
148}; 147};
149 148
150static int vmw_ttm_populate(struct ttm_backend *backend, 149static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
151 unsigned long num_pages, struct page **pages,
152 struct page *dummy_read_page,
153 dma_addr_t *dma_addrs)
154{ 150{
155 struct vmw_ttm_backend *vmw_be = 151 struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
156 container_of(backend, struct vmw_ttm_backend, backend);
157
158 vmw_be->pages = pages;
159 vmw_be->num_pages = num_pages;
160
161 return 0;
162}
163
164static int vmw_ttm_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
165{
166 struct vmw_ttm_backend *vmw_be =
167 container_of(backend, struct vmw_ttm_backend, backend);
168 152
169 vmw_be->gmr_id = bo_mem->start; 153 vmw_be->gmr_id = bo_mem->start;
170 154
171 return vmw_gmr_bind(vmw_be->dev_priv, vmw_be->pages, 155 return vmw_gmr_bind(vmw_be->dev_priv, ttm->pages,
172 vmw_be->num_pages, vmw_be->gmr_id); 156 ttm->num_pages, vmw_be->gmr_id);
173} 157}
174 158
175static int vmw_ttm_unbind(struct ttm_backend *backend) 159static int vmw_ttm_unbind(struct ttm_tt *ttm)
176{ 160{
177 struct vmw_ttm_backend *vmw_be = 161 struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
178 container_of(backend, struct vmw_ttm_backend, backend);
179 162
180 vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id); 163 vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
181 return 0; 164 return 0;
182} 165}
183 166
184static void vmw_ttm_clear(struct ttm_backend *backend) 167static void vmw_ttm_destroy(struct ttm_tt *ttm)
185{ 168{
186 struct vmw_ttm_backend *vmw_be = 169 struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
187 container_of(backend, struct vmw_ttm_backend, backend);
188
189 vmw_be->pages = NULL;
190 vmw_be->num_pages = 0;
191}
192
193static void vmw_ttm_destroy(struct ttm_backend *backend)
194{
195 struct vmw_ttm_backend *vmw_be =
196 container_of(backend, struct vmw_ttm_backend, backend);
197 170
171 ttm_tt_fini(ttm);
198 kfree(vmw_be); 172 kfree(vmw_be);
199} 173}
200 174
201static struct ttm_backend_func vmw_ttm_func = { 175static struct ttm_backend_func vmw_ttm_func = {
202 .populate = vmw_ttm_populate,
203 .clear = vmw_ttm_clear,
204 .bind = vmw_ttm_bind, 176 .bind = vmw_ttm_bind,
205 .unbind = vmw_ttm_unbind, 177 .unbind = vmw_ttm_unbind,
206 .destroy = vmw_ttm_destroy, 178 .destroy = vmw_ttm_destroy,
207}; 179};
208 180
209struct ttm_backend *vmw_ttm_backend_init(struct ttm_bo_device *bdev) 181struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
182 unsigned long size, uint32_t page_flags,
183 struct page *dummy_read_page)
210{ 184{
211 struct vmw_ttm_backend *vmw_be; 185 struct vmw_ttm_tt *vmw_be;
212 186
213 vmw_be = kmalloc(sizeof(*vmw_be), GFP_KERNEL); 187 vmw_be = kmalloc(sizeof(*vmw_be), GFP_KERNEL);
214 if (!vmw_be) 188 if (!vmw_be)
215 return NULL; 189 return NULL;
216 190
217 vmw_be->backend.func = &vmw_ttm_func; 191 vmw_be->ttm.func = &vmw_ttm_func;
218 vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev); 192 vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);
219 193
220 return &vmw_be->backend; 194 if (ttm_tt_init(&vmw_be->ttm, bdev, size, page_flags, dummy_read_page)) {
195 kfree(vmw_be);
196 return NULL;
197 }
198
199 return &vmw_be->ttm;
221} 200}
222 201
223int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags) 202int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
@@ -357,7 +336,9 @@ static int vmw_sync_obj_wait(void *sync_obj, void *sync_arg,
357} 336}
358 337
359struct ttm_bo_driver vmw_bo_driver = { 338struct ttm_bo_driver vmw_bo_driver = {
360 .create_ttm_backend_entry = vmw_ttm_backend_init, 339 .ttm_tt_create = &vmw_ttm_tt_create,
340 .ttm_tt_populate = &ttm_pool_populate,
341 .ttm_tt_unpopulate = &ttm_pool_unpopulate,
361 .invalidate_caches = vmw_invalidate_caches, 342 .invalidate_caches = vmw_invalidate_caches,
362 .init_mem_type = vmw_init_mem_type, 343 .init_mem_type = vmw_init_mem_type,
363 .evict_flags = vmw_evict_flags, 344 .evict_flags = vmw_evict_flags,
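
With the backend object gone, vmwgfx embeds struct ttm_tt in its own vmw_ttm_tt and recovers the wrapper with container_of() in every hook. A small standalone illustration of the pattern, using a local copy of the macro and invented structure names:

#include <stdio.h>
#include <stddef.h>

/* Local container_of, simplified from the kernel's definition. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ttm { int num_pages; };

/* Driver wrapper embedding the base object, like vmw_ttm_tt does. */
struct my_tt {
	struct ttm ttm;
	int gmr_id;
};

static void bind(struct ttm *ttm)
{
	/* Recover the wrapper from the embedded base pointer. */
	struct my_tt *mine = container_of(ttm, struct my_tt, ttm);
	printf("binding %d pages to gmr %d\n", ttm->num_pages, mine->gmr_id);
}

int main(void)
{
	struct my_tt t = { .ttm = { .num_pages = 4 }, .gmr_id = 7 };
	bind(&t.ttm);
	return 0;
}
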
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index dff8fc76715..f390f5f9cb6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1064,6 +1064,21 @@ static const struct dev_pm_ops vmw_pm_ops = {
1064 .resume = vmw_pm_resume, 1064 .resume = vmw_pm_resume,
1065}; 1065};
1066 1066
1067static const struct file_operations vmwgfx_driver_fops = {
1068 .owner = THIS_MODULE,
1069 .open = drm_open,
1070 .release = drm_release,
1071 .unlocked_ioctl = vmw_unlocked_ioctl,
1072 .mmap = vmw_mmap,
1073 .poll = vmw_fops_poll,
1074 .read = vmw_fops_read,
1075 .fasync = drm_fasync,
1076#if defined(CONFIG_COMPAT)
1077 .compat_ioctl = drm_compat_ioctl,
1078#endif
1079 .llseek = noop_llseek,
1080};
1081
1067static struct drm_driver driver = { 1082static struct drm_driver driver = {
1068 .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | 1083 .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
1069 DRIVER_MODESET, 1084 DRIVER_MODESET,
@@ -1088,20 +1103,7 @@ static struct drm_driver driver = {
1088 .master_drop = vmw_master_drop, 1103 .master_drop = vmw_master_drop,
1089 .open = vmw_driver_open, 1104 .open = vmw_driver_open,
1090 .postclose = vmw_postclose, 1105 .postclose = vmw_postclose,
1091 .fops = { 1106 .fops = &vmwgfx_driver_fops,
1092 .owner = THIS_MODULE,
1093 .open = drm_open,
1094 .release = drm_release,
1095 .unlocked_ioctl = vmw_unlocked_ioctl,
1096 .mmap = vmw_mmap,
1097 .poll = vmw_fops_poll,
1098 .read = vmw_fops_read,
1099 .fasync = drm_fasync,
1100#if defined(CONFIG_COMPAT)
1101 .compat_ioctl = drm_compat_ioctl,
1102#endif
1103 .llseek = noop_llseek,
1104 },
1105 .name = VMWGFX_DRIVER_NAME, 1107 .name = VMWGFX_DRIVER_NAME,
1106 .desc = VMWGFX_DRIVER_DESC, 1108 .desc = VMWGFX_DRIVER_DESC,
1107 .date = VMWGFX_DRIVER_DATE, 1109 .date = VMWGFX_DRIVER_DATE,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index f94b33ae221..0af6ebdf205 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -690,7 +690,7 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
690 690
691 /* XXX get the first 3 from the surface info */ 691 /* XXX get the first 3 from the surface info */
692 vfbs->base.base.bits_per_pixel = mode_cmd->bpp; 692 vfbs->base.base.bits_per_pixel = mode_cmd->bpp;
693 vfbs->base.base.pitch = mode_cmd->pitch; 693 vfbs->base.base.pitches[0] = mode_cmd->pitch;
694 vfbs->base.base.depth = mode_cmd->depth; 694 vfbs->base.base.depth = mode_cmd->depth;
695 vfbs->base.base.width = mode_cmd->width; 695 vfbs->base.base.width = mode_cmd->width;
696 vfbs->base.base.height = mode_cmd->height; 696 vfbs->base.base.height = mode_cmd->height;
@@ -804,7 +804,7 @@ static int do_dmabuf_define_gmrfb(struct drm_file *file_priv,
804 cmd->body.format.bitsPerPixel = framebuffer->base.bits_per_pixel; 804 cmd->body.format.bitsPerPixel = framebuffer->base.bits_per_pixel;
805 cmd->body.format.colorDepth = depth; 805 cmd->body.format.colorDepth = depth;
806 cmd->body.format.reserved = 0; 806 cmd->body.format.reserved = 0;
807 cmd->body.bytesPerLine = framebuffer->base.pitch; 807 cmd->body.bytesPerLine = framebuffer->base.pitches[0];
808 cmd->body.ptr.gmrId = framebuffer->user_handle; 808 cmd->body.ptr.gmrId = framebuffer->user_handle;
809 cmd->body.ptr.offset = 0; 809 cmd->body.ptr.offset = 0;
810 810
@@ -1056,7 +1056,7 @@ static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
1056 } 1056 }
1057 1057
1058 vfbd->base.base.bits_per_pixel = mode_cmd->bpp; 1058 vfbd->base.base.bits_per_pixel = mode_cmd->bpp;
1059 vfbd->base.base.pitch = mode_cmd->pitch; 1059 vfbd->base.base.pitches[0] = mode_cmd->pitch;
1060 vfbd->base.base.depth = mode_cmd->depth; 1060 vfbd->base.base.depth = mode_cmd->depth;
1061 vfbd->base.base.width = mode_cmd->width; 1061 vfbd->base.base.width = mode_cmd->width;
1062 vfbd->base.base.height = mode_cmd->height; 1062 vfbd->base.base.height = mode_cmd->height;
@@ -1085,7 +1085,7 @@ out_err1:
1085 1085
1086static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, 1086static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
1087 struct drm_file *file_priv, 1087 struct drm_file *file_priv,
1088 struct drm_mode_fb_cmd *mode_cmd) 1088 struct drm_mode_fb_cmd2 *mode_cmd2)
1089{ 1089{
1090 struct vmw_private *dev_priv = vmw_priv(dev); 1090 struct vmw_private *dev_priv = vmw_priv(dev);
1091 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 1091 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
@@ -1093,8 +1093,16 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
1093 struct vmw_surface *surface = NULL; 1093 struct vmw_surface *surface = NULL;
1094 struct vmw_dma_buffer *bo = NULL; 1094 struct vmw_dma_buffer *bo = NULL;
1095 struct ttm_base_object *user_obj; 1095 struct ttm_base_object *user_obj;
1096 struct drm_mode_fb_cmd mode_cmd;
1096 int ret; 1097 int ret;
1097 1098
1099 mode_cmd.width = mode_cmd2->width;
1100 mode_cmd.height = mode_cmd2->height;
1101 mode_cmd.pitch = mode_cmd2->pitches[0];
1102 mode_cmd.handle = mode_cmd2->handles[0];
1103 drm_fb_get_bpp_depth(mode_cmd2->pixel_format, &mode_cmd.depth,
1104 &mode_cmd.bpp);
1105
1098 /** 1106 /**
1099 * This code should be conditioned on Screen Objects not being used. 1107 * This code should be conditioned on Screen Objects not being used.
1100 * If screen objects are used, we can allocate a GMR to hold the 1108 * If screen objects are used, we can allocate a GMR to hold the
@@ -1102,8 +1110,8 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
1102 */ 1110 */
1103 1111
1104 if (!vmw_kms_validate_mode_vram(dev_priv, 1112 if (!vmw_kms_validate_mode_vram(dev_priv,
1105 mode_cmd->pitch, 1113 mode_cmd.pitch,
1106 mode_cmd->height)) { 1114 mode_cmd.height)) {
1107 DRM_ERROR("VRAM size is too small for requested mode.\n"); 1115 DRM_ERROR("VRAM size is too small for requested mode.\n");
1108 return ERR_PTR(-ENOMEM); 1116 return ERR_PTR(-ENOMEM);
1109 } 1117 }
@@ -1117,15 +1125,19 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
1117 * command stream using user-space handles. 1125 * command stream using user-space handles.
1118 */ 1126 */
1119 1127
1120 user_obj = ttm_base_object_lookup(tfile, mode_cmd->handle); 1128 user_obj = ttm_base_object_lookup(tfile, mode_cmd.handle);
1121 if (unlikely(user_obj == NULL)) { 1129 if (unlikely(user_obj == NULL)) {
1122 DRM_ERROR("Could not locate requested kms frame buffer.\n"); 1130 DRM_ERROR("Could not locate requested kms frame buffer.\n");
1123 return ERR_PTR(-ENOENT); 1131 return ERR_PTR(-ENOENT);
1124 } 1132 }
1125 1133
1134 /**
1135 * End conditioned code.
1136 */
1137
1126 /* returns either a dmabuf or surface */ 1138 /* returns either a dmabuf or surface */
1127 ret = vmw_user_lookup_handle(dev_priv, tfile, 1139 ret = vmw_user_lookup_handle(dev_priv, tfile,
1128 mode_cmd->handle, 1140 mode_cmd.handle,
1129 &surface, &bo); 1141 &surface, &bo);
1130 if (ret) 1142 if (ret)
1131 goto err_out; 1143 goto err_out;
@@ -1133,10 +1145,10 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
1133 /* Create the new framebuffer depending one what we got back */ 1145 /* Create the new framebuffer depending one what we got back */
1134 if (bo) 1146 if (bo)
1135 ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb, 1147 ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
1136 mode_cmd); 1148 &mode_cmd);
1137 else if (surface) 1149 else if (surface)
1138 ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv, 1150 ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv,
1139 surface, &vfb, mode_cmd); 1151 surface, &vfb, &mode_cmd);
1140 else 1152 else
1141 BUG(); 1153 BUG();
1142 1154
@@ -1344,7 +1356,7 @@ int vmw_kms_readback(struct vmw_private *dev_priv,
1344 cmd->body.format.bitsPerPixel = vfb->base.bits_per_pixel; 1356 cmd->body.format.bitsPerPixel = vfb->base.bits_per_pixel;
1345 cmd->body.format.colorDepth = vfb->base.depth; 1357 cmd->body.format.colorDepth = vfb->base.depth;
1346 cmd->body.format.reserved = 0; 1358 cmd->body.format.reserved = 0;
1347 cmd->body.bytesPerLine = vfb->base.pitch; 1359 cmd->body.bytesPerLine = vfb->base.pitches[0];
1348 cmd->body.ptr.gmrId = vfb->user_handle; 1360 cmd->body.ptr.gmrId = vfb->user_handle;
1349 cmd->body.ptr.offset = 0; 1361 cmd->body.ptr.offset = 0;
1350 1362
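
vmw_kms_fb_create() now receives a drm_mode_fb_cmd2 carrying a fourcc pixel_format and converts it back to legacy bpp/depth via drm_fb_get_bpp_depth(). The sketch below is a rough, standalone analogue of that lookup for three common formats; the fourcc values are computed locally and the table is intentionally partial.

#include <stdio.h>
#include <stdint.h>

#define fourcc(a, b, c, d) \
	((uint32_t)(a) | ((uint32_t)(b) << 8) | \
	 ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))

#define FMT_RGB565   fourcc('R', 'G', '1', '6')
#define FMT_XRGB8888 fourcc('X', 'R', '2', '4')
#define FMT_ARGB8888 fourcc('A', 'R', '2', '4')

/* Rough analogue of drm_fb_get_bpp_depth() for a few formats. */
static void get_bpp_depth(uint32_t format, unsigned *depth, unsigned *bpp)
{
	switch (format) {
	case FMT_RGB565:   *depth = 16; *bpp = 16; break;
	case FMT_XRGB8888: *depth = 24; *bpp = 32; break;
	case FMT_ARGB8888: *depth = 32; *bpp = 32; break;
	default:           *depth = 0;  *bpp = 0;  break;
	}
}

int main(void)
{
	unsigned depth, bpp;

	get_bpp_depth(FMT_XRGB8888, &depth, &bpp);
	printf("XR24: depth %u, bpp %u\n", depth, bpp);
	return 0;
}
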
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index e1cb8556355..a4f7f034996 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -29,6 +29,7 @@
29#define VMWGFX_KMS_H_ 29#define VMWGFX_KMS_H_
30 30
31#include "drmP.h" 31#include "drmP.h"
32#include "drm_crtc_helper.h"
32#include "vmwgfx_drv.h" 33#include "vmwgfx_drv.h"
33 34
34#define VMWGFX_NUM_DISPLAY_UNITS 8 35#define VMWGFX_NUM_DISPLAY_UNITS 8
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 8f8dbd43c33..f77b184be80 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -95,7 +95,7 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
95 return 0; 95 return 0;
96 fb = entry->base.crtc.fb; 96 fb = entry->base.crtc.fb;
97 97
98 return vmw_kms_write_svga(dev_priv, w, h, fb->pitch, 98 return vmw_kms_write_svga(dev_priv, w, h, fb->pitches[0],
99 fb->bits_per_pixel, fb->depth); 99 fb->bits_per_pixel, fb->depth);
100 } 100 }
101 101
@@ -103,7 +103,7 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
103 entry = list_entry(lds->active.next, typeof(*entry), active); 103 entry = list_entry(lds->active.next, typeof(*entry), active);
104 fb = entry->base.crtc.fb; 104 fb = entry->base.crtc.fb;
105 105
106 vmw_kms_write_svga(dev_priv, fb->width, fb->height, fb->pitch, 106 vmw_kms_write_svga(dev_priv, fb->width, fb->height, fb->pitches[0],
107 fb->bits_per_pixel, fb->depth); 107 fb->bits_per_pixel, fb->depth);
108 } 108 }
109 109
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 1c7f09e2681..a37abb581cb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1540,29 +1540,10 @@ out_bad_surface:
1540/** 1540/**
1541 * Buffer management. 1541 * Buffer management.
1542 */ 1542 */
1543
1544static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
1545 unsigned long num_pages)
1546{
1547 static size_t bo_user_size = ~0;
1548
1549 size_t page_array_size =
1550 (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;
1551
1552 if (unlikely(bo_user_size == ~0)) {
1553 bo_user_size = glob->ttm_bo_extra_size +
1554 ttm_round_pot(sizeof(struct vmw_dma_buffer));
1555 }
1556
1557 return bo_user_size + page_array_size;
1558}
1559
1560void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo) 1543void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
1561{ 1544{
1562 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); 1545 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
1563 struct ttm_bo_global *glob = bo->glob;
1564 1546
1565 ttm_mem_global_free(glob->mem_glob, bo->acc_size);
1566 kfree(vmw_bo); 1547 kfree(vmw_bo);
1567} 1548}
1568 1549
@@ -1573,24 +1554,12 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
1573 void (*bo_free) (struct ttm_buffer_object *bo)) 1554 void (*bo_free) (struct ttm_buffer_object *bo))
1574{ 1555{
1575 struct ttm_bo_device *bdev = &dev_priv->bdev; 1556 struct ttm_bo_device *bdev = &dev_priv->bdev;
1576 struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
1577 size_t acc_size; 1557 size_t acc_size;
1578 int ret; 1558 int ret;
1579 1559
1580 BUG_ON(!bo_free); 1560 BUG_ON(!bo_free);
1581 1561
1582 acc_size = 1562 acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer));
1583 vmw_dmabuf_acc_size(bdev->glob,
1584 (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
1585
1586 ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
1587 if (unlikely(ret != 0)) {
1588 /* we must free the bo here as
1589 * ttm_buffer_object_init does so as well */
1590 bo_free(&vmw_bo->base);
1591 return ret;
1592 }
1593
1594 memset(vmw_bo, 0, sizeof(*vmw_bo)); 1563 memset(vmw_bo, 0, sizeof(*vmw_bo));
1595 1564
1596 INIT_LIST_HEAD(&vmw_bo->validate_list); 1565 INIT_LIST_HEAD(&vmw_bo->validate_list);
@@ -1605,9 +1574,7 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
1605static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo) 1574static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
1606{ 1575{
1607 struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo); 1576 struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
1608 struct ttm_bo_global *glob = bo->glob;
1609 1577
1610 ttm_mem_global_free(glob->mem_glob, bo->acc_size);
1611 kfree(vmw_user_bo); 1578 kfree(vmw_user_bo);
1612} 1579}
1613 1580
diff --git a/drivers/staging/gma500/accel_2d.c b/drivers/staging/gma500/accel_2d.c
index 114b99a1ce1..b8f78ebbb14 100644
--- a/drivers/staging/gma500/accel_2d.c
+++ b/drivers/staging/gma500/accel_2d.c
@@ -253,7 +253,7 @@ static void psbfb_copyarea_accel(struct fb_info *info,
253 return; 253 return;
254 254
255 offset = psbfb->gtt->offset; 255 offset = psbfb->gtt->offset;
256 stride = fb->pitch; 256 stride = fb->pitches[0];
257 257
258 switch (fb->depth) { 258 switch (fb->depth) {
259 case 8: 259 case 8:
diff --git a/drivers/staging/gma500/cdv_intel_display.c b/drivers/staging/gma500/cdv_intel_display.c
index 7b97c600eff..c63a32776a9 100644
--- a/drivers/staging/gma500/cdv_intel_display.c
+++ b/drivers/staging/gma500/cdv_intel_display.c
@@ -507,9 +507,9 @@ int cdv_intel_pipe_set_base(struct drm_crtc *crtc,
507 if (ret < 0) 507 if (ret < 0)
508 goto psb_intel_pipe_set_base_exit; 508 goto psb_intel_pipe_set_base_exit;
509 start = psbfb->gtt->offset; 509 start = psbfb->gtt->offset;
510 offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8); 510 offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
511 511
512 REG_WRITE(dspstride, crtc->fb->pitch); 512 REG_WRITE(dspstride, crtc->fb->pitches[0]);
513 513
514 dspcntr = REG_READ(dspcntr_reg); 514 dspcntr = REG_READ(dspcntr_reg);
515 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; 515 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
diff --git a/drivers/staging/gma500/framebuffer.c b/drivers/staging/gma500/framebuffer.c
index 3f39a37456f..b00761cba14 100644
--- a/drivers/staging/gma500/framebuffer.c
+++ b/drivers/staging/gma500/framebuffer.c
@@ -32,6 +32,7 @@
32#include <drm/drmP.h> 32#include <drm/drmP.h>
33#include <drm/drm.h> 33#include <drm/drm.h>
34#include <drm/drm_crtc.h> 34#include <drm/drm_crtc.h>
35#include <drm/drm_fb_helper.h>
35 36
36#include "psb_drv.h" 37#include "psb_drv.h"
37#include "psb_intel_reg.h" 38#include "psb_intel_reg.h"
@@ -273,14 +274,17 @@ static struct fb_ops psbfb_unaccel_ops = {
273 */ 274 */
274static int psb_framebuffer_init(struct drm_device *dev, 275static int psb_framebuffer_init(struct drm_device *dev,
275 struct psb_framebuffer *fb, 276 struct psb_framebuffer *fb,
276 struct drm_mode_fb_cmd *mode_cmd, 277 struct drm_mode_fb_cmd2 *mode_cmd,
277 struct gtt_range *gt) 278 struct gtt_range *gt)
278{ 279{
280 u32 bpp, depth;
279 int ret; 281 int ret;
280 282
281 if (mode_cmd->pitch & 63) 283 drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
284
285 if (mode_cmd->pitches[0] & 63)
282 return -EINVAL; 286 return -EINVAL;
283 switch (mode_cmd->bpp) { 287 switch (bpp) {
284 case 8: 288 case 8:
285 case 16: 289 case 16:
286 case 24: 290 case 24:
@@ -313,7 +317,7 @@ static int psb_framebuffer_init(struct drm_device *dev,
313 317
314static struct drm_framebuffer *psb_framebuffer_create 318static struct drm_framebuffer *psb_framebuffer_create
315 (struct drm_device *dev, 319 (struct drm_device *dev,
316 struct drm_mode_fb_cmd *mode_cmd, 320 struct drm_mode_fb_cmd2 *mode_cmd,
317 struct gtt_range *gt) 321 struct gtt_range *gt)
318{ 322{
319 struct psb_framebuffer *fb; 323 struct psb_framebuffer *fb;
@@ -387,27 +391,28 @@ static int psbfb_create(struct psb_fbdev *fbdev,
387 struct fb_info *info; 391 struct fb_info *info;
388 struct drm_framebuffer *fb; 392 struct drm_framebuffer *fb;
389 struct psb_framebuffer *psbfb = &fbdev->pfb; 393 struct psb_framebuffer *psbfb = &fbdev->pfb;
390 struct drm_mode_fb_cmd mode_cmd; 394 struct drm_mode_fb_cmd2 mode_cmd;
391 struct device *device = &dev->pdev->dev; 395 struct device *device = &dev->pdev->dev;
392 int size; 396 int size;
393 int ret; 397 int ret;
394 struct gtt_range *backing; 398 struct gtt_range *backing;
395 int gtt_roll = 1; 399 int gtt_roll = 1;
400 u32 bpp, depth;
396 401
397 mode_cmd.width = sizes->surface_width; 402 mode_cmd.width = sizes->surface_width;
398 mode_cmd.height = sizes->surface_height; 403 mode_cmd.height = sizes->surface_height;
399 mode_cmd.bpp = sizes->surface_bpp; 404 bpp = sizes->surface_bpp;
400 405
401 /* No 24bit packed */ 406 /* No 24bit packed */
402 if (mode_cmd.bpp == 24) 407 if (bpp == 24)
403 mode_cmd.bpp = 32; 408 bpp = 32;
404 409
405 /* Acceleration via the GTT requires pitch to be 4096 byte aligned 410 /* Acceleration via the GTT requires pitch to be 4096 byte aligned
406 (ie 1024 or 2048 pixels in normal use) */ 411 (ie 1024 or 2048 pixels in normal use) */
407 mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 7) / 8), 4096); 412 mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 7) / 8), 4096);
408 mode_cmd.depth = sizes->surface_depth; 413 depth = sizes->surface_depth;
409 414
410 size = mode_cmd.pitch * mode_cmd.height; 415 size = mode_cmd.pitches[0] * mode_cmd.height;
411 size = ALIGN(size, PAGE_SIZE); 416 size = ALIGN(size, PAGE_SIZE);
412 417
413 /* Allocate the framebuffer in the GTT with stolen page backing */ 418 /* Allocate the framebuffer in the GTT with stolen page backing */
@@ -421,10 +426,10 @@ static int psbfb_create(struct psb_fbdev *fbdev,
421 426
422 gtt_roll = 0; /* Don't use GTT accelerated scrolling */ 427 gtt_roll = 0; /* Don't use GTT accelerated scrolling */
423 428
424 mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 7) / 8), 64); 429 mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 7) / 8), 64);
425 mode_cmd.depth = sizes->surface_depth; 430 depth = sizes->surface_depth;
426 431
427 size = mode_cmd.pitch * mode_cmd.height; 432 size = mode_cmd.pitches[0] * mode_cmd.height;
428 size = ALIGN(size, PAGE_SIZE); 433 size = ALIGN(size, PAGE_SIZE);
429 434
430 /* Allocate the framebuffer in the GTT with stolen page 435 /* Allocate the framebuffer in the GTT with stolen page
@@ -443,6 +448,8 @@ static int psbfb_create(struct psb_fbdev *fbdev,
443 } 448 }
444 info->par = fbdev; 449 info->par = fbdev;
445 450
451 mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
452
446 ret = psb_framebuffer_init(dev, psbfb, &mode_cmd, backing); 453 ret = psb_framebuffer_init(dev, psbfb, &mode_cmd, backing);
447 if (ret) 454 if (ret)
448 goto out_unref; 455 goto out_unref;
@@ -504,7 +511,7 @@ static int psbfb_create(struct psb_fbdev *fbdev,
504 info->apertures->ranges[0].size = dev_priv->gtt.stolen_size; 511 info->apertures->ranges[0].size = dev_priv->gtt.stolen_size;
505 } 512 }
506 513
507 drm_fb_helper_fill_fix(info, fb->pitch, fb->depth); 514 drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
508 drm_fb_helper_fill_var(info, &fbdev->psb_fb_helper, 515 drm_fb_helper_fill_var(info, &fbdev->psb_fb_helper,
509 sizes->fb_width, sizes->fb_height); 516 sizes->fb_width, sizes->fb_height);
510 517
@@ -546,7 +553,7 @@ out_err1:
546 */ 553 */
547static struct drm_framebuffer *psb_user_framebuffer_create 554static struct drm_framebuffer *psb_user_framebuffer_create
548 (struct drm_device *dev, struct drm_file *filp, 555 (struct drm_device *dev, struct drm_file *filp,
549 struct drm_mode_fb_cmd *cmd) 556 struct drm_mode_fb_cmd2 *cmd)
550{ 557{
551 struct gtt_range *r; 558 struct gtt_range *r;
552 struct drm_gem_object *obj; 559 struct drm_gem_object *obj;
@@ -555,7 +562,7 @@ static struct drm_framebuffer *psb_user_framebuffer_create
555 * Find the GEM object and thus the gtt range object that is 562 * Find the GEM object and thus the gtt range object that is
556 * to back this space 563 * to back this space
557 */ 564 */
558 obj = drm_gem_object_lookup(dev, filp, cmd->handle); 565 obj = drm_gem_object_lookup(dev, filp, cmd->handles[0]);
559 if (obj == NULL) 566 if (obj == NULL)
560 return ERR_PTR(-ENOENT); 567 return ERR_PTR(-ENOENT);
561 568
diff --git a/drivers/staging/gma500/mdfld_intel_display.c b/drivers/staging/gma500/mdfld_intel_display.c
index 8eb827ecc3d..0b37b7b6b02 100644
--- a/drivers/staging/gma500/mdfld_intel_display.c
+++ b/drivers/staging/gma500/mdfld_intel_display.c
@@ -390,9 +390,9 @@ int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, struct drm_f
390 goto psb_intel_pipe_set_base_exit; 390 goto psb_intel_pipe_set_base_exit;
391 391
392 start = psbfb->gtt->offset; 392 start = psbfb->gtt->offset;
393 offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8); 393 offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
394 394
395 REG_WRITE(dspstride, crtc->fb->pitch); 395 REG_WRITE(dspstride, crtc->fb->pitches[0]);
396 dspcntr = REG_READ(dspcntr_reg); 396 dspcntr = REG_READ(dspcntr_reg);
397 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; 397 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
398 398
diff --git a/drivers/staging/gma500/mrst_crtc.c b/drivers/staging/gma500/mrst_crtc.c
index c9311a573c2..980837e37d8 100644
--- a/drivers/staging/gma500/mrst_crtc.c
+++ b/drivers/staging/gma500/mrst_crtc.c
@@ -543,9 +543,9 @@ int mrst_pipe_set_base(struct drm_crtc *crtc,
543 return 0; 543 return 0;
544 544
545 start = psbfb->gtt->offset; 545 start = psbfb->gtt->offset;
546 offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8); 546 offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
547 547
548 REG_WRITE(dspstride, crtc->fb->pitch); 548 REG_WRITE(dspstride, crtc->fb->pitches[0]);
549 549
550 dspcntr = REG_READ(dspcntr_reg); 550 dspcntr = REG_READ(dspcntr_reg);
551 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; 551 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
diff --git a/drivers/staging/gma500/psb_drv.c b/drivers/staging/gma500/psb_drv.c
index 986a04d16ba..95816808f86 100644
--- a/drivers/staging/gma500/psb_drv.c
+++ b/drivers/staging/gma500/psb_drv.c
@@ -1151,6 +1151,17 @@ static struct vm_operations_struct psb_gem_vm_ops = {
1151 .close = drm_gem_vm_close, 1151 .close = drm_gem_vm_close,
1152}; 1152};
1153 1153
1154static const struct file_operations gma500_driver_fops = {
1155 .owner = THIS_MODULE,
1156 .open = drm_open,
1157 .release = drm_release,
1158 .unlocked_ioctl = psb_unlocked_ioctl,
1159 .mmap = drm_gem_mmap,
1160 .poll = drm_poll,
1161 .fasync = drm_fasync,
1162 .read = drm_read,
1163};
1164
1154static struct drm_driver driver = { 1165static struct drm_driver driver = {
1155 .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | \ 1166 .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | \
1156 DRIVER_IRQ_VBL | DRIVER_MODESET | DRIVER_GEM , 1167 DRIVER_IRQ_VBL | DRIVER_MODESET | DRIVER_GEM ,
@@ -1179,17 +1190,7 @@ static struct drm_driver driver = {
1179 .dumb_create = psb_gem_dumb_create, 1190 .dumb_create = psb_gem_dumb_create,
1180 .dumb_map_offset = psb_gem_dumb_map_gtt, 1191 .dumb_map_offset = psb_gem_dumb_map_gtt,
1181 .dumb_destroy = psb_gem_dumb_destroy, 1192 .dumb_destroy = psb_gem_dumb_destroy,
1182 1193 .fops = &gma500_driver_fops,
1183 .fops = {
1184 .owner = THIS_MODULE,
1185 .open = drm_open,
1186 .release = drm_release,
1187 .unlocked_ioctl = psb_unlocked_ioctl,
1188 .mmap = drm_gem_mmap,
1189 .poll = drm_poll,
1190 .fasync = drm_fasync,
1191 .read = drm_read,
1192 },
1193 .name = DRIVER_NAME, 1194 .name = DRIVER_NAME,
1194 .desc = DRIVER_DESC, 1195 .desc = DRIVER_DESC,
1195 .date = PSB_DRM_DRIVER_DATE, 1196 .date = PSB_DRM_DRIVER_DATE,
diff --git a/drivers/staging/gma500/psb_intel_display.c b/drivers/staging/gma500/psb_intel_display.c
index caa9d86f26d..85659613ae6 100644
--- a/drivers/staging/gma500/psb_intel_display.c
+++ b/drivers/staging/gma500/psb_intel_display.c
@@ -367,9 +367,9 @@ int psb_intel_pipe_set_base(struct drm_crtc *crtc,
367 goto psb_intel_pipe_set_base_exit; 367 goto psb_intel_pipe_set_base_exit;
368 start = psbfb->gtt->offset; 368 start = psbfb->gtt->offset;
369 369
370 offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8); 370 offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
371 371
372 REG_WRITE(dspstride, crtc->fb->pitch); 372 REG_WRITE(dspstride, crtc->fb->pitches[0]);
373 373
374 dspcntr = REG_READ(dspcntr_reg); 374 dspcntr = REG_READ(dspcntr_reg);
375 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; 375 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
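As an aside, the offset computation repeated in the three hunks above converts a pixel position into a byte offset into plane 0 of a linear framebuffer. A minimal sketch of the same arithmetic, not taken from any driver here:

        /* Byte offset of pixel (x, y) in plane 0, matching the hunks above. */
        static unsigned long fb_byte_offset(const struct drm_framebuffer *fb,
                                            int x, int y)
        {
                return y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
        }
        /* e.g. with pitches[0] = 4096 and 4 bytes per pixel (XRGB8888),
         * pixel (100, 10) sits at 10 * 4096 + 100 * 4 = 41360 bytes in. */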
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 284798aaf8b..19e6a204137 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -153,7 +153,7 @@ void __init xen_swiotlb_init(int verbose)
153 char *m = NULL; 153 char *m = NULL;
154 unsigned int repeat = 3; 154 unsigned int repeat = 3;
155 155
156 nr_tbl = swioltb_nr_tbl(); 156 nr_tbl = swiotlb_nr_tbl();
157 if (nr_tbl) 157 if (nr_tbl)
158 xen_io_tlb_nslabs = nr_tbl; 158 xen_io_tlb_nslabs = nr_tbl;
159 else { 159 else {
diff --git a/include/drm/Kbuild b/include/drm/Kbuild
index 3a60ac88952..a5c0e10fd47 100644
--- a/include/drm/Kbuild
+++ b/include/drm/Kbuild
@@ -1,4 +1,5 @@
1header-y += drm.h 1header-y += drm.h
2header-y += drm_fourcc.h
2header-y += drm_mode.h 3header-y += drm_mode.h
3header-y += drm_sarea.h 4header-y += drm_sarea.h
4header-y += i810_drm.h 5header-y += i810_drm.h
diff --git a/include/drm/drm.h b/include/drm/drm.h
index 4be33b4ca2f..49d94ede2ec 100644
--- a/include/drm/drm.h
+++ b/include/drm/drm.h
@@ -714,6 +714,10 @@ struct drm_get_cap {
714#define DRM_IOCTL_MODE_CREATE_DUMB DRM_IOWR(0xB2, struct drm_mode_create_dumb) 714#define DRM_IOCTL_MODE_CREATE_DUMB DRM_IOWR(0xB2, struct drm_mode_create_dumb)
715#define DRM_IOCTL_MODE_MAP_DUMB DRM_IOWR(0xB3, struct drm_mode_map_dumb) 715#define DRM_IOCTL_MODE_MAP_DUMB DRM_IOWR(0xB3, struct drm_mode_map_dumb)
716#define DRM_IOCTL_MODE_DESTROY_DUMB DRM_IOWR(0xB4, struct drm_mode_destroy_dumb) 716#define DRM_IOCTL_MODE_DESTROY_DUMB DRM_IOWR(0xB4, struct drm_mode_destroy_dumb)
717#define DRM_IOCTL_MODE_GETPLANERESOURCES DRM_IOWR(0xB5, struct drm_mode_get_plane_res)
718#define DRM_IOCTL_MODE_GETPLANE DRM_IOWR(0xB6, struct drm_mode_get_plane)
719#define DRM_IOCTL_MODE_SETPLANE DRM_IOWR(0xB7, struct drm_mode_set_plane)
720#define DRM_IOCTL_MODE_ADDFB2 DRM_IOWR(0xB8, struct drm_mode_fb_cmd2)
717 721
718/** 722/**
719 * Device specific ioctls should only be in their respective headers 723 * Device specific ioctls should only be in their respective headers
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index e8acca892af..76caa67c22e 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -918,7 +918,7 @@ struct drm_driver {
918 int dev_priv_size; 918 int dev_priv_size;
919 struct drm_ioctl_desc *ioctls; 919 struct drm_ioctl_desc *ioctls;
920 int num_ioctls; 920 int num_ioctls;
921 struct file_operations fops; 921 const struct file_operations *fops;
922 union { 922 union {
923 struct pci_driver *pci; 923 struct pci_driver *pci;
924 struct platform_device *platform_device; 924 struct platform_device *platform_device;
@@ -1696,5 +1696,13 @@ extern void drm_platform_exit(struct drm_driver *driver, struct platform_device
1696extern int drm_get_platform_dev(struct platform_device *pdev, 1696extern int drm_get_platform_dev(struct platform_device *pdev,
1697 struct drm_driver *driver); 1697 struct drm_driver *driver);
1698 1698
1699/* returns true if currently okay to sleep */
1700static __inline__ bool drm_can_sleep(void)
1701{
1702 if (in_atomic() || in_dbg_master() || irqs_disabled())
1703 return false;
1704 return true;
1705}
1706
1699#endif /* __KERNEL__ */ 1707#endif /* __KERNEL__ */
1700#endif 1708#endif
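The new drm_can_sleep() helper lets shared code pick the right delay primitive when it may be entered from either process or atomic context. A minimal sketch of the intended use, with a hypothetical status register (EXAMPLE_REG_READ and the bit names are not real APIs):

        /* Poll a hypothetical status register; needs <linux/delay.h>. */
        static int example_wait_for_idle(struct drm_device *dev)
        {
                int timeout = 1000;

                while (timeout--) {
                        if (EXAMPLE_REG_READ(dev, EXAMPLE_STATUS) & EXAMPLE_IDLE_BIT)
                                return 0;
                        if (drm_can_sleep())
                                msleep(1);      /* process context: sleep */
                        else
                                mdelay(1);      /* atomic context: busy-wait */
                }
                return -EBUSY;
        }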
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 80207980928..63e4fce6728 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -29,9 +29,10 @@
29#include <linux/spinlock.h> 29#include <linux/spinlock.h>
30#include <linux/types.h> 30#include <linux/types.h>
31#include <linux/idr.h> 31#include <linux/idr.h>
32
33#include <linux/fb.h> 32#include <linux/fb.h>
34 33
34#include <drm/drm_fourcc.h>
35
35struct drm_device; 36struct drm_device;
36struct drm_mode_set; 37struct drm_mode_set;
37struct drm_framebuffer; 38struct drm_framebuffer;
@@ -44,6 +45,7 @@ struct drm_framebuffer;
44#define DRM_MODE_OBJECT_PROPERTY 0xb0b0b0b0 45#define DRM_MODE_OBJECT_PROPERTY 0xb0b0b0b0
45#define DRM_MODE_OBJECT_FB 0xfbfbfbfb 46#define DRM_MODE_OBJECT_FB 0xfbfbfbfb
46#define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb 47#define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb
48#define DRM_MODE_OBJECT_PLANE 0xeeeeeeee
47 49
48struct drm_mode_object { 50struct drm_mode_object {
49 uint32_t id; 51 uint32_t id;
@@ -118,7 +120,6 @@ struct drm_display_mode {
118 120
119 char name[DRM_DISPLAY_MODE_LEN]; 121 char name[DRM_DISPLAY_MODE_LEN];
120 122
121 int connector_count;
122 enum drm_mode_status status; 123 enum drm_mode_status status;
123 int type; 124 int type;
124 125
@@ -238,13 +239,15 @@ struct drm_framebuffer {
238 struct list_head head; 239 struct list_head head;
239 struct drm_mode_object base; 240 struct drm_mode_object base;
240 const struct drm_framebuffer_funcs *funcs; 241 const struct drm_framebuffer_funcs *funcs;
241 unsigned int pitch; 242 unsigned int pitches[4];
243 unsigned int offsets[4];
242 unsigned int width; 244 unsigned int width;
243 unsigned int height; 245 unsigned int height;
244 /* depth can be 15 or 16 */ 246 /* depth can be 15 or 16 */
245 unsigned int depth; 247 unsigned int depth;
246 int bits_per_pixel; 248 int bits_per_pixel;
247 int flags; 249 int flags;
250 uint32_t pixel_format; /* fourcc format */
248 struct list_head filp_head; 251 struct list_head filp_head;
249 /* if you are using the helper */ 252 /* if you are using the helper */
250 void *helper_private; 253 void *helper_private;
@@ -278,6 +281,7 @@ struct drm_crtc;
278struct drm_connector; 281struct drm_connector;
279struct drm_encoder; 282struct drm_encoder;
280struct drm_pending_vblank_event; 283struct drm_pending_vblank_event;
284struct drm_plane;
281 285
282/** 286/**
283 * drm_crtc_funcs - control CRTCs for a given device 287 * drm_crtc_funcs - control CRTCs for a given device
@@ -341,10 +345,21 @@ struct drm_crtc_funcs {
341 345
342/** 346/**
343 * drm_crtc - central CRTC control structure 347 * drm_crtc - central CRTC control structure
348 * @dev: parent DRM device
349 * @head: list management
350 * @base: base KMS object for ID tracking etc.
344 * @enabled: is this CRTC enabled? 351 * @enabled: is this CRTC enabled?
352 * @mode: current mode timings
353 * @hwmode: mode timings as programmed to hw regs
345 * @x: x position on screen 354 * @x: x position on screen
346 * @y: y position on screen 355 * @y: y position on screen
347 * @funcs: CRTC control functions 356 * @funcs: CRTC control functions
357 * @gamma_size: size of gamma ramp
358 * @gamma_store: gamma ramp values
359 * @framedur_ns: precise frame timing
 360 * @linedur_ns: precise line timing
361 * @pixeldur_ns: precise pixel timing
362 * @helper_private: mid-layer private data
348 * 363 *
349 * Each CRTC may have one or more connectors associated with it. This structure 364 * Each CRTC may have one or more connectors associated with it. This structure
350 * allows the CRTC to be controlled. 365 * allows the CRTC to be controlled.
@@ -423,6 +438,13 @@ struct drm_connector_funcs {
423 void (*force)(struct drm_connector *connector); 438 void (*force)(struct drm_connector *connector);
424}; 439};
425 440
441/**
442 * drm_encoder_funcs - encoder controls
443 * @reset: reset state (e.g. at init or resume time)
444 * @destroy: cleanup and free associated data
445 *
446 * Encoders sit between CRTCs and connectors.
447 */
426struct drm_encoder_funcs { 448struct drm_encoder_funcs {
427 void (*reset)(struct drm_encoder *encoder); 449 void (*reset)(struct drm_encoder *encoder);
428 void (*destroy)(struct drm_encoder *encoder); 450 void (*destroy)(struct drm_encoder *encoder);
@@ -435,6 +457,18 @@ struct drm_encoder_funcs {
435 457
436/** 458/**
437 * drm_encoder - central DRM encoder structure 459 * drm_encoder - central DRM encoder structure
460 * @dev: parent DRM device
461 * @head: list management
462 * @base: base KMS object
463 * @encoder_type: one of the %DRM_MODE_ENCODER_<foo> types in drm_mode.h
464 * @possible_crtcs: bitmask of potential CRTC bindings
465 * @possible_clones: bitmask of potential sibling encoders for cloning
466 * @crtc: currently bound CRTC
467 * @funcs: control functions
468 * @helper_private: mid-layer private data
469 *
470 * CRTCs drive pixels to encoders, which convert them into signals
471 * appropriate for a given connector or set of connectors.
438 */ 472 */
439struct drm_encoder { 473struct drm_encoder {
440 struct drm_device *dev; 474 struct drm_device *dev;
@@ -470,14 +504,37 @@ enum drm_connector_force {
470 504
471/** 505/**
472 * drm_connector - central DRM connector control structure 506 * drm_connector - central DRM connector control structure
473 * @crtc: CRTC this connector is currently connected to, NULL if none 507 * @dev: parent DRM device
508 * @kdev: kernel device for sysfs attributes
509 * @attr: sysfs attributes
510 * @head: list management
511 * @base: base KMS object
512 * @connector_type: one of the %DRM_MODE_CONNECTOR_<foo> types from drm_mode.h
513 * @connector_type_id: index into connector type enum
474 * @interlace_allowed: can this connector handle interlaced modes? 514 * @interlace_allowed: can this connector handle interlaced modes?
475 * @doublescan_allowed: can this connector handle doublescan? 515 * @doublescan_allowed: can this connector handle doublescan?
476 * @available_modes: modes available on this connector (from get_modes() + user) 516 * @modes: modes available on this connector (from fill_modes() + user)
477 * @initial_x: initial x position for this connector 517 * @status: one of the drm_connector_status enums (connected, not, or unknown)
478 * @initial_y: initial y position for this connector 518 * @probed_modes: list of modes derived directly from the display
479 * @status: connector connected? 519 * @display_info: information about attached display (e.g. from EDID)
480 * @funcs: connector control functions 520 * @funcs: connector control functions
521 * @user_modes: user added mode list
522 * @edid_blob_ptr: DRM property containing EDID if present
523 * @property_ids: property tracking for this connector
524 * @property_values: value pointers or data for properties
525 * @polled: a %DRM_CONNECTOR_POLL_<foo> value for core driven polling
526 * @dpms: current dpms state
527 * @helper_private: mid-layer private data
528 * @force: a %DRM_FORCE_<foo> state for forced mode sets
529 * @encoder_ids: valid encoders for this connector
530 * @encoder: encoder driving this connector, if any
531 * @eld: EDID-like data, if present
532 * @dvi_dual: dual link DVI, if found
533 * @max_tmds_clock: max clock rate, if found
534 * @latency_present: AV delay info from ELD, if found
535 * @video_latency: video latency info from ELD, if found
536 * @audio_latency: audio latency info from ELD, if found
537 * @null_edid_counter: track sinks that give us all zeros for the EDID
481 * 538 *
482 * Each connector may be connected to one or more CRTCs, or may be clonable by 539 * Each connector may be connected to one or more CRTCs, or may be clonable by
483 * another connector if they can share a CRTC. Each connector also has a specific 540 * another connector if they can share a CRTC. Each connector also has a specific
@@ -498,7 +555,6 @@ struct drm_connector {
498 bool doublescan_allowed; 555 bool doublescan_allowed;
499 struct list_head modes; /* list of modes on this connector */ 556 struct list_head modes; /* list of modes on this connector */
500 557
501 int initial_x, initial_y;
502 enum drm_connector_status status; 558 enum drm_connector_status status;
503 559
504 /* these are modes added by probing with DDC or the BIOS */ 560 /* these are modes added by probing with DDC or the BIOS */
@@ -522,7 +578,6 @@ struct drm_connector {
522 /* forced on connector */ 578 /* forced on connector */
523 enum drm_connector_force force; 579 enum drm_connector_force force;
524 uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER]; 580 uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER];
525 uint32_t force_encoder_id;
526 struct drm_encoder *encoder; /* currently active encoder */ 581 struct drm_encoder *encoder; /* currently active encoder */
527 582
528 /* EDID bits */ 583 /* EDID bits */
@@ -536,7 +591,71 @@ struct drm_connector {
536}; 591};
537 592
538/** 593/**
539 * struct drm_mode_set 594 * drm_plane_funcs - driver plane control functions
595 * @update_plane: update the plane configuration
596 * @disable_plane: shut down the plane
597 * @destroy: clean up plane resources
598 */
599struct drm_plane_funcs {
600 int (*update_plane)(struct drm_plane *plane,
601 struct drm_crtc *crtc, struct drm_framebuffer *fb,
602 int crtc_x, int crtc_y,
603 unsigned int crtc_w, unsigned int crtc_h,
604 uint32_t src_x, uint32_t src_y,
605 uint32_t src_w, uint32_t src_h);
606 int (*disable_plane)(struct drm_plane *plane);
607 void (*destroy)(struct drm_plane *plane);
608};
609
610/**
611 * drm_plane - central DRM plane control structure
612 * @dev: DRM device this plane belongs to
613 * @head: for list management
614 * @base: base mode object
615 * @possible_crtcs: pipes this plane can be bound to
616 * @format_types: array of formats supported by this plane
617 * @format_count: number of formats supported
618 * @crtc: currently bound CRTC
619 * @fb: currently bound fb
620 * @gamma_size: size of gamma table
621 * @gamma_store: gamma correction table
622 * @enabled: enabled flag
623 * @funcs: helper functions
 624 * @helper_private: storage for the driver layer
625 */
626struct drm_plane {
627 struct drm_device *dev;
628 struct list_head head;
629
630 struct drm_mode_object base;
631
632 uint32_t possible_crtcs;
633 uint32_t *format_types;
634 uint32_t format_count;
635
636 struct drm_crtc *crtc;
637 struct drm_framebuffer *fb;
638
639 /* CRTC gamma size for reporting to userspace */
640 uint32_t gamma_size;
641 uint16_t *gamma_store;
642
643 bool enabled;
644
645 const struct drm_plane_funcs *funcs;
646 void *helper_private;
647};
648
649/**
650 * drm_mode_set - new values for a CRTC config change
651 * @head: list management
652 * @fb: framebuffer to use for new config
653 * @crtc: CRTC whose configuration we're about to change
654 * @mode: mode timings to use
655 * @x: position of this CRTC relative to @fb
656 * @y: position of this CRTC relative to @fb
657 * @connectors: array of connectors to drive with this CRTC if possible
658 * @num_connectors: size of @connectors array
540 * 659 *
 541 * Represents a single crtc, the connectors that it drives, the mode 660 * Represents a single crtc, the connectors that it drives, the mode
 542 * in use, and the framebuffer it scans out from. 661 * in use, and the framebuffer it scans out from.
@@ -558,13 +677,33 @@ struct drm_mode_set {
558}; 677};
559 678
560/** 679/**
561 * struct drm_mode_config_funcs - configure CRTCs for a given screen layout 680 * struct drm_mode_config_funcs - basic driver provided mode setting functions
681 * @fb_create: create a new framebuffer object
682 * @output_poll_changed: function to handle output configuration changes
683 *
684 * Some global (i.e. not per-CRTC, connector, etc) mode setting functions that
685 * involve drivers.
562 */ 686 */
563struct drm_mode_config_funcs { 687struct drm_mode_config_funcs {
564 struct drm_framebuffer *(*fb_create)(struct drm_device *dev, struct drm_file *file_priv, struct drm_mode_fb_cmd *mode_cmd); 688 struct drm_framebuffer *(*fb_create)(struct drm_device *dev,
689 struct drm_file *file_priv,
690 struct drm_mode_fb_cmd2 *mode_cmd);
565 void (*output_poll_changed)(struct drm_device *dev); 691 void (*output_poll_changed)(struct drm_device *dev);
566}; 692};
567 693
694/**
695 * drm_mode_group - group of mode setting resources for potential sub-grouping
696 * @num_crtcs: CRTC count
697 * @num_encoders: encoder count
698 * @num_connectors: connector count
699 * @id_list: list of KMS object IDs in this group
700 *
701 * Currently this simply tracks the global mode setting state. But in the
702 * future it could allow groups of objects to be set aside into independent
703 * control groups for use by different user level processes (e.g. two X servers
704 * running simultaneously on different heads, each with their own mode
705 * configuration and freedom of mode setting).
706 */
568struct drm_mode_group { 707struct drm_mode_group {
569 uint32_t num_crtcs; 708 uint32_t num_crtcs;
570 uint32_t num_encoders; 709 uint32_t num_encoders;
@@ -576,7 +715,30 @@ struct drm_mode_group {
576 715
577/** 716/**
578 * drm_mode_config - Mode configuration control structure 717 * drm_mode_config - Mode configuration control structure
718 * @mutex: mutex protecting KMS related lists and structures
719 * @idr_mutex: mutex for KMS ID allocation and management
720 * @crtc_idr: main KMS ID tracking object
721 * @num_fb: number of fbs available
722 * @fb_list: list of framebuffers available
723 * @num_connector: number of connectors on this device
724 * @connector_list: list of connector objects
725 * @num_encoder: number of encoders on this device
726 * @encoder_list: list of encoder objects
727 * @num_crtc: number of CRTCs on this device
728 * @crtc_list: list of CRTC objects
729 * @min_width: minimum pixel width on this device
730 * @min_height: minimum pixel height on this device
731 * @max_width: maximum pixel width on this device
732 * @max_height: maximum pixel height on this device
733 * @funcs: core driver provided mode setting functions
734 * @fb_base: base address of the framebuffer
735 * @poll_enabled: track polling status for this device
736 * @output_poll_work: delayed work for polling in process context
737 * @*_property: core property tracking
579 * 738 *
739 * Core mode resource tracking structure. All CRTC, encoders, and connectors
740 * enumerated by the driver are added here, as are global properties. Some
741 * global restrictions are also here, e.g. dimension restrictions.
580 */ 742 */
581struct drm_mode_config { 743struct drm_mode_config {
582 struct mutex mutex; /* protects configuration (mode lists etc.) */ 744 struct mutex mutex; /* protects configuration (mode lists etc.) */
@@ -589,6 +751,8 @@ struct drm_mode_config {
589 struct list_head connector_list; 751 struct list_head connector_list;
590 int num_encoder; 752 int num_encoder;
591 struct list_head encoder_list; 753 struct list_head encoder_list;
754 int num_plane;
755 struct list_head plane_list;
592 756
593 int num_crtc; 757 int num_crtc;
594 struct list_head crtc_list; 758 struct list_head crtc_list;
@@ -641,6 +805,7 @@ struct drm_mode_config {
641#define obj_to_fb(x) container_of(x, struct drm_framebuffer, base) 805#define obj_to_fb(x) container_of(x, struct drm_framebuffer, base)
642#define obj_to_property(x) container_of(x, struct drm_property, base) 806#define obj_to_property(x) container_of(x, struct drm_property, base)
643#define obj_to_blob(x) container_of(x, struct drm_property_blob, base) 807#define obj_to_blob(x) container_of(x, struct drm_property_blob, base)
808#define obj_to_plane(x) container_of(x, struct drm_plane, base)
644 809
645 810
646extern void drm_crtc_init(struct drm_device *dev, 811extern void drm_crtc_init(struct drm_device *dev,
@@ -660,6 +825,14 @@ extern void drm_encoder_init(struct drm_device *dev,
660 const struct drm_encoder_funcs *funcs, 825 const struct drm_encoder_funcs *funcs,
661 int encoder_type); 826 int encoder_type);
662 827
828extern int drm_plane_init(struct drm_device *dev,
829 struct drm_plane *plane,
830 unsigned long possible_crtcs,
831 const struct drm_plane_funcs *funcs,
832 const uint32_t *formats, uint32_t format_count,
833 bool priv);
834extern void drm_plane_cleanup(struct drm_plane *plane);
835
663extern void drm_encoder_cleanup(struct drm_encoder *encoder); 836extern void drm_encoder_cleanup(struct drm_encoder *encoder);
664 837
665extern char *drm_get_connector_name(struct drm_connector *connector); 838extern char *drm_get_connector_name(struct drm_connector *connector);
@@ -753,17 +926,25 @@ extern struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
753/* IOCTLs */ 926/* IOCTLs */
754extern int drm_mode_getresources(struct drm_device *dev, 927extern int drm_mode_getresources(struct drm_device *dev,
755 void *data, struct drm_file *file_priv); 928 void *data, struct drm_file *file_priv);
756 929extern int drm_mode_getplane_res(struct drm_device *dev, void *data,
930 struct drm_file *file_priv);
757extern int drm_mode_getcrtc(struct drm_device *dev, 931extern int drm_mode_getcrtc(struct drm_device *dev,
758 void *data, struct drm_file *file_priv); 932 void *data, struct drm_file *file_priv);
759extern int drm_mode_getconnector(struct drm_device *dev, 933extern int drm_mode_getconnector(struct drm_device *dev,
760 void *data, struct drm_file *file_priv); 934 void *data, struct drm_file *file_priv);
761extern int drm_mode_setcrtc(struct drm_device *dev, 935extern int drm_mode_setcrtc(struct drm_device *dev,
762 void *data, struct drm_file *file_priv); 936 void *data, struct drm_file *file_priv);
937extern int drm_mode_getplane(struct drm_device *dev,
938 void *data, struct drm_file *file_priv);
939extern int drm_mode_setplane(struct drm_device *dev,
940 void *data, struct drm_file *file_priv);
763extern int drm_mode_cursor_ioctl(struct drm_device *dev, 941extern int drm_mode_cursor_ioctl(struct drm_device *dev,
764 void *data, struct drm_file *file_priv); 942 void *data, struct drm_file *file_priv);
765extern int drm_mode_addfb(struct drm_device *dev, 943extern int drm_mode_addfb(struct drm_device *dev,
766 void *data, struct drm_file *file_priv); 944 void *data, struct drm_file *file_priv);
945extern int drm_mode_addfb2(struct drm_device *dev,
946 void *data, struct drm_file *file_priv);
947extern uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth);
767extern int drm_mode_rmfb(struct drm_device *dev, 948extern int drm_mode_rmfb(struct drm_device *dev,
768 void *data, struct drm_file *file_priv); 949 void *data, struct drm_file *file_priv);
769extern int drm_mode_getfb(struct drm_device *dev, 950extern int drm_mode_getfb(struct drm_device *dev,
@@ -824,4 +1005,7 @@ extern int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
824 void *data, struct drm_file *file_priv); 1005 void *data, struct drm_file *file_priv);
825extern int drm_mode_destroy_dumb_ioctl(struct drm_device *dev, 1006extern int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
826 void *data, struct drm_file *file_priv); 1007 void *data, struct drm_file *file_priv);
1008
1009extern void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
1010 int *bpp);
827#endif /* __DRM_CRTC_H__ */ 1011#endif /* __DRM_CRTC_H__ */
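To see how the new plane hooks fit together: a minimal kernel-side sketch of registering one overlay plane. The callbacks and format list here are hypothetical; a real driver programs its plane hardware inside update_plane/disable_plane.

        static const uint32_t example_formats[] = {
                DRM_FORMAT_XRGB8888,
                DRM_FORMAT_NV12,
        };

        static int example_update_plane(struct drm_plane *plane,
                                        struct drm_crtc *crtc,
                                        struct drm_framebuffer *fb,
                                        int crtc_x, int crtc_y,
                                        unsigned int crtc_w, unsigned int crtc_h,
                                        uint32_t src_x, uint32_t src_y,
                                        uint32_t src_w, uint32_t src_h)
        {
                /* program the hardware to scan out fb at the given position */
                return 0;
        }

        static int example_disable_plane(struct drm_plane *plane)
        {
                /* turn the hardware plane off */
                return 0;
        }

        static const struct drm_plane_funcs example_plane_funcs = {
                .update_plane   = example_update_plane,
                .disable_plane  = example_disable_plane,
                .destroy        = drm_plane_cleanup,
        };

        /* possible_crtcs is a bitmask of pipes, so 1 << 0 binds to pipe 0:
         * drm_plane_init(dev, plane, 1 << 0, &example_plane_funcs,
         *                example_formats, ARRAY_SIZE(example_formats), false);
         */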
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index 73b071203dc..37515d1afab 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -117,7 +117,7 @@ extern bool drm_helper_encoder_in_use(struct drm_encoder *encoder);
117extern void drm_helper_connector_dpms(struct drm_connector *connector, int mode); 117extern void drm_helper_connector_dpms(struct drm_connector *connector, int mode);
118 118
119extern int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb, 119extern int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
120 struct drm_mode_fb_cmd *mode_cmd); 120 struct drm_mode_fb_cmd2 *mode_cmd);
121 121
122static inline void drm_crtc_helper_add(struct drm_crtc *crtc, 122static inline void drm_crtc_helper_add(struct drm_crtc *crtc,
123 const struct drm_crtc_helper_funcs *funcs) 123 const struct drm_crtc_helper_funcs *funcs)
@@ -144,4 +144,7 @@ extern void drm_helper_hpd_irq_event(struct drm_device *dev);
144 144
145extern void drm_kms_helper_poll_disable(struct drm_device *dev); 145extern void drm_kms_helper_poll_disable(struct drm_device *dev);
146extern void drm_kms_helper_poll_enable(struct drm_device *dev); 146extern void drm_kms_helper_poll_enable(struct drm_device *dev);
147
148extern int drm_format_num_planes(uint32_t format);
149
147#endif 150#endif
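drm_format_num_planes() gives the helpers a way to know how many buffer planes a fourcc implies. A sketch of the idea only, not the actual implementation, using the format list from drm_fourcc.h:

        static int example_format_num_planes(uint32_t format)
        {
                switch (format) {
                case DRM_FORMAT_NV12:
                case DRM_FORMAT_NV21:
                case DRM_FORMAT_NV16:
                case DRM_FORMAT_NV61:
                        return 2;       /* Y plane plus interleaved chroma plane */
                case DRM_FORMAT_YUV420:
                case DRM_FORMAT_YVU420:
                case DRM_FORMAT_YUV422:
                case DRM_FORMAT_YVU422:
                case DRM_FORMAT_YUV444:
                case DRM_FORMAT_YVU444:
                        return 3;       /* separate Y, Cb and Cr planes */
                default:
                        return 1;       /* packed RGB/YUV formats use one plane */
                }
        }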
diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
new file mode 100644
index 00000000000..bdf0152cbbe
--- /dev/null
+++ b/include/drm/drm_fourcc.h
@@ -0,0 +1,137 @@
1/*
2 * Copyright 2011 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 */
23
24#ifndef DRM_FOURCC_H
25#define DRM_FOURCC_H
26
27#include <linux/types.h>
28
29#define fourcc_code(a, b, c, d) ((__u32)(a) | ((__u32)(b) << 8) | \
30 ((__u32)(c) << 16) | ((__u32)(d) << 24))
31
32#define DRM_FORMAT_BIG_ENDIAN (1<<31) /* format is big endian instead of little endian */
33
34/* color index */
35#define DRM_FORMAT_C8 fourcc_code('C', '8', ' ', ' ') /* [7:0] C */
36
37/* 8 bpp RGB */
38#define DRM_FORMAT_RGB332 fourcc_code('R', 'G', 'B', '8') /* [7:0] R:G:B 3:3:2 */
39#define DRM_FORMAT_BGR233 fourcc_code('B', 'G', 'R', '8') /* [7:0] B:G:R 2:3:3 */
40
41/* 16 bpp RGB */
42#define DRM_FORMAT_XRGB4444 fourcc_code('X', 'R', '1', '2') /* [15:0] x:R:G:B 4:4:4:4 little endian */
43#define DRM_FORMAT_XBGR4444 fourcc_code('X', 'B', '1', '2') /* [15:0] x:B:G:R 4:4:4:4 little endian */
44#define DRM_FORMAT_RGBX4444 fourcc_code('R', 'X', '1', '2') /* [15:0] R:G:B:x 4:4:4:4 little endian */
45#define DRM_FORMAT_BGRX4444 fourcc_code('B', 'X', '1', '2') /* [15:0] B:G:R:x 4:4:4:4 little endian */
46
47#define DRM_FORMAT_ARGB4444 fourcc_code('A', 'R', '1', '2') /* [15:0] A:R:G:B 4:4:4:4 little endian */
48#define DRM_FORMAT_ABGR4444 fourcc_code('A', 'B', '1', '2') /* [15:0] A:B:G:R 4:4:4:4 little endian */
49#define DRM_FORMAT_RGBA4444 fourcc_code('R', 'A', '1', '2') /* [15:0] R:G:B:A 4:4:4:4 little endian */
50#define DRM_FORMAT_BGRA4444 fourcc_code('B', 'A', '1', '2') /* [15:0] B:G:R:A 4:4:4:4 little endian */
51
52#define DRM_FORMAT_XRGB1555 fourcc_code('X', 'R', '1', '5') /* [15:0] x:R:G:B 1:5:5:5 little endian */
53#define DRM_FORMAT_XBGR1555 fourcc_code('X', 'B', '1', '5') /* [15:0] x:B:G:R 1:5:5:5 little endian */
54#define DRM_FORMAT_RGBX5551 fourcc_code('R', 'X', '1', '5') /* [15:0] R:G:B:x 5:5:5:1 little endian */
55#define DRM_FORMAT_BGRX5551 fourcc_code('B', 'X', '1', '5') /* [15:0] B:G:R:x 5:5:5:1 little endian */
56
57#define DRM_FORMAT_ARGB1555 fourcc_code('A', 'R', '1', '5') /* [15:0] A:R:G:B 1:5:5:5 little endian */
58#define DRM_FORMAT_ABGR1555 fourcc_code('A', 'B', '1', '5') /* [15:0] A:B:G:R 1:5:5:5 little endian */
59#define DRM_FORMAT_RGBA5551 fourcc_code('R', 'A', '1', '5') /* [15:0] R:G:B:A 5:5:5:1 little endian */
60#define DRM_FORMAT_BGRA5551 fourcc_code('B', 'A', '1', '5') /* [15:0] B:G:R:A 5:5:5:1 little endian */
61
62#define DRM_FORMAT_RGB565 fourcc_code('R', 'G', '1', '6') /* [15:0] R:G:B 5:6:5 little endian */
63#define DRM_FORMAT_BGR565 fourcc_code('B', 'G', '1', '6') /* [15:0] B:G:R 5:6:5 little endian */
64
65/* 24 bpp RGB */
66#define DRM_FORMAT_RGB888 fourcc_code('R', 'G', '2', '4') /* [23:0] R:G:B little endian */
67#define DRM_FORMAT_BGR888 fourcc_code('B', 'G', '2', '4') /* [23:0] B:G:R little endian */
68
69/* 32 bpp RGB */
70#define DRM_FORMAT_XRGB8888 fourcc_code('X', 'R', '2', '4') /* [31:0] x:R:G:B 8:8:8:8 little endian */
71#define DRM_FORMAT_XBGR8888 fourcc_code('X', 'B', '2', '4') /* [31:0] x:B:G:R 8:8:8:8 little endian */
72#define DRM_FORMAT_RGBX8888 fourcc_code('R', 'X', '2', '4') /* [31:0] R:G:B:x 8:8:8:8 little endian */
73#define DRM_FORMAT_BGRX8888 fourcc_code('B', 'X', '2', '4') /* [31:0] B:G:R:x 8:8:8:8 little endian */
74
75#define DRM_FORMAT_ARGB8888 fourcc_code('A', 'R', '2', '4') /* [31:0] A:R:G:B 8:8:8:8 little endian */
76#define DRM_FORMAT_ABGR8888 fourcc_code('A', 'B', '2', '4') /* [31:0] A:B:G:R 8:8:8:8 little endian */
77#define DRM_FORMAT_RGBA8888 fourcc_code('R', 'A', '2', '4') /* [31:0] R:G:B:A 8:8:8:8 little endian */
78#define DRM_FORMAT_BGRA8888 fourcc_code('B', 'A', '2', '4') /* [31:0] B:G:R:A 8:8:8:8 little endian */
79
80#define DRM_FORMAT_XRGB2101010 fourcc_code('X', 'R', '3', '0') /* [31:0] x:R:G:B 2:10:10:10 little endian */
81#define DRM_FORMAT_XBGR2101010 fourcc_code('X', 'B', '3', '0') /* [31:0] x:B:G:R 2:10:10:10 little endian */
82#define DRM_FORMAT_RGBX1010102 fourcc_code('R', 'X', '3', '0') /* [31:0] R:G:B:x 10:10:10:2 little endian */
83#define DRM_FORMAT_BGRX1010102 fourcc_code('B', 'X', '3', '0') /* [31:0] B:G:R:x 10:10:10:2 little endian */
84
85#define DRM_FORMAT_ARGB2101010 fourcc_code('A', 'R', '3', '0') /* [31:0] A:R:G:B 2:10:10:10 little endian */
86#define DRM_FORMAT_ABGR2101010 fourcc_code('A', 'B', '3', '0') /* [31:0] A:B:G:R 2:10:10:10 little endian */
87#define DRM_FORMAT_RGBA1010102 fourcc_code('R', 'A', '3', '0') /* [31:0] R:G:B:A 10:10:10:2 little endian */
88#define DRM_FORMAT_BGRA1010102 fourcc_code('B', 'A', '3', '0') /* [31:0] B:G:R:A 10:10:10:2 little endian */
89
90/* packed YCbCr */
91#define DRM_FORMAT_YUYV fourcc_code('Y', 'U', 'Y', 'V') /* [31:0] Cr0:Y1:Cb0:Y0 8:8:8:8 little endian */
92#define DRM_FORMAT_YVYU fourcc_code('Y', 'V', 'Y', 'U') /* [31:0] Cb0:Y1:Cr0:Y0 8:8:8:8 little endian */
93#define DRM_FORMAT_UYVY fourcc_code('U', 'Y', 'V', 'Y') /* [31:0] Y1:Cr0:Y0:Cb0 8:8:8:8 little endian */
94#define DRM_FORMAT_VYUY fourcc_code('V', 'Y', 'U', 'Y') /* [31:0] Y1:Cb0:Y0:Cr0 8:8:8:8 little endian */
95
96#define DRM_FORMAT_AYUV fourcc_code('A', 'Y', 'U', 'V') /* [31:0] A:Y:Cb:Cr 8:8:8:8 little endian */
97
98/*
99 * 2 plane YCbCr
100 * index 0 = Y plane, [7:0] Y
101 * index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian
102 * or
103 * index 1 = Cb:Cr plane, [15:0] Cb:Cr little endian
104 */
105#define DRM_FORMAT_NV12 fourcc_code('N', 'V', '1', '2') /* 2x2 subsampled Cr:Cb plane */
106#define DRM_FORMAT_NV21 fourcc_code('N', 'V', '2', '1') /* 2x2 subsampled Cb:Cr plane */
107#define DRM_FORMAT_NV16 fourcc_code('N', 'V', '1', '6') /* 2x1 subsampled Cr:Cb plane */
108#define DRM_FORMAT_NV61 fourcc_code('N', 'V', '6', '1') /* 2x1 subsampled Cb:Cr plane */
109
110/* 2 non contiguous plane YCbCr */
111#define DRM_FORMAT_NV12M fourcc_code('N', 'M', '1', '2') /* 2x2 subsampled Cr:Cb plane */
112#define DRM_FORMAT_NV12MT fourcc_code('T', 'M', '1', '2') /* 2x2 subsampled Cr:Cb plane 64x32 macroblocks */
113
114/*
115 * 3 plane YCbCr
116 * index 0: Y plane, [7:0] Y
117 * index 1: Cb plane, [7:0] Cb
118 * index 2: Cr plane, [7:0] Cr
119 * or
120 * index 1: Cr plane, [7:0] Cr
121 * index 2: Cb plane, [7:0] Cb
122 */
123#define DRM_FORMAT_YUV410 fourcc_code('Y', 'U', 'V', '9') /* 4x4 subsampled Cb (1) and Cr (2) planes */
124#define DRM_FORMAT_YVU410 fourcc_code('Y', 'V', 'U', '9') /* 4x4 subsampled Cr (1) and Cb (2) planes */
125#define DRM_FORMAT_YUV411 fourcc_code('Y', 'U', '1', '1') /* 4x1 subsampled Cb (1) and Cr (2) planes */
126#define DRM_FORMAT_YVU411 fourcc_code('Y', 'V', '1', '1') /* 4x1 subsampled Cr (1) and Cb (2) planes */
127#define DRM_FORMAT_YUV420 fourcc_code('Y', 'U', '1', '2') /* 2x2 subsampled Cb (1) and Cr (2) planes */
128#define DRM_FORMAT_YVU420 fourcc_code('Y', 'V', '1', '2') /* 2x2 subsampled Cr (1) and Cb (2) planes */
129#define DRM_FORMAT_YUV422 fourcc_code('Y', 'U', '1', '6') /* 2x1 subsampled Cb (1) and Cr (2) planes */
130#define DRM_FORMAT_YVU422 fourcc_code('Y', 'V', '1', '6') /* 2x1 subsampled Cr (1) and Cb (2) planes */
131#define DRM_FORMAT_YUV444 fourcc_code('Y', 'U', '2', '4') /* non-subsampled Cb (1) and Cr (2) planes */
132#define DRM_FORMAT_YVU444 fourcc_code('Y', 'V', '2', '4') /* non-subsampled Cr (1) and Cb (2) planes */
133
134/* 3 non contiguous plane YCbCr */
135#define DRM_FORMAT_YUV420M fourcc_code('Y', 'M', '1', '2') /* 2x2 subsampled Cb (1) and Cr (2) planes */
136
137#endif /* DRM_FOURCC_H */
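Since the codes are built by fourcc_code(), each format value is just its four ASCII characters packed little-endian, which keeps them printable in debug output. A standalone userspace check of that arithmetic:

        #include <stdio.h>
        #include <stdint.h>

        #define fourcc_code(a, b, c, d) ((uint32_t)(a) | ((uint32_t)(b) << 8) | \
                                         ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))

        int main(void)
        {
                uint32_t nv12 = fourcc_code('N', 'V', '1', '2');

                /* 'N'=0x4e 'V'=0x56 '1'=0x31 '2'=0x32, so nv12 == 0x3231564e */
                printf("NV12 = 0x%08x = %c%c%c%c\n", nv12,
                       (int)(nv12 & 0xff), (int)((nv12 >> 8) & 0xff),
                       (int)((nv12 >> 16) & 0xff), (int)((nv12 >> 24) & 0xff));
                return 0;
        }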
diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h
index ddd46db65b5..2a2acda8b43 100644
--- a/include/drm/drm_mode.h
+++ b/include/drm/drm_mode.h
@@ -120,11 +120,48 @@ struct drm_mode_crtc {
120 struct drm_mode_modeinfo mode; 120 struct drm_mode_modeinfo mode;
121}; 121};
122 122
123#define DRM_MODE_ENCODER_NONE 0 123#define DRM_MODE_PRESENT_TOP_FIELD (1<<0)
124#define DRM_MODE_ENCODER_DAC 1 124#define DRM_MODE_PRESENT_BOTTOM_FIELD (1<<1)
125#define DRM_MODE_ENCODER_TMDS 2 125
126#define DRM_MODE_ENCODER_LVDS 3 126/* Planes blend with or override other bits on the CRTC */
127#define DRM_MODE_ENCODER_TVDAC 4 127struct drm_mode_set_plane {
128 __u32 plane_id;
129 __u32 crtc_id;
130 __u32 fb_id; /* fb object contains surface format type */
131 __u32 flags; /* see above flags */
132
133 /* Signed dest location allows it to be partially off screen */
134 __s32 crtc_x, crtc_y;
135 __u32 crtc_w, crtc_h;
136
137 /* Source values are 16.16 fixed point */
138 __u32 src_x, src_y;
139 __u32 src_h, src_w;
140};
141
142struct drm_mode_get_plane {
143 __u32 plane_id;
144
145 __u32 crtc_id;
146 __u32 fb_id;
147
148 __u32 possible_crtcs;
149 __u32 gamma_size;
150
151 __u32 count_format_types;
152 __u64 format_type_ptr;
153};
154
155struct drm_mode_get_plane_res {
156 __u64 plane_id_ptr;
157 __u32 count_planes;
158};
159
160#define DRM_MODE_ENCODER_NONE 0
161#define DRM_MODE_ENCODER_DAC 1
162#define DRM_MODE_ENCODER_TMDS 2
163#define DRM_MODE_ENCODER_LVDS 3
164#define DRM_MODE_ENCODER_TVDAC 4
128#define DRM_MODE_ENCODER_VIRTUAL 5 165#define DRM_MODE_ENCODER_VIRTUAL 5
129 166
130struct drm_mode_get_encoder { 167struct drm_mode_get_encoder {
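Note that the src_* fields in drm_mode_set_plane above are 16.16 fixed point, so whole pixels are shifted left by 16. A hedged userspace sketch (the IDs are hypothetical and would come from the GETPLANERESOURCES/GETPLANE ioctls; the struct and DRM_IOCTL_MODE_SETPLANE come from the drm uapi headers) that scales a full 1920x1080 source onto a 640x480 region:

        #include <stdint.h>
        #include <sys/ioctl.h>

        static int example_show_plane(int fd, uint32_t plane_id, uint32_t crtc_id,
                                      uint32_t fb_id)
        {
                struct drm_mode_set_plane sp = {
                        .plane_id = plane_id,
                        .crtc_id  = crtc_id,
                        .fb_id    = fb_id,
                        .crtc_x = 0, .crtc_y = 0,
                        .crtc_w = 640, .crtc_h = 480,   /* destination, whole pixels */
                        .src_x = 0, .src_y = 0,
                        .src_w = 1920 << 16,            /* source, 16.16 fixed point */
                        .src_h = 1080 << 16,
                };

                return ioctl(fd, DRM_IOCTL_MODE_SETPLANE, &sp);
        }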
@@ -231,6 +268,33 @@ struct drm_mode_fb_cmd {
231 __u32 handle; 268 __u32 handle;
232}; 269};
233 270
271#define DRM_MODE_FB_INTERLACED (1<<0) /* for interlaced framebuffers */
272
273struct drm_mode_fb_cmd2 {
274 __u32 fb_id;
275 __u32 width, height;
276 __u32 pixel_format; /* fourcc code from drm_fourcc.h */
277 __u32 flags; /* see above flags */
278
279 /*
280 * In case of planar formats, this ioctl allows up to 4
 281 * buffer objects with offsets and pitches per plane.
282 * The pitch and offset order is dictated by the fourcc,
283 * e.g. NV12 (http://fourcc.org/yuv.php#NV12) is described as:
284 *
285 * YUV 4:2:0 image with a plane of 8 bit Y samples
286 * followed by an interleaved U/V plane containing
287 * 8 bit 2x2 subsampled colour difference samples.
288 *
 289 * So it would consist of Y as offsets[0] and UV as
 290 * offsets[1]. Note that offsets[0] will generally
 291 * be 0.
292 */
293 __u32 handles[4];
294 __u32 pitches[4]; /* pitch for each plane */
295 __u32 offsets[4]; /* offset of each plane */
296};
297
234#define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01 298#define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01
235#define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02 299#define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02
236#define DRM_MODE_FB_DIRTY_FLAGS 0x03 300#define DRM_MODE_FB_DIRTY_FLAGS 0x03
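Putting the planar comment above into practice: a sketch of an ADDFB2 request for a 1920x1080 NV12 image stored in a single buffer object (the handle value is hypothetical). The Y plane starts at offset 0; the interleaved CbCr plane follows it with the same byte pitch, since 960 Cb/Cr pairs take 1920 bytes per row:

        static int example_addfb_nv12(int fd, uint32_t bo_handle, uint32_t *fb_id)
        {
                struct drm_mode_fb_cmd2 f = {
                        .width        = 1920,
                        .height       = 1080,
                        .pixel_format = DRM_FORMAT_NV12,
                        .handles      = { bo_handle, bo_handle },
                        .pitches      = { 1920, 1920 },
                        .offsets      = { 0, 1920 * 1080 },     /* CbCr after Y */
                };
                int ret = ioctl(fd, DRM_IOCTL_MODE_ADDFB2, &f);

                if (ret == 0)
                        *fb_id = f.fb_id;       /* kernel-assigned fb ID */
                return ret;
        }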
diff --git a/include/drm/drm_sman.h b/include/drm/drm_sman.h
deleted file mode 100644
index 08ecf83ad5d..00000000000
--- a/include/drm/drm_sman.h
+++ /dev/null
@@ -1,176 +0,0 @@
1/**************************************************************************
2 *
3 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 *
27 **************************************************************************/
28/*
 29 * Simple memory MANager interface that keeps track of allocated regions on a
 30 * per-"owner" basis. All regions associated with an "owner" can be released
 31 * with a simple call, typically when the "owner" exits. The owner is any
 32 * "unsigned long" identifier, typically a pointer to a file private
 33 * struct or a context identifier.
34 *
35 * Authors:
36 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
37 */
38
39#ifndef DRM_SMAN_H
40#define DRM_SMAN_H
41
42#include "drmP.h"
43#include "drm_hashtab.h"
44
45/*
 46 * A class that is an abstraction of a simple memory allocator.
47 * The sman implementation provides a default such allocator
48 * using the drm_mm.c implementation. But the user can replace it.
49 * See the SiS implementation, which may use the SiS FB kernel module
50 * for memory management.
51 */
52
53struct drm_sman_mm {
54 /* private info. If allocated, needs to be destroyed by the destroy
55 function */
56 void *private;
57
58 /* Allocate a memory block with given size and alignment.
59 Return an opaque reference to the memory block */
60
61 void *(*allocate) (void *private, unsigned long size,
62 unsigned alignment);
63
64 /* Free a memory block. "ref" is the opaque reference that we got from
65 the "alloc" function */
66
67 void (*free) (void *private, void *ref);
68
69 /* Free all resources associated with this allocator */
70
71 void (*destroy) (void *private);
72
73 /* Return a memory offset from the opaque reference returned from the
74 "alloc" function */
75
76 unsigned long (*offset) (void *private, void *ref);
77};
78
79struct drm_memblock_item {
80 struct list_head owner_list;
81 struct drm_hash_item user_hash;
82 void *mm_info;
83 struct drm_sman_mm *mm;
84 struct drm_sman *sman;
85};
86
87struct drm_sman {
88 struct drm_sman_mm *mm;
89 int num_managers;
90 struct drm_open_hash owner_hash_tab;
91 struct drm_open_hash user_hash_tab;
92 struct list_head owner_items;
93};
94
95/*
96 * Take down a memory manager. This function should only be called after a
97 * successful init and after a call to drm_sman_cleanup.
98 */
99
100extern void drm_sman_takedown(struct drm_sman * sman);
101
102/*
103 * Allocate structures for a manager.
104 * num_managers are the number of memory pools to manage. (VRAM, AGP, ....)
105 * user_order is the log2 of the number of buckets in the user hash table.
106 * set this to approximately log2 of the max number of memory regions
107 * that will be allocated for _all_ pools together.
108 * owner_order is the log2 of the number of buckets in the owner hash table.
109 * set this to approximately log2 of
110 * the number of client file connections that will
111 * be using the manager.
112 *
113 */
114
115extern int drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
116 unsigned int user_order, unsigned int owner_order);
117
118/*
119 * Initialize a drm_mm.c allocator. Should be called only once for each
 120 * manager unless a customized allocator is used.
121 */
122
123extern int drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
124 unsigned long start, unsigned long size);
125
126/*
127 * Initialize a customized allocator for one of the managers.
128 * (See the SiS module). The object pointed to by "allocator" is copied,
129 * so it can be destroyed after this call.
130 */
131
132extern int drm_sman_set_manager(struct drm_sman * sman, unsigned int mananger,
133 struct drm_sman_mm * allocator);
134
135/*
 136 * Allocate a memory block. Alignment is not implemented yet.
137 */
138
139extern struct drm_memblock_item *drm_sman_alloc(struct drm_sman * sman,
140 unsigned int manager,
141 unsigned long size,
142 unsigned alignment,
143 unsigned long owner);
144/*
145 * Free a memory block identified by its user hash key.
146 */
147
148extern int drm_sman_free_key(struct drm_sman * sman, unsigned int key);
149
150/*
151 * returns 1 iff there are no stale memory blocks associated with this owner.
152 * Typically called to determine if we need to idle the hardware and call
153 * drm_sman_owner_cleanup. If there are no stale memory blocks, it removes all
154 * resources associated with owner.
155 */
156
157extern int drm_sman_owner_clean(struct drm_sman * sman, unsigned long owner);
158
159/*
160 * Frees all stale memory blocks associated with this owner. Note that this
161 * requires that the hardware is finished with all blocks, so the graphics engine
162 * should be idled before this call is made. This function also frees
163 * any resources associated with "owner" and should be called when owner
164 * is not going to be referenced anymore.
165 */
166
167extern void drm_sman_owner_cleanup(struct drm_sman * sman, unsigned long owner);
168
169/*
170 * Frees all stale memory blocks associated with the memory manager.
171 * See idling above.
172 */
173
174extern void drm_sman_cleanup(struct drm_sman * sman);
175
176#endif
diff --git a/include/drm/exynos_drm.h b/include/drm/exynos_drm.h
index 12050434d57..5e120f1c5cd 100644
--- a/include/drm/exynos_drm.h
+++ b/include/drm/exynos_drm.h
@@ -74,9 +74,16 @@ struct drm_exynos_gem_mmap {
74 uint64_t mapped; 74 uint64_t mapped;
75}; 75};
76 76
77struct drm_exynos_plane_set_zpos {
78 __u32 plane_id;
79 __s32 zpos;
80};
81
77#define DRM_EXYNOS_GEM_CREATE 0x00 82#define DRM_EXYNOS_GEM_CREATE 0x00
78#define DRM_EXYNOS_GEM_MAP_OFFSET 0x01 83#define DRM_EXYNOS_GEM_MAP_OFFSET 0x01
79#define DRM_EXYNOS_GEM_MMAP 0x02 84#define DRM_EXYNOS_GEM_MMAP 0x02
85/* Reserved 0x03 ~ 0x05 for exynos specific gem ioctl */
86#define DRM_EXYNOS_PLANE_SET_ZPOS 0x06
80 87
81#define DRM_IOCTL_EXYNOS_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + \ 88#define DRM_IOCTL_EXYNOS_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + \
82 DRM_EXYNOS_GEM_CREATE, struct drm_exynos_gem_create) 89 DRM_EXYNOS_GEM_CREATE, struct drm_exynos_gem_create)
@@ -87,6 +94,9 @@ struct drm_exynos_gem_mmap {
87#define DRM_IOCTL_EXYNOS_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + \ 94#define DRM_IOCTL_EXYNOS_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + \
88 DRM_EXYNOS_GEM_MMAP, struct drm_exynos_gem_mmap) 95 DRM_EXYNOS_GEM_MMAP, struct drm_exynos_gem_mmap)
89 96
97#define DRM_IOCTL_EXYNOS_PLANE_SET_ZPOS DRM_IOWR(DRM_COMMAND_BASE + \
98 DRM_EXYNOS_PLANE_SET_ZPOS, struct drm_exynos_plane_set_zpos)
99
90/** 100/**
91 * Platform Specific Structure for DRM based FIMD. 101 * Platform Specific Structure for DRM based FIMD.
92 * 102 *
@@ -102,4 +112,31 @@ struct exynos_drm_fimd_pdata {
102 unsigned int bpp; 112 unsigned int bpp;
103}; 113};
104 114
115/**
116 * Platform Specific Structure for DRM based HDMI.
117 *
 118 * @hdmi_dev: device pointer to the specific hdmi driver.
 119 * @mixer_dev: device pointer to the specific mixer driver.
 120 *
 121 * This structure is used by the common hdmi driver; each device pointer
 122 * is used to access the specific device driver (hdmi or mixer driver).
123 */
124struct exynos_drm_common_hdmi_pd {
125 struct device *hdmi_dev;
126 struct device *mixer_dev;
127};
128
129/**
130 * Platform Specific Structure for DRM based HDMI core.
131 *
 132 * @timing: default video mode for initialization
 133 * @default_win: default window layer number to be used for UI.
 134 * @bpp: default bits per pixel.
135 */
136struct exynos_drm_hdmi_pdata {
137 struct fb_videomode timing;
138 unsigned int default_win;
139 unsigned int bpp;
140};
141
105#endif 142#endif
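The new ioctl is driven from userspace like any other DRM command ioctl; a minimal sketch with a hypothetical plane_id:

        static int example_set_zpos(int fd, uint32_t plane_id, int32_t zpos)
        {
                struct drm_exynos_plane_set_zpos req = {
                        .plane_id = plane_id,
                        .zpos     = zpos,       /* e.g. 1 raises the plane */
                };

                return ioctl(fd, DRM_IOCTL_EXYNOS_PLANE_SET_ZPOS, &req);
        }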
diff --git a/include/drm/gma_drm.h b/include/drm/gma_drm.h
new file mode 100644
index 00000000000..11368678571
--- /dev/null
+++ b/include/drm/gma_drm.h
@@ -0,0 +1,91 @@
1/**************************************************************************
2 * Copyright (c) 2007-2011, Intel Corporation.
3 * All Rights Reserved.
4 * Copyright (c) 2008, Tungsten Graphics Inc. Cedar Park, TX., USA.
5 * All Rights Reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 **************************************************************************/
21
22#ifndef _PSB_DRM_H_
23#define _PSB_DRM_H_
24
25/*
26 * Manage the LUT for an output
27 */
28struct drm_psb_dpst_lut_arg {
29 uint8_t lut[256];
30 int output_id;
31};
32
33/*
34 * Validate modes
35 */
36struct drm_psb_mode_operation_arg {
37 u32 obj_id;
38 u16 operation;
39 struct drm_mode_modeinfo mode;
40 u64 data;
41};
42
43/*
44 * Query the stolen memory for smarter management of
45 * memory by the server
46 */
47struct drm_psb_stolen_memory_arg {
48 u32 base;
49 u32 size;
50};
51
52struct drm_psb_get_pipe_from_crtc_id_arg {
53 /** ID of CRTC being requested **/
54 u32 crtc_id;
55 /** pipe of requested CRTC **/
56 u32 pipe;
57};
58
59struct drm_psb_gem_create {
60 __u64 size;
61 __u32 handle;
62 __u32 flags;
63#define GMA_GEM_CREATE_STOLEN 1 /* Stolen memory can be used */
64};
65
66struct drm_psb_gem_mmap {
67 __u32 handle;
68 __u32 pad;
69 /**
70 * Fake offset to use for subsequent mmap call
71 *
72 * This is a fixed-size type for 32/64 compatibility.
73 */
74 __u64 offset;
75};
76
77/* Controlling the kernel modesetting buffers */
78
79#define DRM_GMA_GEM_CREATE 0x00 /* Create a GEM object */
80#define DRM_GMA_GEM_MMAP 0x01 /* Map GEM memory */
81#define DRM_GMA_STOLEN_MEMORY 0x02 /* Report stolen memory */
82#define DRM_GMA_2D_OP 0x03 /* Will be merged later */
83#define DRM_GMA_GAMMA 0x04 /* Set gamma table */
84#define DRM_GMA_ADB 0x05 /* Get backlight */
85#define DRM_GMA_DPST_BL 0x06 /* Set backlight */
86#define DRM_GMA_GET_PIPE_FROM_CRTC_ID 0x1 /* CRTC to physical pipe# */
87#define DRM_GMA_MODE_OPERATION 0x07 /* Mode validation/DC set */
88#define PSB_MODE_OPERATION_MODE_VALID 0x01
89
90
91#endif
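The header above defines only the command offsets; full ioctl numbers are composed with DRM_COMMAND_BASE in the usual way. A hedged sketch of the intended create-then-mmap flow; the EX_IOCTL_* names are composed here for illustration and are not defined by the header:

        #include <stdint.h>
        #include <sys/ioctl.h>
        #include <sys/mman.h>

        #define EX_IOCTL_GMA_GEM_CREATE \
                DRM_IOWR(DRM_COMMAND_BASE + DRM_GMA_GEM_CREATE, struct drm_psb_gem_create)
        #define EX_IOCTL_GMA_GEM_MMAP \
                DRM_IOWR(DRM_COMMAND_BASE + DRM_GMA_GEM_MMAP, struct drm_psb_gem_mmap)

        static void *example_create_and_map(int fd, uint64_t size)
        {
                struct drm_psb_gem_create create = { .size = size };
                struct drm_psb_gem_mmap mreq;

                if (ioctl(fd, EX_IOCTL_GMA_GEM_CREATE, &create))
                        return NULL;
                mreq = (struct drm_psb_gem_mmap){ .handle = create.handle };
                if (ioctl(fd, EX_IOCTL_GMA_GEM_MMAP, &mreq))
                        return NULL;
                /* mreq.offset is the fake offset to hand to mmap();
                 * the caller checks the result against MAP_FAILED. */
                return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                            fd, mreq.offset);
        }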
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 28c0d114cb5..924f6a454fe 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -198,6 +198,8 @@ typedef struct _drm_i915_sarea {
198#define DRM_I915_OVERLAY_PUT_IMAGE 0x27 198#define DRM_I915_OVERLAY_PUT_IMAGE 0x27
199#define DRM_I915_OVERLAY_ATTRS 0x28 199#define DRM_I915_OVERLAY_ATTRS 0x28
200#define DRM_I915_GEM_EXECBUFFER2 0x29 200#define DRM_I915_GEM_EXECBUFFER2 0x29
201#define DRM_I915_GET_SPRITE_COLORKEY 0x2a
202#define DRM_I915_SET_SPRITE_COLORKEY 0x2b
201 203
202#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) 204#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
203#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) 205#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -239,6 +241,8 @@ typedef struct _drm_i915_sarea {
239#define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise) 241#define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
240#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image) 242#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
241#define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs) 243#define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
244#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
245#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
242 246
243/* Allow drivers to submit batchbuffers directly to hardware, relying 247/* Allow drivers to submit batchbuffers directly to hardware, relying
244 * on the security mechanisms provided by hardware. 248 * on the security mechanisms provided by hardware.
@@ -291,6 +295,7 @@ typedef struct drm_i915_irq_wait {
291#define I915_PARAM_HAS_COHERENT_RINGS 13 295#define I915_PARAM_HAS_COHERENT_RINGS 13
292#define I915_PARAM_HAS_EXEC_CONSTANTS 14 296#define I915_PARAM_HAS_EXEC_CONSTANTS 14
293#define I915_PARAM_HAS_RELAXED_DELTA 15 297#define I915_PARAM_HAS_RELAXED_DELTA 15
298#define I915_PARAM_HAS_GEN7_SOL_RESET 16
294 299
295typedef struct drm_i915_getparam { 300typedef struct drm_i915_getparam {
296 int param; 301 int param;
@@ -653,6 +658,9 @@ struct drm_i915_gem_execbuffer2 {
653 __u64 rsvd2; 658 __u64 rsvd2;
654}; 659};
655 660
661/** Resets the SO write offset registers for transform feedback on gen7. */
662#define I915_EXEC_GEN7_SOL_RESET (1<<8)
663
656struct drm_i915_gem_pin { 664struct drm_i915_gem_pin {
657 /** Handle of the buffer to be pinned. */ 665 /** Handle of the buffer to be pinned. */
658 __u32 handle; 666 __u32 handle;
@@ -844,4 +852,36 @@ struct drm_intel_overlay_attrs {
844 __u32 gamma5; 852 __u32 gamma5;
845}; 853};
846 854
855/*
856 * Intel sprite handling
857 *
858 * Color keying works with a min/mask/max tuple. Both source and destination
859 * color keying is allowed.
860 *
861 * Source keying:
862 * Sprite pixels within the min & max values, masked against the color channels
863 * specified in the mask field, will be transparent. All other pixels will
864 * be displayed on top of the primary plane. For RGB surfaces, only the min
865 * and mask fields will be used; ranged compares are not allowed.
866 *
867 * Destination keying:
868 * Primary plane pixels that match the min value, masked against the color
869 * channels specified in the mask field, will be replaced by corresponding
870 * pixels from the sprite plane.
871 *
872 * Note that source & destination keying are exclusive; only one can be
873 * active on a given plane.
874 */
875
876#define I915_SET_COLORKEY_NONE (1<<0) /* disable color key matching */
877#define I915_SET_COLORKEY_DESTINATION (1<<1)
878#define I915_SET_COLORKEY_SOURCE (1<<2)
879struct drm_intel_sprite_colorkey {
880 __u32 plane_id;
881 __u32 min_value;
882 __u32 channel_mask;
883 __u32 max_value;
884 __u32 flags;
885};
886
847#endif /* _I915_DRM_H_ */ 887#endif /* _I915_DRM_H_ */
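As a concrete reading of the keying rules above: source keying on an RGB sprite surface uses only min_value and channel_mask. A sketch that makes pure black sprite pixels transparent (the plane_id is hypothetical):

        static int example_key_out_black(int fd, uint32_t plane_id)
        {
                struct drm_intel_sprite_colorkey ck = {
                        .plane_id     = plane_id,
                        .min_value    = 0x000000,       /* key on black */
                        .channel_mask = 0x00ffffff,     /* compare R, G and B */
                        .max_value    = 0,              /* unused for RGB surfaces */
                        .flags        = I915_SET_COLORKEY_SOURCE,
                };

                return ioctl(fd, DRM_IOCTL_I915_SET_SPRITE_COLORKEY, &ck);
        }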
diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h
index be94be6d6f1..b55da40953f 100644
--- a/include/drm/radeon_drm.h
+++ b/include/drm/radeon_drm.h
@@ -509,6 +509,7 @@ typedef struct {
509#define DRM_RADEON_GEM_SET_TILING 0x28 509#define DRM_RADEON_GEM_SET_TILING 0x28
510#define DRM_RADEON_GEM_GET_TILING 0x29 510#define DRM_RADEON_GEM_GET_TILING 0x29
511#define DRM_RADEON_GEM_BUSY 0x2a 511#define DRM_RADEON_GEM_BUSY 0x2a
512#define DRM_RADEON_GEM_VA 0x2b
512 513
513#define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t) 514#define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t)
514#define DRM_IOCTL_RADEON_CP_START DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_START) 515#define DRM_IOCTL_RADEON_CP_START DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_START)
@@ -550,6 +551,7 @@ typedef struct {
550#define DRM_IOCTL_RADEON_GEM_SET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_SET_TILING, struct drm_radeon_gem_set_tiling) 551#define DRM_IOCTL_RADEON_GEM_SET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_SET_TILING, struct drm_radeon_gem_set_tiling)
551#define DRM_IOCTL_RADEON_GEM_GET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_GET_TILING, struct drm_radeon_gem_get_tiling) 552#define DRM_IOCTL_RADEON_GEM_GET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_GET_TILING, struct drm_radeon_gem_get_tiling)
552#define DRM_IOCTL_RADEON_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_BUSY, struct drm_radeon_gem_busy) 553#define DRM_IOCTL_RADEON_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_BUSY, struct drm_radeon_gem_busy)
554#define DRM_IOCTL_RADEON_GEM_VA DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_VA, struct drm_radeon_gem_va)
553 555
554typedef struct drm_radeon_init { 556typedef struct drm_radeon_init {
555 enum { 557 enum {
@@ -872,12 +874,39 @@ struct drm_radeon_gem_pwrite {
872 uint64_t data_ptr; 874 uint64_t data_ptr;
873}; 875};
874 876
877#define RADEON_VA_MAP 1
878#define RADEON_VA_UNMAP 2
879
880#define RADEON_VA_RESULT_OK 0
881#define RADEON_VA_RESULT_ERROR 1
882#define RADEON_VA_RESULT_VA_EXIST 2
883
884#define RADEON_VM_PAGE_VALID (1 << 0)
885#define RADEON_VM_PAGE_READABLE (1 << 1)
886#define RADEON_VM_PAGE_WRITEABLE (1 << 2)
887#define RADEON_VM_PAGE_SYSTEM (1 << 3)
888#define RADEON_VM_PAGE_SNOOPED (1 << 4)
889
890struct drm_radeon_gem_va {
891 uint32_t handle;
892 uint32_t operation;
893 uint32_t vm_id;
894 uint32_t flags;
895 uint64_t offset;
896};
897
875#define RADEON_CHUNK_ID_RELOCS 0x01 898#define RADEON_CHUNK_ID_RELOCS 0x01
876#define RADEON_CHUNK_ID_IB 0x02 899#define RADEON_CHUNK_ID_IB 0x02
877#define RADEON_CHUNK_ID_FLAGS 0x03 900#define RADEON_CHUNK_ID_FLAGS 0x03
878 901
879/* The first dword of RADEON_CHUNK_ID_FLAGS is a uint32 of these flags: */ 902/* The first dword of RADEON_CHUNK_ID_FLAGS is a uint32 of these flags: */
880#define RADEON_CS_KEEP_TILING_FLAGS 0x01 903#define RADEON_CS_KEEP_TILING_FLAGS 0x01
904#define RADEON_CS_USE_VM 0x02
905/* The second dword of RADEON_CHUNK_ID_FLAGS is a uint32 that sets the ring type */
906#define RADEON_CS_RING_GFX 0
907#define RADEON_CS_RING_COMPUTE 1
 908/* The third dword of RADEON_CHUNK_ID_FLAGS is a signed int32 that sets the priority */
909/* 0 = normal, + = higher priority, - = lower priority */
881 910
882struct drm_radeon_cs_chunk { 911struct drm_radeon_cs_chunk {
883 uint32_t chunk_id; 912 uint32_t chunk_id;
@@ -885,6 +914,9 @@ struct drm_radeon_cs_chunk {
885 uint64_t chunk_data; 914 uint64_t chunk_data;
886}; 915};
887 916
917/* drm_radeon_cs_reloc.flags */
918#define RADEON_RELOC_DONT_SYNC 0x01
919
888struct drm_radeon_cs_reloc { 920struct drm_radeon_cs_reloc {
889 uint32_t handle; 921 uint32_t handle;
890 uint32_t read_domains; 922 uint32_t read_domains;
@@ -916,6 +948,10 @@ struct drm_radeon_cs {
916#define RADEON_INFO_NUM_TILE_PIPES 0x0b /* tile pipes for r600+ */ 948#define RADEON_INFO_NUM_TILE_PIPES 0x0b /* tile pipes for r600+ */
917#define RADEON_INFO_FUSION_GART_WORKING 0x0c /* fusion writes to GTT were broken before this */ 949#define RADEON_INFO_FUSION_GART_WORKING 0x0c /* fusion writes to GTT were broken before this */
918#define RADEON_INFO_BACKEND_MAP 0x0d /* pipe to backend map, needed by mesa */ 950#define RADEON_INFO_BACKEND_MAP 0x0d /* pipe to backend map, needed by mesa */
 951/* virtual address start; VAs below this are reserved by the kernel */
 952#define RADEON_INFO_VA_START 0x0e
 953/* maximum size of an IB using the virtual memory CS */
954#define RADEON_INFO_IB_VM_MAX_SIZE 0x0f
919 955
920struct drm_radeon_info { 956struct drm_radeon_info {
921 uint32_t request; 957 uint32_t request;
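Both new requests are read through the existing DRM_RADEON_INFO path. A sketch, with the caveat that in this interface the value field carries a user pointer the kernel writes the result through:

	/* Sketch: ask where the kernel-reserved VA range ends. */
	uint32_t va_start = 0;
	struct drm_radeon_info info = {
		.request = RADEON_INFO_VA_START,
		.value   = (uint64_t)(uintptr_t)&va_start,
	};

	if (drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info)) == 0) {
		/* allocate GPU virtual addresses at or above va_start */
	}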
diff --git a/include/drm/sis_drm.h b/include/drm/sis_drm.h
index 30f7b382746..035b804dda6 100644
--- a/include/drm/sis_drm.h
+++ b/include/drm/sis_drm.h
@@ -64,4 +64,8 @@ typedef struct {
64 unsigned int offset, size; 64 unsigned int offset, size;
65} drm_sis_fb_t; 65} drm_sis_fb_t;
66 66
67struct sis_file_private {
68 struct list_head obj_list;
69};
70
67#endif /* __SIS_DRM_H__ */ 71#endif /* __SIS_DRM_H__ */
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 42e34698518..974c8f801c3 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -122,17 +122,12 @@ struct ttm_mem_reg {
122 * be mmapped by user space. Each of these bos occupy a slot in the 122 * be mmapped by user space. Each of these bos occupy a slot in the
123 * device address space, that can be used for normal vm operations. 123 * device address space, that can be used for normal vm operations.
124 * 124 *
125 * @ttm_bo_type_user: These are user-space memory areas that are made
126 * available to the GPU by mapping the buffer pages into the GPU aperture
127 * space. These buffers cannot be mmaped from the device address space.
128 *
129 * @ttm_bo_type_kernel: These buffers are like ttm_bo_type_device buffers, 125 * @ttm_bo_type_kernel: These buffers are like ttm_bo_type_device buffers,
130 * but they cannot be accessed from user-space. For kernel-only use. 126 * but they cannot be accessed from user-space. For kernel-only use.
131 */ 127 */
132 128
133enum ttm_bo_type { 129enum ttm_bo_type {
134 ttm_bo_type_device, 130 ttm_bo_type_device,
135 ttm_bo_type_user,
136 ttm_bo_type_kernel 131 ttm_bo_type_kernel
137}; 132};
138 133
@@ -434,9 +429,9 @@ extern void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev,
434 * -EBUSY if the buffer is busy and no_wait is true. 429 * -EBUSY if the buffer is busy and no_wait is true.
435 * -ERESTARTSYS if interrupted by a signal. 430 * -ERESTARTSYS if interrupted by a signal.
436 */ 431 */
437
438extern int 432extern int
439ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait); 433ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait);
434
440/** 435/**
441 * ttm_bo_synccpu_write_release: 436 * ttm_bo_synccpu_write_release:
442 * 437 *
@@ -447,6 +442,22 @@ ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait);
447extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo); 442extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
448 443
449/** 444/**
445 * ttm_bo_acc_size
446 *
447 * @bdev: Pointer to a ttm_bo_device struct.
 448 * @bo_size: size of the buffer object in bytes.
 449 * @struct_size: size of the structure holding the buffer object data
 450 *
 451 * Returns the size to account for a buffer object
452 */
453size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
454 unsigned long bo_size,
455 unsigned struct_size);
456size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
457 unsigned long bo_size,
458 unsigned struct_size);
459
460/**
450 * ttm_bo_init 461 * ttm_bo_init
451 * 462 *
452 * @bdev: Pointer to a ttm_bo_device struct. 463 * @bdev: Pointer to a ttm_bo_device struct.
@@ -493,6 +504,7 @@ extern int ttm_bo_init(struct ttm_bo_device *bdev,
493 struct file *persistent_swap_storage, 504 struct file *persistent_swap_storage,
494 size_t acc_size, 505 size_t acc_size,
495 void (*destroy) (struct ttm_buffer_object *)); 506 void (*destroy) (struct ttm_buffer_object *));
507
496/** 508/**
497 * ttm_bo_synccpu_object_init 509 * ttm_bo_synccpu_object_init
498 * 510 *
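With the global size bookkeeping gone (see the ttm_bo_global hunk further down), each driver now computes the accounting size per object and feeds it to ttm_bo_init(). A sketch, where struct mydrv_bo is a hypothetical driver object embedding struct ttm_buffer_object:

	/* Sketch: account for the pages plus the driver's housekeeping structs. */
	size_t acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct mydrv_bo));

	/* acc_size is then passed as the acc_size argument of ttm_bo_init(). */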
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 94eb1434316..d43e892307f 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -43,36 +43,9 @@ struct ttm_backend;
43 43
44struct ttm_backend_func { 44struct ttm_backend_func {
45 /** 45 /**
46 * struct ttm_backend_func member populate
47 *
48 * @backend: Pointer to a struct ttm_backend.
49 * @num_pages: Number of pages to populate.
50 * @pages: Array of pointers to ttm pages.
51 * @dummy_read_page: Page to be used instead of NULL pages in the
52 * array @pages.
53 * @dma_addrs: Array of DMA (bus) address of the ttm pages.
54 *
55 * Populate the backend with ttm pages. Depending on the backend,
56 * it may or may not copy the @pages array.
57 */
58 int (*populate) (struct ttm_backend *backend,
59 unsigned long num_pages, struct page **pages,
60 struct page *dummy_read_page,
61 dma_addr_t *dma_addrs);
62 /**
63 * struct ttm_backend_func member clear
64 *
65 * @backend: Pointer to a struct ttm_backend.
66 *
67 * This is an "unpopulate" function. Release all resources
68 * allocated with populate.
69 */
70 void (*clear) (struct ttm_backend *backend);
71
72 /**
73 * struct ttm_backend_func member bind 46 * struct ttm_backend_func member bind
74 * 47 *
75 * @backend: Pointer to a struct ttm_backend. 48 * @ttm: Pointer to a struct ttm_tt.
76 * @bo_mem: Pointer to a struct ttm_mem_reg describing the 49 * @bo_mem: Pointer to a struct ttm_mem_reg describing the
77 * memory type and location for binding. 50 * memory type and location for binding.
78 * 51 *
@@ -80,46 +53,29 @@ struct ttm_backend_func {
80 * indicated by @bo_mem. This function should be able to handle 53 * indicated by @bo_mem. This function should be able to handle
81 * differences between aperture and system page sizes. 54 * differences between aperture and system page sizes.
82 */ 55 */
83 int (*bind) (struct ttm_backend *backend, struct ttm_mem_reg *bo_mem); 56 int (*bind) (struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
84 57
85 /** 58 /**
86 * struct ttm_backend_func member unbind 59 * struct ttm_backend_func member unbind
87 * 60 *
88 * @backend: Pointer to a struct ttm_backend. 61 * @ttm: Pointer to a struct ttm_tt.
89 * 62 *
90 * Unbind previously bound backend pages. This function should be 63 * Unbind previously bound backend pages. This function should be
91 * able to handle differences between aperture and system page sizes. 64 * able to handle differences between aperture and system page sizes.
92 */ 65 */
93 int (*unbind) (struct ttm_backend *backend); 66 int (*unbind) (struct ttm_tt *ttm);
94 67
95 /** 68 /**
96 * struct ttm_backend_func member destroy 69 * struct ttm_backend_func member destroy
97 * 70 *
98 * @backend: Pointer to a struct ttm_backend. 71 * @ttm: Pointer to a struct ttm_tt.
99 * 72 *
 100 * Destroy the backend. 73 * Destroy the backend. This will be called back from ttm_tt_destroy, so
 74 * don't call ttm_tt_destroy from the callback or it will loop forever.
101 */ 75 */
102 void (*destroy) (struct ttm_backend *backend); 76 void (*destroy) (struct ttm_tt *ttm);
103};
104
105/**
106 * struct ttm_backend
107 *
108 * @bdev: Pointer to a struct ttm_bo_device.
109 * @flags: For driver use.
110 * @func: Pointer to a struct ttm_backend_func that describes
111 * the backend methods.
112 *
113 */
114
115struct ttm_backend {
116 struct ttm_bo_device *bdev;
117 uint32_t flags;
118 struct ttm_backend_func *func;
119}; 77};
120 78
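The callbacks now operate on the struct ttm_tt itself; struct ttm_backend is gone. A sketch of a driver function table under the new signatures (all mydrv_ names hypothetical):

	static int mydrv_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
	{
		/* write GART/aperture entries for ttm->pages at bo_mem->start */
		return 0;
	}

	static int mydrv_ttm_unbind(struct ttm_tt *ttm)
	{
		/* clear the entries written by bind */
		return 0;
	}

	static void mydrv_ttm_destroy(struct ttm_tt *ttm)
	{
		ttm_tt_fini(ttm);	/* free base state; never ttm_tt_destroy() here */
		kfree(ttm);
	}

	static struct ttm_backend_func mydrv_backend_func = {
		.bind    = mydrv_ttm_bind,
		.unbind  = mydrv_ttm_unbind,
		.destroy = mydrv_ttm_destroy,
	};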
121#define TTM_PAGE_FLAG_USER (1 << 1)
122#define TTM_PAGE_FLAG_USER_DIRTY (1 << 2)
123#define TTM_PAGE_FLAG_WRITE (1 << 3) 79#define TTM_PAGE_FLAG_WRITE (1 << 3)
124#define TTM_PAGE_FLAG_SWAPPED (1 << 4) 80#define TTM_PAGE_FLAG_SWAPPED (1 << 4)
125#define TTM_PAGE_FLAG_PERSISTENT_SWAP (1 << 5) 81#define TTM_PAGE_FLAG_PERSISTENT_SWAP (1 << 5)
@@ -135,23 +91,18 @@ enum ttm_caching_state {
135/** 91/**
136 * struct ttm_tt 92 * struct ttm_tt
137 * 93 *
94 * @bdev: Pointer to a struct ttm_bo_device.
95 * @func: Pointer to a struct ttm_backend_func that describes
96 * the backend methods.
138 * @dummy_read_page: Page to map where the ttm_tt page array contains a NULL 97 * @dummy_read_page: Page to map where the ttm_tt page array contains a NULL
139 * pointer. 98 * pointer.
140 * @pages: Array of pages backing the data. 99 * @pages: Array of pages backing the data.
141 * @first_himem_page: Himem pages are put last in the page array, which
142 * enables us to run caching attribute changes on only the first part
143 * of the page array containing lomem pages. This is the index of the
144 * first himem page.
145 * @last_lomem_page: Index of the last lomem page in the page array.
146 * @num_pages: Number of pages in the page array. 100 * @num_pages: Number of pages in the page array.
147 * @bdev: Pointer to the current struct ttm_bo_device. 101 * @bdev: Pointer to the current struct ttm_bo_device.
148 * @be: Pointer to the ttm backend. 102 * @be: Pointer to the ttm backend.
149 * @tsk: The task for user ttm.
150 * @start: virtual address for user ttm.
151 * @swap_storage: Pointer to shmem struct file for swap storage. 103 * @swap_storage: Pointer to shmem struct file for swap storage.
152 * @caching_state: The current caching state of the pages. 104 * @caching_state: The current caching state of the pages.
153 * @state: The current binding state of the pages. 105 * @state: The current binding state of the pages.
154 * @dma_address: The DMA (bus) addresses of the pages (if TTM_PAGE_FLAG_DMA32)
155 * 106 *
156 * This is a structure holding the pages, caching- and aperture binding 107 * This is a structure holding the pages, caching- and aperture binding
157 * status for a buffer object that isn't backed by fixed (VRAM / AGP) 108 * status for a buffer object that isn't backed by fixed (VRAM / AGP)
@@ -159,16 +110,14 @@ enum ttm_caching_state {
159 */ 110 */
160 111
161struct ttm_tt { 112struct ttm_tt {
113 struct ttm_bo_device *bdev;
114 struct ttm_backend_func *func;
162 struct page *dummy_read_page; 115 struct page *dummy_read_page;
163 struct page **pages; 116 struct page **pages;
164 long first_himem_page;
165 long last_lomem_page;
166 uint32_t page_flags; 117 uint32_t page_flags;
167 unsigned long num_pages; 118 unsigned long num_pages;
168 struct ttm_bo_global *glob; 119 struct ttm_bo_global *glob;
169 struct ttm_backend *be; 120 struct ttm_backend *be;
170 struct task_struct *tsk;
171 unsigned long start;
172 struct file *swap_storage; 121 struct file *swap_storage;
173 enum ttm_caching_state caching_state; 122 enum ttm_caching_state caching_state;
174 enum { 123 enum {
@@ -176,7 +125,23 @@ struct ttm_tt {
176 tt_unbound, 125 tt_unbound,
177 tt_unpopulated, 126 tt_unpopulated,
178 } state; 127 } state;
128};
129
130/**
131 * struct ttm_dma_tt
132 *
133 * @ttm: Base ttm_tt struct.
134 * @dma_address: The DMA (bus) addresses of the pages
135 * @pages_list: used by some page allocation backend
136 *
137 * This is a structure holding the pages, caching- and aperture binding
138 * status for a buffer object that isn't backed by fixed (VRAM / AGP)
139 * memory.
140 */
141struct ttm_dma_tt {
142 struct ttm_tt ttm;
179 dma_addr_t *dma_address; 143 dma_addr_t *dma_address;
144 struct list_head pages_list;
180}; 145};
181 146
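A driver that needs bus addresses embeds struct ttm_dma_tt first in its own TT type, so the plain struct ttm_tt pointer handed to the callbacks converts back with container_of(). A hypothetical sketch:

	struct mydrv_ttm_tt {
		struct ttm_dma_tt	ttm;	/* keep first: &ttm.ttm is the base */
		/* driver-private state, e.g. a GART offset, goes here */
	};

	static dma_addr_t mydrv_page_bus_addr(struct ttm_tt *ttm, unsigned long i)
	{
		struct mydrv_ttm_tt *gtt =
			container_of(ttm, struct mydrv_ttm_tt, ttm.ttm);

		return gtt->ttm.dma_address[i];	/* bus address of page i */
	}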
182#define TTM_MEMTYPE_FLAG_FIXED (1 << 0) /* Fixed (on-card) PCI memory */ 147#define TTM_MEMTYPE_FLAG_FIXED (1 << 0) /* Fixed (on-card) PCI memory */
@@ -351,15 +316,42 @@ struct ttm_mem_type_manager {
351 316
352struct ttm_bo_driver { 317struct ttm_bo_driver {
353 /** 318 /**
354 * struct ttm_bo_driver member create_ttm_backend_entry 319 * ttm_tt_create
355 * 320 *
356 * @bdev: The buffer object device. 321 * @bdev: pointer to a struct ttm_bo_device:
322 * @size: Size of the data needed backing.
323 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
324 * @dummy_read_page: See struct ttm_bo_device.
357 * 325 *
358 * Create a driver specific struct ttm_backend. 326 * Create a struct ttm_tt to back data with system memory pages.
327 * No pages are actually allocated.
328 * Returns:
329 * NULL: Out of memory.
359 */ 330 */
331 struct ttm_tt *(*ttm_tt_create)(struct ttm_bo_device *bdev,
332 unsigned long size,
333 uint32_t page_flags,
334 struct page *dummy_read_page);
360 335
361 struct ttm_backend *(*create_ttm_backend_entry) 336 /**
362 (struct ttm_bo_device *bdev); 337 * ttm_tt_populate
338 *
339 * @ttm: The struct ttm_tt to contain the backing pages.
340 *
341 * Allocate all backing pages
342 * Returns:
343 * -ENOMEM: Out of memory.
344 */
345 int (*ttm_tt_populate)(struct ttm_tt *ttm);
346
347 /**
348 * ttm_tt_unpopulate
349 *
350 * @ttm: The struct ttm_tt to contain the backing pages.
351 *
 352 * Free all backing pages
353 */
354 void (*ttm_tt_unpopulate)(struct ttm_tt *ttm);
363 355
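Creation and population are now driver hooks rather than core TTM code. A sketch that allocates a plain ttm_tt, reuses the mydrv_backend_func table sketched earlier, and delegates paging to the generic pool helpers declared in ttm_page_alloc.h (mydrv_ names hypothetical):

	static struct ttm_tt *mydrv_ttm_tt_create(struct ttm_bo_device *bdev,
						  unsigned long size,
						  uint32_t page_flags,
						  struct page *dummy_read_page)
	{
		struct ttm_tt *ttm = kzalloc(sizeof(*ttm), GFP_KERNEL);

		if (!ttm)
			return NULL;
		ttm->func = &mydrv_backend_func;
		if (ttm_tt_init(ttm, bdev, size, page_flags, dummy_read_page)) {
			kfree(ttm);
			return NULL;
		}
		return ttm;
	}

	static struct ttm_bo_driver mydrv_bo_driver = {
		.ttm_tt_create     = mydrv_ttm_tt_create,
		.ttm_tt_populate   = ttm_pool_populate,	/* generic page pool */
		.ttm_tt_unpopulate = ttm_pool_unpopulate,
		/* ... remaining members (evict_flags, move, etc.) unchanged ... */
	};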
364 /** 356 /**
365 * struct ttm_bo_driver member invalidate_caches 357 * struct ttm_bo_driver member invalidate_caches
@@ -477,9 +469,6 @@ struct ttm_bo_global_ref {
477 * @dummy_read_page: Pointer to a dummy page used for mapping requests 469 * @dummy_read_page: Pointer to a dummy page used for mapping requests
478 * of unpopulated pages. 470 * of unpopulated pages.
479 * @shrink: A shrink callback object used for buffer object swap. 471 * @shrink: A shrink callback object used for buffer object swap.
480 * @ttm_bo_extra_size: Extra size (sizeof(struct ttm_buffer_object) excluded)
481 * used by a buffer object. This is excluding page arrays and backing pages.
482 * @ttm_bo_size: This is @ttm_bo_extra_size + sizeof(struct ttm_buffer_object).
483 * @device_list_mutex: Mutex protecting the device list. 472 * @device_list_mutex: Mutex protecting the device list.
484 * This mutex is held while traversing the device list for pm options. 473 * This mutex is held while traversing the device list for pm options.
485 * @lru_lock: Spinlock protecting the bo subsystem lru lists. 474 * @lru_lock: Spinlock protecting the bo subsystem lru lists.
@@ -497,8 +486,6 @@ struct ttm_bo_global {
497 struct ttm_mem_global *mem_glob; 486 struct ttm_mem_global *mem_glob;
498 struct page *dummy_read_page; 487 struct page *dummy_read_page;
499 struct ttm_mem_shrink shrink; 488 struct ttm_mem_shrink shrink;
500 size_t ttm_bo_extra_size;
501 size_t ttm_bo_size;
502 struct mutex device_list_mutex; 489 struct mutex device_list_mutex;
503 spinlock_t lru_lock; 490 spinlock_t lru_lock;
504 491
@@ -600,8 +587,9 @@ ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
600} 587}
601 588
602/** 589/**
603 * ttm_tt_create 590 * ttm_tt_init
604 * 591 *
592 * @ttm: The struct ttm_tt.
605 * @bdev: pointer to a struct ttm_bo_device: 593 * @bdev: pointer to a struct ttm_bo_device:
606 * @size: Size of the data needed backing. 594 * @size: Size of the data needed backing.
607 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags. 595 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
@@ -612,28 +600,22 @@ ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
612 * Returns: 600 * Returns:
613 * NULL: Out of memory. 601 * NULL: Out of memory.
614 */ 602 */
615extern struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, 603extern int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
616 unsigned long size, 604 unsigned long size, uint32_t page_flags,
617 uint32_t page_flags, 605 struct page *dummy_read_page);
618 struct page *dummy_read_page); 606extern int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
607 unsigned long size, uint32_t page_flags,
608 struct page *dummy_read_page);
619 609
620/** 610/**
621 * ttm_tt_set_user: 611 * ttm_tt_fini
622 * 612 *
623 * @ttm: The struct ttm_tt to populate. 613 * @ttm: the ttm_tt structure.
624 * @tsk: A struct task_struct for which @start is a valid user-space address.
625 * @start: A valid user-space address.
626 * @num_pages: Size in pages of the user memory area.
627 * 614 *
 628 * Populate a struct ttm_tt with a user-space memory area after first pinning 615 * Free the memory of a ttm_tt structure
629 * the pages backing it.
630 * Returns:
631 * !0: Error.
632 */ 616 */
633 617extern void ttm_tt_fini(struct ttm_tt *ttm);
634extern int ttm_tt_set_user(struct ttm_tt *ttm, 618extern void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma);
635 struct task_struct *tsk,
636 unsigned long start, unsigned long num_pages);
637 619
638/** 620/**
639 * ttm_ttm_bind: 621 * ttm_ttm_bind:
@@ -646,20 +628,11 @@ extern int ttm_tt_set_user(struct ttm_tt *ttm,
646extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem); 628extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
647 629
648/** 630/**
649 * ttm_tt_populate:
650 *
651 * @ttm: The struct ttm_tt to contain the backing pages.
652 *
653 * Add backing pages to all of @ttm
654 */
655extern int ttm_tt_populate(struct ttm_tt *ttm);
656
657/**
658 * ttm_ttm_destroy: 631 * ttm_ttm_destroy:
659 * 632 *
660 * @ttm: The struct ttm_tt. 633 * @ttm: The struct ttm_tt.
661 * 634 *
 662 * Unbind, unpopulate and destroy a struct ttm_tt. 635 * Unbind, unpopulate and destroy a common struct ttm_tt.
663 */ 636 */
664extern void ttm_tt_destroy(struct ttm_tt *ttm); 637extern void ttm_tt_destroy(struct ttm_tt *ttm);
665 638
@@ -673,19 +646,13 @@ extern void ttm_tt_destroy(struct ttm_tt *ttm);
673extern void ttm_tt_unbind(struct ttm_tt *ttm); 646extern void ttm_tt_unbind(struct ttm_tt *ttm);
674 647
675/** 648/**
676 * ttm_ttm_destroy: 649 * ttm_tt_swapin:
677 * 650 *
678 * @ttm: The struct ttm_tt. 651 * @ttm: The struct ttm_tt.
679 * @index: Index of the desired page.
680 *
681 * Return a pointer to the struct page backing @ttm at page
682 * index @index. If the page is unpopulated, one will be allocated to
683 * populate that index.
684 * 652 *
 685 * Returns: 653 * Swap in a previously swapped-out ttm_tt.
686 * NULL on OOM.
687 */ 654 */
688extern struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index); 655extern int ttm_tt_swapin(struct ttm_tt *ttm);
689 656
690/** 657/**
691 * ttm_tt_cache_flush: 658 * ttm_tt_cache_flush:
@@ -1046,17 +1013,25 @@ extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;
1046#include <linux/agp_backend.h> 1013#include <linux/agp_backend.h>
1047 1014
1048/** 1015/**
1049 * ttm_agp_backend_init 1016 * ttm_agp_tt_create
1050 * 1017 *
1051 * @bdev: Pointer to a struct ttm_bo_device. 1018 * @bdev: Pointer to a struct ttm_bo_device.
1052 * @bridge: The agp bridge this device is sitting on. 1019 * @bridge: The agp bridge this device is sitting on.
1020 * @size: Size of the data needed backing.
1021 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
1022 * @dummy_read_page: See struct ttm_bo_device.
1023 *
1053 * 1024 *
1054 * Create a TTM backend that uses the indicated AGP bridge as an aperture 1025 * Create a TTM backend that uses the indicated AGP bridge as an aperture
1055 * for TT memory. This function uses the linux agpgart interface to 1026 * for TT memory. This function uses the linux agpgart interface to
1056 * bind and unbind memory backing a ttm_tt. 1027 * bind and unbind memory backing a ttm_tt.
1057 */ 1028 */
1058extern struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev, 1029extern struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
1059 struct agp_bridge_data *bridge); 1030 struct agp_bridge_data *bridge,
1031 unsigned long size, uint32_t page_flags,
1032 struct page *dummy_read_page);
1033int ttm_agp_tt_populate(struct ttm_tt *ttm);
1034void ttm_agp_tt_unpopulate(struct ttm_tt *ttm);
1060#endif 1035#endif
1061 1036
1062#endif 1037#endif
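For AGP devices the same hooks map directly onto the new AGP helpers: ttm_tt_create can simply forward to ttm_agp_tt_create() with the bridge captured at load time, while the populate pair wire straight to ttm_agp_tt_populate()/ttm_agp_tt_unpopulate(). A sketch (mydrv_agp_bridge hypothetical):

	static struct ttm_tt *mydrv_agp_tt_create(struct ttm_bo_device *bdev,
						  unsigned long size,
						  uint32_t page_flags,
						  struct page *dummy_read_page)
	{
		return ttm_agp_tt_create(bdev, mydrv_agp_bridge, size,
					 page_flags, dummy_read_page);
	}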
diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
index 129de12353f..5fe27400d17 100644
--- a/include/drm/ttm/ttm_page_alloc.h
+++ b/include/drm/ttm/ttm_page_alloc.h
@@ -30,45 +30,70 @@
30#include "ttm_memory.h" 30#include "ttm_memory.h"
31 31
32/** 32/**
33 * Get count number of pages from pool to pages list. 33 * Initialize pool allocator.
34 */
35int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
36/**
37 * Free pool allocator.
38 */
39void ttm_page_alloc_fini(void);
40
41/**
42 * ttm_pool_populate:
43 *
44 * @ttm: The struct ttm_tt to contain the backing pages.
34 * 45 *
35 * @pages: head of empty linked list where pages are filled. 46 * Add backing pages to all of @ttm
36 * @flags: ttm flags for page allocation.
37 * @cstate: ttm caching state for the page.
38 * @count: number of pages to allocate.
39 * @dma_address: The DMA (bus) address of pages (if TTM_PAGE_FLAG_DMA32 set).
40 */ 47 */
41int ttm_get_pages(struct list_head *pages, 48extern int ttm_pool_populate(struct ttm_tt *ttm);
42 int flags, 49
43 enum ttm_caching_state cstate,
44 unsigned count,
45 dma_addr_t *dma_address);
46/** 50/**
47 * Put linked list of pages to pool. 51 * ttm_pool_unpopulate:
52 *
53 * @ttm: The struct ttm_tt which to free backing pages.
48 * 54 *
49 * @pages: list of pages to free. 55 * Free all pages of @ttm
50 * @page_count: number of pages in the list. Zero can be passed for unknown
51 * count.
52 * @flags: ttm flags for page allocation.
53 * @cstate: ttm caching state.
54 * @dma_address: The DMA (bus) address of pages (if TTM_PAGE_FLAG_DMA32 set).
55 */ 56 */
56void ttm_put_pages(struct list_head *pages, 57extern void ttm_pool_unpopulate(struct ttm_tt *ttm);
57 unsigned page_count, 58
58 int flags, 59/**
59 enum ttm_caching_state cstate, 60 * Output the state of pools to debugfs file
60 dma_addr_t *dma_address); 61 */
62extern int ttm_page_alloc_debugfs(struct seq_file *m, void *data);
63
64
65#ifdef CONFIG_SWIOTLB
61/** 66/**
62 * Initialize pool allocator. 67 * Initialize pool allocator.
63 */ 68 */
64int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages); 69int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
70
65/** 71/**
66 * Free pool allocator. 72 * Free pool allocator.
67 */ 73 */
68void ttm_page_alloc_fini(void); 74void ttm_dma_page_alloc_fini(void);
69 75
70/** 76/**
71 * Output the state of pools to debugfs file 77 * Output the state of pools to debugfs file
72 */ 78 */
73extern int ttm_page_alloc_debugfs(struct seq_file *m, void *data); 79extern int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
80
81extern int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
82extern void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
83
84#else
85static inline int ttm_dma_page_alloc_init(struct ttm_mem_global *glob,
86 unsigned max_pages)
87{
88 return -ENODEV;
89}
90
91static inline void ttm_dma_page_alloc_fini(void) { return; }
92
93static inline int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
94{
95 return 0;
96}
97#endif
98
74#endif 99#endif
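The split between the two pools is the driver's choice at populate time: when swiotlb is compiled in and active, route through the coherent DMA pool, otherwise fall back to the generic one. This mirrors the pattern the in-tree drivers adopt with this series; a sketch (mydrv_dev is an assumed struct device pointer):

	static int mydrv_ttm_tt_populate(struct ttm_tt *ttm)
	{
	#ifdef CONFIG_SWIOTLB
		if (swiotlb_nr_tbl()) {
			struct ttm_dma_tt *dma =
				container_of(ttm, struct ttm_dma_tt, ttm);

			return ttm_dma_populate(dma, mydrv_dev);
		}
	#endif
		return ttm_pool_populate(ttm);
	}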
diff --git a/include/drm/via_drm.h b/include/drm/via_drm.h
index fd11a5bd892..79b3b6e0f6b 100644
--- a/include/drm/via_drm.h
+++ b/include/drm/via_drm.h
@@ -274,4 +274,8 @@ typedef struct drm_via_dmablit {
274 drm_via_blitsync_t sync; 274 drm_via_blitsync_t sync;
275} drm_via_dmablit_t; 275} drm_via_dmablit_t;
276 276
277struct via_file_private {
278 struct list_head obj_list;
279};
280
277#endif /* _VIA_DRM_H_ */ 281#endif /* _VIA_DRM_H_ */
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 445702c60d0..e872526fdc5 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -24,7 +24,7 @@ extern int swiotlb_force;
24 24
25extern void swiotlb_init(int verbose); 25extern void swiotlb_init(int verbose);
26extern void swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose); 26extern void swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose);
27extern unsigned long swioltb_nr_tbl(void); 27extern unsigned long swiotlb_nr_tbl(void);
28 28
29/* 29/*
30 * Enumeration for sync targets 30 * Enumeration for sync targets
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 99093b39614..058935ef397 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -110,11 +110,11 @@ setup_io_tlb_npages(char *str)
110__setup("swiotlb=", setup_io_tlb_npages); 110__setup("swiotlb=", setup_io_tlb_npages);
111/* make io_tlb_overflow tunable too? */ 111/* make io_tlb_overflow tunable too? */
112 112
113unsigned long swioltb_nr_tbl(void) 113unsigned long swiotlb_nr_tbl(void)
114{ 114{
115 return io_tlb_nslabs; 115 return io_tlb_nslabs;
116} 116}
117 117EXPORT_SYMBOL_GPL(swiotlb_nr_tbl);
118/* Note that this doesn't work with highmem page */ 118/* Note that this doesn't work with highmem page */
119static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev, 119static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
120 volatile void *address) 120 volatile void *address)
@@ -321,6 +321,7 @@ void __init swiotlb_free(void)
321 free_bootmem_late(__pa(io_tlb_start), 321 free_bootmem_late(__pa(io_tlb_start),
322 PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); 322 PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
323 } 323 }
324 io_tlb_nslabs = 0;
324} 325}
325 326
326static int is_swiotlb_buffer(phys_addr_t paddr) 327static int is_swiotlb_buffer(phys_addr_t paddr)