aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/internal.h2
-rw-r--r--drivers/acpi/video.c67
-rw-r--r--drivers/acpi/video_detect.c15
-rw-r--r--drivers/ata/Kconfig11
-rw-r--r--drivers/ata/Makefile1
-rw-r--r--drivers/ata/ahci.c9
-rw-r--r--drivers/ata/ahci_imx.c236
-rw-r--r--drivers/ata/ata_piix.c2
-rw-r--r--drivers/ata/libata-scsi.c6
-rw-r--r--drivers/ata/sata_inic162x.c14
-rw-r--r--drivers/cpufreq/intel_pstate.c12
-rw-r--r--drivers/crypto/caam/caamhash.c2
-rw-r--r--drivers/edac/edac_mc.c9
-rw-r--r--drivers/edac/edac_mc_sysfs.c28
-rw-r--r--drivers/edac/i5100_edac.c2
-rw-r--r--drivers/gpu/drm/Kconfig4
-rw-r--r--drivers/gpu/drm/Makefile5
-rw-r--r--drivers/gpu/drm/ast/ast_drv.c5
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h3
-rw-r--r--drivers/gpu/drm/ast/ast_main.c9
-rw-r--r--drivers/gpu/drm/ast/ast_ttm.c5
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.c5
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.h3
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_main.c9
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_ttm.c5
-rw-r--r--drivers/gpu/drm/drm_agpsupport.c51
-rw-r--r--drivers/gpu/drm/drm_bufs.c236
-rw-r--r--drivers/gpu/drm/drm_context.c81
-rw-r--r--drivers/gpu/drm/drm_crtc.c105
-rw-r--r--drivers/gpu/drm/drm_dma.c17
-rw-r--r--drivers/gpu/drm/drm_drv.c106
-rw-r--r--drivers/gpu/drm/drm_edid.c247
-rw-r--r--drivers/gpu/drm/drm_fb_cma_helper.c5
-rw-r--r--drivers/gpu/drm/drm_flip_work.c124
-rw-r--r--drivers/gpu/drm/drm_fops.c98
-rw-r--r--drivers/gpu/drm/drm_gem.c440
-rw-r--r--drivers/gpu/drm/drm_gem_cma_helper.c26
-rw-r--r--drivers/gpu/drm/drm_info.c6
-rw-r--r--drivers/gpu/drm/drm_ioctl.c62
-rw-r--r--drivers/gpu/drm/drm_memory.c2
-rw-r--r--drivers/gpu/drm/drm_mm.c195
-rw-r--r--drivers/gpu/drm/drm_modes.c58
-rw-r--r--drivers/gpu/drm/drm_pci.c35
-rw-r--r--drivers/gpu/drm/drm_platform.c16
-rw-r--r--drivers/gpu/drm/drm_prime.c190
-rw-r--r--drivers/gpu/drm/drm_proc.c209
-rw-r--r--drivers/gpu/drm/drm_scatter.c29
-rw-r--r--drivers/gpu/drm/drm_stub.c73
-rw-r--r--drivers/gpu/drm/drm_usb.c9
-rw-r--r--drivers/gpu/drm/drm_vm.c3
-rw-r--r--drivers/gpu/drm/drm_vma_manager.c436
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dmabuf.c35
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c36
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.h9
-rw-r--r--drivers/gpu/drm/gma500/Makefile1
-rw-r--r--drivers/gpu/drm/gma500/cdv_device.c3
-rw-r--r--drivers/gpu/drm/gma500/cdv_device.h12
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_crt.c57
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_display.c920
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_dp.c154
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_hdmi.c89
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_lvds.c71
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c31
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.h2
-rw-r--r--drivers/gpu/drm/gma500/gem.c39
-rw-r--r--drivers/gpu/drm/gma500/gma_display.c776
-rw-r--r--drivers/gpu/drm/gma500/gma_display.h103
-rw-r--r--drivers/gpu/drm/gma500/gtt.c38
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_output.c15
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_output.h16
-rw-r--r--drivers/gpu/drm/gma500/mdfld_intel_display.c65
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_crtc.c63
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_hdmi.c43
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_lvds.c48
-rw-r--r--drivers/gpu/drm/gma500/psb_device.c3
-rw-r--r--drivers/gpu/drm/gma500/psb_device.h (renamed from drivers/gpu/drm/gma500/psb_intel_display.h)13
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c21
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h7
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_display.c944
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_drv.h44
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_lvds.c75
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_sdvo.c53
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c481
-rw-r--r--drivers/gpu/drm/i810/i810_dma.c3
-rw-r--r--drivers/gpu/drm/i810/i810_drv.c3
-rw-r--r--drivers/gpu/drm/i810/i810_drv.h2
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c6
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c5
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h7
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c24
-rw-r--r--drivers/gpu/drm/i915/i915_gem_dmabuf.c20
-rw-r--r--drivers/gpu/drm/i915/i915_gem_stolen.c81
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h2
-rw-r--r--drivers/gpu/drm/i915/intel_display.c23
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c47
-rw-r--r--drivers/gpu/drm/mga/mga_drv.c3
-rw-r--r--drivers/gpu/drm/mga/mga_drv.h2
-rw-r--r--drivers/gpu/drm/mga/mga_state.c2
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.c5
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.h3
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_main.c9
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_ttm.c5
-rw-r--r--drivers/gpu/drm/msm/Kconfig34
-rw-r--r--drivers/gpu/drm/msm/Makefile30
-rw-r--r--drivers/gpu/drm/msm/NOTES69
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx.xml.h1438
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx.xml.h2193
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.c502
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.h30
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_common.xml.h432
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c370
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h141
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h254
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.xml.h502
-rw-r--r--drivers/gpu/drm/msm/dsi/mmss_cc.xml.h114
-rw-r--r--drivers/gpu/drm/msm/dsi/sfpb.xml.h48
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c235
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.h112
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.xml.h508
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_connector.c461
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_i2c.c281
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c141
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c214
-rw-r--r--drivers/gpu/drm/msm/hdmi/qfprom.xml.h50
-rw-r--r--drivers/gpu/drm/msm/mdp4/mdp4.xml.h1061
-rw-r--r--drivers/gpu/drm/msm/mdp4/mdp4_crtc.c685
-rw-r--r--drivers/gpu/drm/msm/mdp4/mdp4_dtv_encoder.c317
-rw-r--r--drivers/gpu/drm/msm/mdp4/mdp4_format.c56
-rw-r--r--drivers/gpu/drm/msm/mdp4/mdp4_irq.c203
-rw-r--r--drivers/gpu/drm/msm/mdp4/mdp4_kms.c368
-rw-r--r--drivers/gpu/drm/msm/mdp4/mdp4_kms.h194
-rw-r--r--drivers/gpu/drm/msm/mdp4/mdp4_plane.c243
-rw-r--r--drivers/gpu/drm/msm/msm_connector.c34
-rw-r--r--drivers/gpu/drm/msm/msm_connector.h68
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c776
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h211
-rw-r--r--drivers/gpu/drm/msm/msm_fb.c202
-rw-r--r--drivers/gpu/drm/msm/msm_fbdev.c258
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c597
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h99
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c412
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c463
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h124
-rw-r--r--drivers/gpu/drm/msm/msm_ringbuffer.c61
-rw-r--r--drivers/gpu/drm/msm/msm_ringbuffer.h43
-rw-r--r--drivers/gpu/drm/nouveau/core/core/printk.c19
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/dport.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/printk.h13
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/init.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/base.c6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c49
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c42
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c27
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c24
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.h7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c260
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.h9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.c14
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c2
-rw-r--r--drivers/gpu/drm/omapdrm/Makefile3
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c3
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.c2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c12
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.h7
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.c74
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c52
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem_helpers.c169
-rw-r--r--drivers/gpu/drm/omapdrm/omap_plane.c51
-rw-r--r--drivers/gpu/drm/qxl/qxl_cmd.c42
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c70
-rw-r--r--drivers/gpu/drm/qxl/qxl_draw.c263
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.c3
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h84
-rw-r--r--drivers/gpu/drm/qxl/qxl_dumb.c7
-rw-r--r--drivers/gpu/drm/qxl/qxl_fb.c184
-rw-r--r--drivers/gpu/drm/qxl/qxl_fence.c10
-rw-r--r--drivers/gpu/drm/qxl/qxl_gem.c28
-rw-r--r--drivers/gpu/drm/qxl/qxl_image.c111
-rw-r--r--drivers/gpu/drm/qxl/qxl_ioctl.c321
-rw-r--r--drivers/gpu/drm/qxl/qxl_object.c71
-rw-r--r--drivers/gpu/drm/qxl/qxl_object.h8
-rw-r--r--drivers/gpu/drm/qxl/qxl_release.c212
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c6
-rw-r--r--drivers/gpu/drm/r128/r128_cce.c2
-rw-r--r--drivers/gpu/drm/r128/r128_drv.c3
-rw-r--r--drivers/gpu/drm/r128/r128_drv.h2
-rw-r--r--drivers/gpu/drm/r128/r128_state.c2
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c45
-rw-r--r--drivers/gpu/drm/radeon/cik.c14
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c4
-rw-r--r--drivers/gpu/drm/radeon/ni.c6
-rw-r--r--drivers/gpu/drm/radeon/r100.c2
-rw-r--r--drivers/gpu/drm/radeon/r600.c19
-rw-r--r--drivers/gpu/drm/radeon/r600_cp.c6
-rw-r--r--drivers/gpu/drm/radeon/r600_dpm.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon.h3
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c159
-rw-r--r--drivers/gpu/drm/radeon/radeon_cp.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c20
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c25
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h5
-rw-r--r--drivers/gpu/drm/radeon/radeon_prime.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c4
-rw-r--r--drivers/gpu/drm/radeon/rv6xx_dpm.c41
-rw-r--r--drivers/gpu/drm/radeon/si.c14
-rw-r--r--drivers/gpu/drm/rcar-du/Kconfig7
-rw-r--r--drivers/gpu/drm/rcar-du/Makefile10
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.c258
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.h13
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.c176
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.h63
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_encoder.c202
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_encoder.h49
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_group.c187
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_group.h50
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.c165
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.h29
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c (renamed from drivers/gpu/drm/rcar-du/rcar_du_lvds.c)101
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h (renamed from drivers/gpu/drm/rcar-du/rcar_du_lvds.h)17
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c196
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h46
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_plane.c170
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_plane.h26
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_regs.h94
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_vgacon.c (renamed from drivers/gpu/drm/rcar-du/rcar_du_vga.c)65
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_vgacon.h (renamed from drivers/gpu/drm/rcar-du/rcar_du_vga.h)15
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_lvds_regs.h69
-rw-r--r--drivers/gpu/drm/savage/savage_bci.c2
-rw-r--r--drivers/gpu/drm/savage/savage_drv.c3
-rw-r--r--drivers/gpu/drm/savage/savage_drv.h2
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_crtc.c3
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_drv.c3
-rw-r--r--drivers/gpu/drm/sis/sis_drv.c3
-rw-r--r--drivers/gpu/drm/sis/sis_drv.h2
-rw-r--r--drivers/gpu/drm/sis/sis_mm.c8
-rw-r--r--drivers/gpu/drm/tdfx/tdfx_drv.c2
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_crtc.c43
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c3
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_slave.c27
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c102
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_manager.c41
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c231
-rw-r--r--drivers/gpu/drm/udl/udl_drv.c3
-rw-r--r--drivers/gpu/drm/udl/udl_drv.h2
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c63
-rw-r--r--drivers/gpu/drm/udl/udl_main.c4
-rw-r--r--drivers/gpu/drm/via/via_dma.c2
-rw-r--r--drivers/gpu/drm/via/via_drv.c3
-rw-r--r--drivers/gpu/drm/via/via_drv.h2
-rw-r--r--drivers/gpu/drm/via/via_mm.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c28
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c4
-rw-r--r--drivers/gpu/host1x/drm/drm.c7
-rw-r--r--drivers/gpu/host1x/drm/gem.c16
-rw-r--r--drivers/gpu/host1x/drm/gem.h3
-rw-r--r--drivers/gpu/host1x/drm/hdmi.c20
-rw-r--r--drivers/gpu/vga/vga_switcheroo.c147
-rw-r--r--drivers/hv/hv_balloon.c21
-rw-r--r--drivers/hv/vmbus_drv.c8
-rw-r--r--drivers/md/raid10.c8
-rw-r--r--drivers/md/raid5.c15
-rw-r--r--drivers/md/raid5.h1
-rw-r--r--drivers/misc/atmel-ssc.c11
-rw-r--r--drivers/misc/mei/hbm.c2
-rw-r--r--drivers/misc/mei/hw-me.c14
-rw-r--r--drivers/misc/mei/init.c3
-rw-r--r--drivers/mmc/host/pxamci.c2
-rw-r--r--drivers/of/irq.c6
-rw-r--r--drivers/pci/hotplug/rpadlpar_core.c1
-rw-r--r--drivers/pinctrl/core.c1
-rw-r--r--drivers/pinctrl/pinctrl-single.c2
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh73a0.c1
-rw-r--r--drivers/pinctrl/sirf/pinctrl-atlas6.c24
-rw-r--r--drivers/scsi/isci/request.c2
-rw-r--r--drivers/scsi/isci/task.c9
-rw-r--r--drivers/scsi/mvsas/mv_sas.c11
-rw-r--r--drivers/scsi/mvsas/mv_sas.h1
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c11
-rw-r--r--drivers/scsi/sd.c22
-rw-r--r--drivers/staging/android/logger.c4
-rw-r--r--drivers/staging/comedi/TODO2
-rw-r--r--drivers/staging/comedi/comedi_fops.c32
-rw-r--r--drivers/staging/frontier/alphatrack.c2
-rw-r--r--drivers/staging/gdm72xx/gdm_qos.c2
-rw-r--r--drivers/staging/imx-drm/Kconfig1
-rw-r--r--drivers/staging/imx-drm/imx-drm-core.c25
-rw-r--r--drivers/staging/imx-drm/ipuv3-crtc.c3
-rw-r--r--drivers/staging/tidspbridge/pmgr/dbll.c7
-rw-r--r--drivers/staging/zram/zram_drv.c6
-rw-r--r--drivers/tty/serial/8250/8250_early.c3
-rw-r--r--drivers/tty/serial/Kconfig2
-rw-r--r--drivers/tty/synclinkmp.c2
-rw-r--r--drivers/usb/core/hub.c48
-rw-r--r--drivers/usb/core/hub.h3
-rw-r--r--drivers/usb/dwc3/Kconfig2
-rw-r--r--drivers/usb/dwc3/core.c2
-rw-r--r--drivers/usb/dwc3/core.h4
-rw-r--r--drivers/usb/dwc3/gadget.c1
-rw-r--r--drivers/usb/gadget/Kconfig5
-rw-r--r--drivers/usb/gadget/at91_udc.c16
-rw-r--r--drivers/usb/gadget/f_ecm.c7
-rw-r--r--drivers/usb/gadget/f_eem.c7
-rw-r--r--drivers/usb/gadget/f_ncm.c7
-rw-r--r--drivers/usb/gadget/f_phonet.c7
-rw-r--r--drivers/usb/gadget/f_rndis.c7
-rw-r--r--drivers/usb/gadget/f_subset.c7
-rw-r--r--drivers/usb/gadget/fotg210-udc.c4
-rw-r--r--drivers/usb/gadget/mv_u3d_core.c4
-rw-r--r--drivers/usb/gadget/udc-core.c6
-rw-r--r--drivers/usb/host/ehci-hub.c1
-rw-r--r--drivers/usb/host/pci-quirks.h1
-rw-r--r--drivers/usb/host/xhci-pci.c1
-rw-r--r--drivers/usb/host/xhci-ring.c2
-rw-r--r--drivers/usb/host/xhci.c17
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb.c1
-rw-r--r--drivers/usb/phy/phy-omap-usb3.c2
-rw-r--r--drivers/usb/phy/phy-samsung-usb2.c2
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c4
-rw-r--r--drivers/usb/serial/cp210x.c4
-rw-r--r--drivers/usb/serial/mos7840.c25
-rw-r--r--drivers/usb/serial/option.c23
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c2
-rw-r--r--drivers/usb/storage/unusual_devs.h7
-rw-r--r--drivers/video/backlight/max8925_bl.c41
-rw-r--r--drivers/video/hdmi.c88
340 files changed, 22323 insertions, 6281 deletions
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 227aca77ee1e..5da44e81dd4d 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -169,10 +169,8 @@ int acpi_create_platform_device(struct acpi_device *adev,
169 -------------------------------------------------------------------------- */ 169 -------------------------------------------------------------------------- */
170#if defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE) 170#if defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE)
171bool acpi_video_backlight_quirks(void); 171bool acpi_video_backlight_quirks(void);
172bool acpi_video_verify_backlight_support(void);
173#else 172#else
174static inline bool acpi_video_backlight_quirks(void) { return false; } 173static inline bool acpi_video_backlight_quirks(void) { return false; }
175static inline bool acpi_video_verify_backlight_support(void) { return false; }
176#endif 174#endif
177 175
178#endif /* _ACPI_INTERNAL_H_ */ 176#endif /* _ACPI_INTERNAL_H_ */
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 6dd237e79b4f..0ec434d2586d 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -911,7 +911,7 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
911 if (acpi_video_init_brightness(device)) 911 if (acpi_video_init_brightness(device))
912 return; 912 return;
913 913
914 if (acpi_video_verify_backlight_support()) { 914 if (acpi_video_backlight_support()) {
915 struct backlight_properties props; 915 struct backlight_properties props;
916 struct pci_dev *pdev; 916 struct pci_dev *pdev;
917 acpi_handle acpi_parent; 917 acpi_handle acpi_parent;
@@ -1366,8 +1366,8 @@ acpi_video_switch_brightness(struct acpi_video_device *device, int event)
1366 unsigned long long level_current, level_next; 1366 unsigned long long level_current, level_next;
1367 int result = -EINVAL; 1367 int result = -EINVAL;
1368 1368
1369 /* no warning message if acpi_backlight=vendor or a quirk is used */ 1369 /* no warning message if acpi_backlight=vendor is used */
1370 if (!acpi_video_verify_backlight_support()) 1370 if (!acpi_video_backlight_support())
1371 return 0; 1371 return 0;
1372 1372
1373 if (!device->brightness) 1373 if (!device->brightness)
@@ -1875,46 +1875,6 @@ static int acpi_video_bus_remove(struct acpi_device *device)
1875 return 0; 1875 return 0;
1876} 1876}
1877 1877
1878static acpi_status video_unregister_backlight(acpi_handle handle, u32 lvl,
1879 void *context, void **rv)
1880{
1881 struct acpi_device *acpi_dev;
1882 struct acpi_video_bus *video;
1883 struct acpi_video_device *dev, *next;
1884
1885 if (acpi_bus_get_device(handle, &acpi_dev))
1886 return AE_OK;
1887
1888 if (acpi_match_device_ids(acpi_dev, video_device_ids))
1889 return AE_OK;
1890
1891 video = acpi_driver_data(acpi_dev);
1892 if (!video)
1893 return AE_OK;
1894
1895 acpi_video_bus_stop_devices(video);
1896 mutex_lock(&video->device_list_lock);
1897 list_for_each_entry_safe(dev, next, &video->video_device_list, entry) {
1898 if (dev->backlight) {
1899 backlight_device_unregister(dev->backlight);
1900 dev->backlight = NULL;
1901 kfree(dev->brightness->levels);
1902 kfree(dev->brightness);
1903 }
1904 if (dev->cooling_dev) {
1905 sysfs_remove_link(&dev->dev->dev.kobj,
1906 "thermal_cooling");
1907 sysfs_remove_link(&dev->cooling_dev->device.kobj,
1908 "device");
1909 thermal_cooling_device_unregister(dev->cooling_dev);
1910 dev->cooling_dev = NULL;
1911 }
1912 }
1913 mutex_unlock(&video->device_list_lock);
1914 acpi_video_bus_start_devices(video);
1915 return AE_OK;
1916}
1917
1918static int __init is_i740(struct pci_dev *dev) 1878static int __init is_i740(struct pci_dev *dev)
1919{ 1879{
1920 if (dev->device == 0x00D1) 1880 if (dev->device == 0x00D1)
@@ -1946,25 +1906,14 @@ static int __init intel_opregion_present(void)
1946 return opregion; 1906 return opregion;
1947} 1907}
1948 1908
1949int __acpi_video_register(bool backlight_quirks) 1909int acpi_video_register(void)
1950{ 1910{
1951 bool no_backlight; 1911 int result = 0;
1952 int result;
1953
1954 no_backlight = backlight_quirks ? acpi_video_backlight_quirks() : false;
1955
1956 if (register_count) { 1912 if (register_count) {
1957 /* 1913 /*
1958 * If acpi_video_register() has been called already, don't try 1914 * if the function of acpi_video_register is already called,
1959 * to register acpi_video_bus, but unregister backlight devices 1915 * don't register the acpi_vide_bus again and return no error.
1960 * if no backlight support is requested.
1961 */ 1916 */
1962 if (no_backlight)
1963 acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
1964 ACPI_UINT32_MAX,
1965 video_unregister_backlight,
1966 NULL, NULL, NULL);
1967
1968 return 0; 1917 return 0;
1969 } 1918 }
1970 1919
@@ -1980,7 +1929,7 @@ int __acpi_video_register(bool backlight_quirks)
1980 1929
1981 return 0; 1930 return 0;
1982} 1931}
1983EXPORT_SYMBOL(__acpi_video_register); 1932EXPORT_SYMBOL(acpi_video_register);
1984 1933
1985void acpi_video_unregister(void) 1934void acpi_video_unregister(void)
1986{ 1935{
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 826e52def080..c3397748ba46 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -238,12 +238,7 @@ static void acpi_video_caps_check(void)
238 238
239bool acpi_video_backlight_quirks(void) 239bool acpi_video_backlight_quirks(void)
240{ 240{
241 if (acpi_gbl_osi_data >= ACPI_OSI_WIN_8) { 241 return acpi_gbl_osi_data >= ACPI_OSI_WIN_8;
242 acpi_video_caps_check();
243 acpi_video_support |= ACPI_VIDEO_SKIP_BACKLIGHT;
244 return true;
245 }
246 return false;
247} 242}
248EXPORT_SYMBOL(acpi_video_backlight_quirks); 243EXPORT_SYMBOL(acpi_video_backlight_quirks);
249 244
@@ -291,14 +286,6 @@ int acpi_video_backlight_support(void)
291} 286}
292EXPORT_SYMBOL(acpi_video_backlight_support); 287EXPORT_SYMBOL(acpi_video_backlight_support);
293 288
294/* For the ACPI video driver use only. */
295bool acpi_video_verify_backlight_support(void)
296{
297 return (acpi_video_support & ACPI_VIDEO_SKIP_BACKLIGHT) ?
298 false : acpi_video_backlight_support();
299}
300EXPORT_SYMBOL(acpi_video_verify_backlight_support);
301
302/* 289/*
303 * Use acpi_backlight=vendor/video to force that backlight switching 290 * Use acpi_backlight=vendor/video to force that backlight switching
304 * is processed by vendor specific acpi drivers or video.ko driver. 291 * is processed by vendor specific acpi drivers or video.ko driver.
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 80dc988f01e4..4e737728aee2 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -97,6 +97,15 @@ config SATA_AHCI_PLATFORM
97 97
98 If unsure, say N. 98 If unsure, say N.
99 99
100config AHCI_IMX
101 tristate "Freescale i.MX AHCI SATA support"
102 depends on SATA_AHCI_PLATFORM && MFD_SYSCON
103 help
104 This option enables support for the Freescale i.MX SoC's
105 onboard AHCI SATA.
106
107 If unsure, say N.
108
100config SATA_FSL 109config SATA_FSL
101 tristate "Freescale 3.0Gbps SATA support" 110 tristate "Freescale 3.0Gbps SATA support"
102 depends on FSL_SOC 111 depends on FSL_SOC
@@ -107,7 +116,7 @@ config SATA_FSL
107 If unsure, say N. 116 If unsure, say N.
108 117
109config SATA_INIC162X 118config SATA_INIC162X
110 tristate "Initio 162x SATA support" 119 tristate "Initio 162x SATA support (Very Experimental)"
111 depends on PCI 120 depends on PCI
112 help 121 help
113 This option enables support for Initio 162x Serial ATA. 122 This option enables support for Initio 162x Serial ATA.
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index c04d0fd038a3..46518c622460 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_SATA_INIC162X) += sata_inic162x.o
10obj-$(CONFIG_SATA_SIL24) += sata_sil24.o 10obj-$(CONFIG_SATA_SIL24) += sata_sil24.o
11obj-$(CONFIG_SATA_DWC) += sata_dwc_460ex.o 11obj-$(CONFIG_SATA_DWC) += sata_dwc_460ex.o
12obj-$(CONFIG_SATA_HIGHBANK) += sata_highbank.o libahci.o 12obj-$(CONFIG_SATA_HIGHBANK) += sata_highbank.o libahci.o
13obj-$(CONFIG_AHCI_IMX) += ahci_imx.o
13 14
14# SFF w/ custom DMA 15# SFF w/ custom DMA
15obj-$(CONFIG_PDC_ADMA) += pdc_adma.o 16obj-$(CONFIG_PDC_ADMA) += pdc_adma.o
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 5064f3ea20f1..db4380d70031 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1146,11 +1146,18 @@ int ahci_host_activate(struct ata_host *host, int irq, unsigned int n_msis)
1146 return rc; 1146 return rc;
1147 1147
1148 for (i = 0; i < host->n_ports; i++) { 1148 for (i = 0; i < host->n_ports; i++) {
1149 const char* desc;
1149 struct ahci_port_priv *pp = host->ports[i]->private_data; 1150 struct ahci_port_priv *pp = host->ports[i]->private_data;
1150 1151
1152 /* pp is NULL for dummy ports */
1153 if (pp)
1154 desc = pp->irq_desc;
1155 else
1156 desc = dev_driver_string(host->dev);
1157
1151 rc = devm_request_threaded_irq(host->dev, 1158 rc = devm_request_threaded_irq(host->dev,
1152 irq + i, ahci_hw_interrupt, ahci_thread_fn, IRQF_SHARED, 1159 irq + i, ahci_hw_interrupt, ahci_thread_fn, IRQF_SHARED,
1153 pp->irq_desc, host->ports[i]); 1160 desc, host->ports[i]);
1154 if (rc) 1161 if (rc)
1155 goto out_free_irqs; 1162 goto out_free_irqs;
1156 } 1163 }
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
new file mode 100644
index 000000000000..58debb0acc3a
--- /dev/null
+++ b/drivers/ata/ahci_imx.c
@@ -0,0 +1,236 @@
1/*
2 * Freescale IMX AHCI SATA platform driver
3 * Copyright 2013 Freescale Semiconductor, Inc.
4 *
5 * based on the AHCI SATA platform driver by Jeff Garzik and Anton Vorontsov
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/platform_device.h>
23#include <linux/regmap.h>
24#include <linux/ahci_platform.h>
25#include <linux/of_device.h>
26#include <linux/mfd/syscon.h>
27#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
28#include "ahci.h"
29
30enum {
31 HOST_TIMER1MS = 0xe0, /* Timer 1-ms */
32};
33
34struct imx_ahci_priv {
35 struct platform_device *ahci_pdev;
36 struct clk *sata_ref_clk;
37 struct clk *ahb_clk;
38 struct regmap *gpr;
39};
40
41static int imx6q_sata_init(struct device *dev, void __iomem *mmio)
42{
43 int ret = 0;
44 unsigned int reg_val;
45 struct imx_ahci_priv *imxpriv = dev_get_drvdata(dev->parent);
46
47 imxpriv->gpr =
48 syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
49 if (IS_ERR(imxpriv->gpr)) {
50 dev_err(dev, "failed to find fsl,imx6q-iomux-gpr regmap\n");
51 return PTR_ERR(imxpriv->gpr);
52 }
53
54 ret = clk_prepare_enable(imxpriv->sata_ref_clk);
55 if (ret < 0) {
56 dev_err(dev, "prepare-enable sata_ref clock err:%d\n", ret);
57 return ret;
58 }
59
60 /*
61 * set PHY Paremeters, two steps to configure the GPR13,
62 * one write for rest of parameters, mask of first write
63 * is 0x07fffffd, and the other one write for setting
64 * the mpll_clk_en.
65 */
66 regmap_update_bits(imxpriv->gpr, 0x34, IMX6Q_GPR13_SATA_RX_EQ_VAL_MASK
67 | IMX6Q_GPR13_SATA_RX_LOS_LVL_MASK
68 | IMX6Q_GPR13_SATA_RX_DPLL_MODE_MASK
69 | IMX6Q_GPR13_SATA_SPD_MODE_MASK
70 | IMX6Q_GPR13_SATA_MPLL_SS_EN
71 | IMX6Q_GPR13_SATA_TX_ATTEN_MASK
72 | IMX6Q_GPR13_SATA_TX_BOOST_MASK
73 | IMX6Q_GPR13_SATA_TX_LVL_MASK
74 | IMX6Q_GPR13_SATA_TX_EDGE_RATE
75 , IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB
76 | IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M
77 | IMX6Q_GPR13_SATA_RX_DPLL_MODE_2P_4F
78 | IMX6Q_GPR13_SATA_SPD_MODE_3P0G
79 | IMX6Q_GPR13_SATA_MPLL_SS_EN
80 | IMX6Q_GPR13_SATA_TX_ATTEN_9_16
81 | IMX6Q_GPR13_SATA_TX_BOOST_3_33_DB
82 | IMX6Q_GPR13_SATA_TX_LVL_1_025_V);
83 regmap_update_bits(imxpriv->gpr, 0x34, IMX6Q_GPR13_SATA_MPLL_CLK_EN,
84 IMX6Q_GPR13_SATA_MPLL_CLK_EN);
85 usleep_range(100, 200);
86
87 /*
88 * Configure the HWINIT bits of the HOST_CAP and HOST_PORTS_IMPL,
89 * and IP vendor specific register HOST_TIMER1MS.
90 * Configure CAP_SSS (support stagered spin up).
91 * Implement the port0.
92 * Get the ahb clock rate, and configure the TIMER1MS register.
93 */
94 reg_val = readl(mmio + HOST_CAP);
95 if (!(reg_val & HOST_CAP_SSS)) {
96 reg_val |= HOST_CAP_SSS;
97 writel(reg_val, mmio + HOST_CAP);
98 }
99 reg_val = readl(mmio + HOST_PORTS_IMPL);
100 if (!(reg_val & 0x1)) {
101 reg_val |= 0x1;
102 writel(reg_val, mmio + HOST_PORTS_IMPL);
103 }
104
105 reg_val = clk_get_rate(imxpriv->ahb_clk) / 1000;
106 writel(reg_val, mmio + HOST_TIMER1MS);
107
108 return 0;
109}
110
111static void imx6q_sata_exit(struct device *dev)
112{
113 struct imx_ahci_priv *imxpriv = dev_get_drvdata(dev->parent);
114
115 regmap_update_bits(imxpriv->gpr, 0x34, IMX6Q_GPR13_SATA_MPLL_CLK_EN,
116 !IMX6Q_GPR13_SATA_MPLL_CLK_EN);
117 clk_disable_unprepare(imxpriv->sata_ref_clk);
118}
119
120static struct ahci_platform_data imx6q_sata_pdata = {
121 .init = imx6q_sata_init,
122 .exit = imx6q_sata_exit,
123};
124
125static const struct of_device_id imx_ahci_of_match[] = {
126 { .compatible = "fsl,imx6q-ahci", .data = &imx6q_sata_pdata},
127 {},
128};
129MODULE_DEVICE_TABLE(of, imx_ahci_of_match);
130
131static int imx_ahci_probe(struct platform_device *pdev)
132{
133 struct device *dev = &pdev->dev;
134 struct resource *mem, *irq, res[2];
135 const struct of_device_id *of_id;
136 const struct ahci_platform_data *pdata = NULL;
137 struct imx_ahci_priv *imxpriv;
138 struct device *ahci_dev;
139 struct platform_device *ahci_pdev;
140 int ret;
141
142 imxpriv = devm_kzalloc(dev, sizeof(*imxpriv), GFP_KERNEL);
143 if (!imxpriv) {
144 dev_err(dev, "can't alloc ahci_host_priv\n");
145 return -ENOMEM;
146 }
147
148 ahci_pdev = platform_device_alloc("ahci", -1);
149 if (!ahci_pdev)
150 return -ENODEV;
151
152 ahci_dev = &ahci_pdev->dev;
153 ahci_dev->parent = dev;
154
155 imxpriv->ahb_clk = devm_clk_get(dev, "ahb");
156 if (IS_ERR(imxpriv->ahb_clk)) {
157 dev_err(dev, "can't get ahb clock.\n");
158 ret = PTR_ERR(imxpriv->ahb_clk);
159 goto err_out;
160 }
161
162 imxpriv->sata_ref_clk = devm_clk_get(dev, "sata_ref");
163 if (IS_ERR(imxpriv->sata_ref_clk)) {
164 dev_err(dev, "can't get sata_ref clock.\n");
165 ret = PTR_ERR(imxpriv->sata_ref_clk);
166 goto err_out;
167 }
168
169 imxpriv->ahci_pdev = ahci_pdev;
170 platform_set_drvdata(pdev, imxpriv);
171
172 of_id = of_match_device(imx_ahci_of_match, dev);
173 if (of_id) {
174 pdata = of_id->data;
175 } else {
176 ret = -EINVAL;
177 goto err_out;
178 }
179
180 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
181 irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
182 if (!mem || !irq) {
183 dev_err(dev, "no mmio/irq resource\n");
184 ret = -ENOMEM;
185 goto err_out;
186 }
187
188 res[0] = *mem;
189 res[1] = *irq;
190
191 ahci_dev->coherent_dma_mask = DMA_BIT_MASK(32);
192 ahci_dev->dma_mask = &ahci_dev->coherent_dma_mask;
193 ahci_dev->of_node = dev->of_node;
194
195 ret = platform_device_add_resources(ahci_pdev, res, 2);
196 if (ret)
197 goto err_out;
198
199 ret = platform_device_add_data(ahci_pdev, pdata, sizeof(*pdata));
200 if (ret)
201 goto err_out;
202
203 ret = platform_device_add(ahci_pdev);
204 if (ret) {
205err_out:
206 platform_device_put(ahci_pdev);
207 return ret;
208 }
209
210 return 0;
211}
212
213static int imx_ahci_remove(struct platform_device *pdev)
214{
215 struct imx_ahci_priv *imxpriv = platform_get_drvdata(pdev);
216 struct platform_device *ahci_pdev = imxpriv->ahci_pdev;
217
218 platform_device_unregister(ahci_pdev);
219 return 0;
220}
221
222static struct platform_driver imx_ahci_driver = {
223 .probe = imx_ahci_probe,
224 .remove = imx_ahci_remove,
225 .driver = {
226 .name = "ahci-imx",
227 .owner = THIS_MODULE,
228 .of_match_table = imx_ahci_of_match,
229 },
230};
231module_platform_driver(imx_ahci_driver);
232
233MODULE_DESCRIPTION("Freescale i.MX AHCI SATA platform driver");
234MODULE_AUTHOR("Richard Zhu <Hong-Xing.Zhu@freescale.com>");
235MODULE_LICENSE("GPL");
236MODULE_ALIAS("ahci:imx");
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index b52a10c8eeb9..513ad7ed0c99 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -330,7 +330,7 @@ static const struct pci_device_id piix_pci_tbl[] = {
330 /* SATA Controller IDE (Wellsburg) */ 330 /* SATA Controller IDE (Wellsburg) */
331 { 0x8086, 0x8d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, 331 { 0x8086, 0x8d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
332 /* SATA Controller IDE (Wellsburg) */ 332 /* SATA Controller IDE (Wellsburg) */
333 { 0x8086, 0x8d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, 333 { 0x8086, 0x8d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb },
334 /* SATA Controller IDE (Wellsburg) */ 334 /* SATA Controller IDE (Wellsburg) */
335 { 0x8086, 0x8d60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, 335 { 0x8086, 0x8d60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
336 /* SATA Controller IDE (Wellsburg) */ 336 /* SATA Controller IDE (Wellsburg) */
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 83c08907e042..b1e880a3c3da 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -206,8 +206,10 @@ static ssize_t ata_scsi_park_store(struct device *device,
206 unsigned long flags; 206 unsigned long flags;
207 int rc; 207 int rc;
208 208
209 rc = strict_strtol(buf, 10, &input); 209 rc = kstrtol(buf, 10, &input);
210 if (rc || input < -2) 210 if (rc)
211 return rc;
212 if (input < -2)
211 return -EINVAL; 213 return -EINVAL;
212 if (input > ATA_TMOUT_MAX_PARK) { 214 if (input > ATA_TMOUT_MAX_PARK) {
213 rc = -EOVERFLOW; 215 rc = -EOVERFLOW;
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index e45131748248..5c54d957370a 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -6,6 +6,18 @@
6 * 6 *
7 * This file is released under GPL v2. 7 * This file is released under GPL v2.
8 * 8 *
9 * **** WARNING ****
10 *
11 * This driver never worked properly and unfortunately data corruption is
12 * relatively common. There isn't anyone working on the driver and there's
13 * no support from the vendor. Do not use this driver in any production
14 * environment.
15 *
16 * http://thread.gmane.org/gmane.linux.debian.devel.bugs.rc/378525/focus=54491
17 * https://bugzilla.kernel.org/show_bug.cgi?id=60565
18 *
19 * *****************
20 *
9 * This controller is eccentric and easily locks up if something isn't 21 * This controller is eccentric and easily locks up if something isn't
10 * right. Documentation is available at initio's website but it only 22 * right. Documentation is available at initio's website but it only
11 * documents registers (not programming model). 23 * documents registers (not programming model).
@@ -807,6 +819,8 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
807 819
808 ata_print_version_once(&pdev->dev, DRV_VERSION); 820 ata_print_version_once(&pdev->dev, DRV_VERSION);
809 821
822 dev_alert(&pdev->dev, "inic162x support is broken with common data corruption issues and will be disabled by default, contact linux-ide@vger.kernel.org if in production use\n");
823
810 /* alloc host */ 824 /* alloc host */
811 host = ata_host_alloc_pinfo(&pdev->dev, ppi, NR_PORTS); 825 host = ata_host_alloc_pinfo(&pdev->dev, ppi, NR_PORTS);
812 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL); 826 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index b012d7600e1a..7cde885011ed 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -103,10 +103,10 @@ struct pstate_adjust_policy {
103static struct pstate_adjust_policy default_policy = { 103static struct pstate_adjust_policy default_policy = {
104 .sample_rate_ms = 10, 104 .sample_rate_ms = 10,
105 .deadband = 0, 105 .deadband = 0,
106 .setpoint = 109, 106 .setpoint = 97,
107 .p_gain_pct = 17, 107 .p_gain_pct = 20,
108 .d_gain_pct = 0, 108 .d_gain_pct = 0,
109 .i_gain_pct = 4, 109 .i_gain_pct = 0,
110}; 110};
111 111
112struct perf_limits { 112struct perf_limits {
@@ -468,12 +468,12 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
468static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu) 468static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu)
469{ 469{
470 int32_t busy_scaled; 470 int32_t busy_scaled;
471 int32_t core_busy, turbo_pstate, current_pstate; 471 int32_t core_busy, max_pstate, current_pstate;
472 472
473 core_busy = int_tofp(cpu->samples[cpu->sample_ptr].core_pct_busy); 473 core_busy = int_tofp(cpu->samples[cpu->sample_ptr].core_pct_busy);
474 turbo_pstate = int_tofp(cpu->pstate.turbo_pstate); 474 max_pstate = int_tofp(cpu->pstate.max_pstate);
475 current_pstate = int_tofp(cpu->pstate.current_pstate); 475 current_pstate = int_tofp(cpu->pstate.current_pstate);
476 busy_scaled = mul_fp(core_busy, div_fp(turbo_pstate, current_pstate)); 476 busy_scaled = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
477 477
478 return fp_toint(busy_scaled); 478 return fp_toint(busy_scaled);
479} 479}
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 5996521a1caf..84573b4d6f92 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -429,7 +429,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
429 dma_addr_t src_dma, dst_dma; 429 dma_addr_t src_dma, dst_dma;
430 int ret = 0; 430 int ret = 0;
431 431
432 desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); 432 desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
433 if (!desc) { 433 if (!desc) {
434 dev_err(jrdev, "unable to allocate key input memory\n"); 434 dev_err(jrdev, "unable to allocate key input memory\n");
435 return -ENOMEM; 435 return -ENOMEM;
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 27e86d938262..89e109022d78 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -48,6 +48,8 @@ static LIST_HEAD(mc_devices);
48 */ 48 */
49static void const *edac_mc_owner; 49static void const *edac_mc_owner;
50 50
51static struct bus_type mc_bus[EDAC_MAX_MCS];
52
51unsigned edac_dimm_info_location(struct dimm_info *dimm, char *buf, 53unsigned edac_dimm_info_location(struct dimm_info *dimm, char *buf,
52 unsigned len) 54 unsigned len)
53{ 55{
@@ -723,6 +725,11 @@ int edac_mc_add_mc(struct mem_ctl_info *mci)
723 int ret = -EINVAL; 725 int ret = -EINVAL;
724 edac_dbg(0, "\n"); 726 edac_dbg(0, "\n");
725 727
728 if (mci->mc_idx >= EDAC_MAX_MCS) {
729 pr_warn_once("Too many memory controllers: %d\n", mci->mc_idx);
730 return -ENODEV;
731 }
732
726#ifdef CONFIG_EDAC_DEBUG 733#ifdef CONFIG_EDAC_DEBUG
727 if (edac_debug_level >= 3) 734 if (edac_debug_level >= 3)
728 edac_mc_dump_mci(mci); 735 edac_mc_dump_mci(mci);
@@ -762,6 +769,8 @@ int edac_mc_add_mc(struct mem_ctl_info *mci)
762 /* set load time so that error rate can be tracked */ 769 /* set load time so that error rate can be tracked */
763 mci->start_time = jiffies; 770 mci->start_time = jiffies;
764 771
772 mci->bus = &mc_bus[mci->mc_idx];
773
765 if (edac_create_sysfs_mci_device(mci)) { 774 if (edac_create_sysfs_mci_device(mci)) {
766 edac_mc_printk(mci, KERN_WARNING, 775 edac_mc_printk(mci, KERN_WARNING,
767 "failed to create sysfs device\n"); 776 "failed to create sysfs device\n");
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index ef15a7e613bc..e7c32c4f7837 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -370,7 +370,7 @@ static int edac_create_csrow_object(struct mem_ctl_info *mci,
370 return -ENODEV; 370 return -ENODEV;
371 371
372 csrow->dev.type = &csrow_attr_type; 372 csrow->dev.type = &csrow_attr_type;
373 csrow->dev.bus = &mci->bus; 373 csrow->dev.bus = mci->bus;
374 device_initialize(&csrow->dev); 374 device_initialize(&csrow->dev);
375 csrow->dev.parent = &mci->dev; 375 csrow->dev.parent = &mci->dev;
376 csrow->mci = mci; 376 csrow->mci = mci;
@@ -605,7 +605,7 @@ static int edac_create_dimm_object(struct mem_ctl_info *mci,
605 dimm->mci = mci; 605 dimm->mci = mci;
606 606
607 dimm->dev.type = &dimm_attr_type; 607 dimm->dev.type = &dimm_attr_type;
608 dimm->dev.bus = &mci->bus; 608 dimm->dev.bus = mci->bus;
609 device_initialize(&dimm->dev); 609 device_initialize(&dimm->dev);
610 610
611 dimm->dev.parent = &mci->dev; 611 dimm->dev.parent = &mci->dev;
@@ -975,11 +975,13 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
975 * The memory controller needs its own bus, in order to avoid 975 * The memory controller needs its own bus, in order to avoid
976 * namespace conflicts at /sys/bus/edac. 976 * namespace conflicts at /sys/bus/edac.
977 */ 977 */
978 mci->bus.name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx); 978 mci->bus->name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx);
979 if (!mci->bus.name) 979 if (!mci->bus->name)
980 return -ENOMEM; 980 return -ENOMEM;
981 edac_dbg(0, "creating bus %s\n", mci->bus.name); 981
982 err = bus_register(&mci->bus); 982 edac_dbg(0, "creating bus %s\n", mci->bus->name);
983
984 err = bus_register(mci->bus);
983 if (err < 0) 985 if (err < 0)
984 return err; 986 return err;
985 987
@@ -988,7 +990,7 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
988 device_initialize(&mci->dev); 990 device_initialize(&mci->dev);
989 991
990 mci->dev.parent = mci_pdev; 992 mci->dev.parent = mci_pdev;
991 mci->dev.bus = &mci->bus; 993 mci->dev.bus = mci->bus;
992 dev_set_name(&mci->dev, "mc%d", mci->mc_idx); 994 dev_set_name(&mci->dev, "mc%d", mci->mc_idx);
993 dev_set_drvdata(&mci->dev, mci); 995 dev_set_drvdata(&mci->dev, mci);
994 pm_runtime_forbid(&mci->dev); 996 pm_runtime_forbid(&mci->dev);
@@ -997,8 +999,8 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
997 err = device_add(&mci->dev); 999 err = device_add(&mci->dev);
998 if (err < 0) { 1000 if (err < 0) {
999 edac_dbg(1, "failure: create device %s\n", dev_name(&mci->dev)); 1001 edac_dbg(1, "failure: create device %s\n", dev_name(&mci->dev));
1000 bus_unregister(&mci->bus); 1002 bus_unregister(mci->bus);
1001 kfree(mci->bus.name); 1003 kfree(mci->bus->name);
1002 return err; 1004 return err;
1003 } 1005 }
1004 1006
@@ -1064,8 +1066,8 @@ fail:
1064 } 1066 }
1065fail2: 1067fail2:
1066 device_unregister(&mci->dev); 1068 device_unregister(&mci->dev);
1067 bus_unregister(&mci->bus); 1069 bus_unregister(mci->bus);
1068 kfree(mci->bus.name); 1070 kfree(mci->bus->name);
1069 return err; 1071 return err;
1070} 1072}
1071 1073
@@ -1098,8 +1100,8 @@ void edac_unregister_sysfs(struct mem_ctl_info *mci)
1098{ 1100{
1099 edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev)); 1101 edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev));
1100 device_unregister(&mci->dev); 1102 device_unregister(&mci->dev);
1101 bus_unregister(&mci->bus); 1103 bus_unregister(mci->bus);
1102 kfree(mci->bus.name); 1104 kfree(mci->bus->name);
1103} 1105}
1104 1106
1105static void mc_attr_release(struct device *dev) 1107static void mc_attr_release(struct device *dev)
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
index 1b635178cc44..157b934e8ce3 100644
--- a/drivers/edac/i5100_edac.c
+++ b/drivers/edac/i5100_edac.c
@@ -974,7 +974,7 @@ static int i5100_setup_debugfs(struct mem_ctl_info *mci)
974 if (!i5100_debugfs) 974 if (!i5100_debugfs)
975 return -ENODEV; 975 return -ENODEV;
976 976
977 priv->debugfs = debugfs_create_dir(mci->bus.name, i5100_debugfs); 977 priv->debugfs = debugfs_create_dir(mci->bus->name, i5100_debugfs);
978 978
979 if (!priv->debugfs) 979 if (!priv->debugfs)
980 return -ENOMEM; 980 return -ENOMEM;
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index cd4246b480d4..955555d6ec88 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -6,7 +6,7 @@
6# 6#
7menuconfig DRM 7menuconfig DRM
8 tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)" 8 tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)"
9 depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU 9 depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU && HAS_DMA
10 select HDMI 10 select HDMI
11 select I2C 11 select I2C
12 select I2C_ALGOBIT 12 select I2C_ALGOBIT
@@ -234,3 +234,5 @@ source "drivers/gpu/drm/omapdrm/Kconfig"
234source "drivers/gpu/drm/tilcdc/Kconfig" 234source "drivers/gpu/drm/tilcdc/Kconfig"
235 235
236source "drivers/gpu/drm/qxl/Kconfig" 236source "drivers/gpu/drm/qxl/Kconfig"
237
238source "drivers/gpu/drm/msm/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 801bcafa3028..f089adfe70ee 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -7,13 +7,13 @@ ccflags-y := -Iinclude/drm
7drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \ 7drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
8 drm_context.o drm_dma.o \ 8 drm_context.o drm_dma.o \
9 drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \ 9 drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
10 drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \ 10 drm_lock.o drm_memory.o drm_stub.o drm_vm.o \
11 drm_agpsupport.o drm_scatter.o drm_pci.o \ 11 drm_agpsupport.o drm_scatter.o drm_pci.o \
12 drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \ 12 drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
13 drm_crtc.o drm_modes.o drm_edid.o \ 13 drm_crtc.o drm_modes.o drm_edid.o \
14 drm_info.o drm_debugfs.o drm_encoder_slave.o \ 14 drm_info.o drm_debugfs.o drm_encoder_slave.o \
15 drm_trace_points.o drm_global.o drm_prime.o \ 15 drm_trace_points.o drm_global.o drm_prime.o \
16 drm_rect.o 16 drm_rect.o drm_vma_manager.o drm_flip_work.o
17 17
18drm-$(CONFIG_COMPAT) += drm_ioc32.o 18drm-$(CONFIG_COMPAT) += drm_ioc32.o
19drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o 19drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
@@ -54,4 +54,5 @@ obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
54obj-$(CONFIG_DRM_OMAP) += omapdrm/ 54obj-$(CONFIG_DRM_OMAP) += omapdrm/
55obj-$(CONFIG_DRM_TILCDC) += tilcdc/ 55obj-$(CONFIG_DRM_TILCDC) += tilcdc/
56obj-$(CONFIG_DRM_QXL) += qxl/ 56obj-$(CONFIG_DRM_QXL) += qxl/
57obj-$(CONFIG_DRM_MSM) += msm/
57obj-y += i2c/ 58obj-y += i2c/
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index df0d0a08097a..32e270dc714e 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -190,7 +190,6 @@ static const struct file_operations ast_fops = {
190 .unlocked_ioctl = drm_ioctl, 190 .unlocked_ioctl = drm_ioctl,
191 .mmap = ast_mmap, 191 .mmap = ast_mmap,
192 .poll = drm_poll, 192 .poll = drm_poll,
193 .fasync = drm_fasync,
194#ifdef CONFIG_COMPAT 193#ifdef CONFIG_COMPAT
195 .compat_ioctl = drm_compat_ioctl, 194 .compat_ioctl = drm_compat_ioctl,
196#endif 195#endif
@@ -198,7 +197,7 @@ static const struct file_operations ast_fops = {
198}; 197};
199 198
200static struct drm_driver driver = { 199static struct drm_driver driver = {
201 .driver_features = DRIVER_USE_MTRR | DRIVER_MODESET | DRIVER_GEM, 200 .driver_features = DRIVER_MODESET | DRIVER_GEM,
202 .dev_priv_size = 0, 201 .dev_priv_size = 0,
203 202
204 .load = ast_driver_load, 203 .load = ast_driver_load,
@@ -216,7 +215,7 @@ static struct drm_driver driver = {
216 .gem_free_object = ast_gem_free_object, 215 .gem_free_object = ast_gem_free_object,
217 .dumb_create = ast_dumb_create, 216 .dumb_create = ast_dumb_create,
218 .dumb_map_offset = ast_dumb_mmap_offset, 217 .dumb_map_offset = ast_dumb_mmap_offset,
219 .dumb_destroy = ast_dumb_destroy, 218 .dumb_destroy = drm_gem_dumb_destroy,
220 219
221}; 220};
222 221
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 622d4ae7eb9e..796dbb212a41 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -322,9 +322,6 @@ ast_bo(struct ttm_buffer_object *bo)
322extern int ast_dumb_create(struct drm_file *file, 322extern int ast_dumb_create(struct drm_file *file,
323 struct drm_device *dev, 323 struct drm_device *dev,
324 struct drm_mode_create_dumb *args); 324 struct drm_mode_create_dumb *args);
325extern int ast_dumb_destroy(struct drm_file *file,
326 struct drm_device *dev,
327 uint32_t handle);
328 325
329extern int ast_gem_init_object(struct drm_gem_object *obj); 326extern int ast_gem_init_object(struct drm_gem_object *obj);
330extern void ast_gem_free_object(struct drm_gem_object *obj); 327extern void ast_gem_free_object(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index f60fd7bd1183..7f6152d374ca 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -449,13 +449,6 @@ int ast_dumb_create(struct drm_file *file,
449 return 0; 449 return 0;
450} 450}
451 451
452int ast_dumb_destroy(struct drm_file *file,
453 struct drm_device *dev,
454 uint32_t handle)
455{
456 return drm_gem_handle_delete(file, handle);
457}
458
459int ast_gem_init_object(struct drm_gem_object *obj) 452int ast_gem_init_object(struct drm_gem_object *obj)
460{ 453{
461 BUG(); 454 BUG();
@@ -487,7 +480,7 @@ void ast_gem_free_object(struct drm_gem_object *obj)
487 480
488static inline u64 ast_bo_mmap_offset(struct ast_bo *bo) 481static inline u64 ast_bo_mmap_offset(struct ast_bo *bo)
489{ 482{
490 return bo->bo.addr_space_offset; 483 return drm_vma_node_offset_addr(&bo->bo.vma_node);
491} 484}
492int 485int
493ast_dumb_mmap_offset(struct drm_file *file, 486ast_dumb_mmap_offset(struct drm_file *file,
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 98d670825a1a..20fcf4ee3af0 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -148,7 +148,9 @@ ast_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
148 148
149static int ast_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp) 149static int ast_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
150{ 150{
151 return 0; 151 struct ast_bo *astbo = ast_bo(bo);
152
153 return drm_vma_node_verify_access(&astbo->gem.vma_node, filp);
152} 154}
153 155
154static int ast_ttm_io_mem_reserve(struct ttm_bo_device *bdev, 156static int ast_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
@@ -321,7 +323,6 @@ int ast_bo_create(struct drm_device *dev, int size, int align,
321 return ret; 323 return ret;
322 } 324 }
323 325
324 astbo->gem.driver_private = NULL;
325 astbo->bo.bdev = &ast->ttm.bdev; 326 astbo->bo.bdev = &ast->ttm.bdev;
326 327
327 ast_ttm_placement(astbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM); 328 ast_ttm_placement(astbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index 8ecb601152ef..138364d91782 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -85,10 +85,9 @@ static const struct file_operations cirrus_driver_fops = {
85#ifdef CONFIG_COMPAT 85#ifdef CONFIG_COMPAT
86 .compat_ioctl = drm_compat_ioctl, 86 .compat_ioctl = drm_compat_ioctl,
87#endif 87#endif
88 .fasync = drm_fasync,
89}; 88};
90static struct drm_driver driver = { 89static struct drm_driver driver = {
91 .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_USE_MTRR, 90 .driver_features = DRIVER_MODESET | DRIVER_GEM,
92 .load = cirrus_driver_load, 91 .load = cirrus_driver_load,
93 .unload = cirrus_driver_unload, 92 .unload = cirrus_driver_unload,
94 .fops = &cirrus_driver_fops, 93 .fops = &cirrus_driver_fops,
@@ -102,7 +101,7 @@ static struct drm_driver driver = {
102 .gem_free_object = cirrus_gem_free_object, 101 .gem_free_object = cirrus_gem_free_object,
103 .dumb_create = cirrus_dumb_create, 102 .dumb_create = cirrus_dumb_create,
104 .dumb_map_offset = cirrus_dumb_mmap_offset, 103 .dumb_map_offset = cirrus_dumb_mmap_offset,
105 .dumb_destroy = cirrus_dumb_destroy, 104 .dumb_destroy = drm_gem_dumb_destroy,
106}; 105};
107 106
108static struct pci_driver cirrus_pci_driver = { 107static struct pci_driver cirrus_pci_driver = {
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index bae55609e6c3..9b0bb9184afd 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -203,9 +203,6 @@ int cirrus_gem_create(struct drm_device *dev,
203int cirrus_dumb_create(struct drm_file *file, 203int cirrus_dumb_create(struct drm_file *file,
204 struct drm_device *dev, 204 struct drm_device *dev,
205 struct drm_mode_create_dumb *args); 205 struct drm_mode_create_dumb *args);
206int cirrus_dumb_destroy(struct drm_file *file,
207 struct drm_device *dev,
208 uint32_t handle);
209 206
210int cirrus_framebuffer_init(struct drm_device *dev, 207int cirrus_framebuffer_init(struct drm_device *dev,
211 struct cirrus_framebuffer *gfb, 208 struct cirrus_framebuffer *gfb,
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index 35cbae827771..f130a533a512 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -255,13 +255,6 @@ int cirrus_dumb_create(struct drm_file *file,
255 return 0; 255 return 0;
256} 256}
257 257
258int cirrus_dumb_destroy(struct drm_file *file,
259 struct drm_device *dev,
260 uint32_t handle)
261{
262 return drm_gem_handle_delete(file, handle);
263}
264
265int cirrus_gem_init_object(struct drm_gem_object *obj) 258int cirrus_gem_init_object(struct drm_gem_object *obj)
266{ 259{
267 BUG(); 260 BUG();
@@ -294,7 +287,7 @@ void cirrus_gem_free_object(struct drm_gem_object *obj)
294 287
295static inline u64 cirrus_bo_mmap_offset(struct cirrus_bo *bo) 288static inline u64 cirrus_bo_mmap_offset(struct cirrus_bo *bo)
296{ 289{
297 return bo->bo.addr_space_offset; 290 return drm_vma_node_offset_addr(&bo->bo.vma_node);
298} 291}
299 292
300int 293int
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 0047012045c2..ae2385cc71cb 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -148,7 +148,9 @@ cirrus_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
148 148
149static int cirrus_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp) 149static int cirrus_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
150{ 150{
151 return 0; 151 struct cirrus_bo *cirrusbo = cirrus_bo(bo);
152
153 return drm_vma_node_verify_access(&cirrusbo->gem.vma_node, filp);
152} 154}
153 155
154static int cirrus_ttm_io_mem_reserve(struct ttm_bo_device *bdev, 156static int cirrus_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
@@ -326,7 +328,6 @@ int cirrus_bo_create(struct drm_device *dev, int size, int align,
326 return ret; 328 return ret;
327 } 329 }
328 330
329 cirrusbo->gem.driver_private = NULL;
330 cirrusbo->bo.bdev = &cirrus->ttm.bdev; 331 cirrusbo->bo.bdev = &cirrus->ttm.bdev;
331 332
332 cirrus_ttm_placement(cirrusbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM); 333 cirrus_ttm_placement(cirrusbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index 3d8fed179797..e301d653d97e 100644
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -424,6 +424,57 @@ struct drm_agp_head *drm_agp_init(struct drm_device *dev)
424} 424}
425 425
426/** 426/**
427 * drm_agp_clear - Clear AGP resource list
428 * @dev: DRM device
429 *
430 * Iterate over all AGP resources and remove them. But keep the AGP head
431 * intact so it can still be used. It is safe to call this if AGP is disabled or
432 * was already removed.
433 *
434 * If DRIVER_MODESET is active, nothing is done to protect the modesetting
435 * resources from getting destroyed. Drivers are responsible of cleaning them up
436 * during device shutdown.
437 */
438void drm_agp_clear(struct drm_device *dev)
439{
440 struct drm_agp_mem *entry, *tempe;
441
442 if (!drm_core_has_AGP(dev) || !dev->agp)
443 return;
444 if (drm_core_check_feature(dev, DRIVER_MODESET))
445 return;
446
447 list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
448 if (entry->bound)
449 drm_unbind_agp(entry->memory);
450 drm_free_agp(entry->memory, entry->pages);
451 kfree(entry);
452 }
453 INIT_LIST_HEAD(&dev->agp->memory);
454
455 if (dev->agp->acquired)
456 drm_agp_release(dev);
457
458 dev->agp->acquired = 0;
459 dev->agp->enabled = 0;
460}
461
462/**
463 * drm_agp_destroy - Destroy AGP head
464 * @dev: DRM device
465 *
466 * Destroy resources that were previously allocated via drm_agp_initp. Caller
467 * must ensure to clean up all AGP resources before calling this. See
468 * drm_agp_clear().
469 *
470 * Call this to destroy AGP heads allocated via drm_agp_init().
471 */
472void drm_agp_destroy(struct drm_agp_head *agp)
473{
474 kfree(agp);
475}
476
477/**
427 * Binds a collection of pages into AGP memory at the given offset, returning 478 * Binds a collection of pages into AGP memory at the given offset, returning
428 * the AGP memory structure containing them. 479 * the AGP memory structure containing them.
429 * 480 *
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 5a4dbb410b71..471e051d295e 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -207,12 +207,10 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
207 return 0; 207 return 0;
208 } 208 }
209 209
210 if (drm_core_has_MTRR(dev)) { 210 if (map->type == _DRM_FRAME_BUFFER ||
211 if (map->type == _DRM_FRAME_BUFFER || 211 (map->flags & _DRM_WRITE_COMBINING)) {
212 (map->flags & _DRM_WRITE_COMBINING)) { 212 map->mtrr =
213 map->mtrr = 213 arch_phys_wc_add(map->offset, map->size);
214 arch_phys_wc_add(map->offset, map->size);
215 }
216 } 214 }
217 if (map->type == _DRM_REGISTERS) { 215 if (map->type == _DRM_REGISTERS) {
218 if (map->flags & _DRM_WRITE_COMBINING) 216 if (map->flags & _DRM_WRITE_COMBINING)
@@ -243,7 +241,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
243 } 241 }
244 map->handle = vmalloc_user(map->size); 242 map->handle = vmalloc_user(map->size);
245 DRM_DEBUG("%lu %d %p\n", 243 DRM_DEBUG("%lu %d %p\n",
246 map->size, drm_order(map->size), map->handle); 244 map->size, order_base_2(map->size), map->handle);
247 if (!map->handle) { 245 if (!map->handle) {
248 kfree(map); 246 kfree(map);
249 return -ENOMEM; 247 return -ENOMEM;
@@ -464,8 +462,7 @@ int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
464 iounmap(map->handle); 462 iounmap(map->handle);
465 /* FALLTHROUGH */ 463 /* FALLTHROUGH */
466 case _DRM_FRAME_BUFFER: 464 case _DRM_FRAME_BUFFER:
467 if (drm_core_has_MTRR(dev)) 465 arch_phys_wc_del(map->mtrr);
468 arch_phys_wc_del(map->mtrr);
469 break; 466 break;
470 case _DRM_SHM: 467 case _DRM_SHM:
471 vfree(map->handle); 468 vfree(map->handle);
@@ -630,7 +627,7 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
630 return -EINVAL; 627 return -EINVAL;
631 628
632 count = request->count; 629 count = request->count;
633 order = drm_order(request->size); 630 order = order_base_2(request->size);
634 size = 1 << order; 631 size = 1 << order;
635 632
636 alignment = (request->flags & _DRM_PAGE_ALIGN) 633 alignment = (request->flags & _DRM_PAGE_ALIGN)
@@ -800,7 +797,7 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
800 return -EPERM; 797 return -EPERM;
801 798
802 count = request->count; 799 count = request->count;
803 order = drm_order(request->size); 800 order = order_base_2(request->size);
804 size = 1 << order; 801 size = 1 << order;
805 802
806 DRM_DEBUG("count=%d, size=%d (%d), order=%d\n", 803 DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
@@ -1002,7 +999,7 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
1002 return -EPERM; 999 return -EPERM;
1003 1000
1004 count = request->count; 1001 count = request->count;
1005 order = drm_order(request->size); 1002 order = order_base_2(request->size);
1006 size = 1 << order; 1003 size = 1 << order;
1007 1004
1008 alignment = (request->flags & _DRM_PAGE_ALIGN) 1005 alignment = (request->flags & _DRM_PAGE_ALIGN)
@@ -1130,161 +1127,6 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
1130 return 0; 1127 return 0;
1131} 1128}
1132 1129
1133static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request)
1134{
1135 struct drm_device_dma *dma = dev->dma;
1136 struct drm_buf_entry *entry;
1137 struct drm_buf *buf;
1138 unsigned long offset;
1139 unsigned long agp_offset;
1140 int count;
1141 int order;
1142 int size;
1143 int alignment;
1144 int page_order;
1145 int total;
1146 int byte_count;
1147 int i;
1148 struct drm_buf **temp_buflist;
1149
1150 if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
1151 return -EINVAL;
1152
1153 if (!dma)
1154 return -EINVAL;
1155
1156 if (!capable(CAP_SYS_ADMIN))
1157 return -EPERM;
1158
1159 count = request->count;
1160 order = drm_order(request->size);
1161 size = 1 << order;
1162
1163 alignment = (request->flags & _DRM_PAGE_ALIGN)
1164 ? PAGE_ALIGN(size) : size;
1165 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
1166 total = PAGE_SIZE << page_order;
1167
1168 byte_count = 0;
1169 agp_offset = request->agp_start;
1170
1171 DRM_DEBUG("count: %d\n", count);
1172 DRM_DEBUG("order: %d\n", order);
1173 DRM_DEBUG("size: %d\n", size);
1174 DRM_DEBUG("agp_offset: %lu\n", agp_offset);
1175 DRM_DEBUG("alignment: %d\n", alignment);
1176 DRM_DEBUG("page_order: %d\n", page_order);
1177 DRM_DEBUG("total: %d\n", total);
1178
1179 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1180 return -EINVAL;
1181
1182 spin_lock(&dev->count_lock);
1183 if (dev->buf_use) {
1184 spin_unlock(&dev->count_lock);
1185 return -EBUSY;
1186 }
1187 atomic_inc(&dev->buf_alloc);
1188 spin_unlock(&dev->count_lock);
1189
1190 mutex_lock(&dev->struct_mutex);
1191 entry = &dma->bufs[order];
1192 if (entry->buf_count) {
1193 mutex_unlock(&dev->struct_mutex);
1194 atomic_dec(&dev->buf_alloc);
1195 return -ENOMEM; /* May only call once for each order */
1196 }
1197
1198 if (count < 0 || count > 4096) {
1199 mutex_unlock(&dev->struct_mutex);
1200 atomic_dec(&dev->buf_alloc);
1201 return -EINVAL;
1202 }
1203
1204 entry->buflist = kzalloc(count * sizeof(*entry->buflist),
1205 GFP_KERNEL);
1206 if (!entry->buflist) {
1207 mutex_unlock(&dev->struct_mutex);
1208 atomic_dec(&dev->buf_alloc);
1209 return -ENOMEM;
1210 }
1211
1212 entry->buf_size = size;
1213 entry->page_order = page_order;
1214
1215 offset = 0;
1216
1217 while (entry->buf_count < count) {
1218 buf = &entry->buflist[entry->buf_count];
1219 buf->idx = dma->buf_count + entry->buf_count;
1220 buf->total = alignment;
1221 buf->order = order;
1222 buf->used = 0;
1223
1224 buf->offset = (dma->byte_count + offset);
1225 buf->bus_address = agp_offset + offset;
1226 buf->address = (void *)(agp_offset + offset);
1227 buf->next = NULL;
1228 buf->waiting = 0;
1229 buf->pending = 0;
1230 buf->file_priv = NULL;
1231
1232 buf->dev_priv_size = dev->driver->dev_priv_size;
1233 buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
1234 if (!buf->dev_private) {
1235 /* Set count correctly so we free the proper amount. */
1236 entry->buf_count = count;
1237 drm_cleanup_buf_error(dev, entry);
1238 mutex_unlock(&dev->struct_mutex);
1239 atomic_dec(&dev->buf_alloc);
1240 return -ENOMEM;
1241 }
1242
1243 DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
1244
1245 offset += alignment;
1246 entry->buf_count++;
1247 byte_count += PAGE_SIZE << page_order;
1248 }
1249
1250 DRM_DEBUG("byte_count: %d\n", byte_count);
1251
1252 temp_buflist = krealloc(dma->buflist,
1253 (dma->buf_count + entry->buf_count) *
1254 sizeof(*dma->buflist), GFP_KERNEL);
1255 if (!temp_buflist) {
1256 /* Free the entry because it isn't valid */
1257 drm_cleanup_buf_error(dev, entry);
1258 mutex_unlock(&dev->struct_mutex);
1259 atomic_dec(&dev->buf_alloc);
1260 return -ENOMEM;
1261 }
1262 dma->buflist = temp_buflist;
1263
1264 for (i = 0; i < entry->buf_count; i++) {
1265 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
1266 }
1267
1268 dma->buf_count += entry->buf_count;
1269 dma->seg_count += entry->seg_count;
1270 dma->page_count += byte_count >> PAGE_SHIFT;
1271 dma->byte_count += byte_count;
1272
1273 DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
1274 DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
1275
1276 mutex_unlock(&dev->struct_mutex);
1277
1278 request->count = entry->buf_count;
1279 request->size = size;
1280
1281 dma->flags = _DRM_DMA_USE_FB;
1282
1283 atomic_dec(&dev->buf_alloc);
1284 return 0;
1285}
1286
1287
1288/** 1130/**
1289 * Add buffers for DMA transfers (ioctl). 1131 * Add buffers for DMA transfers (ioctl).
1290 * 1132 *
@@ -1305,6 +1147,9 @@ int drm_addbufs(struct drm_device *dev, void *data,
1305 struct drm_buf_desc *request = data; 1147 struct drm_buf_desc *request = data;
1306 int ret; 1148 int ret;
1307 1149
1150 if (drm_core_check_feature(dev, DRIVER_MODESET))
1151 return -EINVAL;
1152
1308 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) 1153 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1309 return -EINVAL; 1154 return -EINVAL;
1310 1155
@@ -1316,7 +1161,7 @@ int drm_addbufs(struct drm_device *dev, void *data,
1316 if (request->flags & _DRM_SG_BUFFER) 1161 if (request->flags & _DRM_SG_BUFFER)
1317 ret = drm_addbufs_sg(dev, request); 1162 ret = drm_addbufs_sg(dev, request);
1318 else if (request->flags & _DRM_FB_BUFFER) 1163 else if (request->flags & _DRM_FB_BUFFER)
1319 ret = drm_addbufs_fb(dev, request); 1164 ret = -EINVAL;
1320 else 1165 else
1321 ret = drm_addbufs_pci(dev, request); 1166 ret = drm_addbufs_pci(dev, request);
1322 1167
@@ -1348,6 +1193,9 @@ int drm_infobufs(struct drm_device *dev, void *data,
1348 int i; 1193 int i;
1349 int count; 1194 int count;
1350 1195
1196 if (drm_core_check_feature(dev, DRIVER_MODESET))
1197 return -EINVAL;
1198
1351 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) 1199 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1352 return -EINVAL; 1200 return -EINVAL;
1353 1201
@@ -1427,6 +1275,9 @@ int drm_markbufs(struct drm_device *dev, void *data,
1427 int order; 1275 int order;
1428 struct drm_buf_entry *entry; 1276 struct drm_buf_entry *entry;
1429 1277
1278 if (drm_core_check_feature(dev, DRIVER_MODESET))
1279 return -EINVAL;
1280
1430 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) 1281 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1431 return -EINVAL; 1282 return -EINVAL;
1432 1283
@@ -1435,7 +1286,7 @@ int drm_markbufs(struct drm_device *dev, void *data,
1435 1286
1436 DRM_DEBUG("%d, %d, %d\n", 1287 DRM_DEBUG("%d, %d, %d\n",
1437 request->size, request->low_mark, request->high_mark); 1288 request->size, request->low_mark, request->high_mark);
1438 order = drm_order(request->size); 1289 order = order_base_2(request->size);
1439 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) 1290 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1440 return -EINVAL; 1291 return -EINVAL;
1441 entry = &dma->bufs[order]; 1292 entry = &dma->bufs[order];
@@ -1472,6 +1323,9 @@ int drm_freebufs(struct drm_device *dev, void *data,
1472 int idx; 1323 int idx;
1473 struct drm_buf *buf; 1324 struct drm_buf *buf;
1474 1325
1326 if (drm_core_check_feature(dev, DRIVER_MODESET))
1327 return -EINVAL;
1328
1475 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) 1329 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1476 return -EINVAL; 1330 return -EINVAL;
1477 1331
@@ -1524,6 +1378,9 @@ int drm_mapbufs(struct drm_device *dev, void *data,
1524 struct drm_buf_map *request = data; 1378 struct drm_buf_map *request = data;
1525 int i; 1379 int i;
1526 1380
1381 if (drm_core_check_feature(dev, DRIVER_MODESET))
1382 return -EINVAL;
1383
1527 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) 1384 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1528 return -EINVAL; 1385 return -EINVAL;
1529 1386
@@ -1541,9 +1398,7 @@ int drm_mapbufs(struct drm_device *dev, void *data,
1541 if (request->count >= dma->buf_count) { 1398 if (request->count >= dma->buf_count) {
1542 if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) 1399 if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
1543 || (drm_core_check_feature(dev, DRIVER_SG) 1400 || (drm_core_check_feature(dev, DRIVER_SG)
1544 && (dma->flags & _DRM_DMA_USE_SG)) 1401 && (dma->flags & _DRM_DMA_USE_SG))) {
1545 || (drm_core_check_feature(dev, DRIVER_FB_DMA)
1546 && (dma->flags & _DRM_DMA_USE_FB))) {
1547 struct drm_local_map *map = dev->agp_buffer_map; 1402 struct drm_local_map *map = dev->agp_buffer_map;
1548 unsigned long token = dev->agp_buffer_token; 1403 unsigned long token = dev->agp_buffer_token;
1549 1404
@@ -1600,25 +1455,28 @@ int drm_mapbufs(struct drm_device *dev, void *data,
1600 return retcode; 1455 return retcode;
1601} 1456}
1602 1457
1603/** 1458int drm_dma_ioctl(struct drm_device *dev, void *data,
1604 * Compute size order. Returns the exponent of the smaller power of two which 1459 struct drm_file *file_priv)
1605 * is greater or equal to given number.
1606 *
1607 * \param size size.
1608 * \return order.
1609 *
1610 * \todo Can be made faster.
1611 */
1612int drm_order(unsigned long size)
1613{ 1460{
1614 int order; 1461 if (drm_core_check_feature(dev, DRIVER_MODESET))
1615 unsigned long tmp; 1462 return -EINVAL;
1616 1463
1617 for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ; 1464 if (dev->driver->dma_ioctl)
1465 return dev->driver->dma_ioctl(dev, data, file_priv);
1466 else
1467 return -EINVAL;
1468}
1618 1469
1619 if (size & (size - 1)) 1470struct drm_local_map *drm_getsarea(struct drm_device *dev)
1620 ++order; 1471{
1472 struct drm_map_list *entry;
1621 1473
1622 return order; 1474 list_for_each_entry(entry, &dev->maplist, head) {
1475 if (entry->map && entry->map->type == _DRM_SHM &&
1476 (entry->map->flags & _DRM_CONTAINS_LOCK)) {
1477 return entry->map;
1478 }
1479 }
1480 return NULL;
1623} 1481}
1624EXPORT_SYMBOL(drm_order); 1482EXPORT_SYMBOL(drm_getsarea);
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index 725968d38976..b4fb86d89850 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -42,10 +42,6 @@
42 42
43#include <drm/drmP.h> 43#include <drm/drmP.h>
44 44
45/******************************************************************/
46/** \name Context bitmap support */
47/*@{*/
48
49/** 45/**
50 * Free a handle from the context bitmap. 46 * Free a handle from the context bitmap.
51 * 47 *
@@ -56,13 +52,48 @@
56 * in drm_device::ctx_idr, while holding the drm_device::struct_mutex 52 * in drm_device::ctx_idr, while holding the drm_device::struct_mutex
57 * lock. 53 * lock.
58 */ 54 */
59void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle) 55static void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
60{ 56{
57 if (drm_core_check_feature(dev, DRIVER_MODESET))
58 return;
59
61 mutex_lock(&dev->struct_mutex); 60 mutex_lock(&dev->struct_mutex);
62 idr_remove(&dev->ctx_idr, ctx_handle); 61 idr_remove(&dev->ctx_idr, ctx_handle);
63 mutex_unlock(&dev->struct_mutex); 62 mutex_unlock(&dev->struct_mutex);
64} 63}
65 64
65/******************************************************************/
66/** \name Context bitmap support */
67/*@{*/
68
69void drm_legacy_ctxbitmap_release(struct drm_device *dev,
70 struct drm_file *file_priv)
71{
72 if (drm_core_check_feature(dev, DRIVER_MODESET))
73 return;
74
75 mutex_lock(&dev->ctxlist_mutex);
76 if (!list_empty(&dev->ctxlist)) {
77 struct drm_ctx_list *pos, *n;
78
79 list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
80 if (pos->tag == file_priv &&
81 pos->handle != DRM_KERNEL_CONTEXT) {
82 if (dev->driver->context_dtor)
83 dev->driver->context_dtor(dev,
84 pos->handle);
85
86 drm_ctxbitmap_free(dev, pos->handle);
87
88 list_del(&pos->head);
89 kfree(pos);
90 --dev->ctx_count;
91 }
92 }
93 }
94 mutex_unlock(&dev->ctxlist_mutex);
95}
96
66/** 97/**
67 * Context bitmap allocation. 98 * Context bitmap allocation.
68 * 99 *
@@ -90,10 +121,12 @@ static int drm_ctxbitmap_next(struct drm_device * dev)
90 * 121 *
91 * Initialise the drm_device::ctx_idr 122 * Initialise the drm_device::ctx_idr
92 */ 123 */
93int drm_ctxbitmap_init(struct drm_device * dev) 124void drm_legacy_ctxbitmap_init(struct drm_device * dev)
94{ 125{
126 if (drm_core_check_feature(dev, DRIVER_MODESET))
127 return;
128
95 idr_init(&dev->ctx_idr); 129 idr_init(&dev->ctx_idr);
96 return 0;
97} 130}
98 131
99/** 132/**
@@ -104,7 +137,7 @@ int drm_ctxbitmap_init(struct drm_device * dev)
104 * Free all idr members using drm_ctx_sarea_free helper function 137 * Free all idr members using drm_ctx_sarea_free helper function
105 * while holding the drm_device::struct_mutex lock. 138 * while holding the drm_device::struct_mutex lock.
106 */ 139 */
107void drm_ctxbitmap_cleanup(struct drm_device * dev) 140void drm_legacy_ctxbitmap_cleanup(struct drm_device * dev)
108{ 141{
109 mutex_lock(&dev->struct_mutex); 142 mutex_lock(&dev->struct_mutex);
110 idr_destroy(&dev->ctx_idr); 143 idr_destroy(&dev->ctx_idr);
@@ -136,6 +169,9 @@ int drm_getsareactx(struct drm_device *dev, void *data,
136 struct drm_local_map *map; 169 struct drm_local_map *map;
137 struct drm_map_list *_entry; 170 struct drm_map_list *_entry;
138 171
172 if (drm_core_check_feature(dev, DRIVER_MODESET))
173 return -EINVAL;
174
139 mutex_lock(&dev->struct_mutex); 175 mutex_lock(&dev->struct_mutex);
140 176
141 map = idr_find(&dev->ctx_idr, request->ctx_id); 177 map = idr_find(&dev->ctx_idr, request->ctx_id);
@@ -180,6 +216,9 @@ int drm_setsareactx(struct drm_device *dev, void *data,
180 struct drm_local_map *map = NULL; 216 struct drm_local_map *map = NULL;
181 struct drm_map_list *r_list = NULL; 217 struct drm_map_list *r_list = NULL;
182 218
219 if (drm_core_check_feature(dev, DRIVER_MODESET))
220 return -EINVAL;
221
183 mutex_lock(&dev->struct_mutex); 222 mutex_lock(&dev->struct_mutex);
184 list_for_each_entry(r_list, &dev->maplist, head) { 223 list_for_each_entry(r_list, &dev->maplist, head) {
185 if (r_list->map 224 if (r_list->map
@@ -251,7 +290,6 @@ static int drm_context_switch_complete(struct drm_device *dev,
251 struct drm_file *file_priv, int new) 290 struct drm_file *file_priv, int new)
252{ 291{
253 dev->last_context = new; /* PRE/POST: This is the _only_ writer. */ 292 dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
254 dev->last_switch = jiffies;
255 293
256 if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) { 294 if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) {
257 DRM_ERROR("Lock isn't held after context switch\n"); 295 DRM_ERROR("Lock isn't held after context switch\n");
@@ -261,7 +299,6 @@ static int drm_context_switch_complete(struct drm_device *dev,
261 when the kernel holds the lock, release 299 when the kernel holds the lock, release
262 that lock here. */ 300 that lock here. */
263 clear_bit(0, &dev->context_flag); 301 clear_bit(0, &dev->context_flag);
264 wake_up(&dev->context_wait);
265 302
266 return 0; 303 return 0;
267} 304}
@@ -282,6 +319,9 @@ int drm_resctx(struct drm_device *dev, void *data,
282 struct drm_ctx ctx; 319 struct drm_ctx ctx;
283 int i; 320 int i;
284 321
322 if (drm_core_check_feature(dev, DRIVER_MODESET))
323 return -EINVAL;
324
285 if (res->count >= DRM_RESERVED_CONTEXTS) { 325 if (res->count >= DRM_RESERVED_CONTEXTS) {
286 memset(&ctx, 0, sizeof(ctx)); 326 memset(&ctx, 0, sizeof(ctx));
287 for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) { 327 for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
@@ -312,6 +352,9 @@ int drm_addctx(struct drm_device *dev, void *data,
312 struct drm_ctx_list *ctx_entry; 352 struct drm_ctx_list *ctx_entry;
313 struct drm_ctx *ctx = data; 353 struct drm_ctx *ctx = data;
314 354
355 if (drm_core_check_feature(dev, DRIVER_MODESET))
356 return -EINVAL;
357
315 ctx->handle = drm_ctxbitmap_next(dev); 358 ctx->handle = drm_ctxbitmap_next(dev);
316 if (ctx->handle == DRM_KERNEL_CONTEXT) { 359 if (ctx->handle == DRM_KERNEL_CONTEXT) {
317 /* Skip kernel's context and get a new one. */ 360 /* Skip kernel's context and get a new one. */
@@ -342,12 +385,6 @@ int drm_addctx(struct drm_device *dev, void *data,
342 return 0; 385 return 0;
343} 386}
344 387
345int drm_modctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
346{
347 /* This does nothing */
348 return 0;
349}
350
351/** 388/**
352 * Get context. 389 * Get context.
353 * 390 *
@@ -361,6 +398,9 @@ int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
361{ 398{
362 struct drm_ctx *ctx = data; 399 struct drm_ctx *ctx = data;
363 400
401 if (drm_core_check_feature(dev, DRIVER_MODESET))
402 return -EINVAL;
403
364 /* This is 0, because we don't handle any context flags */ 404 /* This is 0, because we don't handle any context flags */
365 ctx->flags = 0; 405 ctx->flags = 0;
366 406
@@ -383,6 +423,9 @@ int drm_switchctx(struct drm_device *dev, void *data,
383{ 423{
384 struct drm_ctx *ctx = data; 424 struct drm_ctx *ctx = data;
385 425
426 if (drm_core_check_feature(dev, DRIVER_MODESET))
427 return -EINVAL;
428
386 DRM_DEBUG("%d\n", ctx->handle); 429 DRM_DEBUG("%d\n", ctx->handle);
387 return drm_context_switch(dev, dev->last_context, ctx->handle); 430 return drm_context_switch(dev, dev->last_context, ctx->handle);
388} 431}
@@ -403,6 +446,9 @@ int drm_newctx(struct drm_device *dev, void *data,
403{ 446{
404 struct drm_ctx *ctx = data; 447 struct drm_ctx *ctx = data;
405 448
449 if (drm_core_check_feature(dev, DRIVER_MODESET))
450 return -EINVAL;
451
406 DRM_DEBUG("%d\n", ctx->handle); 452 DRM_DEBUG("%d\n", ctx->handle);
407 drm_context_switch_complete(dev, file_priv, ctx->handle); 453 drm_context_switch_complete(dev, file_priv, ctx->handle);
408 454
@@ -425,6 +471,9 @@ int drm_rmctx(struct drm_device *dev, void *data,
425{ 471{
426 struct drm_ctx *ctx = data; 472 struct drm_ctx *ctx = data;
427 473
474 if (drm_core_check_feature(dev, DRIVER_MODESET))
475 return -EINVAL;
476
428 DRM_DEBUG("%d\n", ctx->handle); 477 DRM_DEBUG("%d\n", ctx->handle);
429 if (ctx->handle != DRM_KERNEL_CONTEXT) { 478 if (ctx->handle != DRM_KERNEL_CONTEXT) {
430 if (dev->driver->context_dtor) 479 if (dev->driver->context_dtor)
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index fc83bb9eb514..452591b67996 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -125,13 +125,6 @@ static const struct drm_prop_enum_list drm_scaling_mode_enum_list[] =
125 { DRM_MODE_SCALE_ASPECT, "Full aspect" }, 125 { DRM_MODE_SCALE_ASPECT, "Full aspect" },
126}; 126};
127 127
128static const struct drm_prop_enum_list drm_dithering_mode_enum_list[] =
129{
130 { DRM_MODE_DITHERING_OFF, "Off" },
131 { DRM_MODE_DITHERING_ON, "On" },
132 { DRM_MODE_DITHERING_AUTO, "Automatic" },
133};
134
135/* 128/*
136 * Non-global properties, but "required" for certain connectors. 129 * Non-global properties, but "required" for certain connectors.
137 */ 130 */
@@ -186,29 +179,29 @@ static const struct drm_prop_enum_list drm_dirty_info_enum_list[] = {
186struct drm_conn_prop_enum_list { 179struct drm_conn_prop_enum_list {
187 int type; 180 int type;
188 const char *name; 181 const char *name;
189 int count; 182 struct ida ida;
190}; 183};
191 184
192/* 185/*
193 * Connector and encoder types. 186 * Connector and encoder types.
194 */ 187 */
195static struct drm_conn_prop_enum_list drm_connector_enum_list[] = 188static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
196{ { DRM_MODE_CONNECTOR_Unknown, "Unknown", 0 }, 189{ { DRM_MODE_CONNECTOR_Unknown, "Unknown" },
197 { DRM_MODE_CONNECTOR_VGA, "VGA", 0 }, 190 { DRM_MODE_CONNECTOR_VGA, "VGA" },
198 { DRM_MODE_CONNECTOR_DVII, "DVI-I", 0 }, 191 { DRM_MODE_CONNECTOR_DVII, "DVI-I" },
199 { DRM_MODE_CONNECTOR_DVID, "DVI-D", 0 }, 192 { DRM_MODE_CONNECTOR_DVID, "DVI-D" },
200 { DRM_MODE_CONNECTOR_DVIA, "DVI-A", 0 }, 193 { DRM_MODE_CONNECTOR_DVIA, "DVI-A" },
201 { DRM_MODE_CONNECTOR_Composite, "Composite", 0 }, 194 { DRM_MODE_CONNECTOR_Composite, "Composite" },
202 { DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO", 0 }, 195 { DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO" },
203 { DRM_MODE_CONNECTOR_LVDS, "LVDS", 0 }, 196 { DRM_MODE_CONNECTOR_LVDS, "LVDS" },
204 { DRM_MODE_CONNECTOR_Component, "Component", 0 }, 197 { DRM_MODE_CONNECTOR_Component, "Component" },
205 { DRM_MODE_CONNECTOR_9PinDIN, "DIN", 0 }, 198 { DRM_MODE_CONNECTOR_9PinDIN, "DIN" },
206 { DRM_MODE_CONNECTOR_DisplayPort, "DP", 0 }, 199 { DRM_MODE_CONNECTOR_DisplayPort, "DP" },
207 { DRM_MODE_CONNECTOR_HDMIA, "HDMI-A", 0 }, 200 { DRM_MODE_CONNECTOR_HDMIA, "HDMI-A" },
208 { DRM_MODE_CONNECTOR_HDMIB, "HDMI-B", 0 }, 201 { DRM_MODE_CONNECTOR_HDMIB, "HDMI-B" },
209 { DRM_MODE_CONNECTOR_TV, "TV", 0 }, 202 { DRM_MODE_CONNECTOR_TV, "TV" },
210 { DRM_MODE_CONNECTOR_eDP, "eDP", 0 }, 203 { DRM_MODE_CONNECTOR_eDP, "eDP" },
211 { DRM_MODE_CONNECTOR_VIRTUAL, "Virtual", 0}, 204 { DRM_MODE_CONNECTOR_VIRTUAL, "Virtual" },
212}; 205};
213 206
214static const struct drm_prop_enum_list drm_encoder_enum_list[] = 207static const struct drm_prop_enum_list drm_encoder_enum_list[] =
@@ -220,6 +213,22 @@ static const struct drm_prop_enum_list drm_encoder_enum_list[] =
220 { DRM_MODE_ENCODER_VIRTUAL, "Virtual" }, 213 { DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
221}; 214};
222 215
216void drm_connector_ida_init(void)
217{
218 int i;
219
220 for (i = 0; i < ARRAY_SIZE(drm_connector_enum_list); i++)
221 ida_init(&drm_connector_enum_list[i].ida);
222}
223
224void drm_connector_ida_destroy(void)
225{
226 int i;
227
228 for (i = 0; i < ARRAY_SIZE(drm_connector_enum_list); i++)
229 ida_destroy(&drm_connector_enum_list[i].ida);
230}
231
223const char *drm_get_encoder_name(const struct drm_encoder *encoder) 232const char *drm_get_encoder_name(const struct drm_encoder *encoder)
224{ 233{
225 static char buf[32]; 234 static char buf[32];
@@ -677,20 +686,19 @@ void drm_mode_probed_add(struct drm_connector *connector,
677} 686}
678EXPORT_SYMBOL(drm_mode_probed_add); 687EXPORT_SYMBOL(drm_mode_probed_add);
679 688
680/** 689/*
681 * drm_mode_remove - remove and free a mode 690 * drm_mode_remove - remove and free a mode
682 * @connector: connector list to modify 691 * @connector: connector list to modify
683 * @mode: mode to remove 692 * @mode: mode to remove
684 * 693 *
685 * Remove @mode from @connector's mode list, then free it. 694 * Remove @mode from @connector's mode list, then free it.
686 */ 695 */
687void drm_mode_remove(struct drm_connector *connector, 696static void drm_mode_remove(struct drm_connector *connector,
688 struct drm_display_mode *mode) 697 struct drm_display_mode *mode)
689{ 698{
690 list_del(&mode->head); 699 list_del(&mode->head);
691 drm_mode_destroy(connector->dev, mode); 700 drm_mode_destroy(connector->dev, mode);
692} 701}
693EXPORT_SYMBOL(drm_mode_remove);
694 702
695/** 703/**
696 * drm_connector_init - Init a preallocated connector 704 * drm_connector_init - Init a preallocated connector
@@ -711,6 +719,8 @@ int drm_connector_init(struct drm_device *dev,
711 int connector_type) 719 int connector_type)
712{ 720{
713 int ret; 721 int ret;
722 struct ida *connector_ida =
723 &drm_connector_enum_list[connector_type].ida;
714 724
715 drm_modeset_lock_all(dev); 725 drm_modeset_lock_all(dev);
716 726
@@ -723,7 +733,12 @@ int drm_connector_init(struct drm_device *dev,
723 connector->funcs = funcs; 733 connector->funcs = funcs;
724 connector->connector_type = connector_type; 734 connector->connector_type = connector_type;
725 connector->connector_type_id = 735 connector->connector_type_id =
726 ++drm_connector_enum_list[connector_type].count; /* TODO */ 736 ida_simple_get(connector_ida, 1, 0, GFP_KERNEL);
737 if (connector->connector_type_id < 0) {
738 ret = connector->connector_type_id;
739 drm_mode_object_put(dev, &connector->base);
740 goto out;
741 }
727 INIT_LIST_HEAD(&connector->probed_modes); 742 INIT_LIST_HEAD(&connector->probed_modes);
728 INIT_LIST_HEAD(&connector->modes); 743 INIT_LIST_HEAD(&connector->modes);
729 connector->edid_blob_ptr = NULL; 744 connector->edid_blob_ptr = NULL;
@@ -764,6 +779,9 @@ void drm_connector_cleanup(struct drm_connector *connector)
764 list_for_each_entry_safe(mode, t, &connector->modes, head) 779 list_for_each_entry_safe(mode, t, &connector->modes, head)
765 drm_mode_remove(connector, mode); 780 drm_mode_remove(connector, mode);
766 781
782 ida_remove(&drm_connector_enum_list[connector->connector_type].ida,
783 connector->connector_type_id);
784
767 drm_mode_object_put(dev, &connector->base); 785 drm_mode_object_put(dev, &connector->base);
768 list_del(&connector->head); 786 list_del(&connector->head);
769 dev->mode_config.num_connector--; 787 dev->mode_config.num_connector--;
@@ -1135,30 +1153,6 @@ int drm_mode_create_scaling_mode_property(struct drm_device *dev)
1135EXPORT_SYMBOL(drm_mode_create_scaling_mode_property); 1153EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
1136 1154
1137/** 1155/**
1138 * drm_mode_create_dithering_property - create dithering property
1139 * @dev: DRM device
1140 *
1141 * Called by a driver the first time it's needed, must be attached to desired
1142 * connectors.
1143 */
1144int drm_mode_create_dithering_property(struct drm_device *dev)
1145{
1146 struct drm_property *dithering_mode;
1147
1148 if (dev->mode_config.dithering_mode_property)
1149 return 0;
1150
1151 dithering_mode =
1152 drm_property_create_enum(dev, 0, "dithering",
1153 drm_dithering_mode_enum_list,
1154 ARRAY_SIZE(drm_dithering_mode_enum_list));
1155 dev->mode_config.dithering_mode_property = dithering_mode;
1156
1157 return 0;
1158}
1159EXPORT_SYMBOL(drm_mode_create_dithering_property);
1160
1161/**
1162 * drm_mode_create_dirty_property - create dirty property 1156 * drm_mode_create_dirty_property - create dirty property
1163 * @dev: DRM device 1157 * @dev: DRM device
1164 * 1158 *
@@ -3514,6 +3508,9 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
3514 page_flip->reserved != 0) 3508 page_flip->reserved != 0)
3515 return -EINVAL; 3509 return -EINVAL;
3516 3510
3511 if ((page_flip->flags & DRM_MODE_PAGE_FLIP_ASYNC) && !dev->mode_config.async_page_flip)
3512 return -EINVAL;
3513
3517 obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC); 3514 obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC);
3518 if (!obj) 3515 if (!obj)
3519 return -EINVAL; 3516 return -EINVAL;
@@ -3587,7 +3584,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
3587 } 3584 }
3588 3585
3589 old_fb = crtc->fb; 3586 old_fb = crtc->fb;
3590 ret = crtc->funcs->page_flip(crtc, fb, e); 3587 ret = crtc->funcs->page_flip(crtc, fb, e, page_flip->flags);
3591 if (ret) { 3588 if (ret) {
3592 if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) { 3589 if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
3593 spin_lock_irqsave(&dev->event_lock, flags); 3590 spin_lock_irqsave(&dev->event_lock, flags);
diff --git a/drivers/gpu/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c
index 495b5fd2787c..8a140a953754 100644
--- a/drivers/gpu/drm/drm_dma.c
+++ b/drivers/gpu/drm/drm_dma.c
@@ -44,10 +44,18 @@
44 * 44 *
45 * Allocate and initialize a drm_device_dma structure. 45 * Allocate and initialize a drm_device_dma structure.
46 */ 46 */
47int drm_dma_setup(struct drm_device *dev) 47int drm_legacy_dma_setup(struct drm_device *dev)
48{ 48{
49 int i; 49 int i;
50 50
51 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA) ||
52 drm_core_check_feature(dev, DRIVER_MODESET)) {
53 return 0;
54 }
55
56 dev->buf_use = 0;
57 atomic_set(&dev->buf_alloc, 0);
58
51 dev->dma = kzalloc(sizeof(*dev->dma), GFP_KERNEL); 59 dev->dma = kzalloc(sizeof(*dev->dma), GFP_KERNEL);
52 if (!dev->dma) 60 if (!dev->dma)
53 return -ENOMEM; 61 return -ENOMEM;
@@ -66,11 +74,16 @@ int drm_dma_setup(struct drm_device *dev)
66 * Free all pages associated with DMA buffers, the buffers and pages lists, and 74 * Free all pages associated with DMA buffers, the buffers and pages lists, and
67 * finally the drm_device::dma structure itself. 75 * finally the drm_device::dma structure itself.
68 */ 76 */
69void drm_dma_takedown(struct drm_device *dev) 77void drm_legacy_dma_takedown(struct drm_device *dev)
70{ 78{
71 struct drm_device_dma *dma = dev->dma; 79 struct drm_device_dma *dma = dev->dma;
72 int i, j; 80 int i, j;
73 81
82 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA) ||
83 drm_core_check_feature(dev, DRIVER_MODESET)) {
84 return;
85 }
86
74 if (!dma) 87 if (!dma)
75 return; 88 return;
76 89
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 99fcd7c32ea2..e572dd20bdee 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -68,7 +68,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
68 DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, DRM_UNLOCKED), 68 DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, DRM_UNLOCKED),
69 DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED), 69 DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
70 DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED), 70 DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
71 DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED), 71 DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW),
72 DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER), 72 DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
73 73
74 DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 74 DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -87,7 +87,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
87 87
88 DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY), 88 DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
89 DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 89 DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
90 DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 90 DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
91 DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH), 91 DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH),
92 DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 92 DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
93 DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 93 DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -106,8 +106,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
106 DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH), 106 DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
107 DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH), 107 DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
108 DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH), 108 DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
109 /* The DRM_IOCTL_DMA ioctl should be defined by the driver. */ 109 DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_dma_ioctl, DRM_AUTH),
110 DRM_IOCTL_DEF(DRM_IOCTL_DMA, NULL, DRM_AUTH),
111 110
112 DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 111 DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
113 112
@@ -122,7 +121,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
122 DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 121 DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
123#endif 122#endif
124 123
125 DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 124 DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
126 DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 125 DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
127 126
128 DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED), 127 DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED),
@@ -131,14 +130,14 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
131 130
132 DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 131 DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
133 132
134 DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED), 133 DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
135 DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED), 134 DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
136 DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED), 135 DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),
137 136
138 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 137 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
139 138
140 DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED), 139 DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
141 DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED), 140 DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
142 141
143 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 142 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
144 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 143 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
@@ -172,6 +171,31 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
172#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) 171#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
173 172
174/** 173/**
174 * drm_legacy_dev_reinit
175 *
176 * Reinitializes a legacy/ums drm device in it's lastclose function.
177 */
178static void drm_legacy_dev_reinit(struct drm_device *dev)
179{
180 int i;
181
182 if (drm_core_check_feature(dev, DRIVER_MODESET))
183 return;
184
185 atomic_set(&dev->ioctl_count, 0);
186 atomic_set(&dev->vma_count, 0);
187
188 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
189 atomic_set(&dev->counts[i], 0);
190
191 dev->sigdata.lock = NULL;
192
193 dev->context_flag = 0;
194 dev->last_context = 0;
195 dev->if_version = 0;
196}
197
198/**
175 * Take down the DRM device. 199 * Take down the DRM device.
176 * 200 *
177 * \param dev DRM device structure. 201 * \param dev DRM device structure.
@@ -195,32 +219,9 @@ int drm_lastclose(struct drm_device * dev)
195 219
196 mutex_lock(&dev->struct_mutex); 220 mutex_lock(&dev->struct_mutex);
197 221
198 /* Clear AGP information */ 222 drm_agp_clear(dev);
199 if (drm_core_has_AGP(dev) && dev->agp &&
200 !drm_core_check_feature(dev, DRIVER_MODESET)) {
201 struct drm_agp_mem *entry, *tempe;
202
203 /* Remove AGP resources, but leave dev->agp
204 intact until drv_cleanup is called. */
205 list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
206 if (entry->bound)
207 drm_unbind_agp(entry->memory);
208 drm_free_agp(entry->memory, entry->pages);
209 kfree(entry);
210 }
211 INIT_LIST_HEAD(&dev->agp->memory);
212 223
213 if (dev->agp->acquired) 224 drm_legacy_sg_cleanup(dev);
214 drm_agp_release(dev);
215
216 dev->agp->acquired = 0;
217 dev->agp->enabled = 0;
218 }
219 if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg &&
220 !drm_core_check_feature(dev, DRIVER_MODESET)) {
221 drm_sg_cleanup(dev->sg);
222 dev->sg = NULL;
223 }
224 225
225 /* Clear vma list (only built for debugging) */ 226 /* Clear vma list (only built for debugging) */
226 list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) { 227 list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
@@ -228,13 +229,13 @@ int drm_lastclose(struct drm_device * dev)
228 kfree(vma); 229 kfree(vma);
229 } 230 }
230 231
231 if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) && 232 drm_legacy_dma_takedown(dev);
232 !drm_core_check_feature(dev, DRIVER_MODESET))
233 drm_dma_takedown(dev);
234 233
235 dev->dev_mapping = NULL; 234 dev->dev_mapping = NULL;
236 mutex_unlock(&dev->struct_mutex); 235 mutex_unlock(&dev->struct_mutex);
237 236
237 drm_legacy_dev_reinit(dev);
238
238 DRM_DEBUG("lastclose completed\n"); 239 DRM_DEBUG("lastclose completed\n");
239 return 0; 240 return 0;
240} 241}
@@ -251,6 +252,7 @@ static int __init drm_core_init(void)
251 int ret = -ENOMEM; 252 int ret = -ENOMEM;
252 253
253 drm_global_init(); 254 drm_global_init();
255 drm_connector_ida_init();
254 idr_init(&drm_minors_idr); 256 idr_init(&drm_minors_idr);
255 257
256 if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops)) 258 if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
@@ -263,13 +265,6 @@ static int __init drm_core_init(void)
263 goto err_p2; 265 goto err_p2;
264 } 266 }
265 267
266 drm_proc_root = proc_mkdir("dri", NULL);
267 if (!drm_proc_root) {
268 DRM_ERROR("Cannot create /proc/dri\n");
269 ret = -1;
270 goto err_p3;
271 }
272
273 drm_debugfs_root = debugfs_create_dir("dri", NULL); 268 drm_debugfs_root = debugfs_create_dir("dri", NULL);
274 if (!drm_debugfs_root) { 269 if (!drm_debugfs_root) {
275 DRM_ERROR("Cannot create /sys/kernel/debug/dri\n"); 270 DRM_ERROR("Cannot create /sys/kernel/debug/dri\n");
@@ -292,12 +287,12 @@ err_p1:
292 287
293static void __exit drm_core_exit(void) 288static void __exit drm_core_exit(void)
294{ 289{
295 remove_proc_entry("dri", NULL);
296 debugfs_remove(drm_debugfs_root); 290 debugfs_remove(drm_debugfs_root);
297 drm_sysfs_destroy(); 291 drm_sysfs_destroy();
298 292
299 unregister_chrdev(DRM_MAJOR, "drm"); 293 unregister_chrdev(DRM_MAJOR, "drm");
300 294
295 drm_connector_ida_destroy();
301 idr_destroy(&drm_minors_idr); 296 idr_destroy(&drm_minors_idr);
302} 297}
303 298
@@ -420,17 +415,15 @@ long drm_ioctl(struct file *filp,
420 415
421 /* Do not trust userspace, use our own definition */ 416 /* Do not trust userspace, use our own definition */
422 func = ioctl->func; 417 func = ioctl->func;
423 /* is there a local override? */
424 if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl)
425 func = dev->driver->dma_ioctl;
426 418
427 if (!func) { 419 if (!func) {
428 DRM_DEBUG("no function\n"); 420 DRM_DEBUG("no function\n");
429 retcode = -EINVAL; 421 retcode = -EINVAL;
430 } else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) || 422 } else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) ||
431 ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) || 423 ((ioctl->flags & DRM_AUTH) && !drm_is_render_client(file_priv) && !file_priv->authenticated) ||
432 ((ioctl->flags & DRM_MASTER) && !file_priv->is_master) || 424 ((ioctl->flags & DRM_MASTER) && !file_priv->is_master) ||
433 (!(ioctl->flags & DRM_CONTROL_ALLOW) && (file_priv->minor->type == DRM_MINOR_CONTROL))) { 425 (!(ioctl->flags & DRM_CONTROL_ALLOW) && (file_priv->minor->type == DRM_MINOR_CONTROL)) ||
426 (!(ioctl->flags & DRM_RENDER_ALLOW) && drm_is_render_client(file_priv))) {
434 retcode = -EACCES; 427 retcode = -EACCES;
435 } else { 428 } else {
436 if (cmd & (IOC_IN | IOC_OUT)) { 429 if (cmd & (IOC_IN | IOC_OUT)) {
@@ -485,19 +478,4 @@ long drm_ioctl(struct file *filp,
485 DRM_DEBUG("ret = %d\n", retcode); 478 DRM_DEBUG("ret = %d\n", retcode);
486 return retcode; 479 return retcode;
487} 480}
488
489EXPORT_SYMBOL(drm_ioctl); 481EXPORT_SYMBOL(drm_ioctl);
490
491struct drm_local_map *drm_getsarea(struct drm_device *dev)
492{
493 struct drm_map_list *entry;
494
495 list_for_each_entry(entry, &dev->maplist, head) {
496 if (entry->map && entry->map->type == _DRM_SHM &&
497 (entry->map->flags & _DRM_CONTAINS_LOCK)) {
498 return entry->map;
499 }
500 }
501 return NULL;
502}
503EXPORT_SYMBOL(drm_getsarea);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index dfc7a1ba9360..a207cc3f2c57 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -931,6 +931,36 @@ static const struct drm_display_mode edid_cea_modes[] = {
931 .vrefresh = 100, }, 931 .vrefresh = 100, },
932}; 932};
933 933
934/*
935 * HDMI 1.4 4k modes.
936 */
937static const struct drm_display_mode edid_4k_modes[] = {
938 /* 1 - 3840x2160@30Hz */
939 { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000,
940 3840, 4016, 4104, 4400, 0,
941 2160, 2168, 2178, 2250, 0,
942 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
943 .vrefresh = 30, },
944 /* 2 - 3840x2160@25Hz */
945 { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000,
946 3840, 4896, 4984, 5280, 0,
947 2160, 2168, 2178, 2250, 0,
948 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
949 .vrefresh = 25, },
950 /* 3 - 3840x2160@24Hz */
951 { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000,
952 3840, 5116, 5204, 5500, 0,
953 2160, 2168, 2178, 2250, 0,
954 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
955 .vrefresh = 24, },
956 /* 4 - 4096x2160@24Hz (SMPTE) */
957 { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000,
958 4096, 5116, 5204, 5500, 0,
959 2160, 2168, 2178, 2250, 0,
960 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
961 .vrefresh = 24, },
962};
963
934/*** DDC fetch and block validation ***/ 964/*** DDC fetch and block validation ***/
935 965
936static const u8 edid_header[] = { 966static const u8 edid_header[] = {
@@ -2287,7 +2317,6 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
2287 return closure.modes; 2317 return closure.modes;
2288} 2318}
2289 2319
2290#define HDMI_IDENTIFIER 0x000C03
2291#define AUDIO_BLOCK 0x01 2320#define AUDIO_BLOCK 0x01
2292#define VIDEO_BLOCK 0x02 2321#define VIDEO_BLOCK 0x02
2293#define VENDOR_BLOCK 0x03 2322#define VENDOR_BLOCK 0x03
@@ -2298,10 +2327,10 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
2298#define EDID_CEA_YCRCB422 (1 << 4) 2327#define EDID_CEA_YCRCB422 (1 << 4)
2299#define EDID_CEA_VCDB_QS (1 << 6) 2328#define EDID_CEA_VCDB_QS (1 << 6)
2300 2329
2301/** 2330/*
2302 * Search EDID for CEA extension block. 2331 * Search EDID for CEA extension block.
2303 */ 2332 */
2304u8 *drm_find_cea_extension(struct edid *edid) 2333static u8 *drm_find_cea_extension(struct edid *edid)
2305{ 2334{
2306 u8 *edid_ext = NULL; 2335 u8 *edid_ext = NULL;
2307 int i; 2336 int i;
@@ -2322,7 +2351,6 @@ u8 *drm_find_cea_extension(struct edid *edid)
2322 2351
2323 return edid_ext; 2352 return edid_ext;
2324} 2353}
2325EXPORT_SYMBOL(drm_find_cea_extension);
2326 2354
2327/* 2355/*
2328 * Calculate the alternate clock for the CEA mode 2356 * Calculate the alternate clock for the CEA mode
@@ -2380,6 +2408,54 @@ u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
2380} 2408}
2381EXPORT_SYMBOL(drm_match_cea_mode); 2409EXPORT_SYMBOL(drm_match_cea_mode);
2382 2410
2411/*
2412 * Calculate the alternate clock for HDMI modes (those from the HDMI vendor
2413 * specific block).
2414 *
2415 * It's almost like cea_mode_alternate_clock(), we just need to add an
2416 * exception for the VIC 4 mode (4096x2160@24Hz): no alternate clock for this
2417 * one.
2418 */
2419static unsigned int
2420hdmi_mode_alternate_clock(const struct drm_display_mode *hdmi_mode)
2421{
2422 if (hdmi_mode->vdisplay == 4096 && hdmi_mode->hdisplay == 2160)
2423 return hdmi_mode->clock;
2424
2425 return cea_mode_alternate_clock(hdmi_mode);
2426}
2427
2428/*
2429 * drm_match_hdmi_mode - look for a HDMI mode matching given mode
2430 * @to_match: display mode
2431 *
2432 * An HDMI mode is one defined in the HDMI vendor specific block.
2433 *
2434 * Returns the HDMI Video ID (VIC) of the mode or 0 if it isn't one.
2435 */
2436static u8 drm_match_hdmi_mode(const struct drm_display_mode *to_match)
2437{
2438 u8 mode;
2439
2440 if (!to_match->clock)
2441 return 0;
2442
2443 for (mode = 0; mode < ARRAY_SIZE(edid_4k_modes); mode++) {
2444 const struct drm_display_mode *hdmi_mode = &edid_4k_modes[mode];
2445 unsigned int clock1, clock2;
2446
2447 /* Make sure to also match alternate clocks */
2448 clock1 = hdmi_mode->clock;
2449 clock2 = hdmi_mode_alternate_clock(hdmi_mode);
2450
2451 if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
2452 KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
2453 drm_mode_equal_no_clocks(to_match, hdmi_mode))
2454 return mode + 1;
2455 }
2456 return 0;
2457}
2458
2383static int 2459static int
2384add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid) 2460add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid)
2385{ 2461{
@@ -2397,18 +2473,26 @@ add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid)
2397 * with the alternate clock for certain CEA modes. 2473 * with the alternate clock for certain CEA modes.
2398 */ 2474 */
2399 list_for_each_entry(mode, &connector->probed_modes, head) { 2475 list_for_each_entry(mode, &connector->probed_modes, head) {
2400 const struct drm_display_mode *cea_mode; 2476 const struct drm_display_mode *cea_mode = NULL;
2401 struct drm_display_mode *newmode; 2477 struct drm_display_mode *newmode;
2402 u8 cea_mode_idx = drm_match_cea_mode(mode) - 1; 2478 u8 mode_idx = drm_match_cea_mode(mode) - 1;
2403 unsigned int clock1, clock2; 2479 unsigned int clock1, clock2;
2404 2480
2405 if (cea_mode_idx >= ARRAY_SIZE(edid_cea_modes)) 2481 if (mode_idx < ARRAY_SIZE(edid_cea_modes)) {
2406 continue; 2482 cea_mode = &edid_cea_modes[mode_idx];
2483 clock2 = cea_mode_alternate_clock(cea_mode);
2484 } else {
2485 mode_idx = drm_match_hdmi_mode(mode) - 1;
2486 if (mode_idx < ARRAY_SIZE(edid_4k_modes)) {
2487 cea_mode = &edid_4k_modes[mode_idx];
2488 clock2 = hdmi_mode_alternate_clock(cea_mode);
2489 }
2490 }
2407 2491
2408 cea_mode = &edid_cea_modes[cea_mode_idx]; 2492 if (!cea_mode)
2493 continue;
2409 2494
2410 clock1 = cea_mode->clock; 2495 clock1 = cea_mode->clock;
2411 clock2 = cea_mode_alternate_clock(cea_mode);
2412 2496
2413 if (clock1 == clock2) 2497 if (clock1 == clock2)
2414 continue; 2498 continue;
@@ -2442,10 +2526,11 @@ add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid)
2442} 2526}
2443 2527
2444static int 2528static int
2445do_cea_modes (struct drm_connector *connector, u8 *db, u8 len) 2529do_cea_modes(struct drm_connector *connector, const u8 *db, u8 len)
2446{ 2530{
2447 struct drm_device *dev = connector->dev; 2531 struct drm_device *dev = connector->dev;
2448 u8 * mode, cea_mode; 2532 const u8 *mode;
2533 u8 cea_mode;
2449 int modes = 0; 2534 int modes = 0;
2450 2535
2451 for (mode = db; mode < db + len; mode++) { 2536 for (mode = db; mode < db + len; mode++) {
@@ -2465,6 +2550,68 @@ do_cea_modes (struct drm_connector *connector, u8 *db, u8 len)
2465 return modes; 2550 return modes;
2466} 2551}
2467 2552
2553/*
2554 * do_hdmi_vsdb_modes - Parse the HDMI Vendor Specific data block
2555 * @connector: connector corresponding to the HDMI sink
2556 * @db: start of the CEA vendor specific block
2557 * @len: length of the CEA block payload, ie. one can access up to db[len]
2558 *
2559 * Parses the HDMI VSDB looking for modes to add to @connector.
2560 */
2561static int
2562do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len)
2563{
2564 struct drm_device *dev = connector->dev;
2565 int modes = 0, offset = 0, i;
2566 u8 vic_len;
2567
2568 if (len < 8)
2569 goto out;
2570
2571 /* no HDMI_Video_Present */
2572 if (!(db[8] & (1 << 5)))
2573 goto out;
2574
2575 /* Latency_Fields_Present */
2576 if (db[8] & (1 << 7))
2577 offset += 2;
2578
2579 /* I_Latency_Fields_Present */
2580 if (db[8] & (1 << 6))
2581 offset += 2;
2582
2583 /* the declared length is not long enough for the 2 first bytes
2584 * of additional video format capabilities */
2585 offset += 2;
2586 if (len < (8 + offset))
2587 goto out;
2588
2589 vic_len = db[8 + offset] >> 5;
2590
2591 for (i = 0; i < vic_len && len >= (9 + offset + i); i++) {
2592 struct drm_display_mode *newmode;
2593 u8 vic;
2594
2595 vic = db[9 + offset + i];
2596
2597 vic--; /* VICs start at 1 */
2598 if (vic >= ARRAY_SIZE(edid_4k_modes)) {
2599 DRM_ERROR("Unknown HDMI VIC: %d\n", vic);
2600 continue;
2601 }
2602
2603 newmode = drm_mode_duplicate(dev, &edid_4k_modes[vic]);
2604 if (!newmode)
2605 continue;
2606
2607 drm_mode_probed_add(connector, newmode);
2608 modes++;
2609 }
2610
2611out:
2612 return modes;
2613}
2614
2468static int 2615static int
2469cea_db_payload_len(const u8 *db) 2616cea_db_payload_len(const u8 *db)
2470{ 2617{
@@ -2496,14 +2643,30 @@ cea_db_offsets(const u8 *cea, int *start, int *end)
2496 return 0; 2643 return 0;
2497} 2644}
2498 2645
2646static bool cea_db_is_hdmi_vsdb(const u8 *db)
2647{
2648 int hdmi_id;
2649
2650 if (cea_db_tag(db) != VENDOR_BLOCK)
2651 return false;
2652
2653 if (cea_db_payload_len(db) < 5)
2654 return false;
2655
2656 hdmi_id = db[1] | (db[2] << 8) | (db[3] << 16);
2657
2658 return hdmi_id == HDMI_IEEE_OUI;
2659}
2660
2499#define for_each_cea_db(cea, i, start, end) \ 2661#define for_each_cea_db(cea, i, start, end) \
2500 for ((i) = (start); (i) < (end) && (i) + cea_db_payload_len(&(cea)[(i)]) < (end); (i) += cea_db_payload_len(&(cea)[(i)]) + 1) 2662 for ((i) = (start); (i) < (end) && (i) + cea_db_payload_len(&(cea)[(i)]) < (end); (i) += cea_db_payload_len(&(cea)[(i)]) + 1)
2501 2663
2502static int 2664static int
2503add_cea_modes(struct drm_connector *connector, struct edid *edid) 2665add_cea_modes(struct drm_connector *connector, struct edid *edid)
2504{ 2666{
2505 u8 * cea = drm_find_cea_extension(edid); 2667 const u8 *cea = drm_find_cea_extension(edid);
2506 u8 * db, dbl; 2668 const u8 *db;
2669 u8 dbl;
2507 int modes = 0; 2670 int modes = 0;
2508 2671
2509 if (cea && cea_revision(cea) >= 3) { 2672 if (cea && cea_revision(cea) >= 3) {
@@ -2517,7 +2680,9 @@ add_cea_modes(struct drm_connector *connector, struct edid *edid)
2517 dbl = cea_db_payload_len(db); 2680 dbl = cea_db_payload_len(db);
2518 2681
2519 if (cea_db_tag(db) == VIDEO_BLOCK) 2682 if (cea_db_tag(db) == VIDEO_BLOCK)
2520 modes += do_cea_modes (connector, db+1, dbl); 2683 modes += do_cea_modes(connector, db + 1, dbl);
2684 else if (cea_db_is_hdmi_vsdb(db))
2685 modes += do_hdmi_vsdb_modes(connector, db, dbl);
2521 } 2686 }
2522 } 2687 }
2523 2688
@@ -2570,21 +2735,6 @@ monitor_name(struct detailed_timing *t, void *data)
2570 *(u8 **)data = t->data.other_data.data.str.str; 2735 *(u8 **)data = t->data.other_data.data.str.str;
2571} 2736}
2572 2737
2573static bool cea_db_is_hdmi_vsdb(const u8 *db)
2574{
2575 int hdmi_id;
2576
2577 if (cea_db_tag(db) != VENDOR_BLOCK)
2578 return false;
2579
2580 if (cea_db_payload_len(db) < 5)
2581 return false;
2582
2583 hdmi_id = db[1] | (db[2] << 8) | (db[3] << 16);
2584
2585 return hdmi_id == HDMI_IDENTIFIER;
2586}
2587
2588/** 2738/**
2589 * drm_edid_to_eld - build ELD from EDID 2739 * drm_edid_to_eld - build ELD from EDID
2590 * @connector: connector corresponding to the HDMI/DP sink 2740 * @connector: connector corresponding to the HDMI/DP sink
@@ -3108,9 +3258,44 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
3108 frame->video_code = drm_match_cea_mode(mode); 3258 frame->video_code = drm_match_cea_mode(mode);
3109 3259
3110 frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE; 3260 frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE;
3111 frame->active_info_valid = 1;
3112 frame->active_aspect = HDMI_ACTIVE_ASPECT_PICTURE; 3261 frame->active_aspect = HDMI_ACTIVE_ASPECT_PICTURE;
3113 3262
3114 return 0; 3263 return 0;
3115} 3264}
3116EXPORT_SYMBOL(drm_hdmi_avi_infoframe_from_display_mode); 3265EXPORT_SYMBOL(drm_hdmi_avi_infoframe_from_display_mode);
3266
3267/**
3268 * drm_hdmi_vendor_infoframe_from_display_mode() - fill an HDMI infoframe with
3269 * data from a DRM display mode
3270 * @frame: HDMI vendor infoframe
3271 * @mode: DRM display mode
3272 *
3273 * Note that there's is a need to send HDMI vendor infoframes only when using a
3274 * 4k or stereoscopic 3D mode. So when giving any other mode as input this
3275 * function will return -EINVAL, error that can be safely ignored.
3276 *
3277 * Returns 0 on success or a negative error code on failure.
3278 */
3279int
3280drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
3281 const struct drm_display_mode *mode)
3282{
3283 int err;
3284 u8 vic;
3285
3286 if (!frame || !mode)
3287 return -EINVAL;
3288
3289 vic = drm_match_hdmi_mode(mode);
3290 if (!vic)
3291 return -EINVAL;
3292
3293 err = hdmi_vendor_infoframe_init(frame);
3294 if (err < 0)
3295 return err;
3296
3297 frame->vic = vic;
3298
3299 return 0;
3300}
3301EXPORT_SYMBOL(drm_hdmi_vendor_infoframe_from_display_mode);
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index c385cc5e730e..61b5a47ad239 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -181,11 +181,11 @@ struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
181EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj); 181EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj);
182 182
183#ifdef CONFIG_DEBUG_FS 183#ifdef CONFIG_DEBUG_FS
184/** 184/*
185 * drm_fb_cma_describe() - Helper to dump information about a single 185 * drm_fb_cma_describe() - Helper to dump information about a single
186 * CMA framebuffer object 186 * CMA framebuffer object
187 */ 187 */
188void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m) 188static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
189{ 189{
190 struct drm_fb_cma *fb_cma = to_fb_cma(fb); 190 struct drm_fb_cma *fb_cma = to_fb_cma(fb);
191 int i, n = drm_format_num_planes(fb->pixel_format); 191 int i, n = drm_format_num_planes(fb->pixel_format);
@@ -199,7 +199,6 @@ void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
199 drm_gem_cma_describe(fb_cma->obj[i], m); 199 drm_gem_cma_describe(fb_cma->obj[i], m);
200 } 200 }
201} 201}
202EXPORT_SYMBOL_GPL(drm_fb_cma_describe);
203 202
204/** 203/**
205 * drm_fb_cma_debugfs_show() - Helper to list CMA framebuffer objects 204 * drm_fb_cma_debugfs_show() - Helper to list CMA framebuffer objects
diff --git a/drivers/gpu/drm/drm_flip_work.c b/drivers/gpu/drm/drm_flip_work.c
new file mode 100644
index 000000000000..e788882d9021
--- /dev/null
+++ b/drivers/gpu/drm/drm_flip_work.c
@@ -0,0 +1,124 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24#include "drmP.h"
25#include "drm_flip_work.h"
26
27/**
28 * drm_flip_work_queue - queue work
29 * @work: the flip-work
30 * @val: the value to queue
31 *
32 * Queues work, that will later be run (passed back to drm_flip_func_t
33 * func) on a work queue after drm_flip_work_commit() is called.
34 */
35void drm_flip_work_queue(struct drm_flip_work *work, void *val)
36{
37 if (kfifo_put(&work->fifo, (const void **)&val)) {
38 atomic_inc(&work->pending);
39 } else {
40 DRM_ERROR("%s fifo full!\n", work->name);
41 work->func(work, val);
42 }
43}
44EXPORT_SYMBOL(drm_flip_work_queue);
45
46/**
47 * drm_flip_work_commit - commit queued work
48 * @work: the flip-work
49 * @wq: the work-queue to run the queued work on
50 *
51 * Trigger work previously queued by drm_flip_work_queue() to run
52 * on a workqueue. The typical usage would be to queue work (via
53 * drm_flip_work_queue()) at any point (from vblank irq and/or
54 * prior), and then from vblank irq commit the queued work.
55 */
56void drm_flip_work_commit(struct drm_flip_work *work,
57 struct workqueue_struct *wq)
58{
59 uint32_t pending = atomic_read(&work->pending);
60 atomic_add(pending, &work->count);
61 atomic_sub(pending, &work->pending);
62 queue_work(wq, &work->worker);
63}
64EXPORT_SYMBOL(drm_flip_work_commit);
65
66static void flip_worker(struct work_struct *w)
67{
68 struct drm_flip_work *work = container_of(w, struct drm_flip_work, worker);
69 uint32_t count = atomic_read(&work->count);
70 void *val = NULL;
71
72 atomic_sub(count, &work->count);
73
74 while(count--)
75 if (!WARN_ON(!kfifo_get(&work->fifo, &val)))
76 work->func(work, val);
77}
78
79/**
80 * drm_flip_work_init - initialize flip-work
81 * @work: the flip-work to initialize
82 * @size: the max queue depth
83 * @name: debug name
84 * @func: the callback work function
85 *
86 * Initializes/allocates resources for the flip-work
87 *
88 * RETURNS:
89 * Zero on success, error code on failure.
90 */
91int drm_flip_work_init(struct drm_flip_work *work, int size,
92 const char *name, drm_flip_func_t func)
93{
94 int ret;
95
96 work->name = name;
97 atomic_set(&work->count, 0);
98 atomic_set(&work->pending, 0);
99 work->func = func;
100
101 ret = kfifo_alloc(&work->fifo, size, GFP_KERNEL);
102 if (ret) {
103 DRM_ERROR("could not allocate %s fifo\n", name);
104 return ret;
105 }
106
107 INIT_WORK(&work->worker, flip_worker);
108
109 return 0;
110}
111EXPORT_SYMBOL(drm_flip_work_init);
112
113/**
114 * drm_flip_work_cleanup - cleans up flip-work
115 * @work: the flip-work to cleanup
116 *
117 * Destroy resources allocated for the flip-work
118 */
119void drm_flip_work_cleanup(struct drm_flip_work *work)
120{
121 WARN_ON(!kfifo_is_empty(&work->fifo));
122 kfifo_free(&work->fifo);
123}
124EXPORT_SYMBOL(drm_flip_work_cleanup);
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 3a24385e0368..4be8e09a32ef 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -48,59 +48,21 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
48 48
49static int drm_setup(struct drm_device * dev) 49static int drm_setup(struct drm_device * dev)
50{ 50{
51 int i;
52 int ret; 51 int ret;
53 52
54 if (dev->driver->firstopen) { 53 if (dev->driver->firstopen &&
54 !drm_core_check_feature(dev, DRIVER_MODESET)) {
55 ret = dev->driver->firstopen(dev); 55 ret = dev->driver->firstopen(dev);
56 if (ret != 0) 56 if (ret != 0)
57 return ret; 57 return ret;
58 } 58 }
59 59
60 atomic_set(&dev->ioctl_count, 0); 60 ret = drm_legacy_dma_setup(dev);
61 atomic_set(&dev->vma_count, 0); 61 if (ret < 0)
62 62 return ret;
63 if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
64 !drm_core_check_feature(dev, DRIVER_MODESET)) {
65 dev->buf_use = 0;
66 atomic_set(&dev->buf_alloc, 0);
67
68 i = drm_dma_setup(dev);
69 if (i < 0)
70 return i;
71 }
72
73 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
74 atomic_set(&dev->counts[i], 0);
75
76 dev->sigdata.lock = NULL;
77
78 dev->context_flag = 0;
79 dev->interrupt_flag = 0;
80 dev->dma_flag = 0;
81 dev->last_context = 0;
82 dev->last_switch = 0;
83 dev->last_checked = 0;
84 init_waitqueue_head(&dev->context_wait);
85 dev->if_version = 0;
86
87 dev->ctx_start = 0;
88 dev->lck_start = 0;
89 63
90 dev->buf_async = NULL;
91 init_waitqueue_head(&dev->buf_readers);
92 init_waitqueue_head(&dev->buf_writers);
93 64
94 DRM_DEBUG("\n"); 65 DRM_DEBUG("\n");
95
96 /*
97 * The kernel's context could be created here, but is now created
98 * in drm_dma_enqueue. This is more resource-efficient for
99 * hardware that does not do DMA, but may mean that
100 * drm_select_queue fails between the time the interrupt is
101 * initialized and the time the queues are initialized.
102 */
103
104 return 0; 66 return 0;
105} 67}
106 68
@@ -257,7 +219,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
257 return -EBUSY; /* No exclusive opens */ 219 return -EBUSY; /* No exclusive opens */
258 if (!drm_cpu_valid()) 220 if (!drm_cpu_valid())
259 return -EINVAL; 221 return -EINVAL;
260 if (dev->switch_power_state != DRM_SWITCH_POWER_ON) 222 if (dev->switch_power_state != DRM_SWITCH_POWER_ON && dev->switch_power_state != DRM_SWITCH_POWER_DYNAMIC_OFF)
261 return -EINVAL; 223 return -EINVAL;
262 224
263 DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id); 225 DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id);
@@ -300,10 +262,10 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
300 goto out_prime_destroy; 262 goto out_prime_destroy;
301 } 263 }
302 264
303 265 /* if there is no current master make this fd it, but do not create
304 /* if there is no current master make this fd it */ 266 * any master object for render clients */
305 mutex_lock(&dev->struct_mutex); 267 mutex_lock(&dev->struct_mutex);
306 if (!priv->minor->master) { 268 if (!priv->minor->master && !drm_is_render_client(priv)) {
307 /* create a new master */ 269 /* create a new master */
308 priv->minor->master = drm_master_create(priv->minor); 270 priv->minor->master = drm_master_create(priv->minor);
309 if (!priv->minor->master) { 271 if (!priv->minor->master) {
@@ -341,12 +303,11 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
341 goto out_close; 303 goto out_close;
342 } 304 }
343 } 305 }
344 mutex_unlock(&dev->struct_mutex); 306 } else if (!drm_is_render_client(priv)) {
345 } else {
346 /* get a reference to the master */ 307 /* get a reference to the master */
347 priv->master = drm_master_get(priv->minor->master); 308 priv->master = drm_master_get(priv->minor->master);
348 mutex_unlock(&dev->struct_mutex);
349 } 309 }
310 mutex_unlock(&dev->struct_mutex);
350 311
351 mutex_lock(&dev->struct_mutex); 312 mutex_lock(&dev->struct_mutex);
352 list_add(&priv->lhead, &dev->filelist); 313 list_add(&priv->lhead, &dev->filelist);
@@ -388,18 +349,6 @@ out_put_pid:
388 return ret; 349 return ret;
389} 350}
390 351
391/** No-op. */
392int drm_fasync(int fd, struct file *filp, int on)
393{
394 struct drm_file *priv = filp->private_data;
395 struct drm_device *dev = priv->minor->dev;
396
397 DRM_DEBUG("fd = %d, device = 0x%lx\n", fd,
398 (long)old_encode_dev(priv->minor->device));
399 return fasync_helper(fd, filp, on, &dev->buf_async);
400}
401EXPORT_SYMBOL(drm_fasync);
402
403static void drm_master_release(struct drm_device *dev, struct file *filp) 352static void drm_master_release(struct drm_device *dev, struct file *filp)
404{ 353{
405 struct drm_file *file_priv = filp->private_data; 354 struct drm_file *file_priv = filp->private_data;
@@ -490,26 +439,7 @@ int drm_release(struct inode *inode, struct file *filp)
490 if (dev->driver->driver_features & DRIVER_GEM) 439 if (dev->driver->driver_features & DRIVER_GEM)
491 drm_gem_release(dev, file_priv); 440 drm_gem_release(dev, file_priv);
492 441
493 mutex_lock(&dev->ctxlist_mutex); 442 drm_legacy_ctxbitmap_release(dev, file_priv);
494 if (!list_empty(&dev->ctxlist)) {
495 struct drm_ctx_list *pos, *n;
496
497 list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
498 if (pos->tag == file_priv &&
499 pos->handle != DRM_KERNEL_CONTEXT) {
500 if (dev->driver->context_dtor)
501 dev->driver->context_dtor(dev,
502 pos->handle);
503
504 drm_ctxbitmap_free(dev, pos->handle);
505
506 list_del(&pos->head);
507 kfree(pos);
508 --dev->ctx_count;
509 }
510 }
511 }
512 mutex_unlock(&dev->ctxlist_mutex);
513 443
514 mutex_lock(&dev->struct_mutex); 444 mutex_lock(&dev->struct_mutex);
515 445
@@ -547,7 +477,8 @@ int drm_release(struct inode *inode, struct file *filp)
547 iput(container_of(dev->dev_mapping, struct inode, i_data)); 477 iput(container_of(dev->dev_mapping, struct inode, i_data));
548 478
549 /* drop the reference held my the file priv */ 479 /* drop the reference held my the file priv */
550 drm_master_put(&file_priv->master); 480 if (file_priv->master)
481 drm_master_put(&file_priv->master);
551 file_priv->is_master = 0; 482 file_priv->is_master = 0;
552 list_del(&file_priv->lhead); 483 list_del(&file_priv->lhead);
553 mutex_unlock(&dev->struct_mutex); 484 mutex_unlock(&dev->struct_mutex);
@@ -555,6 +486,7 @@ int drm_release(struct inode *inode, struct file *filp)
555 if (dev->driver->postclose) 486 if (dev->driver->postclose)
556 dev->driver->postclose(dev, file_priv); 487 dev->driver->postclose(dev, file_priv);
557 488
489
558 if (drm_core_check_feature(dev, DRIVER_PRIME)) 490 if (drm_core_check_feature(dev, DRIVER_PRIME))
559 drm_prime_destroy_file_private(&file_priv->prime); 491 drm_prime_destroy_file_private(&file_priv->prime);
560 492
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 603f256152ef..49293bdc972a 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -37,6 +37,7 @@
37#include <linux/shmem_fs.h> 37#include <linux/shmem_fs.h>
38#include <linux/dma-buf.h> 38#include <linux/dma-buf.h>
39#include <drm/drmP.h> 39#include <drm/drmP.h>
40#include <drm/drm_vma_manager.h>
40 41
41/** @file drm_gem.c 42/** @file drm_gem.c
42 * 43 *
@@ -92,7 +93,7 @@ drm_gem_init(struct drm_device *dev)
92{ 93{
93 struct drm_gem_mm *mm; 94 struct drm_gem_mm *mm;
94 95
95 spin_lock_init(&dev->object_name_lock); 96 mutex_init(&dev->object_name_lock);
96 idr_init(&dev->object_name_idr); 97 idr_init(&dev->object_name_idr);
97 98
98 mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL); 99 mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
@@ -102,14 +103,9 @@ drm_gem_init(struct drm_device *dev)
102 } 103 }
103 104
104 dev->mm_private = mm; 105 dev->mm_private = mm;
105 106 drm_vma_offset_manager_init(&mm->vma_manager,
106 if (drm_ht_create(&mm->offset_hash, 12)) { 107 DRM_FILE_PAGE_OFFSET_START,
107 kfree(mm); 108 DRM_FILE_PAGE_OFFSET_SIZE);
108 return -ENOMEM;
109 }
110
111 drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
112 DRM_FILE_PAGE_OFFSET_SIZE);
113 109
114 return 0; 110 return 0;
115} 111}
@@ -119,8 +115,7 @@ drm_gem_destroy(struct drm_device *dev)
119{ 115{
120 struct drm_gem_mm *mm = dev->mm_private; 116 struct drm_gem_mm *mm = dev->mm_private;
121 117
122 drm_mm_takedown(&mm->offset_manager); 118 drm_vma_offset_manager_destroy(&mm->vma_manager);
123 drm_ht_remove(&mm->offset_hash);
124 kfree(mm); 119 kfree(mm);
125 dev->mm_private = NULL; 120 dev->mm_private = NULL;
126} 121}
@@ -132,16 +127,14 @@ drm_gem_destroy(struct drm_device *dev)
132int drm_gem_object_init(struct drm_device *dev, 127int drm_gem_object_init(struct drm_device *dev,
133 struct drm_gem_object *obj, size_t size) 128 struct drm_gem_object *obj, size_t size)
134{ 129{
135 BUG_ON((size & (PAGE_SIZE - 1)) != 0); 130 struct file *filp;
136 131
137 obj->dev = dev; 132 filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
138 obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE); 133 if (IS_ERR(filp))
139 if (IS_ERR(obj->filp)) 134 return PTR_ERR(filp);
140 return PTR_ERR(obj->filp);
141 135
142 kref_init(&obj->refcount); 136 drm_gem_private_object_init(dev, obj, size);
143 atomic_set(&obj->handle_count, 0); 137 obj->filp = filp;
144 obj->size = size;
145 138
146 return 0; 139 return 0;
147} 140}
@@ -152,8 +145,8 @@ EXPORT_SYMBOL(drm_gem_object_init);
152 * no GEM provided backing store. Instead the caller is responsible for 145 * no GEM provided backing store. Instead the caller is responsible for
153 * backing the object and handling it. 146 * backing the object and handling it.
154 */ 147 */
155int drm_gem_private_object_init(struct drm_device *dev, 148void drm_gem_private_object_init(struct drm_device *dev,
156 struct drm_gem_object *obj, size_t size) 149 struct drm_gem_object *obj, size_t size)
157{ 150{
158 BUG_ON((size & (PAGE_SIZE - 1)) != 0); 151 BUG_ON((size & (PAGE_SIZE - 1)) != 0);
159 152
@@ -161,10 +154,9 @@ int drm_gem_private_object_init(struct drm_device *dev,
161 obj->filp = NULL; 154 obj->filp = NULL;
162 155
163 kref_init(&obj->refcount); 156 kref_init(&obj->refcount);
164 atomic_set(&obj->handle_count, 0); 157 obj->handle_count = 0;
165 obj->size = size; 158 obj->size = size;
166 159 drm_vma_node_reset(&obj->vma_node);
167 return 0;
168} 160}
169EXPORT_SYMBOL(drm_gem_private_object_init); 161EXPORT_SYMBOL(drm_gem_private_object_init);
170 162
@@ -200,16 +192,79 @@ EXPORT_SYMBOL(drm_gem_object_alloc);
200static void 192static void
201drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp) 193drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
202{ 194{
203 if (obj->import_attach) { 195 /*
204 drm_prime_remove_buf_handle(&filp->prime, 196 * Note: obj->dma_buf can't disappear as long as we still hold a
205 obj->import_attach->dmabuf); 197 * handle reference in obj->handle_count.
198 */
199 mutex_lock(&filp->prime.lock);
200 if (obj->dma_buf) {
201 drm_prime_remove_buf_handle_locked(&filp->prime,
202 obj->dma_buf);
206 } 203 }
207 if (obj->export_dma_buf) { 204 mutex_unlock(&filp->prime.lock);
208 drm_prime_remove_buf_handle(&filp->prime, 205}
209 obj->export_dma_buf); 206
207static void drm_gem_object_ref_bug(struct kref *list_kref)
208{
209 BUG();
210}
211
212/**
213 * Called after the last handle to the object has been closed
214 *
215 * Removes any name for the object. Note that this must be
216 * called before drm_gem_object_free or we'll be touching
217 * freed memory
218 */
219static void drm_gem_object_handle_free(struct drm_gem_object *obj)
220{
221 struct drm_device *dev = obj->dev;
222
223 /* Remove any name for this object */
224 if (obj->name) {
225 idr_remove(&dev->object_name_idr, obj->name);
226 obj->name = 0;
227 /*
228 * The object name held a reference to this object, drop
229 * that now.
230 *
231 * This cannot be the last reference, since the handle holds one too.
232 */
233 kref_put(&obj->refcount, drm_gem_object_ref_bug);
210 } 234 }
211} 235}
212 236
237static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
238{
239 /* Unbreak the reference cycle if we have an exported dma_buf. */
240 if (obj->dma_buf) {
241 dma_buf_put(obj->dma_buf);
242 obj->dma_buf = NULL;
243 }
244}
245
246static void
247drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
248{
249 if (WARN_ON(obj->handle_count == 0))
250 return;
251
252 /*
253 * Must bump handle count first as this may be the last
254 * ref, in which case the object would disappear before we
255 * checked for a name
256 */
257
258 mutex_lock(&obj->dev->object_name_lock);
259 if (--obj->handle_count == 0) {
260 drm_gem_object_handle_free(obj);
261 drm_gem_object_exported_dma_buf_free(obj);
262 }
263 mutex_unlock(&obj->dev->object_name_lock);
264
265 drm_gem_object_unreference_unlocked(obj);
266}
267
213/** 268/**
214 * Removes the mapping from handle to filp for this object. 269 * Removes the mapping from handle to filp for this object.
215 */ 270 */
@@ -242,7 +297,9 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
242 idr_remove(&filp->object_idr, handle); 297 idr_remove(&filp->object_idr, handle);
243 spin_unlock(&filp->table_lock); 298 spin_unlock(&filp->table_lock);
244 299
245 drm_gem_remove_prime_handles(obj, filp); 300 if (drm_core_check_feature(dev, DRIVER_PRIME))
301 drm_gem_remove_prime_handles(obj, filp);
302 drm_vma_node_revoke(&obj->vma_node, filp->filp);
246 303
247 if (dev->driver->gem_close_object) 304 if (dev->driver->gem_close_object)
248 dev->driver->gem_close_object(obj, filp); 305 dev->driver->gem_close_object(obj, filp);
@@ -253,18 +310,36 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
253EXPORT_SYMBOL(drm_gem_handle_delete); 310EXPORT_SYMBOL(drm_gem_handle_delete);
254 311
255/** 312/**
256 * Create a handle for this object. This adds a handle reference 313 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
257 * to the object, which includes a regular reference count. Callers 314 *
258 * will likely want to dereference the object afterwards. 315 * This implements the ->dumb_destroy kms driver callback for drivers which use
316 * gem to manage their backing storage.
317 */
318int drm_gem_dumb_destroy(struct drm_file *file,
319 struct drm_device *dev,
320 uint32_t handle)
321{
322 return drm_gem_handle_delete(file, handle);
323}
324EXPORT_SYMBOL(drm_gem_dumb_destroy);
325
326/**
327 * drm_gem_handle_create_tail - internal functions to create a handle
328 *
329 * This expects the dev->object_name_lock to be held already and will drop it
330 * before returning. Used to avoid races in establishing new handles when
331 * importing an object from either an flink name or a dma-buf.
259 */ 332 */
260int 333int
261drm_gem_handle_create(struct drm_file *file_priv, 334drm_gem_handle_create_tail(struct drm_file *file_priv,
262 struct drm_gem_object *obj, 335 struct drm_gem_object *obj,
263 u32 *handlep) 336 u32 *handlep)
264{ 337{
265 struct drm_device *dev = obj->dev; 338 struct drm_device *dev = obj->dev;
266 int ret; 339 int ret;
267 340
341 WARN_ON(!mutex_is_locked(&dev->object_name_lock));
342
268 /* 343 /*
269 * Get the user-visible handle using idr. Preload and perform 344 * Get the user-visible handle using idr. Preload and perform
270 * allocation under our spinlock. 345 * allocation under our spinlock.
@@ -273,14 +348,22 @@ drm_gem_handle_create(struct drm_file *file_priv,
273 spin_lock(&file_priv->table_lock); 348 spin_lock(&file_priv->table_lock);
274 349
275 ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT); 350 ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
276 351 drm_gem_object_reference(obj);
352 obj->handle_count++;
277 spin_unlock(&file_priv->table_lock); 353 spin_unlock(&file_priv->table_lock);
278 idr_preload_end(); 354 idr_preload_end();
279 if (ret < 0) 355 mutex_unlock(&dev->object_name_lock);
356 if (ret < 0) {
357 drm_gem_object_handle_unreference_unlocked(obj);
280 return ret; 358 return ret;
359 }
281 *handlep = ret; 360 *handlep = ret;
282 361
283 drm_gem_object_handle_reference(obj); 362 ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
363 if (ret) {
364 drm_gem_handle_delete(file_priv, *handlep);
365 return ret;
366 }
284 367
285 if (dev->driver->gem_open_object) { 368 if (dev->driver->gem_open_object) {
286 ret = dev->driver->gem_open_object(obj, file_priv); 369 ret = dev->driver->gem_open_object(obj, file_priv);
@@ -292,6 +375,21 @@ drm_gem_handle_create(struct drm_file *file_priv,
292 375
293 return 0; 376 return 0;
294} 377}
378
379/**
380 * Create a handle for this object. This adds a handle reference
381 * to the object, which includes a regular reference count. Callers
382 * will likely want to dereference the object afterwards.
383 */
384int
385drm_gem_handle_create(struct drm_file *file_priv,
386 struct drm_gem_object *obj,
387 u32 *handlep)
388{
389 mutex_lock(&obj->dev->object_name_lock);
390
391 return drm_gem_handle_create_tail(file_priv, obj, handlep);
392}
295EXPORT_SYMBOL(drm_gem_handle_create); 393EXPORT_SYMBOL(drm_gem_handle_create);
296 394
297 395
@@ -306,81 +404,155 @@ drm_gem_free_mmap_offset(struct drm_gem_object *obj)
306{ 404{
307 struct drm_device *dev = obj->dev; 405 struct drm_device *dev = obj->dev;
308 struct drm_gem_mm *mm = dev->mm_private; 406 struct drm_gem_mm *mm = dev->mm_private;
309 struct drm_map_list *list = &obj->map_list;
310 407
311 drm_ht_remove_item(&mm->offset_hash, &list->hash); 408 drm_vma_offset_remove(&mm->vma_manager, &obj->vma_node);
312 drm_mm_put_block(list->file_offset_node);
313 kfree(list->map);
314 list->map = NULL;
315} 409}
316EXPORT_SYMBOL(drm_gem_free_mmap_offset); 410EXPORT_SYMBOL(drm_gem_free_mmap_offset);
317 411
318/** 412/**
319 * drm_gem_create_mmap_offset - create a fake mmap offset for an object 413 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
320 * @obj: obj in question 414 * @obj: obj in question
415 * @size: the virtual size
321 * 416 *
322 * GEM memory mapping works by handing back to userspace a fake mmap offset 417 * GEM memory mapping works by handing back to userspace a fake mmap offset
323 * it can use in a subsequent mmap(2) call. The DRM core code then looks 418 * it can use in a subsequent mmap(2) call. The DRM core code then looks
324 * up the object based on the offset and sets up the various memory mapping 419 * up the object based on the offset and sets up the various memory mapping
325 * structures. 420 * structures.
326 * 421 *
327 * This routine allocates and attaches a fake offset for @obj. 422 * This routine allocates and attaches a fake offset for @obj, in cases where
423 * the virtual size differs from the physical size (ie. obj->size). Otherwise
424 * just use drm_gem_create_mmap_offset().
328 */ 425 */
329int 426int
330drm_gem_create_mmap_offset(struct drm_gem_object *obj) 427drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
331{ 428{
332 struct drm_device *dev = obj->dev; 429 struct drm_device *dev = obj->dev;
333 struct drm_gem_mm *mm = dev->mm_private; 430 struct drm_gem_mm *mm = dev->mm_private;
334 struct drm_map_list *list;
335 struct drm_local_map *map;
336 int ret;
337 431
338 /* Set the object up for mmap'ing */ 432 return drm_vma_offset_add(&mm->vma_manager, &obj->vma_node,
339 list = &obj->map_list; 433 size / PAGE_SIZE);
340 list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL); 434}
341 if (!list->map) 435EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);
342 return -ENOMEM;
343
344 map = list->map;
345 map->type = _DRM_GEM;
346 map->size = obj->size;
347 map->handle = obj;
348 436
349 /* Get a DRM GEM mmap offset allocated... */ 437/**
350 list->file_offset_node = drm_mm_search_free(&mm->offset_manager, 438 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
351 obj->size / PAGE_SIZE, 0, false); 439 * @obj: obj in question
440 *
441 * GEM memory mapping works by handing back to userspace a fake mmap offset
442 * it can use in a subsequent mmap(2) call. The DRM core code then looks
443 * up the object based on the offset and sets up the various memory mapping
444 * structures.
445 *
446 * This routine allocates and attaches a fake offset for @obj.
447 */
448int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
449{
450 return drm_gem_create_mmap_offset_size(obj, obj->size);
451}
452EXPORT_SYMBOL(drm_gem_create_mmap_offset);
352 453
353 if (!list->file_offset_node) { 454/**
354 DRM_ERROR("failed to allocate offset for bo %d\n", obj->name); 455 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
355 ret = -ENOSPC; 456 * from shmem
356 goto out_free_list; 457 * @obj: obj in question
458 * @gfpmask: gfp mask of requested pages
459 */
460struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
461{
462 struct inode *inode;
463 struct address_space *mapping;
464 struct page *p, **pages;
465 int i, npages;
466
467 /* This is the shared memory object that backs the GEM resource */
468 inode = file_inode(obj->filp);
469 mapping = inode->i_mapping;
470
471 /* We already BUG_ON() for non-page-aligned sizes in
472 * drm_gem_object_init(), so we should never hit this unless
473 * driver author is doing something really wrong:
474 */
475 WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
476
477 npages = obj->size >> PAGE_SHIFT;
478
479 pages = drm_malloc_ab(npages, sizeof(struct page *));
480 if (pages == NULL)
481 return ERR_PTR(-ENOMEM);
482
483 gfpmask |= mapping_gfp_mask(mapping);
484
485 for (i = 0; i < npages; i++) {
486 p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
487 if (IS_ERR(p))
488 goto fail;
489 pages[i] = p;
490
491 /* There is a hypothetical issue w/ drivers that require
492 * buffer memory in the low 4GB.. if the pages are un-
493 * pinned, and swapped out, they can end up swapped back
494 * in above 4GB. If pages are already in memory, then
495 * shmem_read_mapping_page_gfp will ignore the gfpmask,
496 * even if the already in-memory page disobeys the mask.
497 *
498 * It is only a theoretical issue today, because none of
499 * the devices with this limitation can be populated with
500 * enough memory to trigger the issue. But this BUG_ON()
501 * is here as a reminder in case the problem with
502 * shmem_read_mapping_page_gfp() isn't solved by the time
503 * it does become a real issue.
504 *
505 * See this thread: http://lkml.org/lkml/2011/7/11/238
506 */
507 BUG_ON((gfpmask & __GFP_DMA32) &&
508 (page_to_pfn(p) >= 0x00100000UL));
357 } 509 }
358 510
359 list->file_offset_node = drm_mm_get_block(list->file_offset_node, 511 return pages;
360 obj->size / PAGE_SIZE, 0);
361 if (!list->file_offset_node) {
362 ret = -ENOMEM;
363 goto out_free_list;
364 }
365 512
366 list->hash.key = list->file_offset_node->start; 513fail:
367 ret = drm_ht_insert_item(&mm->offset_hash, &list->hash); 514 while (i--)
368 if (ret) { 515 page_cache_release(pages[i]);
369 DRM_ERROR("failed to add to map hash\n");
370 goto out_free_mm;
371 }
372 516
373 return 0; 517 drm_free_large(pages);
518 return ERR_CAST(p);
519}
520EXPORT_SYMBOL(drm_gem_get_pages);
374 521
375out_free_mm: 522/**
376 drm_mm_put_block(list->file_offset_node); 523 * drm_gem_put_pages - helper to free backing pages for a GEM object
377out_free_list: 524 * @obj: obj in question
378 kfree(list->map); 525 * @pages: pages to free
379 list->map = NULL; 526 * @dirty: if true, pages will be marked as dirty
527 * @accessed: if true, the pages will be marked as accessed
528 */
529void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
530 bool dirty, bool accessed)
531{
532 int i, npages;
380 533
381 return ret; 534 /* We already BUG_ON() for non-page-aligned sizes in
535 * drm_gem_object_init(), so we should never hit this unless
536 * driver author is doing something really wrong:
537 */
538 WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
539
540 npages = obj->size >> PAGE_SHIFT;
541
542 for (i = 0; i < npages; i++) {
543 if (dirty)
544 set_page_dirty(pages[i]);
545
546 if (accessed)
547 mark_page_accessed(pages[i]);
548
549 /* Undo the reference we took when populating the table */
550 page_cache_release(pages[i]);
551 }
552
553 drm_free_large(pages);
382} 554}
383EXPORT_SYMBOL(drm_gem_create_mmap_offset); 555EXPORT_SYMBOL(drm_gem_put_pages);
384 556
385/** Returns a reference to the object named by the handle. */ 557/** Returns a reference to the object named by the handle. */
386struct drm_gem_object * 558struct drm_gem_object *
@@ -445,8 +617,14 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
445 if (obj == NULL) 617 if (obj == NULL)
446 return -ENOENT; 618 return -ENOENT;
447 619
620 mutex_lock(&dev->object_name_lock);
448 idr_preload(GFP_KERNEL); 621 idr_preload(GFP_KERNEL);
449 spin_lock(&dev->object_name_lock); 622 /* prevent races with concurrent gem_close. */
623 if (obj->handle_count == 0) {
624 ret = -ENOENT;
625 goto err;
626 }
627
450 if (!obj->name) { 628 if (!obj->name) {
451 ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT); 629 ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
452 if (ret < 0) 630 if (ret < 0)
@@ -462,8 +640,8 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
462 ret = 0; 640 ret = 0;
463 641
464err: 642err:
465 spin_unlock(&dev->object_name_lock);
466 idr_preload_end(); 643 idr_preload_end();
644 mutex_unlock(&dev->object_name_lock);
467 drm_gem_object_unreference_unlocked(obj); 645 drm_gem_object_unreference_unlocked(obj);
468 return ret; 646 return ret;
469} 647}
@@ -486,15 +664,17 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,
486 if (!(dev->driver->driver_features & DRIVER_GEM)) 664 if (!(dev->driver->driver_features & DRIVER_GEM))
487 return -ENODEV; 665 return -ENODEV;
488 666
489 spin_lock(&dev->object_name_lock); 667 mutex_lock(&dev->object_name_lock);
490 obj = idr_find(&dev->object_name_idr, (int) args->name); 668 obj = idr_find(&dev->object_name_idr, (int) args->name);
491 if (obj) 669 if (obj) {
492 drm_gem_object_reference(obj); 670 drm_gem_object_reference(obj);
493 spin_unlock(&dev->object_name_lock); 671 } else {
494 if (!obj) 672 mutex_unlock(&dev->object_name_lock);
495 return -ENOENT; 673 return -ENOENT;
674 }
496 675
497 ret = drm_gem_handle_create(file_priv, obj, &handle); 676 /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
677 ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
498 drm_gem_object_unreference_unlocked(obj); 678 drm_gem_object_unreference_unlocked(obj);
499 if (ret) 679 if (ret)
500 return ret; 680 return ret;
@@ -527,7 +707,9 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
527 struct drm_gem_object *obj = ptr; 707 struct drm_gem_object *obj = ptr;
528 struct drm_device *dev = obj->dev; 708 struct drm_device *dev = obj->dev;
529 709
530 drm_gem_remove_prime_handles(obj, file_priv); 710 if (drm_core_check_feature(dev, DRIVER_PRIME))
711 drm_gem_remove_prime_handles(obj, file_priv);
712 drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
531 713
532 if (dev->driver->gem_close_object) 714 if (dev->driver->gem_close_object)
533 dev->driver->gem_close_object(obj, file_priv); 715 dev->driver->gem_close_object(obj, file_priv);
@@ -553,6 +735,8 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
553void 735void
554drm_gem_object_release(struct drm_gem_object *obj) 736drm_gem_object_release(struct drm_gem_object *obj)
555{ 737{
738 WARN_ON(obj->dma_buf);
739
556 if (obj->filp) 740 if (obj->filp)
557 fput(obj->filp); 741 fput(obj->filp);
558} 742}
@@ -577,41 +761,6 @@ drm_gem_object_free(struct kref *kref)
577} 761}
578EXPORT_SYMBOL(drm_gem_object_free); 762EXPORT_SYMBOL(drm_gem_object_free);
579 763
580static void drm_gem_object_ref_bug(struct kref *list_kref)
581{
582 BUG();
583}
584
585/**
586 * Called after the last handle to the object has been closed
587 *
588 * Removes any name for the object. Note that this must be
589 * called before drm_gem_object_free or we'll be touching
590 * freed memory
591 */
592void drm_gem_object_handle_free(struct drm_gem_object *obj)
593{
594 struct drm_device *dev = obj->dev;
595
596 /* Remove any name for this object */
597 spin_lock(&dev->object_name_lock);
598 if (obj->name) {
599 idr_remove(&dev->object_name_idr, obj->name);
600 obj->name = 0;
601 spin_unlock(&dev->object_name_lock);
602 /*
603 * The object name held a reference to this object, drop
604 * that now.
605 *
606 * This cannot be the last reference, since the handle holds one too.
607 */
608 kref_put(&obj->refcount, drm_gem_object_ref_bug);
609 } else
610 spin_unlock(&dev->object_name_lock);
611
612}
613EXPORT_SYMBOL(drm_gem_object_handle_free);
614
615void drm_gem_vm_open(struct vm_area_struct *vma) 764void drm_gem_vm_open(struct vm_area_struct *vma)
616{ 765{
617 struct drm_gem_object *obj = vma->vm_private_data; 766 struct drm_gem_object *obj = vma->vm_private_data;
@@ -653,6 +802,10 @@ EXPORT_SYMBOL(drm_gem_vm_close);
653 * the GEM object is not looked up based on its fake offset. To implement the 802 * the GEM object is not looked up based on its fake offset. To implement the
654 * DRM mmap operation, drivers should use the drm_gem_mmap() function. 803 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
655 * 804 *
805 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
806 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
807 * callers must verify access restrictions before calling this helper.
808 *
656 * NOTE: This function has to be protected with dev->struct_mutex 809 * NOTE: This function has to be protected with dev->struct_mutex
657 * 810 *
658 * Return 0 or success or -EINVAL if the object size is smaller than the VMA 811 * Return 0 or success or -EINVAL if the object size is smaller than the VMA
@@ -701,14 +854,17 @@ EXPORT_SYMBOL(drm_gem_mmap_obj);
701 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will 854 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
702 * contain the fake offset we created when the GTT map ioctl was called on 855 * contain the fake offset we created when the GTT map ioctl was called on
703 * the object) and map it with a call to drm_gem_mmap_obj(). 856 * the object) and map it with a call to drm_gem_mmap_obj().
857 *
858 * If the caller is not granted access to the buffer object, the mmap will fail
859 * with EACCES. Please see the vma manager for more information.
704 */ 860 */
705int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) 861int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
706{ 862{
707 struct drm_file *priv = filp->private_data; 863 struct drm_file *priv = filp->private_data;
708 struct drm_device *dev = priv->minor->dev; 864 struct drm_device *dev = priv->minor->dev;
709 struct drm_gem_mm *mm = dev->mm_private; 865 struct drm_gem_mm *mm = dev->mm_private;
710 struct drm_local_map *map = NULL; 866 struct drm_gem_object *obj;
711 struct drm_hash_item *hash; 867 struct drm_vma_offset_node *node;
712 int ret = 0; 868 int ret = 0;
713 869
714 if (drm_device_is_unplugged(dev)) 870 if (drm_device_is_unplugged(dev))
@@ -716,21 +872,19 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
716 872
717 mutex_lock(&dev->struct_mutex); 873 mutex_lock(&dev->struct_mutex);
718 874
719 if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) { 875 node = drm_vma_offset_exact_lookup(&mm->vma_manager, vma->vm_pgoff,
876 vma_pages(vma));
877 if (!node) {
720 mutex_unlock(&dev->struct_mutex); 878 mutex_unlock(&dev->struct_mutex);
721 return drm_mmap(filp, vma); 879 return drm_mmap(filp, vma);
880 } else if (!drm_vma_node_is_allowed(node, filp)) {
881 mutex_unlock(&dev->struct_mutex);
882 return -EACCES;
722 } 883 }
723 884
724 map = drm_hash_entry(hash, struct drm_map_list, hash)->map; 885 obj = container_of(node, struct drm_gem_object, vma_node);
725 if (!map || 886 ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, vma);
726 ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
727 ret = -EPERM;
728 goto out_unlock;
729 }
730
731 ret = drm_gem_mmap_obj(map->handle, map->size, vma);
732 887
733out_unlock:
734 mutex_unlock(&dev->struct_mutex); 888 mutex_unlock(&dev->struct_mutex);
735 889
736 return ret; 890 return ret;
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index ece72a8ac245..0a4f80574eb4 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -27,11 +27,7 @@
27#include <drm/drmP.h> 27#include <drm/drmP.h>
28#include <drm/drm.h> 28#include <drm/drm.h>
29#include <drm/drm_gem_cma_helper.h> 29#include <drm/drm_gem_cma_helper.h>
30 30#include <drm/drm_vma_manager.h>
31static unsigned int get_gem_mmap_offset(struct drm_gem_object *obj)
32{
33 return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT;
34}
35 31
36/* 32/*
37 * __drm_gem_cma_create - Create a GEM CMA object without allocating memory 33 * __drm_gem_cma_create - Create a GEM CMA object without allocating memory
@@ -172,8 +168,7 @@ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
172{ 168{
173 struct drm_gem_cma_object *cma_obj; 169 struct drm_gem_cma_object *cma_obj;
174 170
175 if (gem_obj->map_list.map) 171 drm_gem_free_mmap_offset(gem_obj);
176 drm_gem_free_mmap_offset(gem_obj);
177 172
178 cma_obj = to_drm_gem_cma_obj(gem_obj); 173 cma_obj = to_drm_gem_cma_obj(gem_obj);
179 174
@@ -237,7 +232,7 @@ int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
237 return -EINVAL; 232 return -EINVAL;
238 } 233 }
239 234
240 *offset = get_gem_mmap_offset(gem_obj); 235 *offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
241 236
242 drm_gem_object_unreference(gem_obj); 237 drm_gem_object_unreference(gem_obj);
243 238
@@ -286,27 +281,16 @@ int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
286} 281}
287EXPORT_SYMBOL_GPL(drm_gem_cma_mmap); 282EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);
288 283
289/*
290 * drm_gem_cma_dumb_destroy - (struct drm_driver)->dumb_destroy callback function
291 */
292int drm_gem_cma_dumb_destroy(struct drm_file *file_priv,
293 struct drm_device *drm, unsigned int handle)
294{
295 return drm_gem_handle_delete(file_priv, handle);
296}
297EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_destroy);
298
299#ifdef CONFIG_DEBUG_FS 284#ifdef CONFIG_DEBUG_FS
300void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj, struct seq_file *m) 285void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj, struct seq_file *m)
301{ 286{
302 struct drm_gem_object *obj = &cma_obj->base; 287 struct drm_gem_object *obj = &cma_obj->base;
303 struct drm_device *dev = obj->dev; 288 struct drm_device *dev = obj->dev;
304 uint64_t off = 0; 289 uint64_t off;
305 290
306 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 291 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
307 292
308 if (obj->map_list.map) 293 off = drm_vma_node_start(&obj->vma_node);
309 off = (uint64_t)obj->map_list.hash.key;
310 294
311 seq_printf(m, "%2d (%2d) %08llx %08Zx %p %d", 295 seq_printf(m, "%2d (%2d) %08llx %08Zx %p %d",
312 obj->name, obj->refcount.refcount.counter, 296 obj->name, obj->refcount.refcount.counter,
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index d4b20ceda3fb..53298320080b 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -207,7 +207,7 @@ static int drm_gem_one_name_info(int id, void *ptr, void *data)
207 207
208 seq_printf(m, "%6d %8zd %7d %8d\n", 208 seq_printf(m, "%6d %8zd %7d %8d\n",
209 obj->name, obj->size, 209 obj->name, obj->size,
210 atomic_read(&obj->handle_count), 210 obj->handle_count,
211 atomic_read(&obj->refcount.refcount)); 211 atomic_read(&obj->refcount.refcount));
212 return 0; 212 return 0;
213} 213}
@@ -218,7 +218,11 @@ int drm_gem_name_info(struct seq_file *m, void *data)
218 struct drm_device *dev = node->minor->dev; 218 struct drm_device *dev = node->minor->dev;
219 219
220 seq_printf(m, " name size handles refcount\n"); 220 seq_printf(m, " name size handles refcount\n");
221
222 mutex_lock(&dev->object_name_lock);
221 idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, m); 223 idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, m);
224 mutex_unlock(&dev->object_name_lock);
225
222 return 0; 226 return 0;
223} 227}
224 228
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index ffd7a7ba70d4..07247e2855a2 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -217,29 +217,30 @@ int drm_getclient(struct drm_device *dev, void *data,
217 struct drm_file *file_priv) 217 struct drm_file *file_priv)
218{ 218{
219 struct drm_client *client = data; 219 struct drm_client *client = data;
220 struct drm_file *pt;
221 int idx;
222 int i;
223 220
224 idx = client->idx; 221 /*
225 i = 0; 222 * Hollowed-out getclient ioctl to keep some dead old drm tests/tools
226 223 * not breaking completely. Userspace tools stop enumerating one they
227 mutex_lock(&dev->struct_mutex); 224 * get -EINVAL, hence this is the return value we need to hand back for
228 list_for_each_entry(pt, &dev->filelist, lhead) { 225 * no clients tracked.
229 if (i++ >= idx) { 226 *
230 client->auth = pt->authenticated; 227 * Unfortunately some clients (*cough* libva *cough*) use this in a fun
231 client->pid = pid_vnr(pt->pid); 228 * attempt to figure out whether they're authenticated or not. Since
232 client->uid = from_kuid_munged(current_user_ns(), pt->uid); 229 * that's the only thing they care about, give it to the directly
233 client->magic = pt->magic; 230 * instead of walking one giant list.
234 client->iocs = pt->ioctl_count; 231 */
235 mutex_unlock(&dev->struct_mutex); 232 if (client->idx == 0) {
236 233 client->auth = file_priv->authenticated;
237 return 0; 234 client->pid = pid_vnr(file_priv->pid);
238 } 235 client->uid = from_kuid_munged(current_user_ns(),
236 file_priv->uid);
237 client->magic = 0;
238 client->iocs = 0;
239
240 return 0;
241 } else {
242 return -EINVAL;
239 } 243 }
240 mutex_unlock(&dev->struct_mutex);
241
242 return -EINVAL;
243} 244}
244 245
245/** 246/**
@@ -256,21 +257,10 @@ int drm_getstats(struct drm_device *dev, void *data,
256 struct drm_file *file_priv) 257 struct drm_file *file_priv)
257{ 258{
258 struct drm_stats *stats = data; 259 struct drm_stats *stats = data;
259 int i;
260 260
261 /* Clear stats to prevent userspace from eating its stack garbage. */
261 memset(stats, 0, sizeof(*stats)); 262 memset(stats, 0, sizeof(*stats));
262 263
263 for (i = 0; i < dev->counters; i++) {
264 if (dev->types[i] == _DRM_STAT_LOCK)
265 stats->data[i].value =
266 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
267 else
268 stats->data[i].value = atomic_read(&dev->counts[i]);
269 stats->data[i].type = dev->types[i];
270 }
271
272 stats->count = dev->counters;
273
274 return 0; 264 return 0;
275} 265}
276 266
@@ -303,6 +293,9 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
303 case DRM_CAP_TIMESTAMP_MONOTONIC: 293 case DRM_CAP_TIMESTAMP_MONOTONIC:
304 req->value = drm_timestamp_monotonic; 294 req->value = drm_timestamp_monotonic;
305 break; 295 break;
296 case DRM_CAP_ASYNC_PAGE_FLIP:
297 req->value = dev->mode_config.async_page_flip;
298 break;
306 default: 299 default:
307 return -EINVAL; 300 return -EINVAL;
308 } 301 }
@@ -352,9 +345,6 @@ int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_pri
352 retcode = -EINVAL; 345 retcode = -EINVAL;
353 goto done; 346 goto done;
354 } 347 }
355
356 if (dev->driver->set_version)
357 dev->driver->set_version(dev, sv);
358 } 348 }
359 349
360done: 350done:
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index 126d50ea181f..64e44fad8ae8 100644
--- a/drivers/gpu/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
@@ -86,7 +86,6 @@ void drm_free_agp(DRM_AGP_MEM * handle, int pages)
86{ 86{
87 agp_free_memory(handle); 87 agp_free_memory(handle);
88} 88}
89EXPORT_SYMBOL(drm_free_agp);
90 89
91/** Wrapper around agp_bind_memory() */ 90/** Wrapper around agp_bind_memory() */
92int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start) 91int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
@@ -99,7 +98,6 @@ int drm_unbind_agp(DRM_AGP_MEM * handle)
99{ 98{
100 return agp_unbind_memory(handle); 99 return agp_unbind_memory(handle);
101} 100}
102EXPORT_SYMBOL(drm_unbind_agp);
103 101
104#else /* __OS_HAS_AGP */ 102#else /* __OS_HAS_AGP */
105static inline void *agp_remap(unsigned long offset, unsigned long size, 103static inline void *agp_remap(unsigned long offset, unsigned long size,
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index feb267f37e21..af93cc55259f 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -49,58 +49,18 @@
49 49
50#define MM_UNUSED_TARGET 4 50#define MM_UNUSED_TARGET 4
51 51
52static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic) 52static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
53{ 53 unsigned long size,
54 struct drm_mm_node *child; 54 unsigned alignment,
55 55 unsigned long color,
56 if (atomic) 56 enum drm_mm_search_flags flags);
57 child = kzalloc(sizeof(*child), GFP_ATOMIC); 57static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
58 else 58 unsigned long size,
59 child = kzalloc(sizeof(*child), GFP_KERNEL); 59 unsigned alignment,
60 60 unsigned long color,
61 if (unlikely(child == NULL)) { 61 unsigned long start,
62 spin_lock(&mm->unused_lock); 62 unsigned long end,
63 if (list_empty(&mm->unused_nodes)) 63 enum drm_mm_search_flags flags);
64 child = NULL;
65 else {
66 child =
67 list_entry(mm->unused_nodes.next,
68 struct drm_mm_node, node_list);
69 list_del(&child->node_list);
70 --mm->num_unused;
71 }
72 spin_unlock(&mm->unused_lock);
73 }
74 return child;
75}
76
77/* drm_mm_pre_get() - pre allocate drm_mm_node structure
78 * drm_mm: memory manager struct we are pre-allocating for
79 *
80 * Returns 0 on success or -ENOMEM if allocation fails.
81 */
82int drm_mm_pre_get(struct drm_mm *mm)
83{
84 struct drm_mm_node *node;
85
86 spin_lock(&mm->unused_lock);
87 while (mm->num_unused < MM_UNUSED_TARGET) {
88 spin_unlock(&mm->unused_lock);
89 node = kzalloc(sizeof(*node), GFP_KERNEL);
90 spin_lock(&mm->unused_lock);
91
92 if (unlikely(node == NULL)) {
93 int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
94 spin_unlock(&mm->unused_lock);
95 return ret;
96 }
97 ++mm->num_unused;
98 list_add_tail(&node->node_list, &mm->unused_nodes);
99 }
100 spin_unlock(&mm->unused_lock);
101 return 0;
102}
103EXPORT_SYMBOL(drm_mm_pre_get);
104 64
105static void drm_mm_insert_helper(struct drm_mm_node *hole_node, 65static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
106 struct drm_mm_node *node, 66 struct drm_mm_node *node,
@@ -187,24 +147,6 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
187} 147}
188EXPORT_SYMBOL(drm_mm_reserve_node); 148EXPORT_SYMBOL(drm_mm_reserve_node);
189 149
190struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
191 unsigned long size,
192 unsigned alignment,
193 unsigned long color,
194 int atomic)
195{
196 struct drm_mm_node *node;
197
198 node = drm_mm_kmalloc(hole_node->mm, atomic);
199 if (unlikely(node == NULL))
200 return NULL;
201
202 drm_mm_insert_helper(hole_node, node, size, alignment, color);
203
204 return node;
205}
206EXPORT_SYMBOL(drm_mm_get_block_generic);
207
208/** 150/**
209 * Search for free space and insert a preallocated memory node. Returns 151 * Search for free space and insert a preallocated memory node. Returns
210 * -ENOSPC if no suitable free area is available. The preallocated memory node 152 * -ENOSPC if no suitable free area is available. The preallocated memory node
@@ -212,12 +154,13 @@ EXPORT_SYMBOL(drm_mm_get_block_generic);
212 */ 154 */
213int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node, 155int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
214 unsigned long size, unsigned alignment, 156 unsigned long size, unsigned alignment,
215 unsigned long color) 157 unsigned long color,
158 enum drm_mm_search_flags flags)
216{ 159{
217 struct drm_mm_node *hole_node; 160 struct drm_mm_node *hole_node;
218 161
219 hole_node = drm_mm_search_free_generic(mm, size, alignment, 162 hole_node = drm_mm_search_free_generic(mm, size, alignment,
220 color, 0); 163 color, flags);
221 if (!hole_node) 164 if (!hole_node)
222 return -ENOSPC; 165 return -ENOSPC;
223 166
@@ -226,13 +169,6 @@ int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
226} 169}
227EXPORT_SYMBOL(drm_mm_insert_node_generic); 170EXPORT_SYMBOL(drm_mm_insert_node_generic);
228 171
229int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
230 unsigned long size, unsigned alignment)
231{
232 return drm_mm_insert_node_generic(mm, node, size, alignment, 0);
233}
234EXPORT_SYMBOL(drm_mm_insert_node);
235
236static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node, 172static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
237 struct drm_mm_node *node, 173 struct drm_mm_node *node,
238 unsigned long size, unsigned alignment, 174 unsigned long size, unsigned alignment,
@@ -285,27 +221,6 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
285 } 221 }
286} 222}
287 223
288struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
289 unsigned long size,
290 unsigned alignment,
291 unsigned long color,
292 unsigned long start,
293 unsigned long end,
294 int atomic)
295{
296 struct drm_mm_node *node;
297
298 node = drm_mm_kmalloc(hole_node->mm, atomic);
299 if (unlikely(node == NULL))
300 return NULL;
301
302 drm_mm_insert_helper_range(hole_node, node, size, alignment, color,
303 start, end);
304
305 return node;
306}
307EXPORT_SYMBOL(drm_mm_get_block_range_generic);
308
309/** 224/**
310 * Search for free space and insert a preallocated memory node. Returns 225 * Search for free space and insert a preallocated memory node. Returns
311 * -ENOSPC if no suitable free area is available. This is for range 226 * -ENOSPC if no suitable free area is available. This is for range
@@ -313,13 +228,14 @@ EXPORT_SYMBOL(drm_mm_get_block_range_generic);
313 */ 228 */
314int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node, 229int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
315 unsigned long size, unsigned alignment, unsigned long color, 230 unsigned long size, unsigned alignment, unsigned long color,
316 unsigned long start, unsigned long end) 231 unsigned long start, unsigned long end,
232 enum drm_mm_search_flags flags)
317{ 233{
318 struct drm_mm_node *hole_node; 234 struct drm_mm_node *hole_node;
319 235
320 hole_node = drm_mm_search_free_in_range_generic(mm, 236 hole_node = drm_mm_search_free_in_range_generic(mm,
321 size, alignment, color, 237 size, alignment, color,
322 start, end, 0); 238 start, end, flags);
323 if (!hole_node) 239 if (!hole_node)
324 return -ENOSPC; 240 return -ENOSPC;
325 241
@@ -330,14 +246,6 @@ int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *n
330} 246}
331EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic); 247EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
332 248
333int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
334 unsigned long size, unsigned alignment,
335 unsigned long start, unsigned long end)
336{
337 return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, 0, start, end);
338}
339EXPORT_SYMBOL(drm_mm_insert_node_in_range);
340
341/** 249/**
342 * Remove a memory node from the allocator. 250 * Remove a memory node from the allocator.
343 */ 251 */
@@ -375,28 +283,6 @@ void drm_mm_remove_node(struct drm_mm_node *node)
375} 283}
376EXPORT_SYMBOL(drm_mm_remove_node); 284EXPORT_SYMBOL(drm_mm_remove_node);
377 285
378/*
379 * Remove a memory node from the allocator and free the allocated struct
380 * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
381 * drm_mm_get_block functions.
382 */
383void drm_mm_put_block(struct drm_mm_node *node)
384{
385
386 struct drm_mm *mm = node->mm;
387
388 drm_mm_remove_node(node);
389
390 spin_lock(&mm->unused_lock);
391 if (mm->num_unused < MM_UNUSED_TARGET) {
392 list_add(&node->node_list, &mm->unused_nodes);
393 ++mm->num_unused;
394 } else
395 kfree(node);
396 spin_unlock(&mm->unused_lock);
397}
398EXPORT_SYMBOL(drm_mm_put_block);
399
400static int check_free_hole(unsigned long start, unsigned long end, 286static int check_free_hole(unsigned long start, unsigned long end,
401 unsigned long size, unsigned alignment) 287 unsigned long size, unsigned alignment)
402{ 288{
@@ -412,11 +298,11 @@ static int check_free_hole(unsigned long start, unsigned long end,
412 return end >= start + size; 298 return end >= start + size;
413} 299}
414 300
415struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm, 301static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
416 unsigned long size, 302 unsigned long size,
417 unsigned alignment, 303 unsigned alignment,
418 unsigned long color, 304 unsigned long color,
419 bool best_match) 305 enum drm_mm_search_flags flags)
420{ 306{
421 struct drm_mm_node *entry; 307 struct drm_mm_node *entry;
422 struct drm_mm_node *best; 308 struct drm_mm_node *best;
@@ -439,7 +325,7 @@ struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
439 if (!check_free_hole(adj_start, adj_end, size, alignment)) 325 if (!check_free_hole(adj_start, adj_end, size, alignment))
440 continue; 326 continue;
441 327
442 if (!best_match) 328 if (!(flags & DRM_MM_SEARCH_BEST))
443 return entry; 329 return entry;
444 330
445 if (entry->size < best_size) { 331 if (entry->size < best_size) {
@@ -450,15 +336,14 @@ struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
450 336
451 return best; 337 return best;
452} 338}
453EXPORT_SYMBOL(drm_mm_search_free_generic);
454 339
455struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm, 340static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
456 unsigned long size, 341 unsigned long size,
457 unsigned alignment, 342 unsigned alignment,
458 unsigned long color, 343 unsigned long color,
459 unsigned long start, 344 unsigned long start,
460 unsigned long end, 345 unsigned long end,
461 bool best_match) 346 enum drm_mm_search_flags flags)
462{ 347{
463 struct drm_mm_node *entry; 348 struct drm_mm_node *entry;
464 struct drm_mm_node *best; 349 struct drm_mm_node *best;
@@ -486,7 +371,7 @@ struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
486 if (!check_free_hole(adj_start, adj_end, size, alignment)) 371 if (!check_free_hole(adj_start, adj_end, size, alignment))
487 continue; 372 continue;
488 373
489 if (!best_match) 374 if (!(flags & DRM_MM_SEARCH_BEST))
490 return entry; 375 return entry;
491 376
492 if (entry->size < best_size) { 377 if (entry->size < best_size) {
@@ -497,7 +382,6 @@ struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
497 382
498 return best; 383 return best;
499} 384}
500EXPORT_SYMBOL(drm_mm_search_free_in_range_generic);
501 385
502/** 386/**
503 * Moves an allocation. To be used with embedded struct drm_mm_node. 387 * Moves an allocation. To be used with embedded struct drm_mm_node.
@@ -632,8 +516,8 @@ EXPORT_SYMBOL(drm_mm_scan_add_block);
632 * corrupted. 516 * corrupted.
633 * 517 *
634 * When the scan list is empty, the selected memory nodes can be freed. An 518 * When the scan list is empty, the selected memory nodes can be freed. An
635 * immediately following drm_mm_search_free with best_match = 0 will then return 519 * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
636 * the just freed block (because its at the top of the free_stack list). 520 * return the just freed block (because its at the top of the free_stack list).
637 * 521 *
638 * Returns one if this block should be evicted, zero otherwise. Will always 522 * Returns one if this block should be evicted, zero otherwise. Will always
639 * return zero when no hole has been found. 523 * return zero when no hole has been found.
@@ -670,10 +554,7 @@ EXPORT_SYMBOL(drm_mm_clean);
670void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size) 554void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
671{ 555{
672 INIT_LIST_HEAD(&mm->hole_stack); 556 INIT_LIST_HEAD(&mm->hole_stack);
673 INIT_LIST_HEAD(&mm->unused_nodes);
674 mm->num_unused = 0;
675 mm->scanned_blocks = 0; 557 mm->scanned_blocks = 0;
676 spin_lock_init(&mm->unused_lock);
677 558
678 /* Clever trick to avoid a special case in the free hole tracking. */ 559 /* Clever trick to avoid a special case in the free hole tracking. */
679 INIT_LIST_HEAD(&mm->head_node.node_list); 560 INIT_LIST_HEAD(&mm->head_node.node_list);
@@ -693,22 +574,8 @@ EXPORT_SYMBOL(drm_mm_init);
693 574
694void drm_mm_takedown(struct drm_mm * mm) 575void drm_mm_takedown(struct drm_mm * mm)
695{ 576{
696 struct drm_mm_node *entry, *next; 577 WARN(!list_empty(&mm->head_node.node_list),
697 578 "Memory manager not clean during takedown.\n");
698 if (WARN(!list_empty(&mm->head_node.node_list),
699 "Memory manager not clean. Delaying takedown\n")) {
700 return;
701 }
702
703 spin_lock(&mm->unused_lock);
704 list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
705 list_del(&entry->node_list);
706 kfree(entry);
707 --mm->num_unused;
708 }
709 spin_unlock(&mm->unused_lock);
710
711 BUG_ON(mm->num_unused != 0);
712} 579}
713EXPORT_SYMBOL(drm_mm_takedown); 580EXPORT_SYMBOL(drm_mm_takedown);
714 581
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index a6729bfe6860..fc2adb62b757 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -596,27 +596,6 @@ void drm_mode_set_name(struct drm_display_mode *mode)
596EXPORT_SYMBOL(drm_mode_set_name); 596EXPORT_SYMBOL(drm_mode_set_name);
597 597
598/** 598/**
599 * drm_mode_list_concat - move modes from one list to another
600 * @head: source list
601 * @new: dst list
602 *
603 * LOCKING:
604 * Caller must ensure both lists are locked.
605 *
606 * Move all the modes from @head to @new.
607 */
608void drm_mode_list_concat(struct list_head *head, struct list_head *new)
609{
610
611 struct list_head *entry, *tmp;
612
613 list_for_each_safe(entry, tmp, head) {
614 list_move_tail(entry, new);
615 }
616}
617EXPORT_SYMBOL(drm_mode_list_concat);
618
619/**
620 * drm_mode_width - get the width of a mode 599 * drm_mode_width - get the width of a mode
621 * @mode: mode 600 * @mode: mode
622 * 601 *
@@ -923,43 +902,6 @@ void drm_mode_validate_size(struct drm_device *dev,
923EXPORT_SYMBOL(drm_mode_validate_size); 902EXPORT_SYMBOL(drm_mode_validate_size);
924 903
925/** 904/**
926 * drm_mode_validate_clocks - validate modes against clock limits
927 * @dev: DRM device
928 * @mode_list: list of modes to check
929 * @min: minimum clock rate array
930 * @max: maximum clock rate array
931 * @n_ranges: number of clock ranges (size of arrays)
932 *
933 * LOCKING:
934 * Caller must hold a lock protecting @mode_list.
935 *
936 * Some code may need to check a mode list against the clock limits of the
937 * device in question. This function walks the mode list, testing to make
938 * sure each mode falls within a given range (defined by @min and @max
939 * arrays) and sets @mode->status as needed.
940 */
941void drm_mode_validate_clocks(struct drm_device *dev,
942 struct list_head *mode_list,
943 int *min, int *max, int n_ranges)
944{
945 struct drm_display_mode *mode;
946 int i;
947
948 list_for_each_entry(mode, mode_list, head) {
949 bool good = false;
950 for (i = 0; i < n_ranges; i++) {
951 if (mode->clock >= min[i] && mode->clock <= max[i]) {
952 good = true;
953 break;
954 }
955 }
956 if (!good)
957 mode->status = MODE_CLOCK_RANGE;
958 }
959}
960EXPORT_SYMBOL(drm_mode_validate_clocks);
961
962/**
963 * drm_mode_prune_invalid - remove invalid modes from mode list 905 * drm_mode_prune_invalid - remove invalid modes from mode list
964 * @dev: DRM device 906 * @dev: DRM device
965 * @mode_list: list of modes to check 907 * @mode_list: list of modes to check
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 80c0b2b29801..1f96cee6eee8 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -52,10 +52,8 @@
52drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align) 52drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align)
53{ 53{
54 drm_dma_handle_t *dmah; 54 drm_dma_handle_t *dmah;
55#if 1
56 unsigned long addr; 55 unsigned long addr;
57 size_t sz; 56 size_t sz;
58#endif
59 57
60 /* pci_alloc_consistent only guarantees alignment to the smallest 58 /* pci_alloc_consistent only guarantees alignment to the smallest
61 * PAGE_SIZE order which is greater than or equal to the requested size. 59 * PAGE_SIZE order which is greater than or equal to the requested size.
@@ -97,10 +95,8 @@ EXPORT_SYMBOL(drm_pci_alloc);
97 */ 95 */
98void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah) 96void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
99{ 97{
100#if 1
101 unsigned long addr; 98 unsigned long addr;
102 size_t sz; 99 size_t sz;
103#endif
104 100
105 if (dmah->vaddr) { 101 if (dmah->vaddr) {
106 /* XXX - Is virt_to_page() legal for consistent mem? */ 102 /* XXX - Is virt_to_page() legal for consistent mem? */
@@ -276,17 +272,26 @@ static int drm_pci_agp_init(struct drm_device *dev)
276 DRM_ERROR("Cannot initialize the agpgart module.\n"); 272 DRM_ERROR("Cannot initialize the agpgart module.\n");
277 return -EINVAL; 273 return -EINVAL;
278 } 274 }
279 if (drm_core_has_MTRR(dev)) { 275 if (dev->agp) {
280 if (dev->agp) 276 dev->agp->agp_mtrr = arch_phys_wc_add(
281 dev->agp->agp_mtrr = arch_phys_wc_add( 277 dev->agp->agp_info.aper_base,
282 dev->agp->agp_info.aper_base, 278 dev->agp->agp_info.aper_size *
283 dev->agp->agp_info.aper_size * 279 1024 * 1024);
284 1024 * 1024);
285 } 280 }
286 } 281 }
287 return 0; 282 return 0;
288} 283}
289 284
285static void drm_pci_agp_destroy(struct drm_device *dev)
286{
287 if (drm_core_has_AGP(dev) && dev->agp) {
288 arch_phys_wc_del(dev->agp->agp_mtrr);
289 drm_agp_clear(dev);
290 drm_agp_destroy(dev->agp);
291 dev->agp = NULL;
292 }
293}
294
290static struct drm_bus drm_pci_bus = { 295static struct drm_bus drm_pci_bus = {
291 .bus_type = DRIVER_BUS_PCI, 296 .bus_type = DRIVER_BUS_PCI,
292 .get_irq = drm_pci_get_irq, 297 .get_irq = drm_pci_get_irq,
@@ -295,6 +300,7 @@ static struct drm_bus drm_pci_bus = {
295 .set_unique = drm_pci_set_unique, 300 .set_unique = drm_pci_set_unique,
296 .irq_by_busid = drm_pci_irq_by_busid, 301 .irq_by_busid = drm_pci_irq_by_busid,
297 .agp_init = drm_pci_agp_init, 302 .agp_init = drm_pci_agp_init,
303 .agp_destroy = drm_pci_agp_destroy,
298}; 304};
299 305
300/** 306/**
@@ -348,6 +354,12 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
348 goto err_g2; 354 goto err_g2;
349 } 355 }
350 356
357 if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
358 ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
359 if (ret)
360 goto err_g21;
361 }
362
351 if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY))) 363 if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY)))
352 goto err_g3; 364 goto err_g3;
353 365
@@ -377,6 +389,9 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
377err_g4: 389err_g4:
378 drm_put_minor(&dev->primary); 390 drm_put_minor(&dev->primary);
379err_g3: 391err_g3:
392 if (dev->render)
393 drm_put_minor(&dev->render);
394err_g21:
380 if (drm_core_check_feature(dev, DRIVER_MODESET)) 395 if (drm_core_check_feature(dev, DRIVER_MODESET))
381 drm_put_minor(&dev->control); 396 drm_put_minor(&dev->control);
382err_g2: 397err_g2:
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
index b8a282ea8751..f7a18c6ba4c4 100644
--- a/drivers/gpu/drm/drm_platform.c
+++ b/drivers/gpu/drm/drm_platform.c
@@ -28,7 +28,7 @@
28#include <linux/export.h> 28#include <linux/export.h>
29#include <drm/drmP.h> 29#include <drm/drmP.h>
30 30
31/** 31/*
32 * Register. 32 * Register.
33 * 33 *
34 * \param platdev - Platform device struture 34 * \param platdev - Platform device struture
@@ -39,8 +39,8 @@
39 * Try and register, if we fail to register, backout previous work. 39 * Try and register, if we fail to register, backout previous work.
40 */ 40 */
41 41
42int drm_get_platform_dev(struct platform_device *platdev, 42static int drm_get_platform_dev(struct platform_device *platdev,
43 struct drm_driver *driver) 43 struct drm_driver *driver)
44{ 44{
45 struct drm_device *dev; 45 struct drm_device *dev;
46 int ret; 46 int ret;
@@ -69,6 +69,12 @@ int drm_get_platform_dev(struct platform_device *platdev,
69 goto err_g1; 69 goto err_g1;
70 } 70 }
71 71
72 if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
73 ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
74 if (ret)
75 goto err_g11;
76 }
77
72 ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY); 78 ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
73 if (ret) 79 if (ret)
74 goto err_g2; 80 goto err_g2;
@@ -100,6 +106,9 @@ int drm_get_platform_dev(struct platform_device *platdev,
100err_g3: 106err_g3:
101 drm_put_minor(&dev->primary); 107 drm_put_minor(&dev->primary);
102err_g2: 108err_g2:
109 if (dev->render)
110 drm_put_minor(&dev->render);
111err_g11:
103 if (drm_core_check_feature(dev, DRIVER_MODESET)) 112 if (drm_core_check_feature(dev, DRIVER_MODESET))
104 drm_put_minor(&dev->control); 113 drm_put_minor(&dev->control);
105err_g1: 114err_g1:
@@ -107,7 +116,6 @@ err_g1:
107 mutex_unlock(&drm_global_mutex); 116 mutex_unlock(&drm_global_mutex);
108 return ret; 117 return ret;
109} 118}
110EXPORT_SYMBOL(drm_get_platform_dev);
111 119
112static int drm_platform_get_irq(struct drm_device *dev) 120static int drm_platform_get_irq(struct drm_device *dev)
113{ 121{
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 85e450e3241c..276d470f7b3e 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -83,6 +83,34 @@ static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
83 return 0; 83 return 0;
84} 84}
85 85
86static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
87 uint32_t handle)
88{
89 struct drm_prime_member *member;
90
91 list_for_each_entry(member, &prime_fpriv->head, entry) {
92 if (member->handle == handle)
93 return member->dma_buf;
94 }
95
96 return NULL;
97}
98
99static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
100 struct dma_buf *dma_buf,
101 uint32_t *handle)
102{
103 struct drm_prime_member *member;
104
105 list_for_each_entry(member, &prime_fpriv->head, entry) {
106 if (member->dma_buf == dma_buf) {
107 *handle = member->handle;
108 return 0;
109 }
110 }
111 return -ENOENT;
112}
113
86static int drm_gem_map_attach(struct dma_buf *dma_buf, 114static int drm_gem_map_attach(struct dma_buf *dma_buf,
87 struct device *target_dev, 115 struct device *target_dev,
88 struct dma_buf_attachment *attach) 116 struct dma_buf_attachment *attach)
@@ -131,9 +159,8 @@ static void drm_gem_map_detach(struct dma_buf *dma_buf,
131 attach->priv = NULL; 159 attach->priv = NULL;
132} 160}
133 161
134static void drm_prime_remove_buf_handle_locked( 162void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
135 struct drm_prime_file_private *prime_fpriv, 163 struct dma_buf *dma_buf)
136 struct dma_buf *dma_buf)
137{ 164{
138 struct drm_prime_member *member, *safe; 165 struct drm_prime_member *member, *safe;
139 166
@@ -167,8 +194,6 @@ static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
167 if (WARN_ON(prime_attach->dir != DMA_NONE)) 194 if (WARN_ON(prime_attach->dir != DMA_NONE))
168 return ERR_PTR(-EBUSY); 195 return ERR_PTR(-EBUSY);
169 196
170 mutex_lock(&obj->dev->struct_mutex);
171
172 sgt = obj->dev->driver->gem_prime_get_sg_table(obj); 197 sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
173 198
174 if (!IS_ERR(sgt)) { 199 if (!IS_ERR(sgt)) {
@@ -182,7 +207,6 @@ static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
182 } 207 }
183 } 208 }
184 209
185 mutex_unlock(&obj->dev->struct_mutex);
186 return sgt; 210 return sgt;
187} 211}
188 212
@@ -192,16 +216,14 @@ static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
192 /* nothing to be done here */ 216 /* nothing to be done here */
193} 217}
194 218
195static void drm_gem_dmabuf_release(struct dma_buf *dma_buf) 219void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
196{ 220{
197 struct drm_gem_object *obj = dma_buf->priv; 221 struct drm_gem_object *obj = dma_buf->priv;
198 222
199 if (obj->export_dma_buf == dma_buf) { 223 /* drop the reference on the export fd holds */
200 /* drop the reference on the export fd holds */ 224 drm_gem_object_unreference_unlocked(obj);
201 obj->export_dma_buf = NULL;
202 drm_gem_object_unreference_unlocked(obj);
203 }
204} 225}
226EXPORT_SYMBOL(drm_gem_dmabuf_release);
205 227
206static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf) 228static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
207{ 229{
@@ -300,62 +322,107 @@ struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
300} 322}
301EXPORT_SYMBOL(drm_gem_prime_export); 323EXPORT_SYMBOL(drm_gem_prime_export);
302 324
325static struct dma_buf *export_and_register_object(struct drm_device *dev,
326 struct drm_gem_object *obj,
327 uint32_t flags)
328{
329 struct dma_buf *dmabuf;
330
331 /* prevent races with concurrent gem_close. */
332 if (obj->handle_count == 0) {
333 dmabuf = ERR_PTR(-ENOENT);
334 return dmabuf;
335 }
336
337 dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
338 if (IS_ERR(dmabuf)) {
339 /* normally the created dma-buf takes ownership of the ref,
340 * but if that fails then drop the ref
341 */
342 return dmabuf;
343 }
344
345 /*
346 * Note that callers do not need to clean up the export cache
347 * since the check for obj->handle_count guarantees that someone
348 * will clean it up.
349 */
350 obj->dma_buf = dmabuf;
351 get_dma_buf(obj->dma_buf);
352 /* Grab a new ref since the callers is now used by the dma-buf */
353 drm_gem_object_reference(obj);
354
355 return dmabuf;
356}
357
303int drm_gem_prime_handle_to_fd(struct drm_device *dev, 358int drm_gem_prime_handle_to_fd(struct drm_device *dev,
304 struct drm_file *file_priv, uint32_t handle, uint32_t flags, 359 struct drm_file *file_priv, uint32_t handle, uint32_t flags,
305 int *prime_fd) 360 int *prime_fd)
306{ 361{
307 struct drm_gem_object *obj; 362 struct drm_gem_object *obj;
308 void *buf;
309 int ret = 0; 363 int ret = 0;
310 struct dma_buf *dmabuf; 364 struct dma_buf *dmabuf;
311 365
366 mutex_lock(&file_priv->prime.lock);
312 obj = drm_gem_object_lookup(dev, file_priv, handle); 367 obj = drm_gem_object_lookup(dev, file_priv, handle);
313 if (!obj) 368 if (!obj) {
314 return -ENOENT; 369 ret = -ENOENT;
370 goto out_unlock;
371 }
315 372
316 mutex_lock(&file_priv->prime.lock); 373 dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
374 if (dmabuf) {
375 get_dma_buf(dmabuf);
376 goto out_have_handle;
377 }
378
379 mutex_lock(&dev->object_name_lock);
317 /* re-export the original imported object */ 380 /* re-export the original imported object */
318 if (obj->import_attach) { 381 if (obj->import_attach) {
319 dmabuf = obj->import_attach->dmabuf; 382 dmabuf = obj->import_attach->dmabuf;
383 get_dma_buf(dmabuf);
320 goto out_have_obj; 384 goto out_have_obj;
321 } 385 }
322 386
323 if (obj->export_dma_buf) { 387 if (obj->dma_buf) {
324 dmabuf = obj->export_dma_buf; 388 get_dma_buf(obj->dma_buf);
389 dmabuf = obj->dma_buf;
325 goto out_have_obj; 390 goto out_have_obj;
326 } 391 }
327 392
328 buf = dev->driver->gem_prime_export(dev, obj, flags); 393 dmabuf = export_and_register_object(dev, obj, flags);
329 if (IS_ERR(buf)) { 394 if (IS_ERR(dmabuf)) {
330 /* normally the created dma-buf takes ownership of the ref, 395 /* normally the created dma-buf takes ownership of the ref,
331 * but if that fails then drop the ref 396 * but if that fails then drop the ref
332 */ 397 */
333 ret = PTR_ERR(buf); 398 ret = PTR_ERR(dmabuf);
399 mutex_unlock(&dev->object_name_lock);
334 goto out; 400 goto out;
335 } 401 }
336 obj->export_dma_buf = buf;
337 402
338 /* if we've exported this buffer the cheat and add it to the import list 403out_have_obj:
339 * so we get the correct handle back 404 /*
405 * If we've exported this buffer then cheat and add it to the import list
406 * so we get the correct handle back. We must do this under the
407 * protection of dev->object_name_lock to ensure that a racing gem close
408 * ioctl doesn't miss to remove this buffer handle from the cache.
340 */ 409 */
341 ret = drm_prime_add_buf_handle(&file_priv->prime, 410 ret = drm_prime_add_buf_handle(&file_priv->prime,
342 obj->export_dma_buf, handle); 411 dmabuf, handle);
412 mutex_unlock(&dev->object_name_lock);
343 if (ret) 413 if (ret)
344 goto fail_put_dmabuf; 414 goto fail_put_dmabuf;
345 415
346 ret = dma_buf_fd(buf, flags); 416out_have_handle:
347 if (ret < 0)
348 goto fail_rm_handle;
349
350 *prime_fd = ret;
351 mutex_unlock(&file_priv->prime.lock);
352 return 0;
353
354out_have_obj:
355 get_dma_buf(dmabuf);
356 ret = dma_buf_fd(dmabuf, flags); 417 ret = dma_buf_fd(dmabuf, flags);
418 /*
419 * We must _not_ remove the buffer from the handle cache since the newly
420 * created dma buf is already linked in the global obj->dma_buf pointer,
421 * and that is invariant as long as a userspace gem handle exists.
422 * Closing the handle will clean out the cache anyway, so we don't leak.
423 */
357 if (ret < 0) { 424 if (ret < 0) {
358 dma_buf_put(dmabuf); 425 goto fail_put_dmabuf;
359 } else { 426 } else {
360 *prime_fd = ret; 427 *prime_fd = ret;
361 ret = 0; 428 ret = 0;
@@ -363,15 +430,13 @@ out_have_obj:
363 430
364 goto out; 431 goto out;
365 432
366fail_rm_handle:
367 drm_prime_remove_buf_handle_locked(&file_priv->prime, buf);
368fail_put_dmabuf: 433fail_put_dmabuf:
369 /* clear NOT to be checked when releasing dma_buf */ 434 dma_buf_put(dmabuf);
370 obj->export_dma_buf = NULL;
371 dma_buf_put(buf);
372out: 435out:
373 drm_gem_object_unreference_unlocked(obj); 436 drm_gem_object_unreference_unlocked(obj);
437out_unlock:
374 mutex_unlock(&file_priv->prime.lock); 438 mutex_unlock(&file_priv->prime.lock);
439
375 return ret; 440 return ret;
376} 441}
377EXPORT_SYMBOL(drm_gem_prime_handle_to_fd); 442EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
@@ -446,19 +511,26 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
446 511
447 ret = drm_prime_lookup_buf_handle(&file_priv->prime, 512 ret = drm_prime_lookup_buf_handle(&file_priv->prime,
448 dma_buf, handle); 513 dma_buf, handle);
449 if (!ret) { 514 if (ret == 0)
450 ret = 0;
451 goto out_put; 515 goto out_put;
452 }
453 516
454 /* never seen this one, need to import */ 517 /* never seen this one, need to import */
518 mutex_lock(&dev->object_name_lock);
455 obj = dev->driver->gem_prime_import(dev, dma_buf); 519 obj = dev->driver->gem_prime_import(dev, dma_buf);
456 if (IS_ERR(obj)) { 520 if (IS_ERR(obj)) {
457 ret = PTR_ERR(obj); 521 ret = PTR_ERR(obj);
458 goto out_put; 522 goto out_unlock;
459 } 523 }
460 524
461 ret = drm_gem_handle_create(file_priv, obj, handle); 525 if (obj->dma_buf) {
526 WARN_ON(obj->dma_buf != dma_buf);
527 } else {
528 obj->dma_buf = dma_buf;
529 get_dma_buf(dma_buf);
530 }
531
532 /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
533 ret = drm_gem_handle_create_tail(file_priv, obj, handle);
462 drm_gem_object_unreference_unlocked(obj); 534 drm_gem_object_unreference_unlocked(obj);
463 if (ret) 535 if (ret)
464 goto out_put; 536 goto out_put;
@@ -478,7 +550,9 @@ fail:
478 /* hmm, if driver attached, we are relying on the free-object path 550 /* hmm, if driver attached, we are relying on the free-object path
479 * to detach.. which seems ok.. 551 * to detach.. which seems ok..
480 */ 552 */
481 drm_gem_object_handle_unreference_unlocked(obj); 553 drm_gem_handle_delete(file_priv, *handle);
554out_unlock:
555 mutex_unlock(&dev->object_name_lock);
482out_put: 556out_put:
483 dma_buf_put(dma_buf); 557 dma_buf_put(dma_buf);
484 mutex_unlock(&file_priv->prime.lock); 558 mutex_unlock(&file_priv->prime.lock);
@@ -618,25 +692,3 @@ void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
618 WARN_ON(!list_empty(&prime_fpriv->head)); 692 WARN_ON(!list_empty(&prime_fpriv->head));
619} 693}
620EXPORT_SYMBOL(drm_prime_destroy_file_private); 694EXPORT_SYMBOL(drm_prime_destroy_file_private);
621
622int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle)
623{
624 struct drm_prime_member *member;
625
626 list_for_each_entry(member, &prime_fpriv->head, entry) {
627 if (member->dma_buf == dma_buf) {
628 *handle = member->handle;
629 return 0;
630 }
631 }
632 return -ENOENT;
633}
634EXPORT_SYMBOL(drm_prime_lookup_buf_handle);
635
636void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf)
637{
638 mutex_lock(&prime_fpriv->lock);
639 drm_prime_remove_buf_handle_locked(prime_fpriv, dma_buf);
640 mutex_unlock(&prime_fpriv->lock);
641}
642EXPORT_SYMBOL(drm_prime_remove_buf_handle);
diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c
deleted file mode 100644
index d7f2324b4fb1..000000000000
--- a/drivers/gpu/drm/drm_proc.c
+++ /dev/null
@@ -1,209 +0,0 @@
1/**
2 * \file drm_proc.c
3 * /proc support for DRM
4 *
5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
6 * \author Gareth Hughes <gareth@valinux.com>
7 *
8 * \par Acknowledgements:
9 * Matthew J Sottek <matthew.j.sottek@intel.com> sent in a patch to fix
10 * the problem with the proc files not outputting all their information.
11 */
12
13/*
14 * Created: Mon Jan 11 09:48:47 1999 by faith@valinux.com
15 *
16 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
17 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
18 * All Rights Reserved.
19 *
20 * Permission is hereby granted, free of charge, to any person obtaining a
21 * copy of this software and associated documentation files (the "Software"),
22 * to deal in the Software without restriction, including without limitation
23 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
24 * and/or sell copies of the Software, and to permit persons to whom the
25 * Software is furnished to do so, subject to the following conditions:
26 *
27 * The above copyright notice and this permission notice (including the next
28 * paragraph) shall be included in all copies or substantial portions of the
29 * Software.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
32 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
33 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
34 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
35 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
36 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
37 * OTHER DEALINGS IN THE SOFTWARE.
38 */
39
40#include <linux/seq_file.h>
41#include <linux/slab.h>
42#include <linux/export.h>
43#include <drm/drmP.h>
44
45/***************************************************
46 * Initialization, etc.
47 **************************************************/
48
49/**
50 * Proc file list.
51 */
52static const struct drm_info_list drm_proc_list[] = {
53 {"name", drm_name_info, 0},
54 {"vm", drm_vm_info, 0},
55 {"clients", drm_clients_info, 0},
56 {"bufs", drm_bufs_info, 0},
57 {"gem_names", drm_gem_name_info, DRIVER_GEM},
58#if DRM_DEBUG_CODE
59 {"vma", drm_vma_info, 0},
60#endif
61};
62#define DRM_PROC_ENTRIES ARRAY_SIZE(drm_proc_list)
63
64static int drm_proc_open(struct inode *inode, struct file *file)
65{
66 struct drm_info_node* node = PDE_DATA(inode);
67
68 return single_open(file, node->info_ent->show, node);
69}
70
71static const struct file_operations drm_proc_fops = {
72 .owner = THIS_MODULE,
73 .open = drm_proc_open,
74 .read = seq_read,
75 .llseek = seq_lseek,
76 .release = single_release,
77};
78
79
80/**
81 * Initialize a given set of proc files for a device
82 *
83 * \param files The array of files to create
84 * \param count The number of files given
85 * \param root DRI proc dir entry.
86 * \param minor device minor number
87 * \return Zero on success, non-zero on failure
88 *
89 * Create a given set of proc files represented by an array of
90 * gdm_proc_lists in the given root directory.
91 */
92static int drm_proc_create_files(const struct drm_info_list *files, int count,
93 struct proc_dir_entry *root, struct drm_minor *minor)
94{
95 struct drm_device *dev = minor->dev;
96 struct proc_dir_entry *ent;
97 struct drm_info_node *tmp;
98 int i;
99
100 for (i = 0; i < count; i++) {
101 u32 features = files[i].driver_features;
102
103 if (features != 0 &&
104 (dev->driver->driver_features & features) != features)
105 continue;
106
107 tmp = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
108 if (!tmp)
109 return -1;
110
111 tmp->minor = minor;
112 tmp->info_ent = &files[i];
113 list_add(&tmp->list, &minor->proc_nodes.list);
114
115 ent = proc_create_data(files[i].name, S_IRUGO, root,
116 &drm_proc_fops, tmp);
117 if (!ent) {
118 DRM_ERROR("Cannot create /proc/dri/%u/%s\n",
119 minor->index, files[i].name);
120 list_del(&tmp->list);
121 kfree(tmp);
122 return -1;
123 }
124 }
125 return 0;
126}
127
128/**
129 * Initialize the DRI proc filesystem for a device
130 *
131 * \param dev DRM device
132 * \param root DRI proc dir entry.
133 * \param dev_root resulting DRI device proc dir entry.
134 * \return root entry pointer on success, or NULL on failure.
135 *
136 * Create the DRI proc root entry "/proc/dri", the device proc root entry
137 * "/proc/dri/%minor%/", and each entry in proc_list as
138 * "/proc/dri/%minor%/%name%".
139 */
140int drm_proc_init(struct drm_minor *minor, struct proc_dir_entry *root)
141{
142 char name[12];
143 int ret;
144
145 INIT_LIST_HEAD(&minor->proc_nodes.list);
146 sprintf(name, "%u", minor->index);
147 minor->proc_root = proc_mkdir(name, root);
148 if (!minor->proc_root) {
149 DRM_ERROR("Cannot create /proc/dri/%s\n", name);
150 return -1;
151 }
152
153 ret = drm_proc_create_files(drm_proc_list, DRM_PROC_ENTRIES,
154 minor->proc_root, minor);
155 if (ret) {
156 remove_proc_subtree(name, root);
157 minor->proc_root = NULL;
158 DRM_ERROR("Failed to create core drm proc files\n");
159 return ret;
160 }
161
162 return 0;
163}
164
165static int drm_proc_remove_files(const struct drm_info_list *files, int count,
166 struct drm_minor *minor)
167{
168 struct list_head *pos, *q;
169 struct drm_info_node *tmp;
170 int i;
171
172 for (i = 0; i < count; i++) {
173 list_for_each_safe(pos, q, &minor->proc_nodes.list) {
174 tmp = list_entry(pos, struct drm_info_node, list);
175 if (tmp->info_ent == &files[i]) {
176 remove_proc_entry(files[i].name,
177 minor->proc_root);
178 list_del(pos);
179 kfree(tmp);
180 }
181 }
182 }
183 return 0;
184}
185
186/**
187 * Cleanup the proc filesystem resources.
188 *
189 * \param minor device minor number.
190 * \param root DRI proc dir entry.
191 * \param dev_root DRI device proc dir entry.
192 * \return always zero.
193 *
194 * Remove all proc entries created by proc_init().
195 */
196int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root)
197{
198 char name[64];
199
200 if (!root || !minor->proc_root)
201 return 0;
202
203 drm_proc_remove_files(drm_proc_list, DRM_PROC_ENTRIES, minor);
204
205 sprintf(name, "%d", minor->index);
206 remove_proc_subtree(name, root);
207 return 0;
208}
209
diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c
index d87f60bbc330..1c78406f6e71 100644
--- a/drivers/gpu/drm/drm_scatter.c
+++ b/drivers/gpu/drm/drm_scatter.c
@@ -46,7 +46,7 @@ static inline void *drm_vmalloc_dma(unsigned long size)
46#endif 46#endif
47} 47}
48 48
49void drm_sg_cleanup(struct drm_sg_mem * entry) 49static void drm_sg_cleanup(struct drm_sg_mem * entry)
50{ 50{
51 struct page *page; 51 struct page *page;
52 int i; 52 int i;
@@ -64,19 +64,32 @@ void drm_sg_cleanup(struct drm_sg_mem * entry)
64 kfree(entry); 64 kfree(entry);
65} 65}
66 66
67void drm_legacy_sg_cleanup(struct drm_device *dev)
68{
69 if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg &&
70 !drm_core_check_feature(dev, DRIVER_MODESET)) {
71 drm_sg_cleanup(dev->sg);
72 dev->sg = NULL;
73 }
74}
67#ifdef _LP64 75#ifdef _LP64
68# define ScatterHandle(x) (unsigned int)((x >> 32) + (x & ((1L << 32) - 1))) 76# define ScatterHandle(x) (unsigned int)((x >> 32) + (x & ((1L << 32) - 1)))
69#else 77#else
70# define ScatterHandle(x) (unsigned int)(x) 78# define ScatterHandle(x) (unsigned int)(x)
71#endif 79#endif
72 80
73int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request) 81int drm_sg_alloc(struct drm_device *dev, void *data,
82 struct drm_file *file_priv)
74{ 83{
84 struct drm_scatter_gather *request = data;
75 struct drm_sg_mem *entry; 85 struct drm_sg_mem *entry;
76 unsigned long pages, i, j; 86 unsigned long pages, i, j;
77 87
78 DRM_DEBUG("\n"); 88 DRM_DEBUG("\n");
79 89
90 if (drm_core_check_feature(dev, DRIVER_MODESET))
91 return -EINVAL;
92
80 if (!drm_core_check_feature(dev, DRIVER_SG)) 93 if (!drm_core_check_feature(dev, DRIVER_SG))
81 return -EINVAL; 94 return -EINVAL;
82 95
@@ -181,21 +194,15 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
181 return -ENOMEM; 194 return -ENOMEM;
182} 195}
183 196
184int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
185 struct drm_file *file_priv)
186{
187 struct drm_scatter_gather *request = data;
188
189 return drm_sg_alloc(dev, request);
190
191}
192
193int drm_sg_free(struct drm_device *dev, void *data, 197int drm_sg_free(struct drm_device *dev, void *data,
194 struct drm_file *file_priv) 198 struct drm_file *file_priv)
195{ 199{
196 struct drm_scatter_gather *request = data; 200 struct drm_scatter_gather *request = data;
197 struct drm_sg_mem *entry; 201 struct drm_sg_mem *entry;
198 202
203 if (drm_core_check_feature(dev, DRIVER_MODESET))
204 return -EINVAL;
205
199 if (!drm_core_check_feature(dev, DRIVER_SG)) 206 if (!drm_core_check_feature(dev, DRIVER_SG))
200 return -EINVAL; 207 return -EINVAL;
201 208
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 327ca19cda85..e7eb0276f7f1 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -40,6 +40,9 @@
40unsigned int drm_debug = 0; /* 1 to enable debug output */ 40unsigned int drm_debug = 0; /* 1 to enable debug output */
41EXPORT_SYMBOL(drm_debug); 41EXPORT_SYMBOL(drm_debug);
42 42
43unsigned int drm_rnodes = 0; /* 1 to enable experimental render nodes API */
44EXPORT_SYMBOL(drm_rnodes);
45
43unsigned int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */ 46unsigned int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */
44EXPORT_SYMBOL(drm_vblank_offdelay); 47EXPORT_SYMBOL(drm_vblank_offdelay);
45 48
@@ -56,11 +59,13 @@ MODULE_AUTHOR(CORE_AUTHOR);
56MODULE_DESCRIPTION(CORE_DESC); 59MODULE_DESCRIPTION(CORE_DESC);
57MODULE_LICENSE("GPL and additional rights"); 60MODULE_LICENSE("GPL and additional rights");
58MODULE_PARM_DESC(debug, "Enable debug output"); 61MODULE_PARM_DESC(debug, "Enable debug output");
62MODULE_PARM_DESC(rnodes, "Enable experimental render nodes API");
59MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]"); 63MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]");
60MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]"); 64MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
61MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps"); 65MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
62 66
63module_param_named(debug, drm_debug, int, 0600); 67module_param_named(debug, drm_debug, int, 0600);
68module_param_named(rnodes, drm_rnodes, int, 0600);
64module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600); 69module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
65module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600); 70module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
66module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600); 71module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
@@ -68,7 +73,6 @@ module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
68struct idr drm_minors_idr; 73struct idr drm_minors_idr;
69 74
70struct class *drm_class; 75struct class *drm_class;
71struct proc_dir_entry *drm_proc_root;
72struct dentry *drm_debugfs_root; 76struct dentry *drm_debugfs_root;
73 77
74int drm_err(const char *func, const char *format, ...) 78int drm_err(const char *func, const char *format, ...)
@@ -113,12 +117,12 @@ static int drm_minor_get_id(struct drm_device *dev, int type)
113 int base = 0, limit = 63; 117 int base = 0, limit = 63;
114 118
115 if (type == DRM_MINOR_CONTROL) { 119 if (type == DRM_MINOR_CONTROL) {
116 base += 64; 120 base += 64;
117 limit = base + 127; 121 limit = base + 63;
118 } else if (type == DRM_MINOR_RENDER) { 122 } else if (type == DRM_MINOR_RENDER) {
119 base += 128; 123 base += 128;
120 limit = base + 255; 124 limit = base + 63;
121 } 125 }
122 126
123 mutex_lock(&dev->struct_mutex); 127 mutex_lock(&dev->struct_mutex);
124 ret = idr_alloc(&drm_minors_idr, NULL, base, limit, GFP_KERNEL); 128 ret = idr_alloc(&drm_minors_idr, NULL, base, limit, GFP_KERNEL);
@@ -288,13 +292,7 @@ int drm_fill_in_dev(struct drm_device *dev,
288 goto error_out_unreg; 292 goto error_out_unreg;
289 } 293 }
290 294
291 295 drm_legacy_ctxbitmap_init(dev);
292
293 retcode = drm_ctxbitmap_init(dev);
294 if (retcode) {
295 DRM_ERROR("Cannot allocate memory for context bitmap.\n");
296 goto error_out_unreg;
297 }
298 296
299 if (driver->driver_features & DRIVER_GEM) { 297 if (driver->driver_features & DRIVER_GEM) {
300 retcode = drm_gem_init(dev); 298 retcode = drm_gem_init(dev);
@@ -321,9 +319,8 @@ EXPORT_SYMBOL(drm_fill_in_dev);
321 * \param sec-minor structure to hold the assigned minor 319 * \param sec-minor structure to hold the assigned minor
322 * \return negative number on failure. 320 * \return negative number on failure.
323 * 321 *
324 * Search an empty entry and initialize it to the given parameters, and 322 * Search an empty entry and initialize it to the given parameters. This
325 * create the proc init entry via proc_init(). This routines assigns 323 * routines assigns minor numbers to secondary heads of multi-headed cards
326 * minor numbers to secondary heads of multi-headed cards
327 */ 324 */
328int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type) 325int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
329{ 326{
@@ -351,20 +348,11 @@ int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
351 348
352 idr_replace(&drm_minors_idr, new_minor, minor_id); 349 idr_replace(&drm_minors_idr, new_minor, minor_id);
353 350
354 if (type == DRM_MINOR_LEGACY) {
355 ret = drm_proc_init(new_minor, drm_proc_root);
356 if (ret) {
357 DRM_ERROR("DRM: Failed to initialize /proc/dri.\n");
358 goto err_mem;
359 }
360 } else
361 new_minor->proc_root = NULL;
362
363#if defined(CONFIG_DEBUG_FS) 351#if defined(CONFIG_DEBUG_FS)
364 ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root); 352 ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root);
365 if (ret) { 353 if (ret) {
366 DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n"); 354 DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
367 goto err_g2; 355 goto err_mem;
368 } 356 }
369#endif 357#endif
370 358
@@ -372,7 +360,7 @@ int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
372 if (ret) { 360 if (ret) {
373 printk(KERN_ERR 361 printk(KERN_ERR
374 "DRM: Error sysfs_device_add.\n"); 362 "DRM: Error sysfs_device_add.\n");
375 goto err_g2; 363 goto err_debugfs;
376 } 364 }
377 *minor = new_minor; 365 *minor = new_minor;
378 366
@@ -380,10 +368,11 @@ int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
380 return 0; 368 return 0;
381 369
382 370
383err_g2: 371err_debugfs:
384 if (new_minor->type == DRM_MINOR_LEGACY) 372#if defined(CONFIG_DEBUG_FS)
385 drm_proc_cleanup(new_minor, drm_proc_root); 373 drm_debugfs_cleanup(new_minor);
386err_mem: 374err_mem:
375#endif
387 kfree(new_minor); 376 kfree(new_minor);
388err_idr: 377err_idr:
389 idr_remove(&drm_minors_idr, minor_id); 378 idr_remove(&drm_minors_idr, minor_id);
@@ -397,10 +386,6 @@ EXPORT_SYMBOL(drm_get_minor);
397 * 386 *
398 * \param sec_minor - structure to be released 387 * \param sec_minor - structure to be released
399 * \return always zero 388 * \return always zero
400 *
401 * Cleans up the proc resources. Not legal for this to be the
402 * last minor released.
403 *
404 */ 389 */
405int drm_put_minor(struct drm_minor **minor_p) 390int drm_put_minor(struct drm_minor **minor_p)
406{ 391{
@@ -408,8 +393,6 @@ int drm_put_minor(struct drm_minor **minor_p)
408 393
409 DRM_DEBUG("release secondary minor %d\n", minor->index); 394 DRM_DEBUG("release secondary minor %d\n", minor->index);
410 395
411 if (minor->type == DRM_MINOR_LEGACY)
412 drm_proc_cleanup(minor, drm_proc_root);
413#if defined(CONFIG_DEBUG_FS) 396#if defined(CONFIG_DEBUG_FS)
414 drm_debugfs_cleanup(minor); 397 drm_debugfs_cleanup(minor);
415#endif 398#endif
@@ -451,16 +434,11 @@ void drm_put_dev(struct drm_device *dev)
451 434
452 drm_lastclose(dev); 435 drm_lastclose(dev);
453 436
454 if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) && dev->agp)
455 arch_phys_wc_del(dev->agp->agp_mtrr);
456
457 if (dev->driver->unload) 437 if (dev->driver->unload)
458 dev->driver->unload(dev); 438 dev->driver->unload(dev);
459 439
460 if (drm_core_has_AGP(dev) && dev->agp) { 440 if (dev->driver->bus->agp_destroy)
461 kfree(dev->agp); 441 dev->driver->bus->agp_destroy(dev);
462 dev->agp = NULL;
463 }
464 442
465 drm_vblank_cleanup(dev); 443 drm_vblank_cleanup(dev);
466 444
@@ -468,11 +446,14 @@ void drm_put_dev(struct drm_device *dev)
468 drm_rmmap(dev, r_list->map); 446 drm_rmmap(dev, r_list->map);
469 drm_ht_remove(&dev->map_hash); 447 drm_ht_remove(&dev->map_hash);
470 448
471 drm_ctxbitmap_cleanup(dev); 449 drm_legacy_ctxbitmap_cleanup(dev);
472 450
473 if (drm_core_check_feature(dev, DRIVER_MODESET)) 451 if (drm_core_check_feature(dev, DRIVER_MODESET))
474 drm_put_minor(&dev->control); 452 drm_put_minor(&dev->control);
475 453
454 if (dev->render)
455 drm_put_minor(&dev->render);
456
476 if (driver->driver_features & DRIVER_GEM) 457 if (driver->driver_features & DRIVER_GEM)
477 drm_gem_destroy(dev); 458 drm_gem_destroy(dev);
478 459
@@ -489,6 +470,8 @@ void drm_unplug_dev(struct drm_device *dev)
489 /* for a USB device */ 470 /* for a USB device */
490 if (drm_core_check_feature(dev, DRIVER_MODESET)) 471 if (drm_core_check_feature(dev, DRIVER_MODESET))
491 drm_unplug_minor(dev->control); 472 drm_unplug_minor(dev->control);
473 if (dev->render)
474 drm_unplug_minor(dev->render);
492 drm_unplug_minor(dev->primary); 475 drm_unplug_minor(dev->primary);
493 476
494 mutex_lock(&drm_global_mutex); 477 mutex_lock(&drm_global_mutex);
diff --git a/drivers/gpu/drm/drm_usb.c b/drivers/gpu/drm/drm_usb.c
index 34a156f0c336..87664723b9ce 100644
--- a/drivers/gpu/drm/drm_usb.c
+++ b/drivers/gpu/drm/drm_usb.c
@@ -33,6 +33,12 @@ int drm_get_usb_dev(struct usb_interface *interface,
33 if (ret) 33 if (ret)
34 goto err_g1; 34 goto err_g1;
35 35
36 if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
37 ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
38 if (ret)
39 goto err_g11;
40 }
41
36 ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY); 42 ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
37 if (ret) 43 if (ret)
38 goto err_g2; 44 goto err_g2;
@@ -62,6 +68,9 @@ int drm_get_usb_dev(struct usb_interface *interface,
62err_g3: 68err_g3:
63 drm_put_minor(&dev->primary); 69 drm_put_minor(&dev->primary);
64err_g2: 70err_g2:
71 if (dev->render)
72 drm_put_minor(&dev->render);
73err_g11:
65 drm_put_minor(&dev->control); 74 drm_put_minor(&dev->control);
66err_g1: 75err_g1:
67 kfree(dev); 76 kfree(dev);
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index feb20035b2c4..b5c5af7328df 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -251,8 +251,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
251 switch (map->type) { 251 switch (map->type) {
252 case _DRM_REGISTERS: 252 case _DRM_REGISTERS:
253 case _DRM_FRAME_BUFFER: 253 case _DRM_FRAME_BUFFER:
254 if (drm_core_has_MTRR(dev)) 254 arch_phys_wc_del(map->mtrr);
255 arch_phys_wc_del(map->mtrr);
256 iounmap(map->handle); 255 iounmap(map->handle);
257 break; 256 break;
258 case _DRM_SHM: 257 case _DRM_SHM:
diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c
new file mode 100644
index 000000000000..63b471205072
--- /dev/null
+++ b/drivers/gpu/drm/drm_vma_manager.c
@@ -0,0 +1,436 @@
1/*
2 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
3 * Copyright (c) 2012 David Airlie <airlied@linux.ie>
4 * Copyright (c) 2013 David Herrmann <dh.herrmann@gmail.com>
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 */
24
25#include <drm/drmP.h>
26#include <drm/drm_mm.h>
27#include <drm/drm_vma_manager.h>
28#include <linux/fs.h>
29#include <linux/mm.h>
30#include <linux/module.h>
31#include <linux/rbtree.h>
32#include <linux/slab.h>
33#include <linux/spinlock.h>
34#include <linux/types.h>
35
36/**
37 * DOC: vma offset manager
38 *
39 * The vma-manager is responsible to map arbitrary driver-dependent memory
40 * regions into the linear user address-space. It provides offsets to the
41 * caller which can then be used on the address_space of the drm-device. It
42 * takes care to not overlap regions, size them appropriately and to not
43 * confuse mm-core by inconsistent fake vm_pgoff fields.
44 * Drivers shouldn't use this for object placement in VMEM. This manager should
45 * only be used to manage mappings into linear user-space VMs.
46 *
47 * We use drm_mm as backend to manage object allocations. But it is highly
48 * optimized for alloc/free calls, not lookups. Hence, we use an rb-tree to
49 * speed up offset lookups.
50 *
51 * You must not use multiple offset managers on a single address_space.
52 * Otherwise, mm-core will be unable to tear down memory mappings as the VM will
53 * no longer be linear. Please use VM_NONLINEAR in that case and implement your
54 * own offset managers.
55 *
56 * This offset manager works on page-based addresses. That is, every argument
57 * and return code (with the exception of drm_vma_node_offset_addr()) is given
58 * in number of pages, not number of bytes. That means, object sizes and offsets
59 * must always be page-aligned (as usual).
60 * If you want to get a valid byte-based user-space address for a given offset,
61 * please see drm_vma_node_offset_addr().
62 *
63 * Additionally to offset management, the vma offset manager also handles access
64 * management. For every open-file context that is allowed to access a given
65 * node, you must call drm_vma_node_allow(). Otherwise, an mmap() call on this
66 * open-file with the offset of the node will fail with -EACCES. To revoke
67 * access again, use drm_vma_node_revoke(). However, the caller is responsible
68 * for destroying already existing mappings, if required.
69 */
70
71/**
72 * drm_vma_offset_manager_init - Initialize new offset-manager
73 * @mgr: Manager object
74 * @page_offset: Offset of available memory area (page-based)
75 * @size: Size of available address space range (page-based)
76 *
77 * Initialize a new offset-manager. The offset and area size available for the
78 * manager are given as @page_offset and @size. Both are interpreted as
79 * page-numbers, not bytes.
80 *
81 * Adding/removing nodes from the manager is locked internally and protected
82 * against concurrent access. However, node allocation and destruction is left
83 * for the caller. While calling into the vma-manager, a given node must
84 * always be guaranteed to be referenced.
85 */
86void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
87 unsigned long page_offset, unsigned long size)
88{
89 rwlock_init(&mgr->vm_lock);
90 mgr->vm_addr_space_rb = RB_ROOT;
91 drm_mm_init(&mgr->vm_addr_space_mm, page_offset, size);
92}
93EXPORT_SYMBOL(drm_vma_offset_manager_init);
94
95/**
96 * drm_vma_offset_manager_destroy() - Destroy offset manager
97 * @mgr: Manager object
98 *
99 * Destroy an object manager which was previously created via
100 * drm_vma_offset_manager_init(). The caller must remove all allocated nodes
101 * before destroying the manager. Otherwise, drm_mm will refuse to free the
102 * requested resources.
103 *
104 * The manager must not be accessed after this function is called.
105 */
106void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr)
107{
108 /* take the lock to protect against buggy drivers */
109 write_lock(&mgr->vm_lock);
110 drm_mm_takedown(&mgr->vm_addr_space_mm);
111 write_unlock(&mgr->vm_lock);
112}
113EXPORT_SYMBOL(drm_vma_offset_manager_destroy);
114
115/**
116 * drm_vma_offset_lookup() - Find node in offset space
117 * @mgr: Manager object
118 * @start: Start address for object (page-based)
119 * @pages: Size of object (page-based)
120 *
121 * Find a node given a start address and object size. This returns the _best_
122 * match for the given node. That is, @start may point somewhere into a valid
123 * region and the given node will be returned, as long as the node spans the
124 * whole requested area (given the size in number of pages as @pages).
125 *
126 * RETURNS:
127 * Returns NULL if no suitable node can be found. Otherwise, the best match
128 * is returned. It's the caller's responsibility to make sure the node doesn't
129 * get destroyed before the caller can access it.
130 */
131struct drm_vma_offset_node *drm_vma_offset_lookup(struct drm_vma_offset_manager *mgr,
132 unsigned long start,
133 unsigned long pages)
134{
135 struct drm_vma_offset_node *node;
136
137 read_lock(&mgr->vm_lock);
138 node = drm_vma_offset_lookup_locked(mgr, start, pages);
139 read_unlock(&mgr->vm_lock);
140
141 return node;
142}
143EXPORT_SYMBOL(drm_vma_offset_lookup);
144
145/**
146 * drm_vma_offset_lookup_locked() - Find node in offset space
147 * @mgr: Manager object
148 * @start: Start address for object (page-based)
149 * @pages: Size of object (page-based)
150 *
151 * Same as drm_vma_offset_lookup() but requires the caller to lock offset lookup
152 * manually. See drm_vma_offset_lock_lookup() for an example.
153 *
154 * RETURNS:
155 * Returns NULL if no suitable node can be found. Otherwise, the best match
156 * is returned.
157 */
158struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
159 unsigned long start,
160 unsigned long pages)
161{
162 struct drm_vma_offset_node *node, *best;
163 struct rb_node *iter;
164 unsigned long offset;
165
166 iter = mgr->vm_addr_space_rb.rb_node;
167 best = NULL;
168
169 while (likely(iter)) {
170 node = rb_entry(iter, struct drm_vma_offset_node, vm_rb);
171 offset = node->vm_node.start;
172 if (start >= offset) {
173 iter = iter->rb_right;
174 best = node;
175 if (start == offset)
176 break;
177 } else {
178 iter = iter->rb_left;
179 }
180 }
181
182 /* verify that the node spans the requested area */
183 if (best) {
184 offset = best->vm_node.start + best->vm_node.size;
185 if (offset < start + pages)
186 best = NULL;
187 }
188
189 return best;
190}
191EXPORT_SYMBOL(drm_vma_offset_lookup_locked);
192
193/* internal helper to link @node into the rb-tree */
194static void _drm_vma_offset_add_rb(struct drm_vma_offset_manager *mgr,
195 struct drm_vma_offset_node *node)
196{
197 struct rb_node **iter = &mgr->vm_addr_space_rb.rb_node;
198 struct rb_node *parent = NULL;
199 struct drm_vma_offset_node *iter_node;
200
201 while (likely(*iter)) {
202 parent = *iter;
203 iter_node = rb_entry(*iter, struct drm_vma_offset_node, vm_rb);
204
205 if (node->vm_node.start < iter_node->vm_node.start)
206 iter = &(*iter)->rb_left;
207 else if (node->vm_node.start > iter_node->vm_node.start)
208 iter = &(*iter)->rb_right;
209 else
210 BUG();
211 }
212
213 rb_link_node(&node->vm_rb, parent, iter);
214 rb_insert_color(&node->vm_rb, &mgr->vm_addr_space_rb);
215}
216
217/**
218 * drm_vma_offset_add() - Add offset node to manager
219 * @mgr: Manager object
220 * @node: Node to be added
221 * @pages: Allocation size visible to user-space (in number of pages)
222 *
223 * Add a node to the offset-manager. If the node was already added, this does
224 * nothing and return 0. @pages is the size of the object given in number of
225 * pages.
226 * After this call succeeds, you can access the offset of the node until it
227 * is removed again.
228 *
229 * If this call fails, it is safe to retry the operation or call
230 * drm_vma_offset_remove(), anyway. However, no cleanup is required in that
231 * case.
232 *
233 * @pages is not required to be the same size as the underlying memory object
234 * that you want to map. It only limits the size that user-space can map into
235 * their address space.
236 *
237 * RETURNS:
238 * 0 on success, negative error code on failure.
239 */
240int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
241 struct drm_vma_offset_node *node, unsigned long pages)
242{
243 int ret;
244
245 write_lock(&mgr->vm_lock);
246
247 if (drm_mm_node_allocated(&node->vm_node)) {
248 ret = 0;
249 goto out_unlock;
250 }
251
252 ret = drm_mm_insert_node(&mgr->vm_addr_space_mm, &node->vm_node,
253 pages, 0, DRM_MM_SEARCH_DEFAULT);
254 if (ret)
255 goto out_unlock;
256
257 _drm_vma_offset_add_rb(mgr, node);
258
259out_unlock:
260 write_unlock(&mgr->vm_lock);
261 return ret;
262}
263EXPORT_SYMBOL(drm_vma_offset_add);
264
265/**
266 * drm_vma_offset_remove() - Remove offset node from manager
267 * @mgr: Manager object
268 * @node: Node to be removed
269 *
270 * Remove a node from the offset manager. If the node wasn't added before, this
271 * does nothing. After this call returns, the offset and size will be 0 until a
272 * new offset is allocated via drm_vma_offset_add() again. Helper functions like
273 * drm_vma_node_start() and drm_vma_node_offset_addr() will return 0 if no
274 * offset is allocated.
275 */
276void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
277 struct drm_vma_offset_node *node)
278{
279 write_lock(&mgr->vm_lock);
280
281 if (drm_mm_node_allocated(&node->vm_node)) {
282 rb_erase(&node->vm_rb, &mgr->vm_addr_space_rb);
283 drm_mm_remove_node(&node->vm_node);
284 memset(&node->vm_node, 0, sizeof(node->vm_node));
285 }
286
287 write_unlock(&mgr->vm_lock);
288}
289EXPORT_SYMBOL(drm_vma_offset_remove);
290
291/**
292 * drm_vma_node_allow - Add open-file to list of allowed users
293 * @node: Node to modify
294 * @filp: Open file to add
295 *
296 * Add @filp to the list of allowed open-files for this node. If @filp is
297 * already on this list, the ref-count is incremented.
298 *
299 * The list of allowed-users is preserved across drm_vma_offset_add() and
300 * drm_vma_offset_remove() calls. You may even call it if the node is currently
301 * not added to any offset-manager.
302 *
303 * You must remove all open-files the same number of times as you added them
304 * before destroying the node. Otherwise, you will leak memory.
305 *
306 * This is locked against concurrent access internally.
307 *
308 * RETURNS:
309 * 0 on success, negative error code on internal failure (out-of-mem)
310 */
311int drm_vma_node_allow(struct drm_vma_offset_node *node, struct file *filp)
312{
313 struct rb_node **iter;
314 struct rb_node *parent = NULL;
315 struct drm_vma_offset_file *new, *entry;
316 int ret = 0;
317
318 /* Preallocate entry to avoid atomic allocations below. It is quite
319 * unlikely that an open-file is added twice to a single node so we
320 * don't optimize for this case. OOM is checked below only if the entry
321 * is actually used. */
322 new = kmalloc(sizeof(*entry), GFP_KERNEL);
323
324 write_lock(&node->vm_lock);
325
326 iter = &node->vm_files.rb_node;
327
328 while (likely(*iter)) {
329 parent = *iter;
330 entry = rb_entry(*iter, struct drm_vma_offset_file, vm_rb);
331
332 if (filp == entry->vm_filp) {
333 entry->vm_count++;
334 goto unlock;
335 } else if (filp > entry->vm_filp) {
336 iter = &(*iter)->rb_right;
337 } else {
338 iter = &(*iter)->rb_left;
339 }
340 }
341
342 if (!new) {
343 ret = -ENOMEM;
344 goto unlock;
345 }
346
347 new->vm_filp = filp;
348 new->vm_count = 1;
349 rb_link_node(&new->vm_rb, parent, iter);
350 rb_insert_color(&new->vm_rb, &node->vm_files);
351 new = NULL;
352
353unlock:
354 write_unlock(&node->vm_lock);
355 kfree(new);
356 return ret;
357}
358EXPORT_SYMBOL(drm_vma_node_allow);
359
360/**
361 * drm_vma_node_revoke - Remove open-file from list of allowed users
362 * @node: Node to modify
363 * @filp: Open file to remove
364 *
365 * Decrement the ref-count of @filp in the list of allowed open-files on @node.
366 * If the ref-count drops to zero, remove @filp from the list. You must call
367 * this once for every drm_vma_node_allow() on @filp.
368 *
369 * This is locked against concurrent access internally.
370 *
371 * If @filp is not on the list, nothing is done.
372 */
373void drm_vma_node_revoke(struct drm_vma_offset_node *node, struct file *filp)
374{
375 struct drm_vma_offset_file *entry;
376 struct rb_node *iter;
377
378 write_lock(&node->vm_lock);
379
380 iter = node->vm_files.rb_node;
381 while (likely(iter)) {
382 entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
383 if (filp == entry->vm_filp) {
384 if (!--entry->vm_count) {
385 rb_erase(&entry->vm_rb, &node->vm_files);
386 kfree(entry);
387 }
388 break;
389 } else if (filp > entry->vm_filp) {
390 iter = iter->rb_right;
391 } else {
392 iter = iter->rb_left;
393 }
394 }
395
396 write_unlock(&node->vm_lock);
397}
398EXPORT_SYMBOL(drm_vma_node_revoke);
399
400/**
401 * drm_vma_node_is_allowed - Check whether an open-file is granted access
402 * @node: Node to check
403 * @filp: Open-file to check for
404 *
405 * Search the list in @node whether @filp is currently on the list of allowed
406 * open-files (see drm_vma_node_allow()).
407 *
408 * This is locked against concurrent access internally.
409 *
410 * RETURNS:
411 * true iff @filp is on the list
412 */
413bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
414 struct file *filp)
415{
416 struct drm_vma_offset_file *entry;
417 struct rb_node *iter;
418
419 read_lock(&node->vm_lock);
420
421 iter = node->vm_files.rb_node;
422 while (likely(iter)) {
423 entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
424 if (filp == entry->vm_filp)
425 break;
426 else if (filp > entry->vm_filp)
427 iter = iter->rb_right;
428 else
429 iter = iter->rb_left;
430 }
431
432 read_unlock(&node->vm_lock);
433
434 return iter;
435}
436EXPORT_SYMBOL(drm_vma_node_is_allowed);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 9a35d171a6d3..14f5c1d34028 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -184,8 +184,9 @@ static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
184}; 184};
185 185
186static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc, 186static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
187 struct drm_framebuffer *fb, 187 struct drm_framebuffer *fb,
188 struct drm_pending_vblank_event *event) 188 struct drm_pending_vblank_event *event,
189 uint32_t page_flip_flags)
189{ 190{
190 struct drm_device *dev = crtc->dev; 191 struct drm_device *dev = crtc->dev;
191 struct exynos_drm_private *dev_priv = dev->dev_private; 192 struct exynos_drm_private *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
index a0f997e0cbdf..fd76449cf452 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -22,6 +22,11 @@ struct exynos_drm_dmabuf_attachment {
22 bool is_mapped; 22 bool is_mapped;
23}; 23};
24 24
25static struct exynos_drm_gem_obj *dma_buf_to_obj(struct dma_buf *buf)
26{
27 return to_exynos_gem_obj(buf->priv);
28}
29
25static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf, 30static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf,
26 struct device *dev, 31 struct device *dev,
27 struct dma_buf_attachment *attach) 32 struct dma_buf_attachment *attach)
@@ -63,7 +68,7 @@ static struct sg_table *
63 enum dma_data_direction dir) 68 enum dma_data_direction dir)
64{ 69{
65 struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv; 70 struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
66 struct exynos_drm_gem_obj *gem_obj = attach->dmabuf->priv; 71 struct exynos_drm_gem_obj *gem_obj = dma_buf_to_obj(attach->dmabuf);
67 struct drm_device *dev = gem_obj->base.dev; 72 struct drm_device *dev = gem_obj->base.dev;
68 struct exynos_drm_gem_buf *buf; 73 struct exynos_drm_gem_buf *buf;
69 struct scatterlist *rd, *wr; 74 struct scatterlist *rd, *wr;
@@ -127,27 +132,6 @@ static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
127 /* Nothing to do. */ 132 /* Nothing to do. */
128} 133}
129 134
130static void exynos_dmabuf_release(struct dma_buf *dmabuf)
131{
132 struct exynos_drm_gem_obj *exynos_gem_obj = dmabuf->priv;
133
134 /*
135 * exynos_dmabuf_release() call means that file object's
136 * f_count is 0 and it calls drm_gem_object_handle_unreference()
137 * to drop the references that these values had been increased
138 * at drm_prime_handle_to_fd()
139 */
140 if (exynos_gem_obj->base.export_dma_buf == dmabuf) {
141 exynos_gem_obj->base.export_dma_buf = NULL;
142
143 /*
144 * drop this gem object refcount to release allocated buffer
145 * and resources.
146 */
147 drm_gem_object_unreference_unlocked(&exynos_gem_obj->base);
148 }
149}
150
151static void *exynos_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, 135static void *exynos_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
152 unsigned long page_num) 136 unsigned long page_num)
153{ 137{
@@ -193,7 +177,7 @@ static struct dma_buf_ops exynos_dmabuf_ops = {
193 .kunmap = exynos_gem_dmabuf_kunmap, 177 .kunmap = exynos_gem_dmabuf_kunmap,
194 .kunmap_atomic = exynos_gem_dmabuf_kunmap_atomic, 178 .kunmap_atomic = exynos_gem_dmabuf_kunmap_atomic,
195 .mmap = exynos_gem_dmabuf_mmap, 179 .mmap = exynos_gem_dmabuf_mmap,
196 .release = exynos_dmabuf_release, 180 .release = drm_gem_dmabuf_release,
197}; 181};
198 182
199struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev, 183struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
@@ -201,7 +185,7 @@ struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
201{ 185{
202 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); 186 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
203 187
204 return dma_buf_export(exynos_gem_obj, &exynos_dmabuf_ops, 188 return dma_buf_export(obj, &exynos_dmabuf_ops,
205 exynos_gem_obj->base.size, flags); 189 exynos_gem_obj->base.size, flags);
206} 190}
207 191
@@ -219,8 +203,7 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
219 if (dma_buf->ops == &exynos_dmabuf_ops) { 203 if (dma_buf->ops == &exynos_dmabuf_ops) {
220 struct drm_gem_object *obj; 204 struct drm_gem_object *obj;
221 205
222 exynos_gem_obj = dma_buf->priv; 206 obj = dma_buf->priv;
223 obj = &exynos_gem_obj->base;
224 207
225 /* is it from our device? */ 208 /* is it from our device? */
226 if (obj->dev == drm_dev) { 209 if (obj->dev == drm_dev) {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index ca2729a85129..df81d3c959b4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -213,7 +213,7 @@ static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
213 .close = drm_gem_vm_close, 213 .close = drm_gem_vm_close,
214}; 214};
215 215
216static struct drm_ioctl_desc exynos_ioctls[] = { 216static const struct drm_ioctl_desc exynos_ioctls[] = {
217 DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CREATE, exynos_drm_gem_create_ioctl, 217 DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CREATE, exynos_drm_gem_create_ioctl,
218 DRM_UNLOCKED | DRM_AUTH), 218 DRM_UNLOCKED | DRM_AUTH),
219 DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MAP_OFFSET, 219 DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MAP_OFFSET,
@@ -271,12 +271,13 @@ static struct drm_driver exynos_drm_driver = {
271 .gem_vm_ops = &exynos_drm_gem_vm_ops, 271 .gem_vm_ops = &exynos_drm_gem_vm_ops,
272 .dumb_create = exynos_drm_gem_dumb_create, 272 .dumb_create = exynos_drm_gem_dumb_create,
273 .dumb_map_offset = exynos_drm_gem_dumb_map_offset, 273 .dumb_map_offset = exynos_drm_gem_dumb_map_offset,
274 .dumb_destroy = exynos_drm_gem_dumb_destroy, 274 .dumb_destroy = drm_gem_dumb_destroy,
275 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 275 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
276 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 276 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
277 .gem_prime_export = exynos_dmabuf_prime_export, 277 .gem_prime_export = exynos_dmabuf_prime_export,
278 .gem_prime_import = exynos_dmabuf_prime_import, 278 .gem_prime_import = exynos_dmabuf_prime_import,
279 .ioctls = exynos_ioctls, 279 .ioctls = exynos_ioctls,
280 .num_ioctls = ARRAY_SIZE(exynos_ioctls),
280 .fops = &exynos_drm_driver_fops, 281 .fops = &exynos_drm_driver_fops,
281 .name = DRIVER_NAME, 282 .name = DRIVER_NAME,
282 .desc = DRIVER_DESC, 283 .desc = DRIVER_DESC,
@@ -288,7 +289,6 @@ static struct drm_driver exynos_drm_driver = {
288static int exynos_drm_platform_probe(struct platform_device *pdev) 289static int exynos_drm_platform_probe(struct platform_device *pdev)
289{ 290{
290 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); 291 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
291 exynos_drm_driver.num_ioctls = DRM_ARRAY_SIZE(exynos_ioctls);
292 292
293 return drm_platform_init(&exynos_drm_driver, pdev); 293 return drm_platform_init(&exynos_drm_driver, pdev);
294} 294}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 24c22a8c3364..f3c6f40666e1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -10,6 +10,7 @@
10 */ 10 */
11 11
12#include <drm/drmP.h> 12#include <drm/drmP.h>
13#include <drm/drm_vma_manager.h>
13 14
14#include <linux/shmem_fs.h> 15#include <linux/shmem_fs.h>
15#include <drm/exynos_drm.h> 16#include <drm/exynos_drm.h>
@@ -135,7 +136,7 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
135 obj = &exynos_gem_obj->base; 136 obj = &exynos_gem_obj->base;
136 buf = exynos_gem_obj->buffer; 137 buf = exynos_gem_obj->buffer;
137 138
138 DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count)); 139 DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count);
139 140
140 /* 141 /*
141 * do not release memory region from exporter. 142 * do not release memory region from exporter.
@@ -152,8 +153,7 @@ out:
152 exynos_drm_fini_buf(obj->dev, buf); 153 exynos_drm_fini_buf(obj->dev, buf);
153 exynos_gem_obj->buffer = NULL; 154 exynos_gem_obj->buffer = NULL;
154 155
155 if (obj->map_list.map) 156 drm_gem_free_mmap_offset(obj);
156 drm_gem_free_mmap_offset(obj);
157 157
158 /* release file pointer to gem object. */ 158 /* release file pointer to gem object. */
159 drm_gem_object_release(obj); 159 drm_gem_object_release(obj);
@@ -703,13 +703,11 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
703 goto unlock; 703 goto unlock;
704 } 704 }
705 705
706 if (!obj->map_list.map) { 706 ret = drm_gem_create_mmap_offset(obj);
707 ret = drm_gem_create_mmap_offset(obj); 707 if (ret)
708 if (ret) 708 goto out;
709 goto out;
710 }
711 709
712 *offset = (u64)obj->map_list.hash.key << PAGE_SHIFT; 710 *offset = drm_vma_node_offset_addr(&obj->vma_node);
713 DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset); 711 DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
714 712
715out: 713out:
@@ -719,26 +717,6 @@ unlock:
719 return ret; 717 return ret;
720} 718}
721 719
722int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
723 struct drm_device *dev,
724 unsigned int handle)
725{
726 int ret;
727
728 /*
729 * obj->refcount and obj->handle_count are decreased and
730 * if both them are 0 then exynos_drm_gem_free_object()
731 * would be called by callback to release resources.
732 */
733 ret = drm_gem_handle_delete(file_priv, handle);
734 if (ret < 0) {
735 DRM_ERROR("failed to delete drm_gem_handle.\n");
736 return ret;
737 }
738
739 return 0;
740}
741
742int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 720int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
743{ 721{
744 struct drm_gem_object *obj = vma->vm_private_data; 722 struct drm_gem_object *obj = vma->vm_private_data;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 468766bee450..09555afdfe9c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -151,15 +151,6 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
151 struct drm_device *dev, uint32_t handle, 151 struct drm_device *dev, uint32_t handle,
152 uint64_t *offset); 152 uint64_t *offset);
153 153
154/*
155 * destroy memory region allocated.
156 * - a gem handle and physical memory region pointed by a gem object
157 * would be released by drm_gem_handle_delete().
158 */
159int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
160 struct drm_device *dev,
161 unsigned int handle);
162
163/* page fault handler and mmap fault address(virtual) to physical memory. */ 154/* page fault handler and mmap fault address(virtual) to physical memory. */
164int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); 155int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
165 156
diff --git a/drivers/gpu/drm/gma500/Makefile b/drivers/gpu/drm/gma500/Makefile
index 7a2d40a5c1e1..e9064dd9045d 100644
--- a/drivers/gpu/drm/gma500/Makefile
+++ b/drivers/gpu/drm/gma500/Makefile
@@ -15,6 +15,7 @@ gma500_gfx-y += \
15 mmu.o \ 15 mmu.o \
16 power.o \ 16 power.o \
17 psb_drv.o \ 17 psb_drv.o \
18 gma_display.o \
18 psb_intel_display.o \ 19 psb_intel_display.o \
19 psb_intel_lvds.o \ 20 psb_intel_lvds.o \
20 psb_intel_modes.o \ 21 psb_intel_modes.o \
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index 23e14e93991f..162f686c532d 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -641,6 +641,7 @@ const struct psb_ops cdv_chip_ops = {
641 641
642 .crtc_helper = &cdv_intel_helper_funcs, 642 .crtc_helper = &cdv_intel_helper_funcs,
643 .crtc_funcs = &cdv_intel_crtc_funcs, 643 .crtc_funcs = &cdv_intel_crtc_funcs,
644 .clock_funcs = &cdv_clock_funcs,
644 645
645 .output_init = cdv_output_init, 646 .output_init = cdv_output_init,
646 .hotplug = cdv_hotplug_event, 647 .hotplug = cdv_hotplug_event,
@@ -655,4 +656,6 @@ const struct psb_ops cdv_chip_ops = {
655 .restore_regs = cdv_restore_display_registers, 656 .restore_regs = cdv_restore_display_registers,
656 .power_down = cdv_power_down, 657 .power_down = cdv_power_down,
657 .power_up = cdv_power_up, 658 .power_up = cdv_power_up,
659 .update_wm = cdv_update_wm,
660 .disable_sr = cdv_disable_sr,
658}; 661};
diff --git a/drivers/gpu/drm/gma500/cdv_device.h b/drivers/gpu/drm/gma500/cdv_device.h
index 9561e17621b3..705c11d47d45 100644
--- a/drivers/gpu/drm/gma500/cdv_device.h
+++ b/drivers/gpu/drm/gma500/cdv_device.h
@@ -17,6 +17,7 @@
17 17
18extern const struct drm_crtc_helper_funcs cdv_intel_helper_funcs; 18extern const struct drm_crtc_helper_funcs cdv_intel_helper_funcs;
19extern const struct drm_crtc_funcs cdv_intel_crtc_funcs; 19extern const struct drm_crtc_funcs cdv_intel_crtc_funcs;
20extern const struct gma_clock_funcs cdv_clock_funcs;
20extern void cdv_intel_crt_init(struct drm_device *dev, 21extern void cdv_intel_crt_init(struct drm_device *dev,
21 struct psb_intel_mode_device *mode_dev); 22 struct psb_intel_mode_device *mode_dev);
22extern void cdv_intel_lvds_init(struct drm_device *dev, 23extern void cdv_intel_lvds_init(struct drm_device *dev,
@@ -25,12 +26,5 @@ extern void cdv_hdmi_init(struct drm_device *dev, struct psb_intel_mode_device *
25 int reg); 26 int reg);
26extern struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev, 27extern struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev,
27 struct drm_crtc *crtc); 28 struct drm_crtc *crtc);
28 29extern void cdv_update_wm(struct drm_device *dev, struct drm_crtc *crtc);
29static inline void cdv_intel_wait_for_vblank(struct drm_device *dev) 30extern void cdv_disable_sr(struct drm_device *dev);
30{
31 /* Wait for 20ms, i.e. one cycle at 50hz. */
32 /* FIXME: msleep ?? */
33 mdelay(20);
34}
35
36
diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c
index 7b8386fc3024..661af492173d 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_crt.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c
@@ -95,13 +95,12 @@ static void cdv_intel_crt_mode_set(struct drm_encoder *encoder,
95 95
96 struct drm_device *dev = encoder->dev; 96 struct drm_device *dev = encoder->dev;
97 struct drm_crtc *crtc = encoder->crtc; 97 struct drm_crtc *crtc = encoder->crtc;
98 struct psb_intel_crtc *psb_intel_crtc = 98 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
99 to_psb_intel_crtc(crtc);
100 int dpll_md_reg; 99 int dpll_md_reg;
101 u32 adpa, dpll_md; 100 u32 adpa, dpll_md;
102 u32 adpa_reg; 101 u32 adpa_reg;
103 102
104 if (psb_intel_crtc->pipe == 0) 103 if (gma_crtc->pipe == 0)
105 dpll_md_reg = DPLL_A_MD; 104 dpll_md_reg = DPLL_A_MD;
106 else 105 else
107 dpll_md_reg = DPLL_B_MD; 106 dpll_md_reg = DPLL_B_MD;
@@ -124,7 +123,7 @@ static void cdv_intel_crt_mode_set(struct drm_encoder *encoder,
124 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 123 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
125 adpa |= ADPA_VSYNC_ACTIVE_HIGH; 124 adpa |= ADPA_VSYNC_ACTIVE_HIGH;
126 125
127 if (psb_intel_crtc->pipe == 0) 126 if (gma_crtc->pipe == 0)
128 adpa |= ADPA_PIPE_A_SELECT; 127 adpa |= ADPA_PIPE_A_SELECT;
129 else 128 else
130 adpa |= ADPA_PIPE_B_SELECT; 129 adpa |= ADPA_PIPE_B_SELECT;
@@ -197,10 +196,9 @@ static enum drm_connector_status cdv_intel_crt_detect(
197 196
198static void cdv_intel_crt_destroy(struct drm_connector *connector) 197static void cdv_intel_crt_destroy(struct drm_connector *connector)
199{ 198{
200 struct psb_intel_encoder *psb_intel_encoder = 199 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
201 psb_intel_attached_encoder(connector);
202 200
203 psb_intel_i2c_destroy(psb_intel_encoder->ddc_bus); 201 psb_intel_i2c_destroy(gma_encoder->ddc_bus);
204 drm_sysfs_connector_remove(connector); 202 drm_sysfs_connector_remove(connector);
205 drm_connector_cleanup(connector); 203 drm_connector_cleanup(connector);
206 kfree(connector); 204 kfree(connector);
@@ -208,9 +206,9 @@ static void cdv_intel_crt_destroy(struct drm_connector *connector)
208 206
209static int cdv_intel_crt_get_modes(struct drm_connector *connector) 207static int cdv_intel_crt_get_modes(struct drm_connector *connector)
210{ 208{
211 struct psb_intel_encoder *psb_intel_encoder = 209 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
212 psb_intel_attached_encoder(connector); 210 return psb_intel_ddc_get_modes(connector,
213 return psb_intel_ddc_get_modes(connector, &psb_intel_encoder->ddc_bus->adapter); 211 &gma_encoder->ddc_bus->adapter);
214} 212}
215 213
216static int cdv_intel_crt_set_property(struct drm_connector *connector, 214static int cdv_intel_crt_set_property(struct drm_connector *connector,
@@ -227,8 +225,8 @@ static int cdv_intel_crt_set_property(struct drm_connector *connector,
227static const struct drm_encoder_helper_funcs cdv_intel_crt_helper_funcs = { 225static const struct drm_encoder_helper_funcs cdv_intel_crt_helper_funcs = {
228 .dpms = cdv_intel_crt_dpms, 226 .dpms = cdv_intel_crt_dpms,
229 .mode_fixup = cdv_intel_crt_mode_fixup, 227 .mode_fixup = cdv_intel_crt_mode_fixup,
230 .prepare = psb_intel_encoder_prepare, 228 .prepare = gma_encoder_prepare,
231 .commit = psb_intel_encoder_commit, 229 .commit = gma_encoder_commit,
232 .mode_set = cdv_intel_crt_mode_set, 230 .mode_set = cdv_intel_crt_mode_set,
233}; 231};
234 232
@@ -244,7 +242,7 @@ static const struct drm_connector_helper_funcs
244 cdv_intel_crt_connector_helper_funcs = { 242 cdv_intel_crt_connector_helper_funcs = {
245 .mode_valid = cdv_intel_crt_mode_valid, 243 .mode_valid = cdv_intel_crt_mode_valid,
246 .get_modes = cdv_intel_crt_get_modes, 244 .get_modes = cdv_intel_crt_get_modes,
247 .best_encoder = psb_intel_best_encoder, 245 .best_encoder = gma_best_encoder,
248}; 246};
249 247
250static void cdv_intel_crt_enc_destroy(struct drm_encoder *encoder) 248static void cdv_intel_crt_enc_destroy(struct drm_encoder *encoder)
@@ -260,32 +258,31 @@ void cdv_intel_crt_init(struct drm_device *dev,
260 struct psb_intel_mode_device *mode_dev) 258 struct psb_intel_mode_device *mode_dev)
261{ 259{
262 260
263 struct psb_intel_connector *psb_intel_connector; 261 struct gma_connector *gma_connector;
264 struct psb_intel_encoder *psb_intel_encoder; 262 struct gma_encoder *gma_encoder;
265 struct drm_connector *connector; 263 struct drm_connector *connector;
266 struct drm_encoder *encoder; 264 struct drm_encoder *encoder;
267 265
268 u32 i2c_reg; 266 u32 i2c_reg;
269 267
270 psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL); 268 gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL);
271 if (!psb_intel_encoder) 269 if (!gma_encoder)
272 return; 270 return;
273 271
274 psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL); 272 gma_connector = kzalloc(sizeof(struct gma_connector), GFP_KERNEL);
275 if (!psb_intel_connector) 273 if (!gma_connector)
276 goto failed_connector; 274 goto failed_connector;
277 275
278 connector = &psb_intel_connector->base; 276 connector = &gma_connector->base;
279 connector->polled = DRM_CONNECTOR_POLL_HPD; 277 connector->polled = DRM_CONNECTOR_POLL_HPD;
280 drm_connector_init(dev, connector, 278 drm_connector_init(dev, connector,
281 &cdv_intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); 279 &cdv_intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
282 280
283 encoder = &psb_intel_encoder->base; 281 encoder = &gma_encoder->base;
284 drm_encoder_init(dev, encoder, 282 drm_encoder_init(dev, encoder,
285 &cdv_intel_crt_enc_funcs, DRM_MODE_ENCODER_DAC); 283 &cdv_intel_crt_enc_funcs, DRM_MODE_ENCODER_DAC);
286 284
287 psb_intel_connector_attach_encoder(psb_intel_connector, 285 gma_connector_attach_encoder(gma_connector, gma_encoder);
288 psb_intel_encoder);
289 286
290 /* Set up the DDC bus. */ 287 /* Set up the DDC bus. */
291 i2c_reg = GPIOA; 288 i2c_reg = GPIOA;
@@ -294,15 +291,15 @@ void cdv_intel_crt_init(struct drm_device *dev,
294 if (dev_priv->crt_ddc_bus != 0) 291 if (dev_priv->crt_ddc_bus != 0)
295 i2c_reg = dev_priv->crt_ddc_bus; 292 i2c_reg = dev_priv->crt_ddc_bus;
296 }*/ 293 }*/
297 psb_intel_encoder->ddc_bus = psb_intel_i2c_create(dev, 294 gma_encoder->ddc_bus = psb_intel_i2c_create(dev,
298 i2c_reg, "CRTDDC_A"); 295 i2c_reg, "CRTDDC_A");
299 if (!psb_intel_encoder->ddc_bus) { 296 if (!gma_encoder->ddc_bus) {
300 dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " 297 dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
301 "failed.\n"); 298 "failed.\n");
302 goto failed_ddc; 299 goto failed_ddc;
303 } 300 }
304 301
305 psb_intel_encoder->type = INTEL_OUTPUT_ANALOG; 302 gma_encoder->type = INTEL_OUTPUT_ANALOG;
306 /* 303 /*
307 psb_intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT); 304 psb_intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT);
308 psb_intel_output->crtc_mask = (1 << 0) | (1 << 1); 305 psb_intel_output->crtc_mask = (1 << 0) | (1 << 1);
@@ -318,10 +315,10 @@ void cdv_intel_crt_init(struct drm_device *dev,
318 315
319 return; 316 return;
320failed_ddc: 317failed_ddc:
321 drm_encoder_cleanup(&psb_intel_encoder->base); 318 drm_encoder_cleanup(&gma_encoder->base);
322 drm_connector_cleanup(&psb_intel_connector->base); 319 drm_connector_cleanup(&gma_connector->base);
323 kfree(psb_intel_connector); 320 kfree(gma_connector);
324failed_connector: 321failed_connector:
325 kfree(psb_intel_encoder); 322 kfree(gma_encoder);
326 return; 323 return;
327} 324}
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index 82430ad8ba62..8fbfa06da62d 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -19,54 +19,20 @@
19 */ 19 */
20 20
21#include <linux/i2c.h> 21#include <linux/i2c.h>
22#include <linux/pm_runtime.h>
23 22
24#include <drm/drmP.h> 23#include <drm/drmP.h>
25#include "framebuffer.h" 24#include "framebuffer.h"
26#include "psb_drv.h" 25#include "psb_drv.h"
27#include "psb_intel_drv.h" 26#include "psb_intel_drv.h"
28#include "psb_intel_reg.h" 27#include "psb_intel_reg.h"
29#include "psb_intel_display.h" 28#include "gma_display.h"
30#include "power.h" 29#include "power.h"
31#include "cdv_device.h" 30#include "cdv_device.h"
32 31
32static bool cdv_intel_find_dp_pll(const struct gma_limit_t *limit,
33 struct drm_crtc *crtc, int target,
34 int refclk, struct gma_clock_t *best_clock);
33 35
34struct cdv_intel_range_t {
35 int min, max;
36};
37
38struct cdv_intel_p2_t {
39 int dot_limit;
40 int p2_slow, p2_fast;
41};
42
43struct cdv_intel_clock_t {
44 /* given values */
45 int n;
46 int m1, m2;
47 int p1, p2;
48 /* derived values */
49 int dot;
50 int vco;
51 int m;
52 int p;
53};
54
55#define INTEL_P2_NUM 2
56
57struct cdv_intel_limit_t {
58 struct cdv_intel_range_t dot, vco, n, m, m1, m2, p, p1;
59 struct cdv_intel_p2_t p2;
60 bool (*find_pll)(const struct cdv_intel_limit_t *, struct drm_crtc *,
61 int, int, struct cdv_intel_clock_t *);
62};
63
64static bool cdv_intel_find_best_PLL(const struct cdv_intel_limit_t *limit,
65 struct drm_crtc *crtc, int target, int refclk,
66 struct cdv_intel_clock_t *best_clock);
67static bool cdv_intel_find_dp_pll(const struct cdv_intel_limit_t *limit, struct drm_crtc *crtc, int target,
68 int refclk,
69 struct cdv_intel_clock_t *best_clock);
70 36
71#define CDV_LIMIT_SINGLE_LVDS_96 0 37#define CDV_LIMIT_SINGLE_LVDS_96 0
72#define CDV_LIMIT_SINGLE_LVDS_100 1 38#define CDV_LIMIT_SINGLE_LVDS_100 1
@@ -75,7 +41,7 @@ static bool cdv_intel_find_dp_pll(const struct cdv_intel_limit_t *limit, struct
75#define CDV_LIMIT_DP_27 4 41#define CDV_LIMIT_DP_27 4
76#define CDV_LIMIT_DP_100 5 42#define CDV_LIMIT_DP_100 5
77 43
78static const struct cdv_intel_limit_t cdv_intel_limits[] = { 44static const struct gma_limit_t cdv_intel_limits[] = {
79 { /* CDV_SINGLE_LVDS_96MHz */ 45 { /* CDV_SINGLE_LVDS_96MHz */
80 .dot = {.min = 20000, .max = 115500}, 46 .dot = {.min = 20000, .max = 115500},
81 .vco = {.min = 1800000, .max = 3600000}, 47 .vco = {.min = 1800000, .max = 3600000},
@@ -85,9 +51,8 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = {
85 .m2 = {.min = 58, .max = 158}, 51 .m2 = {.min = 58, .max = 158},
86 .p = {.min = 28, .max = 140}, 52 .p = {.min = 28, .max = 140},
87 .p1 = {.min = 2, .max = 10}, 53 .p1 = {.min = 2, .max = 10},
88 .p2 = {.dot_limit = 200000, 54 .p2 = {.dot_limit = 200000, .p2_slow = 14, .p2_fast = 14},
89 .p2_slow = 14, .p2_fast = 14}, 55 .find_pll = gma_find_best_pll,
90 .find_pll = cdv_intel_find_best_PLL,
91 }, 56 },
92 { /* CDV_SINGLE_LVDS_100MHz */ 57 { /* CDV_SINGLE_LVDS_100MHz */
93 .dot = {.min = 20000, .max = 115500}, 58 .dot = {.min = 20000, .max = 115500},
@@ -102,7 +67,7 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = {
102 * is 80-224Mhz. Prefer single channel as much as possible. 67 * is 80-224Mhz. Prefer single channel as much as possible.
103 */ 68 */
104 .p2 = {.dot_limit = 200000, .p2_slow = 14, .p2_fast = 14}, 69 .p2 = {.dot_limit = 200000, .p2_slow = 14, .p2_fast = 14},
105 .find_pll = cdv_intel_find_best_PLL, 70 .find_pll = gma_find_best_pll,
106 }, 71 },
107 { /* CDV_DAC_HDMI_27MHz */ 72 { /* CDV_DAC_HDMI_27MHz */
108 .dot = {.min = 20000, .max = 400000}, 73 .dot = {.min = 20000, .max = 400000},
@@ -114,7 +79,7 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = {
114 .p = {.min = 5, .max = 90}, 79 .p = {.min = 5, .max = 90},
115 .p1 = {.min = 1, .max = 9}, 80 .p1 = {.min = 1, .max = 9},
116 .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5}, 81 .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5},
117 .find_pll = cdv_intel_find_best_PLL, 82 .find_pll = gma_find_best_pll,
118 }, 83 },
119 { /* CDV_DAC_HDMI_96MHz */ 84 { /* CDV_DAC_HDMI_96MHz */
120 .dot = {.min = 20000, .max = 400000}, 85 .dot = {.min = 20000, .max = 400000},
@@ -126,7 +91,7 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = {
126 .p = {.min = 5, .max = 100}, 91 .p = {.min = 5, .max = 100},
127 .p1 = {.min = 1, .max = 10}, 92 .p1 = {.min = 1, .max = 10},
128 .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5}, 93 .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5},
129 .find_pll = cdv_intel_find_best_PLL, 94 .find_pll = gma_find_best_pll,
130 }, 95 },
131 { /* CDV_DP_27MHz */ 96 { /* CDV_DP_27MHz */
132 .dot = {.min = 160000, .max = 272000}, 97 .dot = {.min = 160000, .max = 272000},
@@ -255,10 +220,10 @@ void cdv_sb_reset(struct drm_device *dev)
255 */ 220 */
256static int 221static int
257cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc, 222cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
258 struct cdv_intel_clock_t *clock, bool is_lvds, u32 ddi_select) 223 struct gma_clock_t *clock, bool is_lvds, u32 ddi_select)
259{ 224{
260 struct psb_intel_crtc *psb_crtc = to_psb_intel_crtc(crtc); 225 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
261 int pipe = psb_crtc->pipe; 226 int pipe = gma_crtc->pipe;
262 u32 m, n_vco, p; 227 u32 m, n_vco, p;
263 int ret = 0; 228 int ret = 0;
264 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; 229 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
@@ -405,31 +370,11 @@ cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
405 return 0; 370 return 0;
406} 371}
407 372
408/* 373static const struct gma_limit_t *cdv_intel_limit(struct drm_crtc *crtc,
409 * Returns whether any encoder on the specified pipe is of the specified type 374 int refclk)
410 */
411static bool cdv_intel_pipe_has_type(struct drm_crtc *crtc, int type)
412{
413 struct drm_device *dev = crtc->dev;
414 struct drm_mode_config *mode_config = &dev->mode_config;
415 struct drm_connector *l_entry;
416
417 list_for_each_entry(l_entry, &mode_config->connector_list, head) {
418 if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
419 struct psb_intel_encoder *psb_intel_encoder =
420 psb_intel_attached_encoder(l_entry);
421 if (psb_intel_encoder->type == type)
422 return true;
423 }
424 }
425 return false;
426}
427
428static const struct cdv_intel_limit_t *cdv_intel_limit(struct drm_crtc *crtc,
429 int refclk)
430{ 375{
431 const struct cdv_intel_limit_t *limit; 376 const struct gma_limit_t *limit;
432 if (cdv_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 377 if (gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
433 /* 378 /*
434 * Now only single-channel LVDS is supported on CDV. If it is 379 * Now only single-channel LVDS is supported on CDV. If it is
435 * incorrect, please add the dual-channel LVDS. 380 * incorrect, please add the dual-channel LVDS.
@@ -438,8 +383,8 @@ static const struct cdv_intel_limit_t *cdv_intel_limit(struct drm_crtc *crtc,
438 limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_96]; 383 limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_96];
439 else 384 else
440 limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_100]; 385 limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_100];
441 } else if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || 386 } else if (gma_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
442 psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) { 387 gma_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
443 if (refclk == 27000) 388 if (refclk == 27000)
444 limit = &cdv_intel_limits[CDV_LIMIT_DP_27]; 389 limit = &cdv_intel_limits[CDV_LIMIT_DP_27];
445 else 390 else
@@ -454,8 +399,7 @@ static const struct cdv_intel_limit_t *cdv_intel_limit(struct drm_crtc *crtc,
454} 399}
455 400
456/* m1 is reserved as 0 in CDV, n is a ring counter */ 401/* m1 is reserved as 0 in CDV, n is a ring counter */
457static void cdv_intel_clock(struct drm_device *dev, 402static void cdv_intel_clock(int refclk, struct gma_clock_t *clock)
458 int refclk, struct cdv_intel_clock_t *clock)
459{ 403{
460 clock->m = clock->m2 + 2; 404 clock->m = clock->m2 + 2;
461 clock->p = clock->p1 * clock->p2; 405 clock->p = clock->p1 * clock->p2;
@@ -463,93 +407,12 @@ static void cdv_intel_clock(struct drm_device *dev,
463 clock->dot = clock->vco / clock->p; 407 clock->dot = clock->vco / clock->p;
464} 408}
465 409
466 410static bool cdv_intel_find_dp_pll(const struct gma_limit_t *limit,
467#define INTELPllInvalid(s) { /* ErrorF (s) */; return false; } 411 struct drm_crtc *crtc, int target,
468static bool cdv_intel_PLL_is_valid(struct drm_crtc *crtc, 412 int refclk,
469 const struct cdv_intel_limit_t *limit, 413 struct gma_clock_t *best_clock)
470 struct cdv_intel_clock_t *clock)
471{
472 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
473 INTELPllInvalid("p1 out of range\n");
474 if (clock->p < limit->p.min || limit->p.max < clock->p)
475 INTELPllInvalid("p out of range\n");
476 /* unnecessary to check the range of m(m1/M2)/n again */
477 if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
478 INTELPllInvalid("vco out of range\n");
479 /* XXX: We may need to be checking "Dot clock"
480 * depending on the multiplier, connector, etc.,
481 * rather than just a single range.
482 */
483 if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
484 INTELPllInvalid("dot out of range\n");
485
486 return true;
487}
488
489static bool cdv_intel_find_best_PLL(const struct cdv_intel_limit_t *limit,
490 struct drm_crtc *crtc, int target, int refclk,
491 struct cdv_intel_clock_t *best_clock)
492{ 414{
493 struct drm_device *dev = crtc->dev; 415 struct gma_clock_t clock;
494 struct cdv_intel_clock_t clock;
495 int err = target;
496
497
498 if (cdv_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
499 (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
500 /*
501 * For LVDS, if the panel is on, just rely on its current
502 * settings for dual-channel. We haven't figured out how to
503 * reliably set up different single/dual channel state, if we
504 * even can.
505 */
506 if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
507 LVDS_CLKB_POWER_UP)
508 clock.p2 = limit->p2.p2_fast;
509 else
510 clock.p2 = limit->p2.p2_slow;
511 } else {
512 if (target < limit->p2.dot_limit)
513 clock.p2 = limit->p2.p2_slow;
514 else
515 clock.p2 = limit->p2.p2_fast;
516 }
517
518 memset(best_clock, 0, sizeof(*best_clock));
519 clock.m1 = 0;
520 /* m1 is reserved as 0 in CDV, n is a ring counter.
521 So skip the m1 loop */
522 for (clock.n = limit->n.min; clock.n <= limit->n.max; clock.n++) {
523 for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max;
524 clock.m2++) {
525 for (clock.p1 = limit->p1.min;
526 clock.p1 <= limit->p1.max;
527 clock.p1++) {
528 int this_err;
529
530 cdv_intel_clock(dev, refclk, &clock);
531
532 if (!cdv_intel_PLL_is_valid(crtc,
533 limit, &clock))
534 continue;
535
536 this_err = abs(clock.dot - target);
537 if (this_err < err) {
538 *best_clock = clock;
539 err = this_err;
540 }
541 }
542 }
543 }
544
545 return err != target;
546}
547
548static bool cdv_intel_find_dp_pll(const struct cdv_intel_limit_t *limit, struct drm_crtc *crtc, int target,
549 int refclk,
550 struct cdv_intel_clock_t *best_clock)
551{
552 struct cdv_intel_clock_t clock;
553 if (refclk == 27000) { 416 if (refclk == 27000) {
554 if (target < 200000) { 417 if (target < 200000) {
555 clock.p1 = 2; 418 clock.p1 = 2;
@@ -584,85 +447,10 @@ static bool cdv_intel_find_dp_pll(const struct cdv_intel_limit_t *limit, struct
584 clock.p = clock.p1 * clock.p2; 447 clock.p = clock.p1 * clock.p2;
585 clock.vco = (refclk * clock.m) / clock.n; 448 clock.vco = (refclk * clock.m) / clock.n;
586 clock.dot = clock.vco / clock.p; 449 clock.dot = clock.vco / clock.p;
587 memcpy(best_clock, &clock, sizeof(struct cdv_intel_clock_t)); 450 memcpy(best_clock, &clock, sizeof(struct gma_clock_t));
588 return true; 451 return true;
589} 452}
590 453
591static int cdv_intel_pipe_set_base(struct drm_crtc *crtc,
592 int x, int y, struct drm_framebuffer *old_fb)
593{
594 struct drm_device *dev = crtc->dev;
595 struct drm_psb_private *dev_priv = dev->dev_private;
596 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
597 struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
598 int pipe = psb_intel_crtc->pipe;
599 const struct psb_offset *map = &dev_priv->regmap[pipe];
600 unsigned long start, offset;
601 u32 dspcntr;
602 int ret = 0;
603
604 if (!gma_power_begin(dev, true))
605 return 0;
606
607 /* no fb bound */
608 if (!crtc->fb) {
609 dev_err(dev->dev, "No FB bound\n");
610 goto psb_intel_pipe_cleaner;
611 }
612
613
614 /* We are displaying this buffer, make sure it is actually loaded
615 into the GTT */
616 ret = psb_gtt_pin(psbfb->gtt);
617 if (ret < 0)
618 goto psb_intel_pipe_set_base_exit;
619 start = psbfb->gtt->offset;
620 offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
621
622 REG_WRITE(map->stride, crtc->fb->pitches[0]);
623
624 dspcntr = REG_READ(map->cntr);
625 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
626
627 switch (crtc->fb->bits_per_pixel) {
628 case 8:
629 dspcntr |= DISPPLANE_8BPP;
630 break;
631 case 16:
632 if (crtc->fb->depth == 15)
633 dspcntr |= DISPPLANE_15_16BPP;
634 else
635 dspcntr |= DISPPLANE_16BPP;
636 break;
637 case 24:
638 case 32:
639 dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
640 break;
641 default:
642 dev_err(dev->dev, "Unknown color depth\n");
643 ret = -EINVAL;
644 goto psb_intel_pipe_set_base_exit;
645 }
646 REG_WRITE(map->cntr, dspcntr);
647
648 dev_dbg(dev->dev,
649 "Writing base %08lX %08lX %d %d\n", start, offset, x, y);
650
651 REG_WRITE(map->base, offset);
652 REG_READ(map->base);
653 REG_WRITE(map->surf, start);
654 REG_READ(map->surf);
655
656psb_intel_pipe_cleaner:
657 /* If there was a previous display we can now unpin it */
658 if (old_fb)
659 psb_gtt_unpin(to_psb_fb(old_fb)->gtt);
660
661psb_intel_pipe_set_base_exit:
662 gma_power_end(dev);
663 return ret;
664}
665
666#define FIFO_PIPEA (1 << 0) 454#define FIFO_PIPEA (1 << 0)
667#define FIFO_PIPEB (1 << 1) 455#define FIFO_PIPEB (1 << 1)
668 456
@@ -670,12 +458,12 @@ static bool cdv_intel_pipe_enabled(struct drm_device *dev, int pipe)
670{ 458{
671 struct drm_crtc *crtc; 459 struct drm_crtc *crtc;
672 struct drm_psb_private *dev_priv = dev->dev_private; 460 struct drm_psb_private *dev_priv = dev->dev_private;
673 struct psb_intel_crtc *psb_intel_crtc = NULL; 461 struct gma_crtc *gma_crtc = NULL;
674 462
675 crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 463 crtc = dev_priv->pipe_to_crtc_mapping[pipe];
676 psb_intel_crtc = to_psb_intel_crtc(crtc); 464 gma_crtc = to_gma_crtc(crtc);
677 465
678 if (crtc->fb == NULL || !psb_intel_crtc->active) 466 if (crtc->fb == NULL || !gma_crtc->active)
679 return false; 467 return false;
680 return true; 468 return true;
681} 469}
@@ -701,29 +489,29 @@ static bool cdv_intel_single_pipe_active (struct drm_device *dev)
701 489
702static bool is_pipeb_lvds(struct drm_device *dev, struct drm_crtc *crtc) 490static bool is_pipeb_lvds(struct drm_device *dev, struct drm_crtc *crtc)
703{ 491{
704 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 492 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
705 struct drm_mode_config *mode_config = &dev->mode_config; 493 struct drm_mode_config *mode_config = &dev->mode_config;
706 struct drm_connector *connector; 494 struct drm_connector *connector;
707 495
708 if (psb_intel_crtc->pipe != 1) 496 if (gma_crtc->pipe != 1)
709 return false; 497 return false;
710 498
711 list_for_each_entry(connector, &mode_config->connector_list, head) { 499 list_for_each_entry(connector, &mode_config->connector_list, head) {
712 struct psb_intel_encoder *psb_intel_encoder = 500 struct gma_encoder *gma_encoder =
713 psb_intel_attached_encoder(connector); 501 gma_attached_encoder(connector);
714 502
715 if (!connector->encoder 503 if (!connector->encoder
716 || connector->encoder->crtc != crtc) 504 || connector->encoder->crtc != crtc)
717 continue; 505 continue;
718 506
719 if (psb_intel_encoder->type == INTEL_OUTPUT_LVDS) 507 if (gma_encoder->type == INTEL_OUTPUT_LVDS)
720 return true; 508 return true;
721 } 509 }
722 510
723 return false; 511 return false;
724} 512}
725 513
726static void cdv_intel_disable_self_refresh (struct drm_device *dev) 514void cdv_disable_sr(struct drm_device *dev)
727{ 515{
728 if (REG_READ(FW_BLC_SELF) & FW_BLC_SELF_EN) { 516 if (REG_READ(FW_BLC_SELF) & FW_BLC_SELF_EN) {
729 517
@@ -731,7 +519,7 @@ static void cdv_intel_disable_self_refresh (struct drm_device *dev)
731 REG_WRITE(FW_BLC_SELF, (REG_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN)); 519 REG_WRITE(FW_BLC_SELF, (REG_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN));
732 REG_READ(FW_BLC_SELF); 520 REG_READ(FW_BLC_SELF);
733 521
734 cdv_intel_wait_for_vblank(dev); 522 gma_wait_for_vblank(dev);
735 523
736 /* Cedarview workaround to write ovelay plane, which force to leave 524 /* Cedarview workaround to write ovelay plane, which force to leave
737 * MAX_FIFO state. 525 * MAX_FIFO state.
@@ -739,13 +527,14 @@ static void cdv_intel_disable_self_refresh (struct drm_device *dev)
739 REG_WRITE(OV_OVADD, 0/*dev_priv->ovl_offset*/); 527 REG_WRITE(OV_OVADD, 0/*dev_priv->ovl_offset*/);
740 REG_READ(OV_OVADD); 528 REG_READ(OV_OVADD);
741 529
742 cdv_intel_wait_for_vblank(dev); 530 gma_wait_for_vblank(dev);
743 } 531 }
744 532
745} 533}
746 534
747static void cdv_intel_update_watermark (struct drm_device *dev, struct drm_crtc *crtc) 535void cdv_update_wm(struct drm_device *dev, struct drm_crtc *crtc)
748{ 536{
537 struct drm_psb_private *dev_priv = dev->dev_private;
749 538
750 if (cdv_intel_single_pipe_active(dev)) { 539 if (cdv_intel_single_pipe_active(dev)) {
751 u32 fw; 540 u32 fw;
@@ -780,12 +569,12 @@ static void cdv_intel_update_watermark (struct drm_device *dev, struct drm_crtc
780 569
781 REG_WRITE(DSPFW6, 0x10); 570 REG_WRITE(DSPFW6, 0x10);
782 571
783 cdv_intel_wait_for_vblank(dev); 572 gma_wait_for_vblank(dev);
784 573
785 /* enable self-refresh for single pipe active */ 574 /* enable self-refresh for single pipe active */
786 REG_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); 575 REG_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
787 REG_READ(FW_BLC_SELF); 576 REG_READ(FW_BLC_SELF);
788 cdv_intel_wait_for_vblank(dev); 577 gma_wait_for_vblank(dev);
789 578
790 } else { 579 } else {
791 580
@@ -797,216 +586,12 @@ static void cdv_intel_update_watermark (struct drm_device *dev, struct drm_crtc
797 REG_WRITE(DSPFW5, 0x01010101); 586 REG_WRITE(DSPFW5, 0x01010101);
798 REG_WRITE(DSPFW6, 0x1d0); 587 REG_WRITE(DSPFW6, 0x1d0);
799 588
800 cdv_intel_wait_for_vblank(dev); 589 gma_wait_for_vblank(dev);
801
802 cdv_intel_disable_self_refresh(dev);
803
804 }
805}
806
807/** Loads the palette/gamma unit for the CRTC with the prepared values */
808static void cdv_intel_crtc_load_lut(struct drm_crtc *crtc)
809{
810 struct drm_device *dev = crtc->dev;
811 struct drm_psb_private *dev_priv = dev->dev_private;
812 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
813 int palreg = PALETTE_A;
814 int i;
815
816 /* The clocks have to be on to load the palette. */
817 if (!crtc->enabled)
818 return;
819
820 switch (psb_intel_crtc->pipe) {
821 case 0:
822 break;
823 case 1:
824 palreg = PALETTE_B;
825 break;
826 case 2:
827 palreg = PALETTE_C;
828 break;
829 default:
830 dev_err(dev->dev, "Illegal Pipe Number.\n");
831 return;
832 }
833
834 if (gma_power_begin(dev, false)) {
835 for (i = 0; i < 256; i++) {
836 REG_WRITE(palreg + 4 * i,
837 ((psb_intel_crtc->lut_r[i] +
838 psb_intel_crtc->lut_adj[i]) << 16) |
839 ((psb_intel_crtc->lut_g[i] +
840 psb_intel_crtc->lut_adj[i]) << 8) |
841 (psb_intel_crtc->lut_b[i] +
842 psb_intel_crtc->lut_adj[i]));
843 }
844 gma_power_end(dev);
845 } else {
846 for (i = 0; i < 256; i++) {
847 dev_priv->regs.pipe[0].palette[i] =
848 ((psb_intel_crtc->lut_r[i] +
849 psb_intel_crtc->lut_adj[i]) << 16) |
850 ((psb_intel_crtc->lut_g[i] +
851 psb_intel_crtc->lut_adj[i]) << 8) |
852 (psb_intel_crtc->lut_b[i] +
853 psb_intel_crtc->lut_adj[i]);
854 }
855
856 }
857}
858
859/**
860 * Sets the power management mode of the pipe and plane.
861 *
862 * This code should probably grow support for turning the cursor off and back
863 * on appropriately at the same time as we're turning the pipe off/on.
864 */
865static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
866{
867 struct drm_device *dev = crtc->dev;
868 struct drm_psb_private *dev_priv = dev->dev_private;
869 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
870 int pipe = psb_intel_crtc->pipe;
871 const struct psb_offset *map = &dev_priv->regmap[pipe];
872 u32 temp;
873
874 /* XXX: When our outputs are all unaware of DPMS modes other than off
875 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
876 */
877 cdv_intel_disable_self_refresh(dev);
878
879 switch (mode) {
880 case DRM_MODE_DPMS_ON:
881 case DRM_MODE_DPMS_STANDBY:
882 case DRM_MODE_DPMS_SUSPEND:
883 if (psb_intel_crtc->active)
884 break;
885
886 psb_intel_crtc->active = true;
887
888 /* Enable the DPLL */
889 temp = REG_READ(map->dpll);
890 if ((temp & DPLL_VCO_ENABLE) == 0) {
891 REG_WRITE(map->dpll, temp);
892 REG_READ(map->dpll);
893 /* Wait for the clocks to stabilize. */
894 udelay(150);
895 REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
896 REG_READ(map->dpll);
897 /* Wait for the clocks to stabilize. */
898 udelay(150);
899 REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
900 REG_READ(map->dpll);
901 /* Wait for the clocks to stabilize. */
902 udelay(150);
903 }
904
905 /* Jim Bish - switch plan and pipe per scott */
906 /* Enable the plane */
907 temp = REG_READ(map->cntr);
908 if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
909 REG_WRITE(map->cntr,
910 temp | DISPLAY_PLANE_ENABLE);
911 /* Flush the plane changes */
912 REG_WRITE(map->base, REG_READ(map->base));
913 }
914
915 udelay(150);
916
917 /* Enable the pipe */
918 temp = REG_READ(map->conf);
919 if ((temp & PIPEACONF_ENABLE) == 0)
920 REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);
921
922 temp = REG_READ(map->status);
923 temp &= ~(0xFFFF);
924 temp |= PIPE_FIFO_UNDERRUN;
925 REG_WRITE(map->status, temp);
926 REG_READ(map->status);
927
928 cdv_intel_crtc_load_lut(crtc);
929
930 /* Give the overlay scaler a chance to enable
931 * if it's on this pipe */
932 /* psb_intel_crtc_dpms_video(crtc, true); TODO */
933 break;
934 case DRM_MODE_DPMS_OFF:
935 if (!psb_intel_crtc->active)
936 break;
937
938 psb_intel_crtc->active = false;
939
940 /* Give the overlay scaler a chance to disable
941 * if it's on this pipe */
942 /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
943
944 /* Disable the VGA plane that we never use */
945 REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
946
947 /* Jim Bish - changed pipe/plane here as well. */
948
949 drm_vblank_off(dev, pipe);
950 /* Wait for vblank for the disable to take effect */
951 cdv_intel_wait_for_vblank(dev);
952
953 /* Next, disable display pipes */
954 temp = REG_READ(map->conf);
955 if ((temp & PIPEACONF_ENABLE) != 0) {
956 REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
957 REG_READ(map->conf);
958 }
959
960 /* Wait for vblank for the disable to take effect. */
961 cdv_intel_wait_for_vblank(dev);
962
963 udelay(150);
964
965 /* Disable display plane */
966 temp = REG_READ(map->cntr);
967 if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
968 REG_WRITE(map->cntr,
969 temp & ~DISPLAY_PLANE_ENABLE);
970 /* Flush the plane changes */
971 REG_WRITE(map->base, REG_READ(map->base));
972 REG_READ(map->base);
973 }
974
975 temp = REG_READ(map->dpll);
976 if ((temp & DPLL_VCO_ENABLE) != 0) {
977 REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
978 REG_READ(map->dpll);
979 }
980 590
981 /* Wait for the clocks to turn off. */ 591 dev_priv->ops->disable_sr(dev);
982 udelay(150);
983 break;
984 } 592 }
985 cdv_intel_update_watermark(dev, crtc);
986 /*Set FIFO Watermarks*/
987 REG_WRITE(DSPARB, 0x3F3E);
988}
989
990static void cdv_intel_crtc_prepare(struct drm_crtc *crtc)
991{
992 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
993 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
994}
995
996static void cdv_intel_crtc_commit(struct drm_crtc *crtc)
997{
998 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
999 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
1000}
1001
1002static bool cdv_intel_crtc_mode_fixup(struct drm_crtc *crtc,
1003 const struct drm_display_mode *mode,
1004 struct drm_display_mode *adjusted_mode)
1005{
1006 return true;
1007} 593}
1008 594
1009
1010/** 595/**
1011 * Return the pipe currently connected to the panel fitter, 596 * Return the pipe currently connected to the panel fitter,
1012 * or -1 if the panel fitter is not present or not in use 597 * or -1 if the panel fitter is not present or not in use
@@ -1031,31 +616,31 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
1031{ 616{
1032 struct drm_device *dev = crtc->dev; 617 struct drm_device *dev = crtc->dev;
1033 struct drm_psb_private *dev_priv = dev->dev_private; 618 struct drm_psb_private *dev_priv = dev->dev_private;
1034 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 619 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
1035 int pipe = psb_intel_crtc->pipe; 620 int pipe = gma_crtc->pipe;
1036 const struct psb_offset *map = &dev_priv->regmap[pipe]; 621 const struct psb_offset *map = &dev_priv->regmap[pipe];
1037 int refclk; 622 int refclk;
1038 struct cdv_intel_clock_t clock; 623 struct gma_clock_t clock;
1039 u32 dpll = 0, dspcntr, pipeconf; 624 u32 dpll = 0, dspcntr, pipeconf;
1040 bool ok; 625 bool ok;
1041 bool is_crt = false, is_lvds = false, is_tv = false; 626 bool is_crt = false, is_lvds = false, is_tv = false;
1042 bool is_hdmi = false, is_dp = false; 627 bool is_hdmi = false, is_dp = false;
1043 struct drm_mode_config *mode_config = &dev->mode_config; 628 struct drm_mode_config *mode_config = &dev->mode_config;
1044 struct drm_connector *connector; 629 struct drm_connector *connector;
1045 const struct cdv_intel_limit_t *limit; 630 const struct gma_limit_t *limit;
1046 u32 ddi_select = 0; 631 u32 ddi_select = 0;
1047 bool is_edp = false; 632 bool is_edp = false;
1048 633
1049 list_for_each_entry(connector, &mode_config->connector_list, head) { 634 list_for_each_entry(connector, &mode_config->connector_list, head) {
1050 struct psb_intel_encoder *psb_intel_encoder = 635 struct gma_encoder *gma_encoder =
1051 psb_intel_attached_encoder(connector); 636 gma_attached_encoder(connector);
1052 637
1053 if (!connector->encoder 638 if (!connector->encoder
1054 || connector->encoder->crtc != crtc) 639 || connector->encoder->crtc != crtc)
1055 continue; 640 continue;
1056 641
1057 ddi_select = psb_intel_encoder->ddi_select; 642 ddi_select = gma_encoder->ddi_select;
1058 switch (psb_intel_encoder->type) { 643 switch (gma_encoder->type) {
1059 case INTEL_OUTPUT_LVDS: 644 case INTEL_OUTPUT_LVDS:
1060 is_lvds = true; 645 is_lvds = true;
1061 break; 646 break;
@@ -1108,12 +693,13 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
1108 693
1109 drm_mode_debug_printmodeline(adjusted_mode); 694 drm_mode_debug_printmodeline(adjusted_mode);
1110 695
1111 limit = cdv_intel_limit(crtc, refclk); 696 limit = gma_crtc->clock_funcs->limit(crtc, refclk);
1112 697
1113 ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, 698 ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk,
1114 &clock); 699 &clock);
1115 if (!ok) { 700 if (!ok) {
1116 dev_err(dev->dev, "Couldn't find PLL settings for mode!\n"); 701 DRM_ERROR("Couldn't find PLL settings for mode! target: %d, actual: %d",
702 adjusted_mode->clock, clock.dot);
1117 return 0; 703 return 0;
1118 } 704 }
1119 705
@@ -1264,7 +850,7 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
1264 REG_WRITE(map->conf, pipeconf); 850 REG_WRITE(map->conf, pipeconf);
1265 REG_READ(map->conf); 851 REG_READ(map->conf);
1266 852
1267 cdv_intel_wait_for_vblank(dev); 853 gma_wait_for_vblank(dev);
1268 854
1269 REG_WRITE(map->cntr, dspcntr); 855 REG_WRITE(map->cntr, dspcntr);
1270 856
@@ -1275,344 +861,16 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
1275 crtc_funcs->mode_set_base(crtc, x, y, old_fb); 861 crtc_funcs->mode_set_base(crtc, x, y, old_fb);
1276 } 862 }
1277 863
1278 cdv_intel_wait_for_vblank(dev); 864 gma_wait_for_vblank(dev);
1279
1280 return 0;
1281}
1282
1283
1284/**
1285 * Save HW states of giving crtc
1286 */
1287static void cdv_intel_crtc_save(struct drm_crtc *crtc)
1288{
1289 struct drm_device *dev = crtc->dev;
1290 struct drm_psb_private *dev_priv = dev->dev_private;
1291 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
1292 struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
1293 const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
1294 uint32_t paletteReg;
1295 int i;
1296
1297 if (!crtc_state) {
1298 dev_dbg(dev->dev, "No CRTC state found\n");
1299 return;
1300 }
1301
1302 crtc_state->saveDSPCNTR = REG_READ(map->cntr);
1303 crtc_state->savePIPECONF = REG_READ(map->conf);
1304 crtc_state->savePIPESRC = REG_READ(map->src);
1305 crtc_state->saveFP0 = REG_READ(map->fp0);
1306 crtc_state->saveFP1 = REG_READ(map->fp1);
1307 crtc_state->saveDPLL = REG_READ(map->dpll);
1308 crtc_state->saveHTOTAL = REG_READ(map->htotal);
1309 crtc_state->saveHBLANK = REG_READ(map->hblank);
1310 crtc_state->saveHSYNC = REG_READ(map->hsync);
1311 crtc_state->saveVTOTAL = REG_READ(map->vtotal);
1312 crtc_state->saveVBLANK = REG_READ(map->vblank);
1313 crtc_state->saveVSYNC = REG_READ(map->vsync);
1314 crtc_state->saveDSPSTRIDE = REG_READ(map->stride);
1315
1316 /*NOTE: DSPSIZE DSPPOS only for psb*/
1317 crtc_state->saveDSPSIZE = REG_READ(map->size);
1318 crtc_state->saveDSPPOS = REG_READ(map->pos);
1319
1320 crtc_state->saveDSPBASE = REG_READ(map->base);
1321
1322 DRM_DEBUG("(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
1323 crtc_state->saveDSPCNTR,
1324 crtc_state->savePIPECONF,
1325 crtc_state->savePIPESRC,
1326 crtc_state->saveFP0,
1327 crtc_state->saveFP1,
1328 crtc_state->saveDPLL,
1329 crtc_state->saveHTOTAL,
1330 crtc_state->saveHBLANK,
1331 crtc_state->saveHSYNC,
1332 crtc_state->saveVTOTAL,
1333 crtc_state->saveVBLANK,
1334 crtc_state->saveVSYNC,
1335 crtc_state->saveDSPSTRIDE,
1336 crtc_state->saveDSPSIZE,
1337 crtc_state->saveDSPPOS,
1338 crtc_state->saveDSPBASE
1339 );
1340
1341 paletteReg = map->palette;
1342 for (i = 0; i < 256; ++i)
1343 crtc_state->savePalette[i] = REG_READ(paletteReg + (i << 2));
1344}
1345
1346/**
1347 * Restore HW states of giving crtc
1348 */
1349static void cdv_intel_crtc_restore(struct drm_crtc *crtc)
1350{
1351 struct drm_device *dev = crtc->dev;
1352 struct drm_psb_private *dev_priv = dev->dev_private;
1353 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
1354 struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
1355 const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
1356 uint32_t paletteReg;
1357 int i;
1358
1359 if (!crtc_state) {
1360 dev_dbg(dev->dev, "No crtc state\n");
1361 return;
1362 }
1363
1364 DRM_DEBUG(
1365 "current:(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
1366 REG_READ(map->cntr),
1367 REG_READ(map->conf),
1368 REG_READ(map->src),
1369 REG_READ(map->fp0),
1370 REG_READ(map->fp1),
1371 REG_READ(map->dpll),
1372 REG_READ(map->htotal),
1373 REG_READ(map->hblank),
1374 REG_READ(map->hsync),
1375 REG_READ(map->vtotal),
1376 REG_READ(map->vblank),
1377 REG_READ(map->vsync),
1378 REG_READ(map->stride),
1379 REG_READ(map->size),
1380 REG_READ(map->pos),
1381 REG_READ(map->base)
1382 );
1383
1384 DRM_DEBUG(
1385 "saved: (%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
1386 crtc_state->saveDSPCNTR,
1387 crtc_state->savePIPECONF,
1388 crtc_state->savePIPESRC,
1389 crtc_state->saveFP0,
1390 crtc_state->saveFP1,
1391 crtc_state->saveDPLL,
1392 crtc_state->saveHTOTAL,
1393 crtc_state->saveHBLANK,
1394 crtc_state->saveHSYNC,
1395 crtc_state->saveVTOTAL,
1396 crtc_state->saveVBLANK,
1397 crtc_state->saveVSYNC,
1398 crtc_state->saveDSPSTRIDE,
1399 crtc_state->saveDSPSIZE,
1400 crtc_state->saveDSPPOS,
1401 crtc_state->saveDSPBASE
1402 );
1403
1404
1405 if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
1406 REG_WRITE(map->dpll,
1407 crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
1408 REG_READ(map->dpll);
1409 DRM_DEBUG("write dpll: %x\n",
1410 REG_READ(map->dpll));
1411 udelay(150);
1412 }
1413
1414 REG_WRITE(map->fp0, crtc_state->saveFP0);
1415 REG_READ(map->fp0);
1416
1417 REG_WRITE(map->fp1, crtc_state->saveFP1);
1418 REG_READ(map->fp1);
1419
1420 REG_WRITE(map->dpll, crtc_state->saveDPLL);
1421 REG_READ(map->dpll);
1422 udelay(150);
1423
1424 REG_WRITE(map->htotal, crtc_state->saveHTOTAL);
1425 REG_WRITE(map->hblank, crtc_state->saveHBLANK);
1426 REG_WRITE(map->hsync, crtc_state->saveHSYNC);
1427 REG_WRITE(map->vtotal, crtc_state->saveVTOTAL);
1428 REG_WRITE(map->vblank, crtc_state->saveVBLANK);
1429 REG_WRITE(map->vsync, crtc_state->saveVSYNC);
1430 REG_WRITE(map->stride, crtc_state->saveDSPSTRIDE);
1431
1432 REG_WRITE(map->size, crtc_state->saveDSPSIZE);
1433 REG_WRITE(map->pos, crtc_state->saveDSPPOS);
1434
1435 REG_WRITE(map->src, crtc_state->savePIPESRC);
1436 REG_WRITE(map->base, crtc_state->saveDSPBASE);
1437 REG_WRITE(map->conf, crtc_state->savePIPECONF);
1438
1439 cdv_intel_wait_for_vblank(dev);
1440
1441 REG_WRITE(map->cntr, crtc_state->saveDSPCNTR);
1442 REG_WRITE(map->base, crtc_state->saveDSPBASE);
1443
1444 cdv_intel_wait_for_vblank(dev);
1445
1446 paletteReg = map->palette;
1447 for (i = 0; i < 256; ++i)
1448 REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]);
1449}
1450
1451static int cdv_intel_crtc_cursor_set(struct drm_crtc *crtc,
1452 struct drm_file *file_priv,
1453 uint32_t handle,
1454 uint32_t width, uint32_t height)
1455{
1456 struct drm_device *dev = crtc->dev;
1457 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
1458 int pipe = psb_intel_crtc->pipe;
1459 uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
1460 uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
1461 uint32_t temp;
1462 size_t addr = 0;
1463 struct gtt_range *gt;
1464 struct drm_gem_object *obj;
1465 int ret = 0;
1466
1467 /* if we want to turn of the cursor ignore width and height */
1468 if (!handle) {
1469 /* turn off the cursor */
1470 temp = CURSOR_MODE_DISABLE;
1471
1472 if (gma_power_begin(dev, false)) {
1473 REG_WRITE(control, temp);
1474 REG_WRITE(base, 0);
1475 gma_power_end(dev);
1476 }
1477
1478 /* unpin the old GEM object */
1479 if (psb_intel_crtc->cursor_obj) {
1480 gt = container_of(psb_intel_crtc->cursor_obj,
1481 struct gtt_range, gem);
1482 psb_gtt_unpin(gt);
1483 drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
1484 psb_intel_crtc->cursor_obj = NULL;
1485 }
1486
1487 return 0;
1488 }
1489
1490 /* Currently we only support 64x64 cursors */
1491 if (width != 64 || height != 64) {
1492 dev_dbg(dev->dev, "we currently only support 64x64 cursors\n");
1493 return -EINVAL;
1494 }
1495
1496 obj = drm_gem_object_lookup(dev, file_priv, handle);
1497 if (!obj)
1498 return -ENOENT;
1499
1500 if (obj->size < width * height * 4) {
1501 dev_dbg(dev->dev, "buffer is to small\n");
1502 ret = -ENOMEM;
1503 goto unref_cursor;
1504 }
1505
1506 gt = container_of(obj, struct gtt_range, gem);
1507
1508 /* Pin the memory into the GTT */
1509 ret = psb_gtt_pin(gt);
1510 if (ret) {
1511 dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
1512 goto unref_cursor;
1513 }
1514
1515 addr = gt->offset; /* Or resource.start ??? */
1516
1517 psb_intel_crtc->cursor_addr = addr;
1518
1519 temp = 0;
1520 /* set the pipe for the cursor */
1521 temp |= (pipe << 28);
1522 temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
1523
1524 if (gma_power_begin(dev, false)) {
1525 REG_WRITE(control, temp);
1526 REG_WRITE(base, addr);
1527 gma_power_end(dev);
1528 }
1529
1530 /* unpin the old GEM object */
1531 if (psb_intel_crtc->cursor_obj) {
1532 gt = container_of(psb_intel_crtc->cursor_obj,
1533 struct gtt_range, gem);
1534 psb_gtt_unpin(gt);
1535 drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
1536 }
1537
1538 psb_intel_crtc->cursor_obj = obj;
1539 return ret;
1540
1541unref_cursor:
1542 drm_gem_object_unreference(obj);
1543 return ret;
1544}
1545
1546static int cdv_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
1547{
1548 struct drm_device *dev = crtc->dev;
1549 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
1550 int pipe = psb_intel_crtc->pipe;
1551 uint32_t temp = 0;
1552 uint32_t adder;
1553
1554
1555 if (x < 0) {
1556 temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
1557 x = -x;
1558 }
1559 if (y < 0) {
1560 temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
1561 y = -y;
1562 }
1563
1564 temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
1565 temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
1566 865
1567 adder = psb_intel_crtc->cursor_addr;
1568
1569 if (gma_power_begin(dev, false)) {
1570 REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
1571 REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, adder);
1572 gma_power_end(dev);
1573 }
1574 return 0; 866 return 0;
1575} 867}
1576 868
1577static void cdv_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
1578 u16 *green, u16 *blue, uint32_t start, uint32_t size)
1579{
1580 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
1581 int i;
1582 int end = (start + size > 256) ? 256 : start + size;
1583
1584 for (i = start; i < end; i++) {
1585 psb_intel_crtc->lut_r[i] = red[i] >> 8;
1586 psb_intel_crtc->lut_g[i] = green[i] >> 8;
1587 psb_intel_crtc->lut_b[i] = blue[i] >> 8;
1588 }
1589
1590 cdv_intel_crtc_load_lut(crtc);
1591}
1592
1593static int cdv_crtc_set_config(struct drm_mode_set *set)
1594{
1595 int ret = 0;
1596 struct drm_device *dev = set->crtc->dev;
1597 struct drm_psb_private *dev_priv = dev->dev_private;
1598
1599 if (!dev_priv->rpm_enabled)
1600 return drm_crtc_helper_set_config(set);
1601
1602 pm_runtime_forbid(&dev->pdev->dev);
1603
1604 ret = drm_crtc_helper_set_config(set);
1605
1606 pm_runtime_allow(&dev->pdev->dev);
1607
1608 return ret;
1609}
1610
1611/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */ 869/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
1612 870
1613/* FIXME: why are we using this, should it be cdv_ in this tree ? */ 871/* FIXME: why are we using this, should it be cdv_ in this tree ? */
1614 872
1615static void i8xx_clock(int refclk, struct cdv_intel_clock_t *clock) 873static void i8xx_clock(int refclk, struct gma_clock_t *clock)
1616{ 874{
1617 clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2); 875 clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
1618 clock->p = clock->p1 * clock->p2; 876 clock->p = clock->p1 * clock->p2;
@@ -1625,12 +883,12 @@ static int cdv_intel_crtc_clock_get(struct drm_device *dev,
1625 struct drm_crtc *crtc) 883 struct drm_crtc *crtc)
1626{ 884{
1627 struct drm_psb_private *dev_priv = dev->dev_private; 885 struct drm_psb_private *dev_priv = dev->dev_private;
1628 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 886 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
1629 int pipe = psb_intel_crtc->pipe; 887 int pipe = gma_crtc->pipe;
1630 const struct psb_offset *map = &dev_priv->regmap[pipe]; 888 const struct psb_offset *map = &dev_priv->regmap[pipe];
1631 u32 dpll; 889 u32 dpll;
1632 u32 fp; 890 u32 fp;
1633 struct cdv_intel_clock_t clock; 891 struct gma_clock_t clock;
1634 bool is_lvds; 892 bool is_lvds;
1635 struct psb_pipe *p = &dev_priv->regs.pipe[pipe]; 893 struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
1636 894
@@ -1703,8 +961,8 @@ static int cdv_intel_crtc_clock_get(struct drm_device *dev,
1703struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev, 961struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev,
1704 struct drm_crtc *crtc) 962 struct drm_crtc *crtc)
1705{ 963{
1706 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 964 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
1707 int pipe = psb_intel_crtc->pipe; 965 int pipe = gma_crtc->pipe;
1708 struct drm_psb_private *dev_priv = dev->dev_private; 966 struct drm_psb_private *dev_priv = dev->dev_private;
1709 struct psb_pipe *p = &dev_priv->regs.pipe[pipe]; 967 struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
1710 const struct psb_offset *map = &dev_priv->regmap[pipe]; 968 const struct psb_offset *map = &dev_priv->regmap[pipe];
@@ -1747,44 +1005,28 @@ struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev,
1747 return mode; 1005 return mode;
1748} 1006}
1749 1007
1750static void cdv_intel_crtc_destroy(struct drm_crtc *crtc)
1751{
1752 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
1753
1754 kfree(psb_intel_crtc->crtc_state);
1755 drm_crtc_cleanup(crtc);
1756 kfree(psb_intel_crtc);
1757}
1758
1759static void cdv_intel_crtc_disable(struct drm_crtc *crtc)
1760{
1761 struct gtt_range *gt;
1762 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
1763
1764 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
1765
1766 if (crtc->fb) {
1767 gt = to_psb_fb(crtc->fb)->gtt;
1768 psb_gtt_unpin(gt);
1769 }
1770}
1771
1772const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = { 1008const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = {
1773 .dpms = cdv_intel_crtc_dpms, 1009 .dpms = gma_crtc_dpms,
1774 .mode_fixup = cdv_intel_crtc_mode_fixup, 1010 .mode_fixup = gma_crtc_mode_fixup,
1775 .mode_set = cdv_intel_crtc_mode_set, 1011 .mode_set = cdv_intel_crtc_mode_set,
1776 .mode_set_base = cdv_intel_pipe_set_base, 1012 .mode_set_base = gma_pipe_set_base,
1777 .prepare = cdv_intel_crtc_prepare, 1013 .prepare = gma_crtc_prepare,
1778 .commit = cdv_intel_crtc_commit, 1014 .commit = gma_crtc_commit,
1779 .disable = cdv_intel_crtc_disable, 1015 .disable = gma_crtc_disable,
1780}; 1016};
1781 1017
1782const struct drm_crtc_funcs cdv_intel_crtc_funcs = { 1018const struct drm_crtc_funcs cdv_intel_crtc_funcs = {
1783 .save = cdv_intel_crtc_save, 1019 .save = gma_crtc_save,
1784 .restore = cdv_intel_crtc_restore, 1020 .restore = gma_crtc_restore,
1785 .cursor_set = cdv_intel_crtc_cursor_set, 1021 .cursor_set = gma_crtc_cursor_set,
1786 .cursor_move = cdv_intel_crtc_cursor_move, 1022 .cursor_move = gma_crtc_cursor_move,
1787 .gamma_set = cdv_intel_crtc_gamma_set, 1023 .gamma_set = gma_crtc_gamma_set,
1788 .set_config = cdv_crtc_set_config, 1024 .set_config = gma_crtc_set_config,
1789 .destroy = cdv_intel_crtc_destroy, 1025 .destroy = gma_crtc_destroy,
1026};
1027
1028const struct gma_clock_funcs cdv_clock_funcs = {
1029 .clock = cdv_intel_clock,
1030 .limit = cdv_intel_limit,
1031 .pll_is_valid = gma_pll_is_valid,
1790}; 1032};
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index 88d9ef6b5b4a..f4eb43573cad 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -34,6 +34,7 @@
34#include "psb_drv.h" 34#include "psb_drv.h"
35#include "psb_intel_drv.h" 35#include "psb_intel_drv.h"
36#include "psb_intel_reg.h" 36#include "psb_intel_reg.h"
37#include "gma_display.h"
37#include <drm/drm_dp_helper.h> 38#include <drm/drm_dp_helper.h>
38 39
39#define _wait_for(COND, MS, W) ({ \ 40#define _wait_for(COND, MS, W) ({ \
@@ -68,7 +69,7 @@ struct cdv_intel_dp {
68 uint8_t link_bw; 69 uint8_t link_bw;
69 uint8_t lane_count; 70 uint8_t lane_count;
70 uint8_t dpcd[4]; 71 uint8_t dpcd[4];
71 struct psb_intel_encoder *encoder; 72 struct gma_encoder *encoder;
72 struct i2c_adapter adapter; 73 struct i2c_adapter adapter;
73 struct i2c_algo_dp_aux_data algo; 74 struct i2c_algo_dp_aux_data algo;
74 uint8_t train_set[4]; 75 uint8_t train_set[4];
@@ -114,18 +115,18 @@ static uint32_t dp_vswing_premph_table[] = {
114 * If a CPU or PCH DP output is attached to an eDP panel, this function 115 * If a CPU or PCH DP output is attached to an eDP panel, this function
115 * will return true, and false otherwise. 116 * will return true, and false otherwise.
116 */ 117 */
117static bool is_edp(struct psb_intel_encoder *encoder) 118static bool is_edp(struct gma_encoder *encoder)
118{ 119{
119 return encoder->type == INTEL_OUTPUT_EDP; 120 return encoder->type == INTEL_OUTPUT_EDP;
120} 121}
121 122
122 123
123static void cdv_intel_dp_start_link_train(struct psb_intel_encoder *encoder); 124static void cdv_intel_dp_start_link_train(struct gma_encoder *encoder);
124static void cdv_intel_dp_complete_link_train(struct psb_intel_encoder *encoder); 125static void cdv_intel_dp_complete_link_train(struct gma_encoder *encoder);
125static void cdv_intel_dp_link_down(struct psb_intel_encoder *encoder); 126static void cdv_intel_dp_link_down(struct gma_encoder *encoder);
126 127
127static int 128static int
128cdv_intel_dp_max_lane_count(struct psb_intel_encoder *encoder) 129cdv_intel_dp_max_lane_count(struct gma_encoder *encoder)
129{ 130{
130 struct cdv_intel_dp *intel_dp = encoder->dev_priv; 131 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
131 int max_lane_count = 4; 132 int max_lane_count = 4;
@@ -143,7 +144,7 @@ cdv_intel_dp_max_lane_count(struct psb_intel_encoder *encoder)
143} 144}
144 145
145static int 146static int
146cdv_intel_dp_max_link_bw(struct psb_intel_encoder *encoder) 147cdv_intel_dp_max_link_bw(struct gma_encoder *encoder)
147{ 148{
148 struct cdv_intel_dp *intel_dp = encoder->dev_priv; 149 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
149 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE]; 150 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
@@ -180,7 +181,7 @@ cdv_intel_dp_max_data_rate(int max_link_clock, int max_lanes)
180 return (max_link_clock * max_lanes * 19) / 20; 181 return (max_link_clock * max_lanes * 19) / 20;
181} 182}
182 183
183static void cdv_intel_edp_panel_vdd_on(struct psb_intel_encoder *intel_encoder) 184static void cdv_intel_edp_panel_vdd_on(struct gma_encoder *intel_encoder)
184{ 185{
185 struct drm_device *dev = intel_encoder->base.dev; 186 struct drm_device *dev = intel_encoder->base.dev;
186 struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv; 187 struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
@@ -200,7 +201,7 @@ static void cdv_intel_edp_panel_vdd_on(struct psb_intel_encoder *intel_encoder)
200 msleep(intel_dp->panel_power_up_delay); 201 msleep(intel_dp->panel_power_up_delay);
201} 202}
202 203
203static void cdv_intel_edp_panel_vdd_off(struct psb_intel_encoder *intel_encoder) 204static void cdv_intel_edp_panel_vdd_off(struct gma_encoder *intel_encoder)
204{ 205{
205 struct drm_device *dev = intel_encoder->base.dev; 206 struct drm_device *dev = intel_encoder->base.dev;
206 u32 pp; 207 u32 pp;
@@ -215,7 +216,7 @@ static void cdv_intel_edp_panel_vdd_off(struct psb_intel_encoder *intel_encoder)
215} 216}
216 217
217/* Returns true if the panel was already on when called */ 218/* Returns true if the panel was already on when called */
218static bool cdv_intel_edp_panel_on(struct psb_intel_encoder *intel_encoder) 219static bool cdv_intel_edp_panel_on(struct gma_encoder *intel_encoder)
219{ 220{
220 struct drm_device *dev = intel_encoder->base.dev; 221 struct drm_device *dev = intel_encoder->base.dev;
221 struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv; 222 struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
@@ -242,7 +243,7 @@ static bool cdv_intel_edp_panel_on(struct psb_intel_encoder *intel_encoder)
242 return false; 243 return false;
243} 244}
244 245
245static void cdv_intel_edp_panel_off (struct psb_intel_encoder *intel_encoder) 246static void cdv_intel_edp_panel_off (struct gma_encoder *intel_encoder)
246{ 247{
247 struct drm_device *dev = intel_encoder->base.dev; 248 struct drm_device *dev = intel_encoder->base.dev;
248 u32 pp, idle_off_mask = PP_ON ; 249 u32 pp, idle_off_mask = PP_ON ;
@@ -274,7 +275,7 @@ static void cdv_intel_edp_panel_off (struct psb_intel_encoder *intel_encoder)
274 DRM_DEBUG_KMS("Over\n"); 275 DRM_DEBUG_KMS("Over\n");
275} 276}
276 277
277static void cdv_intel_edp_backlight_on (struct psb_intel_encoder *intel_encoder) 278static void cdv_intel_edp_backlight_on (struct gma_encoder *intel_encoder)
278{ 279{
279 struct drm_device *dev = intel_encoder->base.dev; 280 struct drm_device *dev = intel_encoder->base.dev;
280 u32 pp; 281 u32 pp;
@@ -294,7 +295,7 @@ static void cdv_intel_edp_backlight_on (struct psb_intel_encoder *intel_encoder)
294 gma_backlight_enable(dev); 295 gma_backlight_enable(dev);
295} 296}
296 297
297static void cdv_intel_edp_backlight_off (struct psb_intel_encoder *intel_encoder) 298static void cdv_intel_edp_backlight_off (struct gma_encoder *intel_encoder)
298{ 299{
299 struct drm_device *dev = intel_encoder->base.dev; 300 struct drm_device *dev = intel_encoder->base.dev;
300 struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv; 301 struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
@@ -314,7 +315,7 @@ static int
314cdv_intel_dp_mode_valid(struct drm_connector *connector, 315cdv_intel_dp_mode_valid(struct drm_connector *connector,
315 struct drm_display_mode *mode) 316 struct drm_display_mode *mode)
316{ 317{
317 struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector); 318 struct gma_encoder *encoder = gma_attached_encoder(connector);
318 struct cdv_intel_dp *intel_dp = encoder->dev_priv; 319 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
319 int max_link_clock = cdv_intel_dp_link_clock(cdv_intel_dp_max_link_bw(encoder)); 320 int max_link_clock = cdv_intel_dp_link_clock(cdv_intel_dp_max_link_bw(encoder));
320 int max_lanes = cdv_intel_dp_max_lane_count(encoder); 321 int max_lanes = cdv_intel_dp_max_lane_count(encoder);
@@ -370,7 +371,7 @@ unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
370} 371}
371 372
372static int 373static int
373cdv_intel_dp_aux_ch(struct psb_intel_encoder *encoder, 374cdv_intel_dp_aux_ch(struct gma_encoder *encoder,
374 uint8_t *send, int send_bytes, 375 uint8_t *send, int send_bytes,
375 uint8_t *recv, int recv_size) 376 uint8_t *recv, int recv_size)
376{ 377{
@@ -472,7 +473,7 @@ cdv_intel_dp_aux_ch(struct psb_intel_encoder *encoder,
472 473
473/* Write data to the aux channel in native mode */ 474/* Write data to the aux channel in native mode */
474static int 475static int
475cdv_intel_dp_aux_native_write(struct psb_intel_encoder *encoder, 476cdv_intel_dp_aux_native_write(struct gma_encoder *encoder,
476 uint16_t address, uint8_t *send, int send_bytes) 477 uint16_t address, uint8_t *send, int send_bytes)
477{ 478{
478 int ret; 479 int ret;
@@ -504,7 +505,7 @@ cdv_intel_dp_aux_native_write(struct psb_intel_encoder *encoder,
504 505
505/* Write a single byte to the aux channel in native mode */ 506/* Write a single byte to the aux channel in native mode */
506static int 507static int
507cdv_intel_dp_aux_native_write_1(struct psb_intel_encoder *encoder, 508cdv_intel_dp_aux_native_write_1(struct gma_encoder *encoder,
508 uint16_t address, uint8_t byte) 509 uint16_t address, uint8_t byte)
509{ 510{
510 return cdv_intel_dp_aux_native_write(encoder, address, &byte, 1); 511 return cdv_intel_dp_aux_native_write(encoder, address, &byte, 1);
@@ -512,7 +513,7 @@ cdv_intel_dp_aux_native_write_1(struct psb_intel_encoder *encoder,
512 513
513/* read bytes from a native aux channel */ 514/* read bytes from a native aux channel */
514static int 515static int
515cdv_intel_dp_aux_native_read(struct psb_intel_encoder *encoder, 516cdv_intel_dp_aux_native_read(struct gma_encoder *encoder,
516 uint16_t address, uint8_t *recv, int recv_bytes) 517 uint16_t address, uint8_t *recv, int recv_bytes)
517{ 518{
518 uint8_t msg[4]; 519 uint8_t msg[4];
@@ -557,7 +558,7 @@ cdv_intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
557 struct cdv_intel_dp *intel_dp = container_of(adapter, 558 struct cdv_intel_dp *intel_dp = container_of(adapter,
558 struct cdv_intel_dp, 559 struct cdv_intel_dp,
559 adapter); 560 adapter);
560 struct psb_intel_encoder *encoder = intel_dp->encoder; 561 struct gma_encoder *encoder = intel_dp->encoder;
561 uint16_t address = algo_data->address; 562 uint16_t address = algo_data->address;
562 uint8_t msg[5]; 563 uint8_t msg[5];
563 uint8_t reply[2]; 564 uint8_t reply[2];
@@ -647,7 +648,8 @@ cdv_intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
647} 648}
648 649
649static int 650static int
650cdv_intel_dp_i2c_init(struct psb_intel_connector *connector, struct psb_intel_encoder *encoder, const char *name) 651cdv_intel_dp_i2c_init(struct gma_connector *connector,
652 struct gma_encoder *encoder, const char *name)
651{ 653{
652 struct cdv_intel_dp *intel_dp = encoder->dev_priv; 654 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
653 int ret; 655 int ret;
@@ -698,7 +700,7 @@ cdv_intel_dp_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mo
698 struct drm_display_mode *adjusted_mode) 700 struct drm_display_mode *adjusted_mode)
699{ 701{
700 struct drm_psb_private *dev_priv = encoder->dev->dev_private; 702 struct drm_psb_private *dev_priv = encoder->dev->dev_private;
701 struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder); 703 struct gma_encoder *intel_encoder = to_gma_encoder(encoder);
702 struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv; 704 struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
703 int lane_count, clock; 705 int lane_count, clock;
704 int max_lane_count = cdv_intel_dp_max_lane_count(intel_encoder); 706 int max_lane_count = cdv_intel_dp_max_lane_count(intel_encoder);
@@ -792,22 +794,22 @@ cdv_intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
792 struct drm_psb_private *dev_priv = dev->dev_private; 794 struct drm_psb_private *dev_priv = dev->dev_private;
793 struct drm_mode_config *mode_config = &dev->mode_config; 795 struct drm_mode_config *mode_config = &dev->mode_config;
794 struct drm_encoder *encoder; 796 struct drm_encoder *encoder;
795 struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc); 797 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
796 int lane_count = 4, bpp = 24; 798 int lane_count = 4, bpp = 24;
797 struct cdv_intel_dp_m_n m_n; 799 struct cdv_intel_dp_m_n m_n;
798 int pipe = intel_crtc->pipe; 800 int pipe = gma_crtc->pipe;
799 801
800 /* 802 /*
801 * Find the lane count in the intel_encoder private 803 * Find the lane count in the intel_encoder private
802 */ 804 */
803 list_for_each_entry(encoder, &mode_config->encoder_list, head) { 805 list_for_each_entry(encoder, &mode_config->encoder_list, head) {
804 struct psb_intel_encoder *intel_encoder; 806 struct gma_encoder *intel_encoder;
805 struct cdv_intel_dp *intel_dp; 807 struct cdv_intel_dp *intel_dp;
806 808
807 if (encoder->crtc != crtc) 809 if (encoder->crtc != crtc)
808 continue; 810 continue;
809 811
810 intel_encoder = to_psb_intel_encoder(encoder); 812 intel_encoder = to_gma_encoder(encoder);
811 intel_dp = intel_encoder->dev_priv; 813 intel_dp = intel_encoder->dev_priv;
812 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) { 814 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
813 lane_count = intel_dp->lane_count; 815 lane_count = intel_dp->lane_count;
@@ -841,9 +843,9 @@ static void
841cdv_intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, 843cdv_intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
842 struct drm_display_mode *adjusted_mode) 844 struct drm_display_mode *adjusted_mode)
843{ 845{
844 struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder); 846 struct gma_encoder *intel_encoder = to_gma_encoder(encoder);
845 struct drm_crtc *crtc = encoder->crtc; 847 struct drm_crtc *crtc = encoder->crtc;
846 struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc); 848 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
847 struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv; 849 struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
848 struct drm_device *dev = encoder->dev; 850 struct drm_device *dev = encoder->dev;
849 851
@@ -885,7 +887,7 @@ cdv_intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode
885 } 887 }
886 888
887 /* CPT DP's pipe select is decided in TRANS_DP_CTL */ 889 /* CPT DP's pipe select is decided in TRANS_DP_CTL */
888 if (intel_crtc->pipe == 1) 890 if (gma_crtc->pipe == 1)
889 intel_dp->DP |= DP_PIPEB_SELECT; 891 intel_dp->DP |= DP_PIPEB_SELECT;
890 892
891 REG_WRITE(intel_dp->output_reg, (intel_dp->DP | DP_PORT_EN)); 893 REG_WRITE(intel_dp->output_reg, (intel_dp->DP | DP_PORT_EN));
@@ -900,7 +902,7 @@ cdv_intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode
900 else 902 else
901 pfit_control = 0; 903 pfit_control = 0;
902 904
903 pfit_control |= intel_crtc->pipe << PFIT_PIPE_SHIFT; 905 pfit_control |= gma_crtc->pipe << PFIT_PIPE_SHIFT;
904 906
905 REG_WRITE(PFIT_CONTROL, pfit_control); 907 REG_WRITE(PFIT_CONTROL, pfit_control);
906 } 908 }
@@ -908,7 +910,7 @@ cdv_intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode
908 910
909 911
910/* If the sink supports it, try to set the power state appropriately */ 912/* If the sink supports it, try to set the power state appropriately */
911static void cdv_intel_dp_sink_dpms(struct psb_intel_encoder *encoder, int mode) 913static void cdv_intel_dp_sink_dpms(struct gma_encoder *encoder, int mode)
912{ 914{
913 struct cdv_intel_dp *intel_dp = encoder->dev_priv; 915 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
914 int ret, i; 916 int ret, i;
@@ -940,7 +942,7 @@ static void cdv_intel_dp_sink_dpms(struct psb_intel_encoder *encoder, int mode)
940 942
941static void cdv_intel_dp_prepare(struct drm_encoder *encoder) 943static void cdv_intel_dp_prepare(struct drm_encoder *encoder)
942{ 944{
943 struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder); 945 struct gma_encoder *intel_encoder = to_gma_encoder(encoder);
944 int edp = is_edp(intel_encoder); 946 int edp = is_edp(intel_encoder);
945 947
946 if (edp) { 948 if (edp) {
@@ -957,7 +959,7 @@ static void cdv_intel_dp_prepare(struct drm_encoder *encoder)
957 959
958static void cdv_intel_dp_commit(struct drm_encoder *encoder) 960static void cdv_intel_dp_commit(struct drm_encoder *encoder)
959{ 961{
960 struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder); 962 struct gma_encoder *intel_encoder = to_gma_encoder(encoder);
961 int edp = is_edp(intel_encoder); 963 int edp = is_edp(intel_encoder);
962 964
963 if (edp) 965 if (edp)
@@ -971,7 +973,7 @@ static void cdv_intel_dp_commit(struct drm_encoder *encoder)
971static void 973static void
972cdv_intel_dp_dpms(struct drm_encoder *encoder, int mode) 974cdv_intel_dp_dpms(struct drm_encoder *encoder, int mode)
973{ 975{
974 struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder); 976 struct gma_encoder *intel_encoder = to_gma_encoder(encoder);
975 struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv; 977 struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
976 struct drm_device *dev = encoder->dev; 978 struct drm_device *dev = encoder->dev;
977 uint32_t dp_reg = REG_READ(intel_dp->output_reg); 979 uint32_t dp_reg = REG_READ(intel_dp->output_reg);
@@ -1006,7 +1008,7 @@ cdv_intel_dp_dpms(struct drm_encoder *encoder, int mode)
1006 * cases where the sink may still be asleep. 1008 * cases where the sink may still be asleep.
1007 */ 1009 */
1008static bool 1010static bool
1009cdv_intel_dp_aux_native_read_retry(struct psb_intel_encoder *encoder, uint16_t address, 1011cdv_intel_dp_aux_native_read_retry(struct gma_encoder *encoder, uint16_t address,
1010 uint8_t *recv, int recv_bytes) 1012 uint8_t *recv, int recv_bytes)
1011{ 1013{
1012 int ret, i; 1014 int ret, i;
@@ -1031,7 +1033,7 @@ cdv_intel_dp_aux_native_read_retry(struct psb_intel_encoder *encoder, uint16_t a
1031 * link status information 1033 * link status information
1032 */ 1034 */
1033static bool 1035static bool
1034cdv_intel_dp_get_link_status(struct psb_intel_encoder *encoder) 1036cdv_intel_dp_get_link_status(struct gma_encoder *encoder)
1035{ 1037{
1036 struct cdv_intel_dp *intel_dp = encoder->dev_priv; 1038 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
1037 return cdv_intel_dp_aux_native_read_retry(encoder, 1039 return cdv_intel_dp_aux_native_read_retry(encoder,
@@ -1105,7 +1107,7 @@ cdv_intel_dp_pre_emphasis_max(uint8_t voltage_swing)
1105} 1107}
1106*/ 1108*/
1107static void 1109static void
1108cdv_intel_get_adjust_train(struct psb_intel_encoder *encoder) 1110cdv_intel_get_adjust_train(struct gma_encoder *encoder)
1109{ 1111{
1110 struct cdv_intel_dp *intel_dp = encoder->dev_priv; 1112 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
1111 uint8_t v = 0; 1113 uint8_t v = 0;
@@ -1164,7 +1166,7 @@ cdv_intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_c
1164 DP_LANE_CHANNEL_EQ_DONE|\ 1166 DP_LANE_CHANNEL_EQ_DONE|\
1165 DP_LANE_SYMBOL_LOCKED) 1167 DP_LANE_SYMBOL_LOCKED)
1166static bool 1168static bool
1167cdv_intel_channel_eq_ok(struct psb_intel_encoder *encoder) 1169cdv_intel_channel_eq_ok(struct gma_encoder *encoder)
1168{ 1170{
1169 struct cdv_intel_dp *intel_dp = encoder->dev_priv; 1171 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
1170 uint8_t lane_align; 1172 uint8_t lane_align;
@@ -1184,7 +1186,7 @@ cdv_intel_channel_eq_ok(struct psb_intel_encoder *encoder)
1184} 1186}
1185 1187
1186static bool 1188static bool
1187cdv_intel_dp_set_link_train(struct psb_intel_encoder *encoder, 1189cdv_intel_dp_set_link_train(struct gma_encoder *encoder,
1188 uint32_t dp_reg_value, 1190 uint32_t dp_reg_value,
1189 uint8_t dp_train_pat) 1191 uint8_t dp_train_pat)
1190{ 1192{
@@ -1211,7 +1213,7 @@ cdv_intel_dp_set_link_train(struct psb_intel_encoder *encoder,
1211 1213
1212 1214
1213static bool 1215static bool
1214cdv_intel_dplink_set_level(struct psb_intel_encoder *encoder, 1216cdv_intel_dplink_set_level(struct gma_encoder *encoder,
1215 uint8_t dp_train_pat) 1217 uint8_t dp_train_pat)
1216{ 1218{
1217 1219
@@ -1232,7 +1234,7 @@ cdv_intel_dplink_set_level(struct psb_intel_encoder *encoder,
1232} 1234}
1233 1235
1234static void 1236static void
1235cdv_intel_dp_set_vswing_premph(struct psb_intel_encoder *encoder, uint8_t signal_level) 1237cdv_intel_dp_set_vswing_premph(struct gma_encoder *encoder, uint8_t signal_level)
1236{ 1238{
1237 struct drm_device *dev = encoder->base.dev; 1239 struct drm_device *dev = encoder->base.dev;
1238 struct cdv_intel_dp *intel_dp = encoder->dev_priv; 1240 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
@@ -1298,7 +1300,7 @@ cdv_intel_dp_set_vswing_premph(struct psb_intel_encoder *encoder, uint8_t signal
1298 1300
1299/* Enable corresponding port and start training pattern 1 */ 1301/* Enable corresponding port and start training pattern 1 */
1300static void 1302static void
1301cdv_intel_dp_start_link_train(struct psb_intel_encoder *encoder) 1303cdv_intel_dp_start_link_train(struct gma_encoder *encoder)
1302{ 1304{
1303 struct drm_device *dev = encoder->base.dev; 1305 struct drm_device *dev = encoder->base.dev;
1304 struct cdv_intel_dp *intel_dp = encoder->dev_priv; 1306 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
@@ -1317,7 +1319,7 @@ cdv_intel_dp_start_link_train(struct psb_intel_encoder *encoder)
1317 /* Enable output, wait for it to become active */ 1319 /* Enable output, wait for it to become active */
1318 REG_WRITE(intel_dp->output_reg, reg); 1320 REG_WRITE(intel_dp->output_reg, reg);
1319 REG_READ(intel_dp->output_reg); 1321 REG_READ(intel_dp->output_reg);
1320 psb_intel_wait_for_vblank(dev); 1322 gma_wait_for_vblank(dev);
1321 1323
1322 DRM_DEBUG_KMS("Link config\n"); 1324 DRM_DEBUG_KMS("Link config\n");
1323 /* Write the link configuration data */ 1325 /* Write the link configuration data */
@@ -1392,7 +1394,7 @@ cdv_intel_dp_start_link_train(struct psb_intel_encoder *encoder)
1392} 1394}
1393 1395
1394static void 1396static void
1395cdv_intel_dp_complete_link_train(struct psb_intel_encoder *encoder) 1397cdv_intel_dp_complete_link_train(struct gma_encoder *encoder)
1396{ 1398{
1397 struct drm_device *dev = encoder->base.dev; 1399 struct drm_device *dev = encoder->base.dev;
1398 struct cdv_intel_dp *intel_dp = encoder->dev_priv; 1400 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
@@ -1478,7 +1480,7 @@ cdv_intel_dp_complete_link_train(struct psb_intel_encoder *encoder)
1478} 1480}
1479 1481
1480static void 1482static void
1481cdv_intel_dp_link_down(struct psb_intel_encoder *encoder) 1483cdv_intel_dp_link_down(struct gma_encoder *encoder)
1482{ 1484{
1483 struct drm_device *dev = encoder->base.dev; 1485 struct drm_device *dev = encoder->base.dev;
1484 struct cdv_intel_dp *intel_dp = encoder->dev_priv; 1486 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
@@ -1502,8 +1504,7 @@ cdv_intel_dp_link_down(struct psb_intel_encoder *encoder)
1502 REG_READ(intel_dp->output_reg); 1504 REG_READ(intel_dp->output_reg);
1503} 1505}
1504 1506
1505static enum drm_connector_status 1507static enum drm_connector_status cdv_dp_detect(struct gma_encoder *encoder)
1506cdv_dp_detect(struct psb_intel_encoder *encoder)
1507{ 1508{
1508 struct cdv_intel_dp *intel_dp = encoder->dev_priv; 1509 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
1509 enum drm_connector_status status; 1510 enum drm_connector_status status;
@@ -1531,7 +1532,7 @@ cdv_dp_detect(struct psb_intel_encoder *encoder)
1531static enum drm_connector_status 1532static enum drm_connector_status
1532cdv_intel_dp_detect(struct drm_connector *connector, bool force) 1533cdv_intel_dp_detect(struct drm_connector *connector, bool force)
1533{ 1534{
1534 struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector); 1535 struct gma_encoder *encoder = gma_attached_encoder(connector);
1535 struct cdv_intel_dp *intel_dp = encoder->dev_priv; 1536 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
1536 enum drm_connector_status status; 1537 enum drm_connector_status status;
1537 struct edid *edid = NULL; 1538 struct edid *edid = NULL;
@@ -1565,7 +1566,7 @@ cdv_intel_dp_detect(struct drm_connector *connector, bool force)
1565 1566
1566static int cdv_intel_dp_get_modes(struct drm_connector *connector) 1567static int cdv_intel_dp_get_modes(struct drm_connector *connector)
1567{ 1568{
1568 struct psb_intel_encoder *intel_encoder = psb_intel_attached_encoder(connector); 1569 struct gma_encoder *intel_encoder = gma_attached_encoder(connector);
1569 struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv; 1570 struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
1570 struct edid *edid = NULL; 1571 struct edid *edid = NULL;
1571 int ret = 0; 1572 int ret = 0;
@@ -1621,7 +1622,7 @@ static int cdv_intel_dp_get_modes(struct drm_connector *connector)
1621static bool 1622static bool
1622cdv_intel_dp_detect_audio(struct drm_connector *connector) 1623cdv_intel_dp_detect_audio(struct drm_connector *connector)
1623{ 1624{
1624 struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector); 1625 struct gma_encoder *encoder = gma_attached_encoder(connector);
1625 struct cdv_intel_dp *intel_dp = encoder->dev_priv; 1626 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
1626 struct edid *edid; 1627 struct edid *edid;
1627 bool has_audio = false; 1628 bool has_audio = false;
@@ -1647,7 +1648,7 @@ cdv_intel_dp_set_property(struct drm_connector *connector,
1647 uint64_t val) 1648 uint64_t val)
1648{ 1649{
1649 struct drm_psb_private *dev_priv = connector->dev->dev_private; 1650 struct drm_psb_private *dev_priv = connector->dev->dev_private;
1650 struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector); 1651 struct gma_encoder *encoder = gma_attached_encoder(connector);
1651 struct cdv_intel_dp *intel_dp = encoder->dev_priv; 1652 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
1652 int ret; 1653 int ret;
1653 1654
@@ -1700,11 +1701,10 @@ done:
1700static void 1701static void
1701cdv_intel_dp_destroy(struct drm_connector *connector) 1702cdv_intel_dp_destroy(struct drm_connector *connector)
1702{ 1703{
1703 struct psb_intel_encoder *psb_intel_encoder = 1704 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
1704 psb_intel_attached_encoder(connector); 1705 struct cdv_intel_dp *intel_dp = gma_encoder->dev_priv;
1705 struct cdv_intel_dp *intel_dp = psb_intel_encoder->dev_priv;
1706 1706
1707 if (is_edp(psb_intel_encoder)) { 1707 if (is_edp(gma_encoder)) {
1708 /* cdv_intel_panel_destroy_backlight(connector->dev); */ 1708 /* cdv_intel_panel_destroy_backlight(connector->dev); */
1709 if (intel_dp->panel_fixed_mode) { 1709 if (intel_dp->panel_fixed_mode) {
1710 kfree(intel_dp->panel_fixed_mode); 1710 kfree(intel_dp->panel_fixed_mode);
@@ -1741,7 +1741,7 @@ static const struct drm_connector_funcs cdv_intel_dp_connector_funcs = {
1741static const struct drm_connector_helper_funcs cdv_intel_dp_connector_helper_funcs = { 1741static const struct drm_connector_helper_funcs cdv_intel_dp_connector_helper_funcs = {
1742 .get_modes = cdv_intel_dp_get_modes, 1742 .get_modes = cdv_intel_dp_get_modes,
1743 .mode_valid = cdv_intel_dp_mode_valid, 1743 .mode_valid = cdv_intel_dp_mode_valid,
1744 .best_encoder = psb_intel_best_encoder, 1744 .best_encoder = gma_best_encoder,
1745}; 1745};
1746 1746
1747static const struct drm_encoder_funcs cdv_intel_dp_enc_funcs = { 1747static const struct drm_encoder_funcs cdv_intel_dp_enc_funcs = {
@@ -1800,19 +1800,19 @@ static void cdv_disable_intel_clock_gating(struct drm_device *dev)
1800void 1800void
1801cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev, int output_reg) 1801cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev, int output_reg)
1802{ 1802{
1803 struct psb_intel_encoder *psb_intel_encoder; 1803 struct gma_encoder *gma_encoder;
1804 struct psb_intel_connector *psb_intel_connector; 1804 struct gma_connector *gma_connector;
1805 struct drm_connector *connector; 1805 struct drm_connector *connector;
1806 struct drm_encoder *encoder; 1806 struct drm_encoder *encoder;
1807 struct cdv_intel_dp *intel_dp; 1807 struct cdv_intel_dp *intel_dp;
1808 const char *name = NULL; 1808 const char *name = NULL;
1809 int type = DRM_MODE_CONNECTOR_DisplayPort; 1809 int type = DRM_MODE_CONNECTOR_DisplayPort;
1810 1810
1811 psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL); 1811 gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL);
1812 if (!psb_intel_encoder) 1812 if (!gma_encoder)
1813 return; 1813 return;
1814 psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL); 1814 gma_connector = kzalloc(sizeof(struct gma_connector), GFP_KERNEL);
1815 if (!psb_intel_connector) 1815 if (!gma_connector)
1816 goto err_connector; 1816 goto err_connector;
1817 intel_dp = kzalloc(sizeof(struct cdv_intel_dp), GFP_KERNEL); 1817 intel_dp = kzalloc(sizeof(struct cdv_intel_dp), GFP_KERNEL);
1818 if (!intel_dp) 1818 if (!intel_dp)
@@ -1821,22 +1821,22 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
1821 if ((output_reg == DP_C) && cdv_intel_dpc_is_edp(dev)) 1821 if ((output_reg == DP_C) && cdv_intel_dpc_is_edp(dev))
1822 type = DRM_MODE_CONNECTOR_eDP; 1822 type = DRM_MODE_CONNECTOR_eDP;
1823 1823
1824 connector = &psb_intel_connector->base; 1824 connector = &gma_connector->base;
1825 encoder = &psb_intel_encoder->base; 1825 encoder = &gma_encoder->base;
1826 1826
1827 drm_connector_init(dev, connector, &cdv_intel_dp_connector_funcs, type); 1827 drm_connector_init(dev, connector, &cdv_intel_dp_connector_funcs, type);
1828 drm_encoder_init(dev, encoder, &cdv_intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS); 1828 drm_encoder_init(dev, encoder, &cdv_intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS);
1829 1829
1830 psb_intel_connector_attach_encoder(psb_intel_connector, psb_intel_encoder); 1830 gma_connector_attach_encoder(gma_connector, gma_encoder);
1831 1831
1832 if (type == DRM_MODE_CONNECTOR_DisplayPort) 1832 if (type == DRM_MODE_CONNECTOR_DisplayPort)
1833 psb_intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; 1833 gma_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
1834 else 1834 else
1835 psb_intel_encoder->type = INTEL_OUTPUT_EDP; 1835 gma_encoder->type = INTEL_OUTPUT_EDP;
1836 1836
1837 1837
1838 psb_intel_encoder->dev_priv=intel_dp; 1838 gma_encoder->dev_priv=intel_dp;
1839 intel_dp->encoder = psb_intel_encoder; 1839 intel_dp->encoder = gma_encoder;
1840 intel_dp->output_reg = output_reg; 1840 intel_dp->output_reg = output_reg;
1841 1841
1842 drm_encoder_helper_add(encoder, &cdv_intel_dp_helper_funcs); 1842 drm_encoder_helper_add(encoder, &cdv_intel_dp_helper_funcs);
@@ -1852,21 +1852,21 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
1852 switch (output_reg) { 1852 switch (output_reg) {
1853 case DP_B: 1853 case DP_B:
1854 name = "DPDDC-B"; 1854 name = "DPDDC-B";
1855 psb_intel_encoder->ddi_select = (DP_MASK | DDI0_SELECT); 1855 gma_encoder->ddi_select = (DP_MASK | DDI0_SELECT);
1856 break; 1856 break;
1857 case DP_C: 1857 case DP_C:
1858 name = "DPDDC-C"; 1858 name = "DPDDC-C";
1859 psb_intel_encoder->ddi_select = (DP_MASK | DDI1_SELECT); 1859 gma_encoder->ddi_select = (DP_MASK | DDI1_SELECT);
1860 break; 1860 break;
1861 } 1861 }
1862 1862
1863 cdv_disable_intel_clock_gating(dev); 1863 cdv_disable_intel_clock_gating(dev);
1864 1864
1865 cdv_intel_dp_i2c_init(psb_intel_connector, psb_intel_encoder, name); 1865 cdv_intel_dp_i2c_init(gma_connector, gma_encoder, name);
1866 /* FIXME:fail check */ 1866 /* FIXME:fail check */
1867 cdv_intel_dp_add_properties(connector); 1867 cdv_intel_dp_add_properties(connector);
1868 1868
1869 if (is_edp(psb_intel_encoder)) { 1869 if (is_edp(gma_encoder)) {
1870 int ret; 1870 int ret;
1871 struct edp_power_seq cur; 1871 struct edp_power_seq cur;
1872 u32 pp_on, pp_off, pp_div; 1872 u32 pp_on, pp_off, pp_div;
@@ -1920,11 +1920,11 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
1920 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay); 1920 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
1921 1921
1922 1922
1923 cdv_intel_edp_panel_vdd_on(psb_intel_encoder); 1923 cdv_intel_edp_panel_vdd_on(gma_encoder);
1924 ret = cdv_intel_dp_aux_native_read(psb_intel_encoder, DP_DPCD_REV, 1924 ret = cdv_intel_dp_aux_native_read(gma_encoder, DP_DPCD_REV,
1925 intel_dp->dpcd, 1925 intel_dp->dpcd,
1926 sizeof(intel_dp->dpcd)); 1926 sizeof(intel_dp->dpcd));
1927 cdv_intel_edp_panel_vdd_off(psb_intel_encoder); 1927 cdv_intel_edp_panel_vdd_off(gma_encoder);
1928 if (ret == 0) { 1928 if (ret == 0) {
1929 /* if this fails, presume the device is a ghost */ 1929 /* if this fails, presume the device is a ghost */
1930 DRM_INFO("failed to retrieve link info, disabling eDP\n"); 1930 DRM_INFO("failed to retrieve link info, disabling eDP\n");
@@ -1945,7 +1945,7 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
1945 return; 1945 return;
1946 1946
1947err_priv: 1947err_priv:
1948 kfree(psb_intel_connector); 1948 kfree(gma_connector);
1949err_connector: 1949err_connector:
1950 kfree(psb_intel_encoder); 1950 kfree(gma_encoder);
1951} 1951}
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index 464153d9d2df..1c0d723b8d24 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -64,11 +64,11 @@ static void cdv_hdmi_mode_set(struct drm_encoder *encoder,
64 struct drm_display_mode *adjusted_mode) 64 struct drm_display_mode *adjusted_mode)
65{ 65{
66 struct drm_device *dev = encoder->dev; 66 struct drm_device *dev = encoder->dev;
67 struct psb_intel_encoder *psb_intel_encoder = to_psb_intel_encoder(encoder); 67 struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
68 struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv; 68 struct mid_intel_hdmi_priv *hdmi_priv = gma_encoder->dev_priv;
69 u32 hdmib; 69 u32 hdmib;
70 struct drm_crtc *crtc = encoder->crtc; 70 struct drm_crtc *crtc = encoder->crtc;
71 struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc); 71 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
72 72
73 hdmib = (2 << 10); 73 hdmib = (2 << 10);
74 74
@@ -77,7 +77,7 @@ static void cdv_hdmi_mode_set(struct drm_encoder *encoder,
77 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 77 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
78 hdmib |= HDMI_HSYNC_ACTIVE_HIGH; 78 hdmib |= HDMI_HSYNC_ACTIVE_HIGH;
79 79
80 if (intel_crtc->pipe == 1) 80 if (gma_crtc->pipe == 1)
81 hdmib |= HDMIB_PIPE_B_SELECT; 81 hdmib |= HDMIB_PIPE_B_SELECT;
82 82
83 if (hdmi_priv->has_hdmi_audio) { 83 if (hdmi_priv->has_hdmi_audio) {
@@ -99,9 +99,8 @@ static bool cdv_hdmi_mode_fixup(struct drm_encoder *encoder,
99static void cdv_hdmi_dpms(struct drm_encoder *encoder, int mode) 99static void cdv_hdmi_dpms(struct drm_encoder *encoder, int mode)
100{ 100{
101 struct drm_device *dev = encoder->dev; 101 struct drm_device *dev = encoder->dev;
102 struct psb_intel_encoder *psb_intel_encoder = 102 struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
103 to_psb_intel_encoder(encoder); 103 struct mid_intel_hdmi_priv *hdmi_priv = gma_encoder->dev_priv;
104 struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
105 u32 hdmib; 104 u32 hdmib;
106 105
107 hdmib = REG_READ(hdmi_priv->hdmi_reg); 106 hdmib = REG_READ(hdmi_priv->hdmi_reg);
@@ -116,9 +115,8 @@ static void cdv_hdmi_dpms(struct drm_encoder *encoder, int mode)
116static void cdv_hdmi_save(struct drm_connector *connector) 115static void cdv_hdmi_save(struct drm_connector *connector)
117{ 116{
118 struct drm_device *dev = connector->dev; 117 struct drm_device *dev = connector->dev;
119 struct psb_intel_encoder *psb_intel_encoder = 118 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
120 psb_intel_attached_encoder(connector); 119 struct mid_intel_hdmi_priv *hdmi_priv = gma_encoder->dev_priv;
121 struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
122 120
123 hdmi_priv->save_HDMIB = REG_READ(hdmi_priv->hdmi_reg); 121 hdmi_priv->save_HDMIB = REG_READ(hdmi_priv->hdmi_reg);
124} 122}
@@ -126,9 +124,8 @@ static void cdv_hdmi_save(struct drm_connector *connector)
126static void cdv_hdmi_restore(struct drm_connector *connector) 124static void cdv_hdmi_restore(struct drm_connector *connector)
127{ 125{
128 struct drm_device *dev = connector->dev; 126 struct drm_device *dev = connector->dev;
129 struct psb_intel_encoder *psb_intel_encoder = 127 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
130 psb_intel_attached_encoder(connector); 128 struct mid_intel_hdmi_priv *hdmi_priv = gma_encoder->dev_priv;
131 struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
132 129
133 REG_WRITE(hdmi_priv->hdmi_reg, hdmi_priv->save_HDMIB); 130 REG_WRITE(hdmi_priv->hdmi_reg, hdmi_priv->save_HDMIB);
134 REG_READ(hdmi_priv->hdmi_reg); 131 REG_READ(hdmi_priv->hdmi_reg);
@@ -137,13 +134,12 @@ static void cdv_hdmi_restore(struct drm_connector *connector)
137static enum drm_connector_status cdv_hdmi_detect( 134static enum drm_connector_status cdv_hdmi_detect(
138 struct drm_connector *connector, bool force) 135 struct drm_connector *connector, bool force)
139{ 136{
140 struct psb_intel_encoder *psb_intel_encoder = 137 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
141 psb_intel_attached_encoder(connector); 138 struct mid_intel_hdmi_priv *hdmi_priv = gma_encoder->dev_priv;
142 struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
143 struct edid *edid = NULL; 139 struct edid *edid = NULL;
144 enum drm_connector_status status = connector_status_disconnected; 140 enum drm_connector_status status = connector_status_disconnected;
145 141
146 edid = drm_get_edid(connector, &psb_intel_encoder->i2c_bus->adapter); 142 edid = drm_get_edid(connector, &gma_encoder->i2c_bus->adapter);
147 143
148 hdmi_priv->has_hdmi_sink = false; 144 hdmi_priv->has_hdmi_sink = false;
149 hdmi_priv->has_hdmi_audio = false; 145 hdmi_priv->has_hdmi_audio = false;
@@ -167,7 +163,7 @@ static int cdv_hdmi_set_property(struct drm_connector *connector,
167 struct drm_encoder *encoder = connector->encoder; 163 struct drm_encoder *encoder = connector->encoder;
168 164
169 if (!strcmp(property->name, "scaling mode") && encoder) { 165 if (!strcmp(property->name, "scaling mode") && encoder) {
170 struct psb_intel_crtc *crtc = to_psb_intel_crtc(encoder->crtc); 166 struct gma_crtc *crtc = to_gma_crtc(encoder->crtc);
171 bool centre; 167 bool centre;
172 uint64_t curValue; 168 uint64_t curValue;
173 169
@@ -221,12 +217,11 @@ static int cdv_hdmi_set_property(struct drm_connector *connector,
221 */ 217 */
222static int cdv_hdmi_get_modes(struct drm_connector *connector) 218static int cdv_hdmi_get_modes(struct drm_connector *connector)
223{ 219{
224 struct psb_intel_encoder *psb_intel_encoder = 220 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
225 psb_intel_attached_encoder(connector);
226 struct edid *edid = NULL; 221 struct edid *edid = NULL;
227 int ret = 0; 222 int ret = 0;
228 223
229 edid = drm_get_edid(connector, &psb_intel_encoder->i2c_bus->adapter); 224 edid = drm_get_edid(connector, &gma_encoder->i2c_bus->adapter);
230 if (edid) { 225 if (edid) {
231 drm_mode_connector_update_edid_property(connector, edid); 226 drm_mode_connector_update_edid_property(connector, edid);
232 ret = drm_add_edid_modes(connector, edid); 227 ret = drm_add_edid_modes(connector, edid);
@@ -256,11 +251,10 @@ static int cdv_hdmi_mode_valid(struct drm_connector *connector,
256 251
257static void cdv_hdmi_destroy(struct drm_connector *connector) 252static void cdv_hdmi_destroy(struct drm_connector *connector)
258{ 253{
259 struct psb_intel_encoder *psb_intel_encoder = 254 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
260 psb_intel_attached_encoder(connector);
261 255
262 if (psb_intel_encoder->i2c_bus) 256 if (gma_encoder->i2c_bus)
263 psb_intel_i2c_destroy(psb_intel_encoder->i2c_bus); 257 psb_intel_i2c_destroy(gma_encoder->i2c_bus);
264 drm_sysfs_connector_remove(connector); 258 drm_sysfs_connector_remove(connector);
265 drm_connector_cleanup(connector); 259 drm_connector_cleanup(connector);
266 kfree(connector); 260 kfree(connector);
@@ -269,16 +263,16 @@ static void cdv_hdmi_destroy(struct drm_connector *connector)
269static const struct drm_encoder_helper_funcs cdv_hdmi_helper_funcs = { 263static const struct drm_encoder_helper_funcs cdv_hdmi_helper_funcs = {
270 .dpms = cdv_hdmi_dpms, 264 .dpms = cdv_hdmi_dpms,
271 .mode_fixup = cdv_hdmi_mode_fixup, 265 .mode_fixup = cdv_hdmi_mode_fixup,
272 .prepare = psb_intel_encoder_prepare, 266 .prepare = gma_encoder_prepare,
273 .mode_set = cdv_hdmi_mode_set, 267 .mode_set = cdv_hdmi_mode_set,
274 .commit = psb_intel_encoder_commit, 268 .commit = gma_encoder_commit,
275}; 269};
276 270
277static const struct drm_connector_helper_funcs 271static const struct drm_connector_helper_funcs
278 cdv_hdmi_connector_helper_funcs = { 272 cdv_hdmi_connector_helper_funcs = {
279 .get_modes = cdv_hdmi_get_modes, 273 .get_modes = cdv_hdmi_get_modes,
280 .mode_valid = cdv_hdmi_mode_valid, 274 .mode_valid = cdv_hdmi_mode_valid,
281 .best_encoder = psb_intel_best_encoder, 275 .best_encoder = gma_best_encoder,
282}; 276};
283 277
284static const struct drm_connector_funcs cdv_hdmi_connector_funcs = { 278static const struct drm_connector_funcs cdv_hdmi_connector_funcs = {
@@ -294,23 +288,22 @@ static const struct drm_connector_funcs cdv_hdmi_connector_funcs = {
294void cdv_hdmi_init(struct drm_device *dev, 288void cdv_hdmi_init(struct drm_device *dev,
295 struct psb_intel_mode_device *mode_dev, int reg) 289 struct psb_intel_mode_device *mode_dev, int reg)
296{ 290{
297 struct psb_intel_encoder *psb_intel_encoder; 291 struct gma_encoder *gma_encoder;
298 struct psb_intel_connector *psb_intel_connector; 292 struct gma_connector *gma_connector;
299 struct drm_connector *connector; 293 struct drm_connector *connector;
300 struct drm_encoder *encoder; 294 struct drm_encoder *encoder;
301 struct mid_intel_hdmi_priv *hdmi_priv; 295 struct mid_intel_hdmi_priv *hdmi_priv;
302 int ddc_bus; 296 int ddc_bus;
303 297
304 psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), 298 gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL);
305 GFP_KERNEL);
306 299
307 if (!psb_intel_encoder) 300 if (!gma_encoder)
308 return; 301 return;
309 302
310 psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), 303 gma_connector = kzalloc(sizeof(struct gma_connector),
311 GFP_KERNEL); 304 GFP_KERNEL);
312 305
313 if (!psb_intel_connector) 306 if (!gma_connector)
314 goto err_connector; 307 goto err_connector;
315 308
316 hdmi_priv = kzalloc(sizeof(struct mid_intel_hdmi_priv), GFP_KERNEL); 309 hdmi_priv = kzalloc(sizeof(struct mid_intel_hdmi_priv), GFP_KERNEL);
@@ -318,9 +311,9 @@ void cdv_hdmi_init(struct drm_device *dev,
318 if (!hdmi_priv) 311 if (!hdmi_priv)
319 goto err_priv; 312 goto err_priv;
320 313
321 connector = &psb_intel_connector->base; 314 connector = &gma_connector->base;
322 connector->polled = DRM_CONNECTOR_POLL_HPD; 315 connector->polled = DRM_CONNECTOR_POLL_HPD;
323 encoder = &psb_intel_encoder->base; 316 encoder = &gma_encoder->base;
324 drm_connector_init(dev, connector, 317 drm_connector_init(dev, connector,
325 &cdv_hdmi_connector_funcs, 318 &cdv_hdmi_connector_funcs,
326 DRM_MODE_CONNECTOR_DVID); 319 DRM_MODE_CONNECTOR_DVID);
@@ -328,12 +321,11 @@ void cdv_hdmi_init(struct drm_device *dev,
328 drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs, 321 drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs,
329 DRM_MODE_ENCODER_TMDS); 322 DRM_MODE_ENCODER_TMDS);
330 323
331 psb_intel_connector_attach_encoder(psb_intel_connector, 324 gma_connector_attach_encoder(gma_connector, gma_encoder);
332 psb_intel_encoder); 325 gma_encoder->type = INTEL_OUTPUT_HDMI;
333 psb_intel_encoder->type = INTEL_OUTPUT_HDMI;
334 hdmi_priv->hdmi_reg = reg; 326 hdmi_priv->hdmi_reg = reg;
335 hdmi_priv->has_hdmi_sink = false; 327 hdmi_priv->has_hdmi_sink = false;
336 psb_intel_encoder->dev_priv = hdmi_priv; 328 gma_encoder->dev_priv = hdmi_priv;
337 329
338 drm_encoder_helper_add(encoder, &cdv_hdmi_helper_funcs); 330 drm_encoder_helper_add(encoder, &cdv_hdmi_helper_funcs);
339 drm_connector_helper_add(connector, 331 drm_connector_helper_add(connector,
@@ -349,11 +341,11 @@ void cdv_hdmi_init(struct drm_device *dev,
349 switch (reg) { 341 switch (reg) {
350 case SDVOB: 342 case SDVOB:
351 ddc_bus = GPIOE; 343 ddc_bus = GPIOE;
352 psb_intel_encoder->ddi_select = DDI0_SELECT; 344 gma_encoder->ddi_select = DDI0_SELECT;
353 break; 345 break;
354 case SDVOC: 346 case SDVOC:
355 ddc_bus = GPIOD; 347 ddc_bus = GPIOD;
356 psb_intel_encoder->ddi_select = DDI1_SELECT; 348 gma_encoder->ddi_select = DDI1_SELECT;
357 break; 349 break;
358 default: 350 default:
359 DRM_ERROR("unknown reg 0x%x for HDMI\n", reg); 351 DRM_ERROR("unknown reg 0x%x for HDMI\n", reg);
@@ -361,16 +353,15 @@ void cdv_hdmi_init(struct drm_device *dev,
361 break; 353 break;
362 } 354 }
363 355
364 psb_intel_encoder->i2c_bus = psb_intel_i2c_create(dev, 356 gma_encoder->i2c_bus = psb_intel_i2c_create(dev,
365 ddc_bus, (reg == SDVOB) ? "HDMIB" : "HDMIC"); 357 ddc_bus, (reg == SDVOB) ? "HDMIB" : "HDMIC");
366 358
367 if (!psb_intel_encoder->i2c_bus) { 359 if (!gma_encoder->i2c_bus) {
368 dev_err(dev->dev, "No ddc adapter available!\n"); 360 dev_err(dev->dev, "No ddc adapter available!\n");
369 goto failed_ddc; 361 goto failed_ddc;
370 } 362 }
371 363
372 hdmi_priv->hdmi_i2c_adapter = 364 hdmi_priv->hdmi_i2c_adapter = &(gma_encoder->i2c_bus->adapter);
373 &(psb_intel_encoder->i2c_bus->adapter);
374 hdmi_priv->dev = dev; 365 hdmi_priv->dev = dev;
375 drm_sysfs_connector_add(connector); 366 drm_sysfs_connector_add(connector);
376 return; 367 return;
@@ -379,7 +370,7 @@ failed_ddc:
379 drm_encoder_cleanup(encoder); 370 drm_encoder_cleanup(encoder);
380 drm_connector_cleanup(connector); 371 drm_connector_cleanup(connector);
381err_priv: 372err_priv:
382 kfree(psb_intel_connector); 373 kfree(gma_connector);
383err_connector: 374err_connector:
384 kfree(psb_intel_encoder); 375 kfree(gma_encoder);
385} 376}
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index d81dbc3368f0..20e08e65d46c 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -356,8 +356,7 @@ static void cdv_intel_lvds_mode_set(struct drm_encoder *encoder,
356{ 356{
357 struct drm_device *dev = encoder->dev; 357 struct drm_device *dev = encoder->dev;
358 struct drm_psb_private *dev_priv = dev->dev_private; 358 struct drm_psb_private *dev_priv = dev->dev_private;
359 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc( 359 struct gma_crtc *gma_crtc = to_gma_crtc(encoder->crtc);
360 encoder->crtc);
361 u32 pfit_control; 360 u32 pfit_control;
362 361
363 /* 362 /*
@@ -379,7 +378,7 @@ static void cdv_intel_lvds_mode_set(struct drm_encoder *encoder,
379 else 378 else
380 pfit_control = 0; 379 pfit_control = 0;
381 380
382 pfit_control |= psb_intel_crtc->pipe << PFIT_PIPE_SHIFT; 381 pfit_control |= gma_crtc->pipe << PFIT_PIPE_SHIFT;
383 382
384 if (dev_priv->lvds_dither) 383 if (dev_priv->lvds_dither)
385 pfit_control |= PANEL_8TO6_DITHER_ENABLE; 384 pfit_control |= PANEL_8TO6_DITHER_ENABLE;
@@ -407,12 +406,11 @@ static int cdv_intel_lvds_get_modes(struct drm_connector *connector)
407{ 406{
408 struct drm_device *dev = connector->dev; 407 struct drm_device *dev = connector->dev;
409 struct drm_psb_private *dev_priv = dev->dev_private; 408 struct drm_psb_private *dev_priv = dev->dev_private;
410 struct psb_intel_encoder *psb_intel_encoder = 409 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
411 psb_intel_attached_encoder(connector);
412 struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev; 410 struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
413 int ret; 411 int ret;
414 412
415 ret = psb_intel_ddc_get_modes(connector, &psb_intel_encoder->i2c_bus->adapter); 413 ret = psb_intel_ddc_get_modes(connector, &gma_encoder->i2c_bus->adapter);
416 414
417 if (ret) 415 if (ret)
418 return ret; 416 return ret;
@@ -444,11 +442,10 @@ static int cdv_intel_lvds_get_modes(struct drm_connector *connector)
444 */ 442 */
445static void cdv_intel_lvds_destroy(struct drm_connector *connector) 443static void cdv_intel_lvds_destroy(struct drm_connector *connector)
446{ 444{
447 struct psb_intel_encoder *psb_intel_encoder = 445 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
448 psb_intel_attached_encoder(connector);
449 446
450 if (psb_intel_encoder->i2c_bus) 447 if (gma_encoder->i2c_bus)
451 psb_intel_i2c_destroy(psb_intel_encoder->i2c_bus); 448 psb_intel_i2c_destroy(gma_encoder->i2c_bus);
452 drm_sysfs_connector_remove(connector); 449 drm_sysfs_connector_remove(connector);
453 drm_connector_cleanup(connector); 450 drm_connector_cleanup(connector);
454 kfree(connector); 451 kfree(connector);
@@ -461,8 +458,7 @@ static int cdv_intel_lvds_set_property(struct drm_connector *connector,
461 struct drm_encoder *encoder = connector->encoder; 458 struct drm_encoder *encoder = connector->encoder;
462 459
463 if (!strcmp(property->name, "scaling mode") && encoder) { 460 if (!strcmp(property->name, "scaling mode") && encoder) {
464 struct psb_intel_crtc *crtc = 461 struct gma_crtc *crtc = to_gma_crtc(encoder->crtc);
465 to_psb_intel_crtc(encoder->crtc);
466 uint64_t curValue; 462 uint64_t curValue;
467 463
468 if (!crtc) 464 if (!crtc)
@@ -529,7 +525,7 @@ static const struct drm_connector_helper_funcs
529 cdv_intel_lvds_connector_helper_funcs = { 525 cdv_intel_lvds_connector_helper_funcs = {
530 .get_modes = cdv_intel_lvds_get_modes, 526 .get_modes = cdv_intel_lvds_get_modes,
531 .mode_valid = cdv_intel_lvds_mode_valid, 527 .mode_valid = cdv_intel_lvds_mode_valid,
532 .best_encoder = psb_intel_best_encoder, 528 .best_encoder = gma_best_encoder,
533}; 529};
534 530
535static const struct drm_connector_funcs cdv_intel_lvds_connector_funcs = { 531static const struct drm_connector_funcs cdv_intel_lvds_connector_funcs = {
@@ -612,8 +608,8 @@ static bool lvds_is_present_in_vbt(struct drm_device *dev,
612void cdv_intel_lvds_init(struct drm_device *dev, 608void cdv_intel_lvds_init(struct drm_device *dev,
613 struct psb_intel_mode_device *mode_dev) 609 struct psb_intel_mode_device *mode_dev)
614{ 610{
615 struct psb_intel_encoder *psb_intel_encoder; 611 struct gma_encoder *gma_encoder;
616 struct psb_intel_connector *psb_intel_connector; 612 struct gma_connector *gma_connector;
617 struct cdv_intel_lvds_priv *lvds_priv; 613 struct cdv_intel_lvds_priv *lvds_priv;
618 struct drm_connector *connector; 614 struct drm_connector *connector;
619 struct drm_encoder *encoder; 615 struct drm_encoder *encoder;
@@ -630,24 +626,24 @@ void cdv_intel_lvds_init(struct drm_device *dev,
630 return; 626 return;
631 } 627 }
632 628
633 psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), 629 gma_encoder = kzalloc(sizeof(struct gma_encoder),
634 GFP_KERNEL); 630 GFP_KERNEL);
635 if (!psb_intel_encoder) 631 if (!gma_encoder)
636 return; 632 return;
637 633
638 psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), 634 gma_connector = kzalloc(sizeof(struct gma_connector),
639 GFP_KERNEL); 635 GFP_KERNEL);
640 if (!psb_intel_connector) 636 if (!gma_connector)
641 goto failed_connector; 637 goto failed_connector;
642 638
643 lvds_priv = kzalloc(sizeof(struct cdv_intel_lvds_priv), GFP_KERNEL); 639 lvds_priv = kzalloc(sizeof(struct cdv_intel_lvds_priv), GFP_KERNEL);
644 if (!lvds_priv) 640 if (!lvds_priv)
645 goto failed_lvds_priv; 641 goto failed_lvds_priv;
646 642
647 psb_intel_encoder->dev_priv = lvds_priv; 643 gma_encoder->dev_priv = lvds_priv;
648 644
649 connector = &psb_intel_connector->base; 645 connector = &gma_connector->base;
650 encoder = &psb_intel_encoder->base; 646 encoder = &gma_encoder->base;
651 647
652 648
653 drm_connector_init(dev, connector, 649 drm_connector_init(dev, connector,
@@ -659,9 +655,8 @@ void cdv_intel_lvds_init(struct drm_device *dev,
659 DRM_MODE_ENCODER_LVDS); 655 DRM_MODE_ENCODER_LVDS);
660 656
661 657
662 psb_intel_connector_attach_encoder(psb_intel_connector, 658 gma_connector_attach_encoder(gma_connector, gma_encoder);
663 psb_intel_encoder); 659 gma_encoder->type = INTEL_OUTPUT_LVDS;
664 psb_intel_encoder->type = INTEL_OUTPUT_LVDS;
665 660
666 drm_encoder_helper_add(encoder, &cdv_intel_lvds_helper_funcs); 661 drm_encoder_helper_add(encoder, &cdv_intel_lvds_helper_funcs);
667 drm_connector_helper_add(connector, 662 drm_connector_helper_add(connector,
@@ -682,16 +677,16 @@ void cdv_intel_lvds_init(struct drm_device *dev,
682 * Set up I2C bus 677 * Set up I2C bus
683 * FIXME: distroy i2c_bus when exit 678 * FIXME: distroy i2c_bus when exit
684 */ 679 */
685 psb_intel_encoder->i2c_bus = psb_intel_i2c_create(dev, 680 gma_encoder->i2c_bus = psb_intel_i2c_create(dev,
686 GPIOB, 681 GPIOB,
687 "LVDSBLC_B"); 682 "LVDSBLC_B");
688 if (!psb_intel_encoder->i2c_bus) { 683 if (!gma_encoder->i2c_bus) {
689 dev_printk(KERN_ERR, 684 dev_printk(KERN_ERR,
690 &dev->pdev->dev, "I2C bus registration failed.\n"); 685 &dev->pdev->dev, "I2C bus registration failed.\n");
691 goto failed_blc_i2c; 686 goto failed_blc_i2c;
692 } 687 }
693 psb_intel_encoder->i2c_bus->slave_addr = 0x2C; 688 gma_encoder->i2c_bus->slave_addr = 0x2C;
694 dev_priv->lvds_i2c_bus = psb_intel_encoder->i2c_bus; 689 dev_priv->lvds_i2c_bus = gma_encoder->i2c_bus;
695 690
696 /* 691 /*
697 * LVDS discovery: 692 * LVDS discovery:
@@ -704,10 +699,10 @@ void cdv_intel_lvds_init(struct drm_device *dev,
704 */ 699 */
705 700
706 /* Set up the DDC bus. */ 701 /* Set up the DDC bus. */
707 psb_intel_encoder->ddc_bus = psb_intel_i2c_create(dev, 702 gma_encoder->ddc_bus = psb_intel_i2c_create(dev,
708 GPIOC, 703 GPIOC,
709 "LVDSDDC_C"); 704 "LVDSDDC_C");
710 if (!psb_intel_encoder->ddc_bus) { 705 if (!gma_encoder->ddc_bus) {
711 dev_printk(KERN_ERR, &dev->pdev->dev, 706 dev_printk(KERN_ERR, &dev->pdev->dev,
712 "DDC bus registration " "failed.\n"); 707 "DDC bus registration " "failed.\n");
713 goto failed_ddc; 708 goto failed_ddc;
@@ -718,7 +713,7 @@ void cdv_intel_lvds_init(struct drm_device *dev,
718 * preferred mode is the right one. 713 * preferred mode is the right one.
719 */ 714 */
720 psb_intel_ddc_get_modes(connector, 715 psb_intel_ddc_get_modes(connector,
721 &psb_intel_encoder->ddc_bus->adapter); 716 &gma_encoder->ddc_bus->adapter);
722 list_for_each_entry(scan, &connector->probed_modes, head) { 717 list_for_each_entry(scan, &connector->probed_modes, head) {
723 if (scan->type & DRM_MODE_TYPE_PREFERRED) { 718 if (scan->type & DRM_MODE_TYPE_PREFERRED) {
724 mode_dev->panel_fixed_mode = 719 mode_dev->panel_fixed_mode =
@@ -782,19 +777,19 @@ out:
782 777
783failed_find: 778failed_find:
784 printk(KERN_ERR "Failed find\n"); 779 printk(KERN_ERR "Failed find\n");
785 if (psb_intel_encoder->ddc_bus) 780 if (gma_encoder->ddc_bus)
786 psb_intel_i2c_destroy(psb_intel_encoder->ddc_bus); 781 psb_intel_i2c_destroy(gma_encoder->ddc_bus);
787failed_ddc: 782failed_ddc:
788 printk(KERN_ERR "Failed DDC\n"); 783 printk(KERN_ERR "Failed DDC\n");
789 if (psb_intel_encoder->i2c_bus) 784 if (gma_encoder->i2c_bus)
790 psb_intel_i2c_destroy(psb_intel_encoder->i2c_bus); 785 psb_intel_i2c_destroy(gma_encoder->i2c_bus);
791failed_blc_i2c: 786failed_blc_i2c:
792 printk(KERN_ERR "Failed BLC\n"); 787 printk(KERN_ERR "Failed BLC\n");
793 drm_encoder_cleanup(encoder); 788 drm_encoder_cleanup(encoder);
794 drm_connector_cleanup(connector); 789 drm_connector_cleanup(connector);
795 kfree(lvds_priv); 790 kfree(lvds_priv);
796failed_lvds_priv: 791failed_lvds_priv:
797 kfree(psb_intel_connector); 792 kfree(gma_connector);
798failed_connector: 793failed_connector:
799 kfree(psb_intel_encoder); 794 kfree(gma_encoder);
800} 795}
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 8b1b6d923abe..01dd7d225762 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -321,10 +321,8 @@ static struct gtt_range *psbfb_alloc(struct drm_device *dev, int aligned_size)
321 /* Begin by trying to use stolen memory backing */ 321 /* Begin by trying to use stolen memory backing */
322 backing = psb_gtt_alloc_range(dev, aligned_size, "fb", 1); 322 backing = psb_gtt_alloc_range(dev, aligned_size, "fb", 1);
323 if (backing) { 323 if (backing) {
324 if (drm_gem_private_object_init(dev, 324 drm_gem_private_object_init(dev, &backing->gem, aligned_size);
325 &backing->gem, aligned_size) == 0) 325 return backing;
326 return backing;
327 psb_gtt_free_range(dev, backing);
328 } 326 }
329 return NULL; 327 return NULL;
330} 328}
@@ -522,21 +520,21 @@ static struct drm_framebuffer *psb_user_framebuffer_create
522static void psbfb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, 520static void psbfb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
523 u16 blue, int regno) 521 u16 blue, int regno)
524{ 522{
525 struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc); 523 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
526 524
527 intel_crtc->lut_r[regno] = red >> 8; 525 gma_crtc->lut_r[regno] = red >> 8;
528 intel_crtc->lut_g[regno] = green >> 8; 526 gma_crtc->lut_g[regno] = green >> 8;
529 intel_crtc->lut_b[regno] = blue >> 8; 527 gma_crtc->lut_b[regno] = blue >> 8;
530} 528}
531 529
532static void psbfb_gamma_get(struct drm_crtc *crtc, u16 *red, 530static void psbfb_gamma_get(struct drm_crtc *crtc, u16 *red,
533 u16 *green, u16 *blue, int regno) 531 u16 *green, u16 *blue, int regno)
534{ 532{
535 struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc); 533 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
536 534
537 *red = intel_crtc->lut_r[regno] << 8; 535 *red = gma_crtc->lut_r[regno] << 8;
538 *green = intel_crtc->lut_g[regno] << 8; 536 *green = gma_crtc->lut_g[regno] << 8;
539 *blue = intel_crtc->lut_b[regno] << 8; 537 *blue = gma_crtc->lut_b[regno] << 8;
540} 538}
541 539
542static int psbfb_probe(struct drm_fb_helper *helper, 540static int psbfb_probe(struct drm_fb_helper *helper,
@@ -705,13 +703,12 @@ static void psb_setup_outputs(struct drm_device *dev)
705 703
706 list_for_each_entry(connector, &dev->mode_config.connector_list, 704 list_for_each_entry(connector, &dev->mode_config.connector_list,
707 head) { 705 head) {
708 struct psb_intel_encoder *psb_intel_encoder = 706 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
709 psb_intel_attached_encoder(connector); 707 struct drm_encoder *encoder = &gma_encoder->base;
710 struct drm_encoder *encoder = &psb_intel_encoder->base;
711 int crtc_mask = 0, clone_mask = 0; 708 int crtc_mask = 0, clone_mask = 0;
712 709
713 /* valid crtcs */ 710 /* valid crtcs */
714 switch (psb_intel_encoder->type) { 711 switch (gma_encoder->type) {
715 case INTEL_OUTPUT_ANALOG: 712 case INTEL_OUTPUT_ANALOG:
716 crtc_mask = (1 << 0); 713 crtc_mask = (1 << 0);
717 clone_mask = (1 << INTEL_OUTPUT_ANALOG); 714 clone_mask = (1 << INTEL_OUTPUT_ANALOG);
@@ -746,7 +743,7 @@ static void psb_setup_outputs(struct drm_device *dev)
746 } 743 }
747 encoder->possible_crtcs = crtc_mask; 744 encoder->possible_crtcs = crtc_mask;
748 encoder->possible_clones = 745 encoder->possible_clones =
749 psb_intel_connector_clones(dev, clone_mask); 746 gma_connector_clones(dev, clone_mask);
750 } 747 }
751} 748}
752 749
diff --git a/drivers/gpu/drm/gma500/framebuffer.h b/drivers/gpu/drm/gma500/framebuffer.h
index 989558a9e6ee..395f20b07aab 100644
--- a/drivers/gpu/drm/gma500/framebuffer.h
+++ b/drivers/gpu/drm/gma500/framebuffer.h
@@ -41,7 +41,7 @@ struct psb_fbdev {
41 41
42#define to_psb_fb(x) container_of(x, struct psb_framebuffer, base) 42#define to_psb_fb(x) container_of(x, struct psb_framebuffer, base)
43 43
44extern int psb_intel_connector_clones(struct drm_device *dev, int type_mask); 44extern int gma_connector_clones(struct drm_device *dev, int type_mask);
45 45
46#endif 46#endif
47 47
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index eefd6cc5b80d..10ae8c52d06f 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -26,6 +26,7 @@
26#include <drm/drmP.h> 26#include <drm/drmP.h>
27#include <drm/drm.h> 27#include <drm/drm.h>
28#include <drm/gma_drm.h> 28#include <drm/gma_drm.h>
29#include <drm/drm_vma_manager.h>
29#include "psb_drv.h" 30#include "psb_drv.h"
30 31
31int psb_gem_init_object(struct drm_gem_object *obj) 32int psb_gem_init_object(struct drm_gem_object *obj)
@@ -38,8 +39,7 @@ void psb_gem_free_object(struct drm_gem_object *obj)
38 struct gtt_range *gtt = container_of(obj, struct gtt_range, gem); 39 struct gtt_range *gtt = container_of(obj, struct gtt_range, gem);
39 40
40 /* Remove the list map if one is present */ 41 /* Remove the list map if one is present */
41 if (obj->map_list.map) 42 drm_gem_free_mmap_offset(obj);
42 drm_gem_free_mmap_offset(obj);
43 drm_gem_object_release(obj); 43 drm_gem_object_release(obj);
44 44
45 /* This must occur last as it frees up the memory of the GEM object */ 45 /* This must occur last as it frees up the memory of the GEM object */
@@ -81,13 +81,10 @@ int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
81 /* What validation is needed here ? */ 81 /* What validation is needed here ? */
82 82
83 /* Make it mmapable */ 83 /* Make it mmapable */
84 if (!obj->map_list.map) { 84 ret = drm_gem_create_mmap_offset(obj);
85 ret = drm_gem_create_mmap_offset(obj); 85 if (ret)
86 if (ret) 86 goto out;
87 goto out; 87 *offset = drm_vma_node_offset_addr(&obj->vma_node);
88 }
89 /* GEM should really work out the hash offsets for us */
90 *offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
91out: 88out:
92 drm_gem_object_unreference(obj); 89 drm_gem_object_unreference(obj);
93unlock: 90unlock:
@@ -165,23 +162,6 @@ int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
165} 162}
166 163
167/** 164/**
168 * psb_gem_dumb_destroy - destroy a dumb buffer
169 * @file: client file
170 * @dev: our DRM device
171 * @handle: the object handle
172 *
173 * Destroy a handle that was created via psb_gem_dumb_create, at least
174 * we hope it was created that way. i915 seems to assume the caller
175 * does the checking but that might be worth review ! FIXME
176 */
177int psb_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
178 uint32_t handle)
179{
180 /* No special work needed, drop the reference and see what falls out */
181 return drm_gem_handle_delete(file, handle);
182}
183
184/**
185 * psb_gem_fault - pagefault handler for GEM objects 165 * psb_gem_fault - pagefault handler for GEM objects
186 * @vma: the VMA of the GEM object 166 * @vma: the VMA of the GEM object
187 * @vmf: fault detail 167 * @vmf: fault detail
@@ -261,11 +241,12 @@ static int psb_gem_create_stolen(struct drm_file *file, struct drm_device *dev,
261 struct gtt_range *gtt = psb_gtt_alloc_range(dev, size, "gem", 1); 241 struct gtt_range *gtt = psb_gtt_alloc_range(dev, size, "gem", 1);
262 if (gtt == NULL) 242 if (gtt == NULL)
263 return -ENOMEM; 243 return -ENOMEM;
264 if (drm_gem_private_object_init(dev, &gtt->gem, size) != 0) 244
265 goto free_gtt; 245 drm_gem_private_object_init(dev, &gtt->gem, size);
266 if (drm_gem_handle_create(file, &gtt->gem, handle) == 0) 246 if (drm_gem_handle_create(file, &gtt->gem, handle) == 0)
267 return 0; 247 return 0;
268free_gtt: 248
249 drm_gem_object_release(&gtt->gem);
269 psb_gtt_free_range(dev, gtt); 250 psb_gtt_free_range(dev, gtt);
270 return -ENOMEM; 251 return -ENOMEM;
271} 252}
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
new file mode 100644
index 000000000000..24e8af3d22bf
--- /dev/null
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -0,0 +1,776 @@
1/*
2 * Copyright © 2006-2011 Intel Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Authors:
18 * Eric Anholt <eric@anholt.net>
19 * Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
20 */
21
22#include <drm/drmP.h>
23#include "gma_display.h"
24#include "psb_intel_drv.h"
25#include "psb_intel_reg.h"
26#include "psb_drv.h"
27#include "framebuffer.h"
28
29/**
30 * Returns whether any output on the specified pipe is of the specified type
31 */
32bool gma_pipe_has_type(struct drm_crtc *crtc, int type)
33{
34 struct drm_device *dev = crtc->dev;
35 struct drm_mode_config *mode_config = &dev->mode_config;
36 struct drm_connector *l_entry;
37
38 list_for_each_entry(l_entry, &mode_config->connector_list, head) {
39 if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
40 struct gma_encoder *gma_encoder =
41 gma_attached_encoder(l_entry);
42 if (gma_encoder->type == type)
43 return true;
44 }
45 }
46
47 return false;
48}
49
/**
 * gma_wait_for_vblank - crude "wait one refresh cycle" helper
 * @dev: DRM device (unused; kept for API symmetry)
 *
 * NOTE(review): this does not actually synchronize with vblank — it just
 * busy-waits long enough that one should have occurred at any plausible
 * refresh rate. mdelay() spins the CPU for the full 20ms.
 */
void gma_wait_for_vblank(struct drm_device *dev)
{
	/* Wait for 20ms, i.e. one cycle at 50hz. */
	mdelay(20);
}
55
/**
 * gma_pipe_set_base - point the display plane at a new framebuffer
 * @crtc: CRTC whose plane is reprogrammed
 * @x: horizontal panning offset into the framebuffer, in pixels
 * @y: vertical panning offset into the framebuffer, in lines
 * @old_fb: previously scanned-out framebuffer, unpinned on success
 *
 * Pins the new framebuffer into the GTT, programs stride, pixel format and
 * base/offset registers for the pipe, then unpins @old_fb. Returns 0 on
 * success or a negative errno.
 */
int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
		      struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
	int pipe = gma_crtc->pipe;
	const struct psb_offset *map = &dev_priv->regmap[pipe];
	unsigned long start, offset;
	u32 dspcntr;
	int ret = 0;

	/* If the hardware cannot be powered up there is nothing to program */
	if (!gma_power_begin(dev, true))
		return 0;

	/* no fb bound */
	if (!crtc->fb) {
		dev_err(dev->dev, "No FB bound\n");
		/* Still unpin the old framebuffer before bailing out */
		goto gma_pipe_cleaner;
	}

	/* We are displaying this buffer, make sure it is actually loaded
	   into the GTT */
	ret = psb_gtt_pin(psbfb->gtt);
	if (ret < 0)
		goto gma_pipe_set_base_exit;
	start = psbfb->gtt->offset;
	/* Byte offset of the (x, y) panning origin within the framebuffer */
	offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);

	REG_WRITE(map->stride, crtc->fb->pitches[0]);

	/* Preserve the control register, replacing only the pixel format */
	dspcntr = REG_READ(map->cntr);
	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;

	switch (crtc->fb->bits_per_pixel) {
	case 8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case 16:
		if (crtc->fb->depth == 15)
			dspcntr |= DISPPLANE_15_16BPP;
		else
			dspcntr |= DISPPLANE_16BPP;
		break;
	case 24:
	case 32:
		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
		break;
	default:
		dev_err(dev->dev, "Unknown color depth\n");
		ret = -EINVAL;
		/* NOTE(review): jumps past the old_fb unpin and leaves the
		   freshly pinned psbfb->gtt pinned — looks like a leak on
		   this error path; confirm intended ownership. */
		goto gma_pipe_set_base_exit;
	}
	REG_WRITE(map->cntr, dspcntr);

	dev_dbg(dev->dev,
		"Writing base %08lX %08lX %d %d\n", start, offset, x, y);

	/* FIXME: Investigate whether this really is the base for psb and why
	   the linear offset is named base for the other chips. map->surf
	   should be the base and map->linoff the offset for all chips */
	if (IS_PSB(dev)) {
		REG_WRITE(map->base, offset + start);
		REG_READ(map->base);
	} else {
		REG_WRITE(map->base, offset);
		REG_READ(map->base);
		REG_WRITE(map->surf, start);
		REG_READ(map->surf);
	}

gma_pipe_cleaner:
	/* If there was a previous display we can now unpin it */
	if (old_fb)
		psb_gtt_unpin(to_psb_fb(old_fb)->gtt);

gma_pipe_set_base_exit:
	gma_power_end(dev);
	return ret;
}
137
138/* Loads the palette/gamma unit for the CRTC with the prepared values */
139void gma_crtc_load_lut(struct drm_crtc *crtc)
140{
141 struct drm_device *dev = crtc->dev;
142 struct drm_psb_private *dev_priv = dev->dev_private;
143 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
144 const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
145 int palreg = map->palette;
146 int i;
147
148 /* The clocks have to be on to load the palette. */
149 if (!crtc->enabled)
150 return;
151
152 if (gma_power_begin(dev, false)) {
153 for (i = 0; i < 256; i++) {
154 REG_WRITE(palreg + 4 * i,
155 ((gma_crtc->lut_r[i] +
156 gma_crtc->lut_adj[i]) << 16) |
157 ((gma_crtc->lut_g[i] +
158 gma_crtc->lut_adj[i]) << 8) |
159 (gma_crtc->lut_b[i] +
160 gma_crtc->lut_adj[i]));
161 }
162 gma_power_end(dev);
163 } else {
164 for (i = 0; i < 256; i++) {
165 /* FIXME: Why pipe[0] and not pipe[..._crtc->pipe]? */
166 dev_priv->regs.pipe[0].palette[i] =
167 ((gma_crtc->lut_r[i] +
168 gma_crtc->lut_adj[i]) << 16) |
169 ((gma_crtc->lut_g[i] +
170 gma_crtc->lut_adj[i]) << 8) |
171 (gma_crtc->lut_b[i] +
172 gma_crtc->lut_adj[i]);
173 }
174
175 }
176}
177
178void gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue,
179 u32 start, u32 size)
180{
181 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
182 int i;
183 int end = (start + size > 256) ? 256 : start + size;
184
185 for (i = start; i < end; i++) {
186 gma_crtc->lut_r[i] = red[i] >> 8;
187 gma_crtc->lut_g[i] = green[i] >> 8;
188 gma_crtc->lut_b[i] = blue[i] >> 8;
189 }
190
191 gma_crtc_load_lut(crtc);
192}
193
/**
 * Sets the power management mode of the pipe and plane.
 *
 * This code should probably grow support for turning the cursor off and back
 * on appropriately at the same time as we're turning the pipe off/on.
 *
 * Enable order is DPLL -> plane -> pipe; disable order is the reverse
 * (plane -> pipe -> DPLL), with settle delays between steps.
 */
void gma_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	int pipe = gma_crtc->pipe;
	const struct psb_offset *map = &dev_priv->regmap[pipe];
	u32 temp;

	/* XXX: When our outputs are all unaware of DPMS modes other than off
	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
	 */

	/* Self-refresh must be off while reconfiguring on Cedarview */
	if (IS_CDV(dev))
		dev_priv->ops->disable_sr(dev);

	switch (mode) {
	case DRM_MODE_DPMS_ON:
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
		/* Already powered up — nothing to do */
		if (gma_crtc->active)
			break;

		gma_crtc->active = true;

		/* Enable the DPLL */
		temp = REG_READ(map->dpll);
		if ((temp & DPLL_VCO_ENABLE) == 0) {
			/* NOTE(review): the register is written without the
			 * enable bit first, then the enable value is written
			 * twice with settle delays — presumably a hardware
			 * sequencing requirement; confirm against PSB docs. */
			REG_WRITE(map->dpll, temp);
			REG_READ(map->dpll);
			/* Wait for the clocks to stabilize. */
			udelay(150);
			REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
			REG_READ(map->dpll);
			/* Wait for the clocks to stabilize. */
			udelay(150);
			REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
			REG_READ(map->dpll);
			/* Wait for the clocks to stabilize. */
			udelay(150);
		}

		/* Enable the plane */
		temp = REG_READ(map->cntr);
		if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
			REG_WRITE(map->cntr,
				  temp | DISPLAY_PLANE_ENABLE);
			/* Flush the plane changes */
			REG_WRITE(map->base, REG_READ(map->base));
		}

		udelay(150);

		/* Enable the pipe */
		temp = REG_READ(map->conf);
		if ((temp & PIPEACONF_ENABLE) == 0)
			REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);

		/* Clear stale status bits and arm underrun reporting */
		temp = REG_READ(map->status);
		temp &= ~(0xFFFF);
		temp |= PIPE_FIFO_UNDERRUN;
		REG_WRITE(map->status, temp);
		REG_READ(map->status);

		gma_crtc_load_lut(crtc);

		/* Give the overlay scaler a chance to enable
		 * if it's on this pipe */
		/* psb_intel_crtc_dpms_video(crtc, true); TODO */
		break;
	case DRM_MODE_DPMS_OFF:
		/* Already powered down — nothing to do */
		if (!gma_crtc->active)
			break;

		gma_crtc->active = false;

		/* Give the overlay scaler a chance to disable
		 * if it's on this pipe */
		/* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */

		/* Disable the VGA plane that we never use */
		REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);

		/* Turn off vblank interrupts */
		drm_vblank_off(dev, pipe);

		/* Wait for vblank for the disable to take effect */
		gma_wait_for_vblank(dev);

		/* Disable plane */
		temp = REG_READ(map->cntr);
		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
			REG_WRITE(map->cntr,
				  temp & ~DISPLAY_PLANE_ENABLE);
			/* Flush the plane changes */
			REG_WRITE(map->base, REG_READ(map->base));
			REG_READ(map->base);
		}

		/* Disable pipe */
		temp = REG_READ(map->conf);
		if ((temp & PIPEACONF_ENABLE) != 0) {
			REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
			REG_READ(map->conf);
		}

		/* Wait for vblank for the disable to take effect. */
		gma_wait_for_vblank(dev);

		udelay(150);

		/* Disable DPLL */
		temp = REG_READ(map->dpll);
		if ((temp & DPLL_VCO_ENABLE) != 0) {
			REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
			REG_READ(map->dpll);
		}

		/* Wait for the clocks to turn off. */
		udelay(150);
		break;
	}

	/* Watermarks depend on the active pipe configuration on Cedarview */
	if (IS_CDV(dev))
		dev_priv->ops->update_wm(dev, crtc);

	/* Set FIFO watermarks */
	REG_WRITE(DSPARB, 0x3F3E);
}
329
/**
 * gma_crtc_cursor_set - set (or clear) the hardware cursor image
 * @crtc: CRTC whose cursor is changed
 * @file_priv: DRM file the GEM handle belongs to
 * @handle: GEM handle of the 64x64 ARGB cursor image, or 0 to hide the cursor
 * @width: cursor width in pixels (must be 64)
 * @height: cursor height in pixels (must be 64)
 *
 * Looks up and pins the cursor buffer, optionally copies it into dedicated
 * cursor memory on hardware that needs physical addresses, programs the
 * cursor control/base registers, and releases the previously set cursor
 * object. Returns 0 on success or a negative errno.
 */
int gma_crtc_cursor_set(struct drm_crtc *crtc,
			struct drm_file *file_priv,
			uint32_t handle,
			uint32_t width, uint32_t height)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	int pipe = gma_crtc->pipe;
	uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
	uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
	uint32_t temp;
	size_t addr = 0;
	struct gtt_range *gt;
	struct gtt_range *cursor_gt = gma_crtc->cursor_gt;
	struct drm_gem_object *obj;
	void *tmp_dst, *tmp_src;
	int ret = 0, i, cursor_pages;

	/* If we didn't get a handle then turn the cursor off */
	if (!handle) {
		temp = CURSOR_MODE_DISABLE;

		if (gma_power_begin(dev, false)) {
			REG_WRITE(control, temp);
			REG_WRITE(base, 0);
			gma_power_end(dev);
		}

		/* Unpin the old GEM object */
		if (gma_crtc->cursor_obj) {
			gt = container_of(gma_crtc->cursor_obj,
					  struct gtt_range, gem);
			psb_gtt_unpin(gt);
			drm_gem_object_unreference(gma_crtc->cursor_obj);
			gma_crtc->cursor_obj = NULL;
		}

		return 0;
	}

	/* Currently we only support 64x64 cursors */
	if (width != 64 || height != 64) {
		dev_dbg(dev->dev, "We currently only support 64x64 cursors\n");
		return -EINVAL;
	}

	/* Takes a reference that is dropped on the unref_cursor path or
	   carried by gma_crtc->cursor_obj on success */
	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj)
		return -ENOENT;

	/* 64x64 at 4 bytes per pixel */
	if (obj->size < width * height * 4) {
		dev_dbg(dev->dev, "Buffer is too small\n");
		ret = -ENOMEM;
		goto unref_cursor;
	}

	gt = container_of(obj, struct gtt_range, gem);

	/* Pin the memory into the GTT */
	ret = psb_gtt_pin(gt);
	if (ret) {
		dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
		goto unref_cursor;
	}

	if (dev_priv->ops->cursor_needs_phys) {
		if (cursor_gt == NULL) {
			dev_err(dev->dev, "No hardware cursor mem available");
			ret = -ENOMEM;
			/* NOTE(review): gt was pinned just above and is not
			   unpinned on this path — looks like a pin leak;
			   confirm against psb_gtt_pin/unpin semantics. */
			goto unref_cursor;
		}

		/* Prevent overflow */
		if (gt->npage > 4)
			cursor_pages = 4;
		else
			cursor_pages = gt->npage;

		/* Copy the cursor to cursor mem */
		tmp_dst = dev_priv->vram_addr + cursor_gt->offset;
		for (i = 0; i < cursor_pages; i++) {
			tmp_src = kmap(gt->pages[i]);
			memcpy(tmp_dst, tmp_src, PAGE_SIZE);
			kunmap(gt->pages[i]);
			tmp_dst += PAGE_SIZE;
		}

		addr = gma_crtc->cursor_addr;
	} else {
		addr = gt->offset;
		gma_crtc->cursor_addr = addr;
	}

	temp = 0;
	/* set the pipe for the cursor */
	temp |= (pipe << 28);
	temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;

	if (gma_power_begin(dev, false)) {
		REG_WRITE(control, temp);
		REG_WRITE(base, addr);
		gma_power_end(dev);
	}

	/* unpin the old bo */
	if (gma_crtc->cursor_obj) {
		gt = container_of(gma_crtc->cursor_obj, struct gtt_range, gem);
		psb_gtt_unpin(gt);
		drm_gem_object_unreference(gma_crtc->cursor_obj);
	}

	/* The lookup reference is kept while the object is the live cursor */
	gma_crtc->cursor_obj = obj;
	return ret;

unref_cursor:
	drm_gem_object_unreference(obj);
	return ret;
}
449
450int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
451{
452 struct drm_device *dev = crtc->dev;
453 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
454 int pipe = gma_crtc->pipe;
455 uint32_t temp = 0;
456 uint32_t addr;
457
458 if (x < 0) {
459 temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
460 x = -x;
461 }
462 if (y < 0) {
463 temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
464 y = -y;
465 }
466
467 temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
468 temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
469
470 addr = gma_crtc->cursor_addr;
471
472 if (gma_power_begin(dev, false)) {
473 REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
474 REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, addr);
475 gma_power_end(dev);
476 }
477 return 0;
478}
479
480bool gma_crtc_mode_fixup(struct drm_crtc *crtc,
481 const struct drm_display_mode *mode,
482 struct drm_display_mode *adjusted_mode)
483{
484 return true;
485}
486
487void gma_crtc_prepare(struct drm_crtc *crtc)
488{
489 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
490 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
491}
492
493void gma_crtc_commit(struct drm_crtc *crtc)
494{
495 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
496 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
497}
498
499void gma_crtc_disable(struct drm_crtc *crtc)
500{
501 struct gtt_range *gt;
502 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
503
504 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
505
506 if (crtc->fb) {
507 gt = to_psb_fb(crtc->fb)->gtt;
508 psb_gtt_unpin(gt);
509 }
510}
511
512void gma_crtc_destroy(struct drm_crtc *crtc)
513{
514 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
515
516 kfree(gma_crtc->crtc_state);
517 drm_crtc_cleanup(crtc);
518 kfree(gma_crtc);
519}
520
521int gma_crtc_set_config(struct drm_mode_set *set)
522{
523 struct drm_device *dev = set->crtc->dev;
524 struct drm_psb_private *dev_priv = dev->dev_private;
525 int ret;
526
527 if (!dev_priv->rpm_enabled)
528 return drm_crtc_helper_set_config(set);
529
530 pm_runtime_forbid(&dev->pdev->dev);
531 ret = drm_crtc_helper_set_config(set);
532 pm_runtime_allow(&dev->pdev->dev);
533
534 return ret;
535}
536
/**
 * Save HW states of given crtc
 *
 * Snapshots every pipe/plane/PLL register for this CRTC (plus the full
 * 256-entry palette) into gma_crtc->crtc_state so gma_crtc_restore() can
 * reprogram the hardware later, e.g. across a suspend/resume cycle.
 */
void gma_crtc_save(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	struct psb_intel_crtc_state *crtc_state = gma_crtc->crtc_state;
	const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
	uint32_t palette_reg;
	int i;

	/* State storage is allocated elsewhere; without it nothing to do */
	if (!crtc_state) {
		dev_err(dev->dev, "No CRTC state found\n");
		return;
	}

	crtc_state->saveDSPCNTR = REG_READ(map->cntr);
	crtc_state->savePIPECONF = REG_READ(map->conf);
	crtc_state->savePIPESRC = REG_READ(map->src);
	crtc_state->saveFP0 = REG_READ(map->fp0);
	crtc_state->saveFP1 = REG_READ(map->fp1);
	crtc_state->saveDPLL = REG_READ(map->dpll);
	crtc_state->saveHTOTAL = REG_READ(map->htotal);
	crtc_state->saveHBLANK = REG_READ(map->hblank);
	crtc_state->saveHSYNC = REG_READ(map->hsync);
	crtc_state->saveVTOTAL = REG_READ(map->vtotal);
	crtc_state->saveVBLANK = REG_READ(map->vblank);
	crtc_state->saveVSYNC = REG_READ(map->vsync);
	crtc_state->saveDSPSTRIDE = REG_READ(map->stride);

	/* NOTE: DSPSIZE DSPPOS only for psb */
	crtc_state->saveDSPSIZE = REG_READ(map->size);
	crtc_state->saveDSPPOS = REG_READ(map->pos);

	crtc_state->saveDSPBASE = REG_READ(map->base);

	/* Palette registers are 4 bytes apart */
	palette_reg = map->palette;
	for (i = 0; i < 256; ++i)
		crtc_state->savePalette[i] = REG_READ(palette_reg + (i << 2));
}
579
/**
 * Restore HW states of given crtc
 *
 * Reprograms the registers captured by gma_crtc_save() in a hardware-safe
 * order: the DPLL is disabled first (if it was enabled), dividers and
 * timings are written, then pipe and plane are brought back with vblank
 * waits between the steps. Finally the saved palette is reloaded.
 */
void gma_crtc_restore(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	struct psb_intel_crtc_state *crtc_state = gma_crtc->crtc_state;
	const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
	uint32_t palette_reg;
	int i;

	if (!crtc_state) {
		dev_err(dev->dev, "No crtc state\n");
		return;
	}

	/* Turn the PLL off before touching dividers, then let it settle */
	if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
		REG_WRITE(map->dpll,
			  crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
		REG_READ(map->dpll);
		udelay(150);
	}

	REG_WRITE(map->fp0, crtc_state->saveFP0);
	REG_READ(map->fp0);

	REG_WRITE(map->fp1, crtc_state->saveFP1);
	REG_READ(map->fp1);

	/* Re-enable the PLL with its saved configuration and settle */
	REG_WRITE(map->dpll, crtc_state->saveDPLL);
	REG_READ(map->dpll);
	udelay(150);

	REG_WRITE(map->htotal, crtc_state->saveHTOTAL);
	REG_WRITE(map->hblank, crtc_state->saveHBLANK);
	REG_WRITE(map->hsync, crtc_state->saveHSYNC);
	REG_WRITE(map->vtotal, crtc_state->saveVTOTAL);
	REG_WRITE(map->vblank, crtc_state->saveVBLANK);
	REG_WRITE(map->vsync, crtc_state->saveVSYNC);
	REG_WRITE(map->stride, crtc_state->saveDSPSTRIDE);

	REG_WRITE(map->size, crtc_state->saveDSPSIZE);
	REG_WRITE(map->pos, crtc_state->saveDSPPOS);

	REG_WRITE(map->src, crtc_state->savePIPESRC);
	REG_WRITE(map->base, crtc_state->saveDSPBASE);
	REG_WRITE(map->conf, crtc_state->savePIPECONF);

	gma_wait_for_vblank(dev);

	/* Plane control last, with a second base write to latch it */
	REG_WRITE(map->cntr, crtc_state->saveDSPCNTR);
	REG_WRITE(map->base, crtc_state->saveDSPBASE);

	gma_wait_for_vblank(dev);

	/* Palette registers are 4 bytes apart */
	palette_reg = map->palette;
	for (i = 0; i < 256; ++i)
		REG_WRITE(palette_reg + (i << 2), crtc_state->savePalette[i]);
}
641
642void gma_encoder_prepare(struct drm_encoder *encoder)
643{
644 struct drm_encoder_helper_funcs *encoder_funcs =
645 encoder->helper_private;
646 /* lvds has its own version of prepare see psb_intel_lvds_prepare */
647 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
648}
649
650void gma_encoder_commit(struct drm_encoder *encoder)
651{
652 struct drm_encoder_helper_funcs *encoder_funcs =
653 encoder->helper_private;
654 /* lvds has its own version of commit see psb_intel_lvds_commit */
655 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
656}
657
/* Detach the encoder from the DRM core and free its wrapper structure */
void gma_encoder_destroy(struct drm_encoder *encoder)
{
	struct gma_encoder *gma_encoder = to_gma_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(gma_encoder);
}
665
666/* Currently there is only a 1:1 mapping of encoders and connectors */
667struct drm_encoder *gma_best_encoder(struct drm_connector *connector)
668{
669 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
670
671 return &gma_encoder->base;
672}
673
674void gma_connector_attach_encoder(struct gma_connector *connector,
675 struct gma_encoder *encoder)
676{
677 connector->encoder = encoder;
678 drm_mode_connector_attach_encoder(&connector->base,
679 &encoder->base);
680}
681
/* Reject the clock: expands to a 'return false' (error print disabled).
 * NOTE: hides control flow inside a macro — callers below rely on it. */
#define GMA_PLL_INVALID(s) { /* DRM_ERROR(s); */ return false; }

/**
 * gma_pll_is_valid - check a candidate PLL configuration against its limits
 * @crtc: CRTC the clock is being computed for (unused here)
 * @limit: per-platform min/max ranges for each divider and derived value
 * @clock: candidate configuration with derived dot/vco/m/p already filled in
 *
 * Returns false on the first value that falls outside its allowed range,
 * true if every field is within limits.
 */
bool gma_pll_is_valid(struct drm_crtc *crtc,
		      const struct gma_limit_t *limit,
		      struct gma_clock_t *clock)
{
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		GMA_PLL_INVALID("p1 out of range");
	if (clock->p < limit->p.min || limit->p.max < clock->p)
		GMA_PLL_INVALID("p out of range");
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		GMA_PLL_INVALID("m2 out of range");
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		GMA_PLL_INVALID("m1 out of range");
	/* On CDV m1 is always 0 */
	if (clock->m1 <= clock->m2 && clock->m1 != 0)
		GMA_PLL_INVALID("m1 <= m2 && m1 != 0");
	if (clock->m < limit->m.min || limit->m.max < clock->m)
		GMA_PLL_INVALID("m out of range");
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		GMA_PLL_INVALID("n out of range");
	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		GMA_PLL_INVALID("vco out of range");
	/* XXX: We may need to be checking "Dot clock"
	 * depending on the multiplier, connector, etc.,
	 * rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		GMA_PLL_INVALID("dot out of range");

	return true;
}
714
/**
 * gma_find_best_pll - exhaustive search for PLL dividers hitting a dot clock
 * @limit: per-platform divider ranges and p2 selection rules
 * @crtc: CRTC the clock is computed for (provides per-chip clock callbacks)
 * @target: desired dot clock
 * @refclk: reference clock feeding the PLL
 * @best_clock: out parameter; filled with the closest valid configuration
 *
 * Fixes p2 from the LVDS state (or the dot-clock threshold), then brute
 * forces every (m1, m2, n, p1) combination within limits, keeping the valid
 * candidate whose derived dot clock is closest to @target. Returns true if
 * any valid configuration was found (i.e. the error improved on the initial
 * value of @target), false otherwise.
 */
bool gma_find_best_pll(const struct gma_limit_t *limit,
		       struct drm_crtc *crtc, int target, int refclk,
		       struct gma_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	const struct gma_clock_funcs *clock_funcs =
						to_gma_crtc(crtc)->clock_funcs;
	struct gma_clock_t clock;
	int err = target;	/* best |dot - target| seen so far */

	if (gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
	    (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
		/*
		 * For LVDS, if the panel is on, just rely on its current
		 * settings for dual-channel. We haven't figured out how to
		 * reliably set up different single/dual channel state, if we
		 * even can.
		 */
		if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));

	/* m1 is always 0 on CDV so the outmost loop will run just once */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     (clock.m2 < clock.m1 || clock.m1 == 0) &&
		      clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max;
				     clock.p1++) {
					int this_err;

					/* Fill in derived dot/vco/m/p */
					clock_funcs->clock(refclk, &clock);

					if (!clock_funcs->pll_is_valid(crtc,
								limit, &clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return err != target;
}
diff --git a/drivers/gpu/drm/gma500/gma_display.h b/drivers/gpu/drm/gma500/gma_display.h
new file mode 100644
index 000000000000..78b9f986a6e5
--- /dev/null
+++ b/drivers/gpu/drm/gma500/gma_display.h
@@ -0,0 +1,103 @@
1/*
2 * Copyright © 2006-2011 Intel Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Authors:
18 * Eric Anholt <eric@anholt.net>
19 * Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
20 */
21
22#ifndef _GMA_DISPLAY_H_
23#define _GMA_DISPLAY_H_
24
25#include <linux/pm_runtime.h>
26
/* A single PLL divider configuration plus the values derived from it */
struct gma_clock_t {
	/* given values */
	int n;			/* reference divisor */
	int m1, m2;		/* feedback multipliers (m1 is 0 on CDV) */
	int p1, p2;		/* post divisors */
	/* derived values */
	int dot;		/* resulting dot clock */
	int vco;		/* resulting VCO frequency */
	int m;			/* combined multiplier */
	int p;			/* combined post divisor */
};

/* Inclusive [min, max] bound for one PLL parameter */
struct gma_range_t {
	int min, max;
};

/* p2 post-divisor selection: slow below dot_limit, fast at or above it */
struct gma_p2_t {
	int dot_limit;
	int p2_slow, p2_fast;
};

/* Per-platform PLL limits plus the search routine to use with them */
struct gma_limit_t {
	struct gma_range_t dot, vco, n, m, m1, m2, p, p1;
	struct gma_p2_t p2;
	bool (*find_pll)(const struct gma_limit_t *, struct drm_crtc *,
			 int target, int refclk,
			 struct gma_clock_t *best_clock);
};

/* Per-chip clock callbacks: derive values, fetch limits, validate configs */
struct gma_clock_funcs {
	void (*clock)(int refclk, struct gma_clock_t *clock);
	const struct gma_limit_t *(*limit)(struct drm_crtc *crtc, int refclk);
	bool (*pll_is_valid)(struct drm_crtc *crtc,
			     const struct gma_limit_t *limit,
			     struct gma_clock_t *clock);
};
63
64/* Common pipe related functions */
65extern bool gma_pipe_has_type(struct drm_crtc *crtc, int type);
66extern void gma_wait_for_vblank(struct drm_device *dev);
67extern int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
68 struct drm_framebuffer *old_fb);
69extern int gma_crtc_cursor_set(struct drm_crtc *crtc,
70 struct drm_file *file_priv,
71 uint32_t handle,
72 uint32_t width, uint32_t height);
73extern int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
74extern void gma_crtc_load_lut(struct drm_crtc *crtc);
75extern void gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
76 u16 *blue, u32 start, u32 size);
77extern void gma_crtc_dpms(struct drm_crtc *crtc, int mode);
78extern bool gma_crtc_mode_fixup(struct drm_crtc *crtc,
79 const struct drm_display_mode *mode,
80 struct drm_display_mode *adjusted_mode);
81extern void gma_crtc_prepare(struct drm_crtc *crtc);
82extern void gma_crtc_commit(struct drm_crtc *crtc);
83extern void gma_crtc_disable(struct drm_crtc *crtc);
84extern void gma_crtc_destroy(struct drm_crtc *crtc);
85extern int gma_crtc_set_config(struct drm_mode_set *set);
86
87extern void gma_crtc_save(struct drm_crtc *crtc);
88extern void gma_crtc_restore(struct drm_crtc *crtc);
89
90extern void gma_encoder_prepare(struct drm_encoder *encoder);
91extern void gma_encoder_commit(struct drm_encoder *encoder);
92extern void gma_encoder_destroy(struct drm_encoder *encoder);
93
94/* Common clock related functions */
95extern const struct gma_limit_t *gma_limit(struct drm_crtc *crtc, int refclk);
96extern void gma_clock(int refclk, struct gma_clock_t *clock);
97extern bool gma_pll_is_valid(struct drm_crtc *crtc,
98 const struct gma_limit_t *limit,
99 struct gma_clock_t *clock);
100extern bool gma_find_best_pll(const struct gma_limit_t *limit,
101 struct drm_crtc *crtc, int target, int refclk,
102 struct gma_clock_t *best_clock);
103#endif
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index 1f82183536a3..92babac362ec 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -196,37 +196,17 @@ void psb_gtt_roll(struct drm_device *dev, struct gtt_range *r, int roll)
196 */ 196 */
197static int psb_gtt_attach_pages(struct gtt_range *gt) 197static int psb_gtt_attach_pages(struct gtt_range *gt)
198{ 198{
199 struct inode *inode; 199 struct page **pages;
200 struct address_space *mapping;
201 int i;
202 struct page *p;
203 int pages = gt->gem.size / PAGE_SIZE;
204 200
205 WARN_ON(gt->pages); 201 WARN_ON(gt->pages);
206 202
207 /* This is the shared memory object that backs the GEM resource */ 203 pages = drm_gem_get_pages(&gt->gem, 0);
208 inode = file_inode(gt->gem.filp); 204 if (IS_ERR(pages))
209 mapping = inode->i_mapping; 205 return PTR_ERR(pages);
210 206
211 gt->pages = kmalloc(pages * sizeof(struct page *), GFP_KERNEL); 207 gt->pages = pages;
212 if (gt->pages == NULL)
213 return -ENOMEM;
214 gt->npage = pages;
215 208
216 for (i = 0; i < pages; i++) {
217 p = shmem_read_mapping_page(mapping, i);
218 if (IS_ERR(p))
219 goto err;
220 gt->pages[i] = p;
221 }
222 return 0; 209 return 0;
223
224err:
225 while (i--)
226 page_cache_release(gt->pages[i]);
227 kfree(gt->pages);
228 gt->pages = NULL;
229 return PTR_ERR(p);
230} 210}
231 211
232/** 212/**
@@ -240,13 +220,7 @@ err:
240 */ 220 */
241static void psb_gtt_detach_pages(struct gtt_range *gt) 221static void psb_gtt_detach_pages(struct gtt_range *gt)
242{ 222{
243 int i; 223 drm_gem_put_pages(&gt->gem, gt->pages, true, false);
244 for (i = 0; i < gt->npage; i++) {
245 /* FIXME: do we need to force dirty */
246 set_page_dirty(gt->pages[i]);
247 page_cache_release(gt->pages[i]);
248 }
249 kfree(gt->pages);
250 gt->pages = NULL; 224 gt->pages = NULL;
251} 225}
252 226
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
index 3abf8315f57c..860a4ee9baaf 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
@@ -249,12 +249,11 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
249 struct drm_encoder *encoder = connector->encoder; 249 struct drm_encoder *encoder = connector->encoder;
250 250
251 if (!strcmp(property->name, "scaling mode") && encoder) { 251 if (!strcmp(property->name, "scaling mode") && encoder) {
252 struct psb_intel_crtc *psb_crtc = 252 struct gma_crtc *gma_crtc = to_gma_crtc(encoder->crtc);
253 to_psb_intel_crtc(encoder->crtc);
254 bool centerechange; 253 bool centerechange;
255 uint64_t val; 254 uint64_t val;
256 255
257 if (!psb_crtc) 256 if (!gma_crtc)
258 goto set_prop_error; 257 goto set_prop_error;
259 258
260 switch (value) { 259 switch (value) {
@@ -281,11 +280,11 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
281 centerechange = (val == DRM_MODE_SCALE_NO_SCALE) || 280 centerechange = (val == DRM_MODE_SCALE_NO_SCALE) ||
282 (value == DRM_MODE_SCALE_NO_SCALE); 281 (value == DRM_MODE_SCALE_NO_SCALE);
283 282
284 if (psb_crtc->saved_mode.hdisplay != 0 && 283 if (gma_crtc->saved_mode.hdisplay != 0 &&
285 psb_crtc->saved_mode.vdisplay != 0) { 284 gma_crtc->saved_mode.vdisplay != 0) {
286 if (centerechange) { 285 if (centerechange) {
287 if (!drm_crtc_helper_set_mode(encoder->crtc, 286 if (!drm_crtc_helper_set_mode(encoder->crtc,
288 &psb_crtc->saved_mode, 287 &gma_crtc->saved_mode,
289 encoder->crtc->x, 288 encoder->crtc->x,
290 encoder->crtc->y, 289 encoder->crtc->y,
291 encoder->crtc->fb)) 290 encoder->crtc->fb))
@@ -294,8 +293,8 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
294 struct drm_encoder_helper_funcs *funcs = 293 struct drm_encoder_helper_funcs *funcs =
295 encoder->helper_private; 294 encoder->helper_private;
296 funcs->mode_set(encoder, 295 funcs->mode_set(encoder,
297 &psb_crtc->saved_mode, 296 &gma_crtc->saved_mode,
298 &psb_crtc->saved_adjusted_mode); 297 &gma_crtc->saved_adjusted_mode);
299 } 298 }
300 } 299 }
301 } else if (!strcmp(property->name, "backlight") && encoder) { 300 } else if (!strcmp(property->name, "backlight") && encoder) {
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.h b/drivers/gpu/drm/gma500/mdfld_dsi_output.h
index 36eb0744841c..45d5af0546bf 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.h
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.h
@@ -227,7 +227,7 @@ enum {
227#define DSI_DPI_DISABLE_BTA BIT(3) 227#define DSI_DPI_DISABLE_BTA BIT(3)
228 228
229struct mdfld_dsi_connector { 229struct mdfld_dsi_connector {
230 struct psb_intel_connector base; 230 struct gma_connector base;
231 231
232 int pipe; 232 int pipe;
233 void *private; 233 void *private;
@@ -238,7 +238,7 @@ struct mdfld_dsi_connector {
238}; 238};
239 239
240struct mdfld_dsi_encoder { 240struct mdfld_dsi_encoder {
241 struct psb_intel_encoder base; 241 struct gma_encoder base;
242 void *private; 242 void *private;
243}; 243};
244 244
@@ -269,21 +269,21 @@ struct mdfld_dsi_config {
269static inline struct mdfld_dsi_connector *mdfld_dsi_connector( 269static inline struct mdfld_dsi_connector *mdfld_dsi_connector(
270 struct drm_connector *connector) 270 struct drm_connector *connector)
271{ 271{
272 struct psb_intel_connector *psb_connector; 272 struct gma_connector *gma_connector;
273 273
274 psb_connector = to_psb_intel_connector(connector); 274 gma_connector = to_gma_connector(connector);
275 275
276 return container_of(psb_connector, struct mdfld_dsi_connector, base); 276 return container_of(gma_connector, struct mdfld_dsi_connector, base);
277} 277}
278 278
279static inline struct mdfld_dsi_encoder *mdfld_dsi_encoder( 279static inline struct mdfld_dsi_encoder *mdfld_dsi_encoder(
280 struct drm_encoder *encoder) 280 struct drm_encoder *encoder)
281{ 281{
282 struct psb_intel_encoder *psb_encoder; 282 struct gma_encoder *gma_encoder;
283 283
284 psb_encoder = to_psb_intel_encoder(encoder); 284 gma_encoder = to_gma_encoder(encoder);
285 285
286 return container_of(psb_encoder, struct mdfld_dsi_encoder, base); 286 return container_of(gma_encoder, struct mdfld_dsi_encoder, base);
287} 287}
288 288
289static inline struct mdfld_dsi_config * 289static inline struct mdfld_dsi_config *
diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c
index 74485dc43945..321c00a944e9 100644
--- a/drivers/gpu/drm/gma500/mdfld_intel_display.c
+++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c
@@ -23,7 +23,7 @@
23 23
24#include <drm/drmP.h> 24#include <drm/drmP.h>
25#include "psb_intel_reg.h" 25#include "psb_intel_reg.h"
26#include "psb_intel_display.h" 26#include "gma_display.h"
27#include "framebuffer.h" 27#include "framebuffer.h"
28#include "mdfld_output.h" 28#include "mdfld_output.h"
29#include "mdfld_dsi_output.h" 29#include "mdfld_dsi_output.h"
@@ -65,7 +65,7 @@ void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe)
65 } 65 }
66 66
67 /* FIXME JLIU7_PO */ 67 /* FIXME JLIU7_PO */
68 psb_intel_wait_for_vblank(dev); 68 gma_wait_for_vblank(dev);
69 return; 69 return;
70 70
71 /* Wait for for the pipe disable to take effect. */ 71 /* Wait for for the pipe disable to take effect. */
@@ -93,7 +93,7 @@ void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe)
93 } 93 }
94 94
95 /* FIXME JLIU7_PO */ 95 /* FIXME JLIU7_PO */
96 psb_intel_wait_for_vblank(dev); 96 gma_wait_for_vblank(dev);
97 return; 97 return;
98 98
99 /* Wait for for the pipe enable to take effect. */ 99 /* Wait for for the pipe enable to take effect. */
@@ -104,25 +104,6 @@ void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe)
104 } 104 }
105} 105}
106 106
107static void psb_intel_crtc_prepare(struct drm_crtc *crtc)
108{
109 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
110 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
111}
112
113static void psb_intel_crtc_commit(struct drm_crtc *crtc)
114{
115 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
116 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
117}
118
119static bool psb_intel_crtc_mode_fixup(struct drm_crtc *crtc,
120 const struct drm_display_mode *mode,
121 struct drm_display_mode *adjusted_mode)
122{
123 return true;
124}
125
126/** 107/**
127 * Return the pipe currently connected to the panel fitter, 108 * Return the pipe currently connected to the panel fitter,
128 * or -1 if the panel fitter is not present or not in use 109 * or -1 if the panel fitter is not present or not in use
@@ -184,9 +165,9 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
184{ 165{
185 struct drm_device *dev = crtc->dev; 166 struct drm_device *dev = crtc->dev;
186 struct drm_psb_private *dev_priv = dev->dev_private; 167 struct drm_psb_private *dev_priv = dev->dev_private;
187 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 168 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
188 struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb); 169 struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
189 int pipe = psb_intel_crtc->pipe; 170 int pipe = gma_crtc->pipe;
190 const struct psb_offset *map = &dev_priv->regmap[pipe]; 171 const struct psb_offset *map = &dev_priv->regmap[pipe];
191 unsigned long start, offset; 172 unsigned long start, offset;
192 u32 dspcntr; 173 u32 dspcntr;
@@ -324,8 +305,8 @@ static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode)
324{ 305{
325 struct drm_device *dev = crtc->dev; 306 struct drm_device *dev = crtc->dev;
326 struct drm_psb_private *dev_priv = dev->dev_private; 307 struct drm_psb_private *dev_priv = dev->dev_private;
327 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 308 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
328 int pipe = psb_intel_crtc->pipe; 309 int pipe = gma_crtc->pipe;
329 const struct psb_offset *map = &dev_priv->regmap[pipe]; 310 const struct psb_offset *map = &dev_priv->regmap[pipe];
330 u32 pipeconf = dev_priv->pipeconf[pipe]; 311 u32 pipeconf = dev_priv->pipeconf[pipe];
331 u32 temp; 312 u32 temp;
@@ -436,7 +417,7 @@ static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode)
436 } 417 }
437 } 418 }
438 419
439 psb_intel_crtc_load_lut(crtc); 420 gma_crtc_load_lut(crtc);
440 421
441 /* Give the overlay scaler a chance to enable 422 /* Give the overlay scaler a chance to enable
442 if it's on this pipe */ 423 if it's on this pipe */
@@ -611,8 +592,8 @@ static const struct mrst_limit_t *mdfld_limit(struct drm_crtc *crtc)
611 struct drm_device *dev = crtc->dev; 592 struct drm_device *dev = crtc->dev;
612 struct drm_psb_private *dev_priv = dev->dev_private; 593 struct drm_psb_private *dev_priv = dev->dev_private;
613 594
614 if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI) 595 if (gma_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)
615 || psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI2)) { 596 || gma_pipe_has_type(crtc, INTEL_OUTPUT_MIPI2)) {
616 if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19)) 597 if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19))
617 limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_19]; 598 limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_19];
618 else if (ksel == KSEL_BYPASS_25) 599 else if (ksel == KSEL_BYPASS_25)
@@ -624,7 +605,7 @@ static const struct mrst_limit_t *mdfld_limit(struct drm_crtc *crtc)
624 (dev_priv->core_freq == 100 || 605 (dev_priv->core_freq == 100 ||
625 dev_priv->core_freq == 200)) 606 dev_priv->core_freq == 200))
626 limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_100]; 607 limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_100];
627 } else if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) { 608 } else if (gma_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
628 if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19)) 609 if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19))
629 limit = &mdfld_limits[MDFLD_LIMT_DPLL_19]; 610 limit = &mdfld_limits[MDFLD_LIMT_DPLL_19];
630 else if (ksel == KSEL_BYPASS_25) 611 else if (ksel == KSEL_BYPASS_25)
@@ -688,9 +669,9 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
688 struct drm_framebuffer *old_fb) 669 struct drm_framebuffer *old_fb)
689{ 670{
690 struct drm_device *dev = crtc->dev; 671 struct drm_device *dev = crtc->dev;
691 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 672 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
692 struct drm_psb_private *dev_priv = dev->dev_private; 673 struct drm_psb_private *dev_priv = dev->dev_private;
693 int pipe = psb_intel_crtc->pipe; 674 int pipe = gma_crtc->pipe;
694 const struct psb_offset *map = &dev_priv->regmap[pipe]; 675 const struct psb_offset *map = &dev_priv->regmap[pipe];
695 int refclk = 0; 676 int refclk = 0;
696 int clk_n = 0, clk_p2 = 0, clk_byte = 1, clk = 0, m_conv = 0, 677 int clk_n = 0, clk_p2 = 0, clk_byte = 1, clk = 0, m_conv = 0,
@@ -700,7 +681,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
700 u32 dpll = 0, fp = 0; 681 u32 dpll = 0, fp = 0;
701 bool is_mipi = false, is_mipi2 = false, is_hdmi = false; 682 bool is_mipi = false, is_mipi2 = false, is_hdmi = false;
702 struct drm_mode_config *mode_config = &dev->mode_config; 683 struct drm_mode_config *mode_config = &dev->mode_config;
703 struct psb_intel_encoder *psb_intel_encoder = NULL; 684 struct gma_encoder *gma_encoder = NULL;
704 uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN; 685 uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
705 struct drm_encoder *encoder; 686 struct drm_encoder *encoder;
706 struct drm_connector *connector; 687 struct drm_connector *connector;
@@ -749,9 +730,9 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
749 if (!gma_power_begin(dev, true)) 730 if (!gma_power_begin(dev, true))
750 return 0; 731 return 0;
751 732
752 memcpy(&psb_intel_crtc->saved_mode, mode, 733 memcpy(&gma_crtc->saved_mode, mode,
753 sizeof(struct drm_display_mode)); 734 sizeof(struct drm_display_mode));
754 memcpy(&psb_intel_crtc->saved_adjusted_mode, adjusted_mode, 735 memcpy(&gma_crtc->saved_adjusted_mode, adjusted_mode,
755 sizeof(struct drm_display_mode)); 736 sizeof(struct drm_display_mode));
756 737
757 list_for_each_entry(connector, &mode_config->connector_list, head) { 738 list_for_each_entry(connector, &mode_config->connector_list, head) {
@@ -766,9 +747,9 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
766 if (encoder->crtc != crtc) 747 if (encoder->crtc != crtc)
767 continue; 748 continue;
768 749
769 psb_intel_encoder = psb_intel_attached_encoder(connector); 750 gma_encoder = gma_attached_encoder(connector);
770 751
771 switch (psb_intel_encoder->type) { 752 switch (gma_encoder->type) {
772 case INTEL_OUTPUT_MIPI: 753 case INTEL_OUTPUT_MIPI:
773 is_mipi = true; 754 is_mipi = true;
774 break; 755 break;
@@ -819,7 +800,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
819 800
820 REG_WRITE(map->pos, 0); 801 REG_WRITE(map->pos, 0);
821 802
822 if (psb_intel_encoder) 803 if (gma_encoder)
823 drm_object_property_get_value(&connector->base, 804 drm_object_property_get_value(&connector->base,
824 dev->mode_config.scaling_mode_property, &scalingType); 805 dev->mode_config.scaling_mode_property, &scalingType);
825 806
@@ -1034,7 +1015,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
1034 1015
1035 /* Wait for for the pipe enable to take effect. */ 1016 /* Wait for for the pipe enable to take effect. */
1036 REG_WRITE(map->cntr, dev_priv->dspcntr[pipe]); 1017 REG_WRITE(map->cntr, dev_priv->dspcntr[pipe]);
1037 psb_intel_wait_for_vblank(dev); 1018 gma_wait_for_vblank(dev);
1038 1019
1039mrst_crtc_mode_set_exit: 1020mrst_crtc_mode_set_exit:
1040 1021
@@ -1045,10 +1026,10 @@ mrst_crtc_mode_set_exit:
1045 1026
1046const struct drm_crtc_helper_funcs mdfld_helper_funcs = { 1027const struct drm_crtc_helper_funcs mdfld_helper_funcs = {
1047 .dpms = mdfld_crtc_dpms, 1028 .dpms = mdfld_crtc_dpms,
1048 .mode_fixup = psb_intel_crtc_mode_fixup, 1029 .mode_fixup = gma_crtc_mode_fixup,
1049 .mode_set = mdfld_crtc_mode_set, 1030 .mode_set = mdfld_crtc_mode_set,
1050 .mode_set_base = mdfld__intel_pipe_set_base, 1031 .mode_set_base = mdfld__intel_pipe_set_base,
1051 .prepare = psb_intel_crtc_prepare, 1032 .prepare = gma_crtc_prepare,
1052 .commit = psb_intel_crtc_commit, 1033 .commit = gma_crtc_commit,
1053}; 1034};
1054 1035
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index 3071526bc3c1..54c98962b73e 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -23,7 +23,7 @@
23#include "psb_drv.h" 23#include "psb_drv.h"
24#include "psb_intel_drv.h" 24#include "psb_intel_drv.h"
25#include "psb_intel_reg.h" 25#include "psb_intel_reg.h"
26#include "psb_intel_display.h" 26#include "gma_display.h"
27#include "power.h" 27#include "power.h"
28 28
29struct psb_intel_range_t { 29struct psb_intel_range_t {
@@ -88,8 +88,8 @@ static const struct oaktrail_limit_t *oaktrail_limit(struct drm_crtc *crtc)
88 struct drm_device *dev = crtc->dev; 88 struct drm_device *dev = crtc->dev;
89 struct drm_psb_private *dev_priv = dev->dev_private; 89 struct drm_psb_private *dev_priv = dev->dev_private;
90 90
91 if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) 91 if (gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
92 || psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)) { 92 || gma_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)) {
93 switch (dev_priv->core_freq) { 93 switch (dev_priv->core_freq) {
94 case 100: 94 case 100:
95 limit = &oaktrail_limits[MRST_LIMIT_LVDS_100L]; 95 limit = &oaktrail_limits[MRST_LIMIT_LVDS_100L];
@@ -163,8 +163,8 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
163{ 163{
164 struct drm_device *dev = crtc->dev; 164 struct drm_device *dev = crtc->dev;
165 struct drm_psb_private *dev_priv = dev->dev_private; 165 struct drm_psb_private *dev_priv = dev->dev_private;
166 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 166 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
167 int pipe = psb_intel_crtc->pipe; 167 int pipe = gma_crtc->pipe;
168 const struct psb_offset *map = &dev_priv->regmap[pipe]; 168 const struct psb_offset *map = &dev_priv->regmap[pipe];
169 u32 temp; 169 u32 temp;
170 170
@@ -212,7 +212,7 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
212 REG_WRITE(map->base, REG_READ(map->base)); 212 REG_WRITE(map->base, REG_READ(map->base));
213 } 213 }
214 214
215 psb_intel_crtc_load_lut(crtc); 215 gma_crtc_load_lut(crtc);
216 216
217 /* Give the overlay scaler a chance to enable 217 /* Give the overlay scaler a chance to enable
218 if it's on this pipe */ 218 if it's on this pipe */
@@ -242,7 +242,7 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
242 REG_READ(map->conf); 242 REG_READ(map->conf);
243 } 243 }
244 /* Wait for for the pipe disable to take effect. */ 244 /* Wait for for the pipe disable to take effect. */
245 psb_intel_wait_for_vblank(dev); 245 gma_wait_for_vblank(dev);
246 246
247 temp = REG_READ(map->dpll); 247 temp = REG_READ(map->dpll);
248 if ((temp & DPLL_VCO_ENABLE) != 0) { 248 if ((temp & DPLL_VCO_ENABLE) != 0) {
@@ -292,9 +292,9 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
292 struct drm_framebuffer *old_fb) 292 struct drm_framebuffer *old_fb)
293{ 293{
294 struct drm_device *dev = crtc->dev; 294 struct drm_device *dev = crtc->dev;
295 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 295 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
296 struct drm_psb_private *dev_priv = dev->dev_private; 296 struct drm_psb_private *dev_priv = dev->dev_private;
297 int pipe = psb_intel_crtc->pipe; 297 int pipe = gma_crtc->pipe;
298 const struct psb_offset *map = &dev_priv->regmap[pipe]; 298 const struct psb_offset *map = &dev_priv->regmap[pipe];
299 int refclk = 0; 299 int refclk = 0;
300 struct oaktrail_clock_t clock; 300 struct oaktrail_clock_t clock;
@@ -303,7 +303,7 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
303 bool is_lvds = false; 303 bool is_lvds = false;
304 bool is_mipi = false; 304 bool is_mipi = false;
305 struct drm_mode_config *mode_config = &dev->mode_config; 305 struct drm_mode_config *mode_config = &dev->mode_config;
306 struct psb_intel_encoder *psb_intel_encoder = NULL; 306 struct gma_encoder *gma_encoder = NULL;
307 uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN; 307 uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
308 struct drm_connector *connector; 308 struct drm_connector *connector;
309 309
@@ -313,10 +313,10 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
313 if (!gma_power_begin(dev, true)) 313 if (!gma_power_begin(dev, true))
314 return 0; 314 return 0;
315 315
316 memcpy(&psb_intel_crtc->saved_mode, 316 memcpy(&gma_crtc->saved_mode,
317 mode, 317 mode,
318 sizeof(struct drm_display_mode)); 318 sizeof(struct drm_display_mode));
319 memcpy(&psb_intel_crtc->saved_adjusted_mode, 319 memcpy(&gma_crtc->saved_adjusted_mode,
320 adjusted_mode, 320 adjusted_mode,
321 sizeof(struct drm_display_mode)); 321 sizeof(struct drm_display_mode));
322 322
@@ -324,9 +324,9 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
324 if (!connector->encoder || connector->encoder->crtc != crtc) 324 if (!connector->encoder || connector->encoder->crtc != crtc)
325 continue; 325 continue;
326 326
327 psb_intel_encoder = psb_intel_attached_encoder(connector); 327 gma_encoder = gma_attached_encoder(connector);
328 328
329 switch (psb_intel_encoder->type) { 329 switch (gma_encoder->type) {
330 case INTEL_OUTPUT_LVDS: 330 case INTEL_OUTPUT_LVDS:
331 is_lvds = true; 331 is_lvds = true;
332 break; 332 break;
@@ -350,7 +350,7 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
350 ((mode->crtc_hdisplay - 1) << 16) | 350 ((mode->crtc_hdisplay - 1) << 16) |
351 (mode->crtc_vdisplay - 1)); 351 (mode->crtc_vdisplay - 1));
352 352
353 if (psb_intel_encoder) 353 if (gma_encoder)
354 drm_object_property_get_value(&connector->base, 354 drm_object_property_get_value(&connector->base,
355 dev->mode_config.scaling_mode_property, &scalingType); 355 dev->mode_config.scaling_mode_property, &scalingType);
356 356
@@ -484,31 +484,24 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
484 484
485 REG_WRITE(map->conf, pipeconf); 485 REG_WRITE(map->conf, pipeconf);
486 REG_READ(map->conf); 486 REG_READ(map->conf);
487 psb_intel_wait_for_vblank(dev); 487 gma_wait_for_vblank(dev);
488 488
489 REG_WRITE(map->cntr, dspcntr); 489 REG_WRITE(map->cntr, dspcntr);
490 psb_intel_wait_for_vblank(dev); 490 gma_wait_for_vblank(dev);
491 491
492oaktrail_crtc_mode_set_exit: 492oaktrail_crtc_mode_set_exit:
493 gma_power_end(dev); 493 gma_power_end(dev);
494 return 0; 494 return 0;
495} 495}
496 496
497static bool oaktrail_crtc_mode_fixup(struct drm_crtc *crtc,
498 const struct drm_display_mode *mode,
499 struct drm_display_mode *adjusted_mode)
500{
501 return true;
502}
503
504static int oaktrail_pipe_set_base(struct drm_crtc *crtc, 497static int oaktrail_pipe_set_base(struct drm_crtc *crtc,
505 int x, int y, struct drm_framebuffer *old_fb) 498 int x, int y, struct drm_framebuffer *old_fb)
506{ 499{
507 struct drm_device *dev = crtc->dev; 500 struct drm_device *dev = crtc->dev;
508 struct drm_psb_private *dev_priv = dev->dev_private; 501 struct drm_psb_private *dev_priv = dev->dev_private;
509 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 502 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
510 struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb); 503 struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
511 int pipe = psb_intel_crtc->pipe; 504 int pipe = gma_crtc->pipe;
512 const struct psb_offset *map = &dev_priv->regmap[pipe]; 505 const struct psb_offset *map = &dev_priv->regmap[pipe];
513 unsigned long start, offset; 506 unsigned long start, offset;
514 507
@@ -563,24 +556,12 @@ pipe_set_base_exit:
563 return ret; 556 return ret;
564} 557}
565 558
566static void oaktrail_crtc_prepare(struct drm_crtc *crtc)
567{
568 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
569 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
570}
571
572static void oaktrail_crtc_commit(struct drm_crtc *crtc)
573{
574 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
575 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
576}
577
578const struct drm_crtc_helper_funcs oaktrail_helper_funcs = { 559const struct drm_crtc_helper_funcs oaktrail_helper_funcs = {
579 .dpms = oaktrail_crtc_dpms, 560 .dpms = oaktrail_crtc_dpms,
580 .mode_fixup = oaktrail_crtc_mode_fixup, 561 .mode_fixup = gma_crtc_mode_fixup,
581 .mode_set = oaktrail_crtc_mode_set, 562 .mode_set = oaktrail_crtc_mode_set,
582 .mode_set_base = oaktrail_pipe_set_base, 563 .mode_set_base = oaktrail_pipe_set_base,
583 .prepare = oaktrail_crtc_prepare, 564 .prepare = gma_crtc_prepare,
584 .commit = oaktrail_crtc_commit, 565 .commit = gma_crtc_commit,
585}; 566};
586 567
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index f036f1fc161e..38153143ed8c 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -155,12 +155,6 @@ static void oaktrail_hdmi_audio_disable(struct drm_device *dev)
155 HDMI_READ(HDMI_HCR); 155 HDMI_READ(HDMI_HCR);
156} 156}
157 157
158static void wait_for_vblank(struct drm_device *dev)
159{
160 /* Wait for 20ms, i.e. one cycle at 50hz. */
161 mdelay(20);
162}
163
164static unsigned int htotal_calculate(struct drm_display_mode *mode) 158static unsigned int htotal_calculate(struct drm_display_mode *mode)
165{ 159{
166 u32 htotal, new_crtc_htotal; 160 u32 htotal, new_crtc_htotal;
@@ -372,10 +366,10 @@ int oaktrail_crtc_hdmi_mode_set(struct drm_crtc *crtc,
372 366
373 REG_WRITE(PCH_PIPEBCONF, pipeconf); 367 REG_WRITE(PCH_PIPEBCONF, pipeconf);
374 REG_READ(PCH_PIPEBCONF); 368 REG_READ(PCH_PIPEBCONF);
375 wait_for_vblank(dev); 369 gma_wait_for_vblank(dev);
376 370
377 REG_WRITE(dspcntr_reg, dspcntr); 371 REG_WRITE(dspcntr_reg, dspcntr);
378 wait_for_vblank(dev); 372 gma_wait_for_vblank(dev);
379 373
380 gma_power_end(dev); 374 gma_power_end(dev);
381 375
@@ -459,7 +453,7 @@ void oaktrail_crtc_hdmi_dpms(struct drm_crtc *crtc, int mode)
459 REG_READ(PCH_PIPEBCONF); 453 REG_READ(PCH_PIPEBCONF);
460 } 454 }
461 455
462 wait_for_vblank(dev); 456 gma_wait_for_vblank(dev);
463 457
464 /* Enable plane */ 458 /* Enable plane */
465 temp = REG_READ(DSPBCNTR); 459 temp = REG_READ(DSPBCNTR);
@@ -470,7 +464,7 @@ void oaktrail_crtc_hdmi_dpms(struct drm_crtc *crtc, int mode)
470 REG_READ(DSPBSURF); 464 REG_READ(DSPBSURF);
471 } 465 }
472 466
473 psb_intel_crtc_load_lut(crtc); 467 gma_crtc_load_lut(crtc);
474 } 468 }
475 469
476 /* DSPARB */ 470 /* DSPARB */
@@ -615,16 +609,16 @@ static void oaktrail_hdmi_destroy(struct drm_connector *connector)
615static const struct drm_encoder_helper_funcs oaktrail_hdmi_helper_funcs = { 609static const struct drm_encoder_helper_funcs oaktrail_hdmi_helper_funcs = {
616 .dpms = oaktrail_hdmi_dpms, 610 .dpms = oaktrail_hdmi_dpms,
617 .mode_fixup = oaktrail_hdmi_mode_fixup, 611 .mode_fixup = oaktrail_hdmi_mode_fixup,
618 .prepare = psb_intel_encoder_prepare, 612 .prepare = gma_encoder_prepare,
619 .mode_set = oaktrail_hdmi_mode_set, 613 .mode_set = oaktrail_hdmi_mode_set,
620 .commit = psb_intel_encoder_commit, 614 .commit = gma_encoder_commit,
621}; 615};
622 616
623static const struct drm_connector_helper_funcs 617static const struct drm_connector_helper_funcs
624 oaktrail_hdmi_connector_helper_funcs = { 618 oaktrail_hdmi_connector_helper_funcs = {
625 .get_modes = oaktrail_hdmi_get_modes, 619 .get_modes = oaktrail_hdmi_get_modes,
626 .mode_valid = oaktrail_hdmi_mode_valid, 620 .mode_valid = oaktrail_hdmi_mode_valid,
627 .best_encoder = psb_intel_best_encoder, 621 .best_encoder = gma_best_encoder,
628}; 622};
629 623
630static const struct drm_connector_funcs oaktrail_hdmi_connector_funcs = { 624static const struct drm_connector_funcs oaktrail_hdmi_connector_funcs = {
@@ -646,21 +640,21 @@ static const struct drm_encoder_funcs oaktrail_hdmi_enc_funcs = {
646void oaktrail_hdmi_init(struct drm_device *dev, 640void oaktrail_hdmi_init(struct drm_device *dev,
647 struct psb_intel_mode_device *mode_dev) 641 struct psb_intel_mode_device *mode_dev)
648{ 642{
649 struct psb_intel_encoder *psb_intel_encoder; 643 struct gma_encoder *gma_encoder;
650 struct psb_intel_connector *psb_intel_connector; 644 struct gma_connector *gma_connector;
651 struct drm_connector *connector; 645 struct drm_connector *connector;
652 struct drm_encoder *encoder; 646 struct drm_encoder *encoder;
653 647
654 psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL); 648 gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL);
655 if (!psb_intel_encoder) 649 if (!gma_encoder)
656 return; 650 return;
657 651
658 psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL); 652 gma_connector = kzalloc(sizeof(struct gma_connector), GFP_KERNEL);
659 if (!psb_intel_connector) 653 if (!gma_connector)
660 goto failed_connector; 654 goto failed_connector;
661 655
662 connector = &psb_intel_connector->base; 656 connector = &gma_connector->base;
663 encoder = &psb_intel_encoder->base; 657 encoder = &gma_encoder->base;
664 drm_connector_init(dev, connector, 658 drm_connector_init(dev, connector,
665 &oaktrail_hdmi_connector_funcs, 659 &oaktrail_hdmi_connector_funcs,
666 DRM_MODE_CONNECTOR_DVID); 660 DRM_MODE_CONNECTOR_DVID);
@@ -669,10 +663,9 @@ void oaktrail_hdmi_init(struct drm_device *dev,
669 &oaktrail_hdmi_enc_funcs, 663 &oaktrail_hdmi_enc_funcs,
670 DRM_MODE_ENCODER_TMDS); 664 DRM_MODE_ENCODER_TMDS);
671 665
672 psb_intel_connector_attach_encoder(psb_intel_connector, 666 gma_connector_attach_encoder(gma_connector, gma_encoder);
673 psb_intel_encoder);
674 667
675 psb_intel_encoder->type = INTEL_OUTPUT_HDMI; 668 gma_encoder->type = INTEL_OUTPUT_HDMI;
676 drm_encoder_helper_add(encoder, &oaktrail_hdmi_helper_funcs); 669 drm_encoder_helper_add(encoder, &oaktrail_hdmi_helper_funcs);
677 drm_connector_helper_add(connector, &oaktrail_hdmi_connector_helper_funcs); 670 drm_connector_helper_add(connector, &oaktrail_hdmi_connector_helper_funcs);
678 671
@@ -685,7 +678,7 @@ void oaktrail_hdmi_init(struct drm_device *dev,
685 return; 678 return;
686 679
687failed_connector: 680failed_connector:
688 kfree(psb_intel_encoder); 681 kfree(gma_encoder);
689} 682}
690 683
691static DEFINE_PCI_DEVICE_TABLE(hdmi_ids) = { 684static DEFINE_PCI_DEVICE_TABLE(hdmi_ids) = {
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index 325013a9c48c..e77d7214fca4 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -43,7 +43,7 @@
43 * Sets the power state for the panel. 43 * Sets the power state for the panel.
44 */ 44 */
45static void oaktrail_lvds_set_power(struct drm_device *dev, 45static void oaktrail_lvds_set_power(struct drm_device *dev,
46 struct psb_intel_encoder *psb_intel_encoder, 46 struct gma_encoder *gma_encoder,
47 bool on) 47 bool on)
48{ 48{
49 u32 pp_status; 49 u32 pp_status;
@@ -78,13 +78,12 @@ static void oaktrail_lvds_set_power(struct drm_device *dev,
78static void oaktrail_lvds_dpms(struct drm_encoder *encoder, int mode) 78static void oaktrail_lvds_dpms(struct drm_encoder *encoder, int mode)
79{ 79{
80 struct drm_device *dev = encoder->dev; 80 struct drm_device *dev = encoder->dev;
81 struct psb_intel_encoder *psb_intel_encoder = 81 struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
82 to_psb_intel_encoder(encoder);
83 82
84 if (mode == DRM_MODE_DPMS_ON) 83 if (mode == DRM_MODE_DPMS_ON)
85 oaktrail_lvds_set_power(dev, psb_intel_encoder, true); 84 oaktrail_lvds_set_power(dev, gma_encoder, true);
86 else 85 else
87 oaktrail_lvds_set_power(dev, psb_intel_encoder, false); 86 oaktrail_lvds_set_power(dev, gma_encoder, false);
88 87
89 /* XXX: We never power down the LVDS pairs. */ 88 /* XXX: We never power down the LVDS pairs. */
90} 89}
@@ -166,8 +165,7 @@ static void oaktrail_lvds_prepare(struct drm_encoder *encoder)
166{ 165{
167 struct drm_device *dev = encoder->dev; 166 struct drm_device *dev = encoder->dev;
168 struct drm_psb_private *dev_priv = dev->dev_private; 167 struct drm_psb_private *dev_priv = dev->dev_private;
169 struct psb_intel_encoder *psb_intel_encoder = 168 struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
170 to_psb_intel_encoder(encoder);
171 struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev; 169 struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
172 170
173 if (!gma_power_begin(dev, true)) 171 if (!gma_power_begin(dev, true))
@@ -176,7 +174,7 @@ static void oaktrail_lvds_prepare(struct drm_encoder *encoder)
176 mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL); 174 mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
177 mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL & 175 mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
178 BACKLIGHT_DUTY_CYCLE_MASK); 176 BACKLIGHT_DUTY_CYCLE_MASK);
179 oaktrail_lvds_set_power(dev, psb_intel_encoder, false); 177 oaktrail_lvds_set_power(dev, gma_encoder, false);
180 gma_power_end(dev); 178 gma_power_end(dev);
181} 179}
182 180
@@ -203,14 +201,13 @@ static void oaktrail_lvds_commit(struct drm_encoder *encoder)
203{ 201{
204 struct drm_device *dev = encoder->dev; 202 struct drm_device *dev = encoder->dev;
205 struct drm_psb_private *dev_priv = dev->dev_private; 203 struct drm_psb_private *dev_priv = dev->dev_private;
206 struct psb_intel_encoder *psb_intel_encoder = 204 struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
207 to_psb_intel_encoder(encoder);
208 struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev; 205 struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
209 206
210 if (mode_dev->backlight_duty_cycle == 0) 207 if (mode_dev->backlight_duty_cycle == 0)
211 mode_dev->backlight_duty_cycle = 208 mode_dev->backlight_duty_cycle =
212 oaktrail_lvds_get_max_backlight(dev); 209 oaktrail_lvds_get_max_backlight(dev);
213 oaktrail_lvds_set_power(dev, psb_intel_encoder, true); 210 oaktrail_lvds_set_power(dev, gma_encoder, true);
214} 211}
215 212
216static const struct drm_encoder_helper_funcs oaktrail_lvds_helper_funcs = { 213static const struct drm_encoder_helper_funcs oaktrail_lvds_helper_funcs = {
@@ -325,8 +322,8 @@ static void oaktrail_lvds_get_configuration_mode(struct drm_device *dev,
325void oaktrail_lvds_init(struct drm_device *dev, 322void oaktrail_lvds_init(struct drm_device *dev,
326 struct psb_intel_mode_device *mode_dev) 323 struct psb_intel_mode_device *mode_dev)
327{ 324{
328 struct psb_intel_encoder *psb_intel_encoder; 325 struct gma_encoder *gma_encoder;
329 struct psb_intel_connector *psb_intel_connector; 326 struct gma_connector *gma_connector;
330 struct drm_connector *connector; 327 struct drm_connector *connector;
331 struct drm_encoder *encoder; 328 struct drm_encoder *encoder;
332 struct drm_psb_private *dev_priv = dev->dev_private; 329 struct drm_psb_private *dev_priv = dev->dev_private;
@@ -334,16 +331,16 @@ void oaktrail_lvds_init(struct drm_device *dev,
334 struct i2c_adapter *i2c_adap; 331 struct i2c_adapter *i2c_adap;
335 struct drm_display_mode *scan; /* *modes, *bios_mode; */ 332 struct drm_display_mode *scan; /* *modes, *bios_mode; */
336 333
337 psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL); 334 gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL);
338 if (!psb_intel_encoder) 335 if (!gma_encoder)
339 return; 336 return;
340 337
341 psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL); 338 gma_connector = kzalloc(sizeof(struct gma_connector), GFP_KERNEL);
342 if (!psb_intel_connector) 339 if (!gma_connector)
343 goto failed_connector; 340 goto failed_connector;
344 341
345 connector = &psb_intel_connector->base; 342 connector = &gma_connector->base;
346 encoder = &psb_intel_encoder->base; 343 encoder = &gma_encoder->base;
347 dev_priv->is_lvds_on = true; 344 dev_priv->is_lvds_on = true;
348 drm_connector_init(dev, connector, 345 drm_connector_init(dev, connector,
349 &psb_intel_lvds_connector_funcs, 346 &psb_intel_lvds_connector_funcs,
@@ -352,9 +349,8 @@ void oaktrail_lvds_init(struct drm_device *dev,
352 drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs, 349 drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs,
353 DRM_MODE_ENCODER_LVDS); 350 DRM_MODE_ENCODER_LVDS);
354 351
355 psb_intel_connector_attach_encoder(psb_intel_connector, 352 gma_connector_attach_encoder(gma_connector, gma_encoder);
356 psb_intel_encoder); 353 gma_encoder->type = INTEL_OUTPUT_LVDS;
357 psb_intel_encoder->type = INTEL_OUTPUT_LVDS;
358 354
359 drm_encoder_helper_add(encoder, &oaktrail_lvds_helper_funcs); 355 drm_encoder_helper_add(encoder, &oaktrail_lvds_helper_funcs);
360 drm_connector_helper_add(connector, 356 drm_connector_helper_add(connector,
@@ -434,15 +430,15 @@ out:
434 430
435failed_find: 431failed_find:
436 dev_dbg(dev->dev, "No LVDS modes found, disabling.\n"); 432 dev_dbg(dev->dev, "No LVDS modes found, disabling.\n");
437 if (psb_intel_encoder->ddc_bus) 433 if (gma_encoder->ddc_bus)
438 psb_intel_i2c_destroy(psb_intel_encoder->ddc_bus); 434 psb_intel_i2c_destroy(gma_encoder->ddc_bus);
439 435
440/* failed_ddc: */ 436/* failed_ddc: */
441 437
442 drm_encoder_cleanup(encoder); 438 drm_encoder_cleanup(encoder);
443 drm_connector_cleanup(connector); 439 drm_connector_cleanup(connector);
444 kfree(psb_intel_connector); 440 kfree(gma_connector);
445failed_connector: 441failed_connector:
446 kfree(psb_intel_encoder); 442 kfree(gma_encoder);
447} 443}
448 444
diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
index f6f534b4197e..697678619bd1 100644
--- a/drivers/gpu/drm/gma500/psb_device.c
+++ b/drivers/gpu/drm/gma500/psb_device.c
@@ -25,7 +25,7 @@
25#include "psb_reg.h" 25#include "psb_reg.h"
26#include "psb_intel_reg.h" 26#include "psb_intel_reg.h"
27#include "intel_bios.h" 27#include "intel_bios.h"
28 28#include "psb_device.h"
29 29
30static int psb_output_init(struct drm_device *dev) 30static int psb_output_init(struct drm_device *dev)
31{ 31{
@@ -380,6 +380,7 @@ const struct psb_ops psb_chip_ops = {
380 380
381 .crtc_helper = &psb_intel_helper_funcs, 381 .crtc_helper = &psb_intel_helper_funcs,
382 .crtc_funcs = &psb_intel_crtc_funcs, 382 .crtc_funcs = &psb_intel_crtc_funcs,
383 .clock_funcs = &psb_clock_funcs,
383 384
384 .output_init = psb_output_init, 385 .output_init = psb_output_init,
385 386
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.h b/drivers/gpu/drm/gma500/psb_device.h
index 3724b971e91c..35e304c7f85a 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.h
+++ b/drivers/gpu/drm/gma500/psb_device.h
@@ -1,4 +1,6 @@
1/* copyright (c) 2008, Intel Corporation 1/*
2 * Copyright © 2013 Patrik Jakobsson
3 * Copyright © 2011 Intel Corporation
2 * 4 *
3 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
4 * under the terms and conditions of the GNU General Public License, 6 * under the terms and conditions of the GNU General Public License,
@@ -12,14 +14,11 @@
12 * You should have received a copy of the GNU General Public License along with 14 * You should have received a copy of the GNU General Public License along with
13 * this program; if not, write to the Free Software Foundation, Inc., 15 * this program; if not, write to the Free Software Foundation, Inc.,
14 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
15 *
16 * Authors:
17 * Eric Anholt <eric@anholt.net>
18 */ 17 */
19 18
20#ifndef _INTEL_DISPLAY_H_ 19#ifndef _PSB_DEVICE_H_
21#define _INTEL_DISPLAY_H_ 20#define _PSB_DEVICE_H_
22 21
23bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type); 22extern const struct gma_clock_funcs psb_clock_funcs;
24 23
25#endif 24#endif
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index bddea5807442..fcb4e9ff1f20 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -131,7 +131,7 @@ static int psb_gamma_ioctl(struct drm_device *dev, void *data,
131static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data, 131static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data,
132 struct drm_file *file_priv); 132 struct drm_file *file_priv);
133 133
134static struct drm_ioctl_desc psb_ioctls[] = { 134static const struct drm_ioctl_desc psb_ioctls[] = {
135 DRM_IOCTL_DEF_DRV(GMA_ADB, psb_adb_ioctl, DRM_AUTH), 135 DRM_IOCTL_DEF_DRV(GMA_ADB, psb_adb_ioctl, DRM_AUTH),
136 DRM_IOCTL_DEF_DRV(GMA_MODE_OPERATION, psb_mode_operation_ioctl, 136 DRM_IOCTL_DEF_DRV(GMA_MODE_OPERATION, psb_mode_operation_ioctl,
137 DRM_AUTH), 137 DRM_AUTH),
@@ -270,7 +270,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
270 unsigned long irqflags; 270 unsigned long irqflags;
271 int ret = -ENOMEM; 271 int ret = -ENOMEM;
272 struct drm_connector *connector; 272 struct drm_connector *connector;
273 struct psb_intel_encoder *psb_intel_encoder; 273 struct gma_encoder *gma_encoder;
274 274
275 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); 275 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
276 if (dev_priv == NULL) 276 if (dev_priv == NULL)
@@ -372,9 +372,9 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
372 /* Only add backlight support if we have LVDS output */ 372 /* Only add backlight support if we have LVDS output */
373 list_for_each_entry(connector, &dev->mode_config.connector_list, 373 list_for_each_entry(connector, &dev->mode_config.connector_list,
374 head) { 374 head) {
375 psb_intel_encoder = psb_intel_attached_encoder(connector); 375 gma_encoder = gma_attached_encoder(connector);
376 376
377 switch (psb_intel_encoder->type) { 377 switch (gma_encoder->type) {
378 case INTEL_OUTPUT_LVDS: 378 case INTEL_OUTPUT_LVDS:
379 case INTEL_OUTPUT_MIPI: 379 case INTEL_OUTPUT_MIPI:
380 ret = gma_backlight_init(dev); 380 ret = gma_backlight_init(dev);
@@ -441,7 +441,7 @@ static int psb_gamma_ioctl(struct drm_device *dev, void *data,
441 struct drm_mode_object *obj; 441 struct drm_mode_object *obj;
442 struct drm_crtc *crtc; 442 struct drm_crtc *crtc;
443 struct drm_connector *connector; 443 struct drm_connector *connector;
444 struct psb_intel_crtc *psb_intel_crtc; 444 struct gma_crtc *gma_crtc;
445 int i = 0; 445 int i = 0;
446 int32_t obj_id; 446 int32_t obj_id;
447 447
@@ -454,12 +454,12 @@ static int psb_gamma_ioctl(struct drm_device *dev, void *data,
454 454
455 connector = obj_to_connector(obj); 455 connector = obj_to_connector(obj);
456 crtc = connector->encoder->crtc; 456 crtc = connector->encoder->crtc;
457 psb_intel_crtc = to_psb_intel_crtc(crtc); 457 gma_crtc = to_gma_crtc(crtc);
458 458
459 for (i = 0; i < 256; i++) 459 for (i = 0; i < 256; i++)
460 psb_intel_crtc->lut_adj[i] = lut_arg->lut[i]; 460 gma_crtc->lut_adj[i] = lut_arg->lut[i];
461 461
462 psb_intel_crtc_load_lut(crtc); 462 gma_crtc_load_lut(crtc);
463 463
464 return 0; 464 return 0;
465} 465}
@@ -622,13 +622,12 @@ static const struct file_operations psb_gem_fops = {
622 .unlocked_ioctl = psb_unlocked_ioctl, 622 .unlocked_ioctl = psb_unlocked_ioctl,
623 .mmap = drm_gem_mmap, 623 .mmap = drm_gem_mmap,
624 .poll = drm_poll, 624 .poll = drm_poll,
625 .fasync = drm_fasync,
626 .read = drm_read, 625 .read = drm_read,
627}; 626};
628 627
629static struct drm_driver driver = { 628static struct drm_driver driver = {
630 .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | \ 629 .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | \
631 DRIVER_IRQ_VBL | DRIVER_MODESET | DRIVER_GEM , 630 DRIVER_MODESET | DRIVER_GEM ,
632 .load = psb_driver_load, 631 .load = psb_driver_load,
633 .unload = psb_driver_unload, 632 .unload = psb_driver_unload,
634 633
@@ -652,7 +651,7 @@ static struct drm_driver driver = {
652 .gem_vm_ops = &psb_gem_vm_ops, 651 .gem_vm_ops = &psb_gem_vm_ops,
653 .dumb_create = psb_gem_dumb_create, 652 .dumb_create = psb_gem_dumb_create,
654 .dumb_map_offset = psb_gem_dumb_map_gtt, 653 .dumb_map_offset = psb_gem_dumb_map_gtt,
655 .dumb_destroy = psb_gem_dumb_destroy, 654 .dumb_destroy = drm_gem_dumb_destroy,
656 .fops = &psb_gem_fops, 655 .fops = &psb_gem_fops,
657 .name = DRIVER_NAME, 656 .name = DRIVER_NAME,
658 .desc = DRIVER_DESC, 657 .desc = DRIVER_DESC,
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 6053b8abcd12..4535ac7708f8 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -27,6 +27,7 @@
27#include <drm/gma_drm.h> 27#include <drm/gma_drm.h>
28#include "psb_reg.h" 28#include "psb_reg.h"
29#include "psb_intel_drv.h" 29#include "psb_intel_drv.h"
30#include "gma_display.h"
30#include "intel_bios.h" 31#include "intel_bios.h"
31#include "gtt.h" 32#include "gtt.h"
32#include "power.h" 33#include "power.h"
@@ -46,6 +47,7 @@ enum {
46#define IS_PSB(dev) (((dev)->pci_device & 0xfffe) == 0x8108) 47#define IS_PSB(dev) (((dev)->pci_device & 0xfffe) == 0x8108)
47#define IS_MRST(dev) (((dev)->pci_device & 0xfffc) == 0x4100) 48#define IS_MRST(dev) (((dev)->pci_device & 0xfffc) == 0x4100)
48#define IS_MFLD(dev) (((dev)->pci_device & 0xfff8) == 0x0130) 49#define IS_MFLD(dev) (((dev)->pci_device & 0xfff8) == 0x0130)
50#define IS_CDV(dev) (((dev)->pci_device & 0xfff0) == 0x0be0)
49 51
50/* 52/*
51 * Driver definitions 53 * Driver definitions
@@ -675,6 +677,7 @@ struct psb_ops {
675 /* Sub functions */ 677 /* Sub functions */
676 struct drm_crtc_helper_funcs const *crtc_helper; 678 struct drm_crtc_helper_funcs const *crtc_helper;
677 struct drm_crtc_funcs const *crtc_funcs; 679 struct drm_crtc_funcs const *crtc_funcs;
680 const struct gma_clock_funcs *clock_funcs;
678 681
679 /* Setup hooks */ 682 /* Setup hooks */
680 int (*chip_setup)(struct drm_device *dev); 683 int (*chip_setup)(struct drm_device *dev);
@@ -692,6 +695,8 @@ struct psb_ops {
692 int (*restore_regs)(struct drm_device *dev); 695 int (*restore_regs)(struct drm_device *dev);
693 int (*power_up)(struct drm_device *dev); 696 int (*power_up)(struct drm_device *dev);
694 int (*power_down)(struct drm_device *dev); 697 int (*power_down)(struct drm_device *dev);
698 void (*update_wm)(struct drm_device *dev, struct drm_crtc *crtc);
699 void (*disable_sr)(struct drm_device *dev);
695 700
696 void (*lvds_bl_power)(struct drm_device *dev, bool on); 701 void (*lvds_bl_power)(struct drm_device *dev, bool on);
697#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE 702#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
@@ -838,8 +843,6 @@ extern int psb_gem_get_aperture(struct drm_device *dev, void *data,
838 struct drm_file *file); 843 struct drm_file *file);
839extern int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev, 844extern int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
840 struct drm_mode_create_dumb *args); 845 struct drm_mode_create_dumb *args);
841extern int psb_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
842 uint32_t handle);
843extern int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev, 846extern int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
844 uint32_t handle, uint64_t *offset); 847 uint32_t handle, uint64_t *offset);
845extern int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); 848extern int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 6666493789d1..97f8a03fee43 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -19,46 +19,19 @@
19 */ 19 */
20 20
21#include <linux/i2c.h> 21#include <linux/i2c.h>
22#include <linux/pm_runtime.h>
23 22
24#include <drm/drmP.h> 23#include <drm/drmP.h>
25#include "framebuffer.h" 24#include "framebuffer.h"
26#include "psb_drv.h" 25#include "psb_drv.h"
27#include "psb_intel_drv.h" 26#include "psb_intel_drv.h"
28#include "psb_intel_reg.h" 27#include "psb_intel_reg.h"
29#include "psb_intel_display.h" 28#include "gma_display.h"
30#include "power.h" 29#include "power.h"
31 30
32struct psb_intel_clock_t {
33 /* given values */
34 int n;
35 int m1, m2;
36 int p1, p2;
37 /* derived values */
38 int dot;
39 int vco;
40 int m;
41 int p;
42};
43
44struct psb_intel_range_t {
45 int min, max;
46};
47
48struct psb_intel_p2_t {
49 int dot_limit;
50 int p2_slow, p2_fast;
51};
52
53struct psb_intel_limit_t {
54 struct psb_intel_range_t dot, vco, n, m, m1, m2, p, p1;
55 struct psb_intel_p2_t p2;
56};
57
58#define INTEL_LIMIT_I9XX_SDVO_DAC 0 31#define INTEL_LIMIT_I9XX_SDVO_DAC 0
59#define INTEL_LIMIT_I9XX_LVDS 1 32#define INTEL_LIMIT_I9XX_LVDS 1
60 33
61static const struct psb_intel_limit_t psb_intel_limits[] = { 34static const struct gma_limit_t psb_intel_limits[] = {
62 { /* INTEL_LIMIT_I9XX_SDVO_DAC */ 35 { /* INTEL_LIMIT_I9XX_SDVO_DAC */
63 .dot = {.min = 20000, .max = 400000}, 36 .dot = {.min = 20000, .max = 400000},
64 .vco = {.min = 1400000, .max = 2800000}, 37 .vco = {.min = 1400000, .max = 2800000},
@@ -68,8 +41,8 @@ static const struct psb_intel_limit_t psb_intel_limits[] = {
68 .m2 = {.min = 3, .max = 7}, 41 .m2 = {.min = 3, .max = 7},
69 .p = {.min = 5, .max = 80}, 42 .p = {.min = 5, .max = 80},
70 .p1 = {.min = 1, .max = 8}, 43 .p1 = {.min = 1, .max = 8},
71 .p2 = {.dot_limit = 200000, 44 .p2 = {.dot_limit = 200000, .p2_slow = 10, .p2_fast = 5},
72 .p2_slow = 10, .p2_fast = 5}, 45 .find_pll = gma_find_best_pll,
73 }, 46 },
74 { /* INTEL_LIMIT_I9XX_LVDS */ 47 { /* INTEL_LIMIT_I9XX_LVDS */
75 .dot = {.min = 20000, .max = 400000}, 48 .dot = {.min = 20000, .max = 400000},
@@ -83,23 +56,24 @@ static const struct psb_intel_limit_t psb_intel_limits[] = {
83 /* The single-channel range is 25-112Mhz, and dual-channel 56 /* The single-channel range is 25-112Mhz, and dual-channel
84 * is 80-224Mhz. Prefer single channel as much as possible. 57 * is 80-224Mhz. Prefer single channel as much as possible.
85 */ 58 */
86 .p2 = {.dot_limit = 112000, 59 .p2 = {.dot_limit = 112000, .p2_slow = 14, .p2_fast = 7},
87 .p2_slow = 14, .p2_fast = 7}, 60 .find_pll = gma_find_best_pll,
88 }, 61 },
89}; 62};
90 63
91static const struct psb_intel_limit_t *psb_intel_limit(struct drm_crtc *crtc) 64static const struct gma_limit_t *psb_intel_limit(struct drm_crtc *crtc,
65 int refclk)
92{ 66{
93 const struct psb_intel_limit_t *limit; 67 const struct gma_limit_t *limit;
94 68
95 if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 69 if (gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
96 limit = &psb_intel_limits[INTEL_LIMIT_I9XX_LVDS]; 70 limit = &psb_intel_limits[INTEL_LIMIT_I9XX_LVDS];
97 else 71 else
98 limit = &psb_intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC]; 72 limit = &psb_intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
99 return limit; 73 return limit;
100} 74}
101 75
102static void psb_intel_clock(int refclk, struct psb_intel_clock_t *clock) 76static void psb_intel_clock(int refclk, struct gma_clock_t *clock)
103{ 77{
104 clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2); 78 clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
105 clock->p = clock->p1 * clock->p2; 79 clock->p = clock->p1 * clock->p2;
@@ -108,353 +82,6 @@ static void psb_intel_clock(int refclk, struct psb_intel_clock_t *clock)
108} 82}
109 83
110/** 84/**
111 * Returns whether any output on the specified pipe is of the specified type
112 */
113bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type)
114{
115 struct drm_device *dev = crtc->dev;
116 struct drm_mode_config *mode_config = &dev->mode_config;
117 struct drm_connector *l_entry;
118
119 list_for_each_entry(l_entry, &mode_config->connector_list, head) {
120 if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
121 struct psb_intel_encoder *psb_intel_encoder =
122 psb_intel_attached_encoder(l_entry);
123 if (psb_intel_encoder->type == type)
124 return true;
125 }
126 }
127 return false;
128}
129
130#define INTELPllInvalid(s) { /* ErrorF (s) */; return false; }
131/**
132 * Returns whether the given set of divisors are valid for a given refclk with
133 * the given connectors.
134 */
135
136static bool psb_intel_PLL_is_valid(struct drm_crtc *crtc,
137 struct psb_intel_clock_t *clock)
138{
139 const struct psb_intel_limit_t *limit = psb_intel_limit(crtc);
140
141 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
142 INTELPllInvalid("p1 out of range\n");
143 if (clock->p < limit->p.min || limit->p.max < clock->p)
144 INTELPllInvalid("p out of range\n");
145 if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
146 INTELPllInvalid("m2 out of range\n");
147 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
148 INTELPllInvalid("m1 out of range\n");
149 if (clock->m1 <= clock->m2)
150 INTELPllInvalid("m1 <= m2\n");
151 if (clock->m < limit->m.min || limit->m.max < clock->m)
152 INTELPllInvalid("m out of range\n");
153 if (clock->n < limit->n.min || limit->n.max < clock->n)
154 INTELPllInvalid("n out of range\n");
155 if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
156 INTELPllInvalid("vco out of range\n");
157 /* XXX: We may need to be checking "Dot clock"
158 * depending on the multiplier, connector, etc.,
159 * rather than just a single range.
160 */
161 if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
162 INTELPllInvalid("dot out of range\n");
163
164 return true;
165}
166
167/**
168 * Returns a set of divisors for the desired target clock with the given
169 * refclk, or FALSE. The returned values represent the clock equation:
170 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
171 */
172static bool psb_intel_find_best_PLL(struct drm_crtc *crtc, int target,
173 int refclk,
174 struct psb_intel_clock_t *best_clock)
175{
176 struct drm_device *dev = crtc->dev;
177 struct psb_intel_clock_t clock;
178 const struct psb_intel_limit_t *limit = psb_intel_limit(crtc);
179 int err = target;
180
181 if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
182 (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
183 /*
184 * For LVDS, if the panel is on, just rely on its current
185 * settings for dual-channel. We haven't figured out how to
186 * reliably set up different single/dual channel state, if we
187 * even can.
188 */
189 if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
190 LVDS_CLKB_POWER_UP)
191 clock.p2 = limit->p2.p2_fast;
192 else
193 clock.p2 = limit->p2.p2_slow;
194 } else {
195 if (target < limit->p2.dot_limit)
196 clock.p2 = limit->p2.p2_slow;
197 else
198 clock.p2 = limit->p2.p2_fast;
199 }
200
201 memset(best_clock, 0, sizeof(*best_clock));
202
203 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
204 clock.m1++) {
205 for (clock.m2 = limit->m2.min;
206 clock.m2 < clock.m1 && clock.m2 <= limit->m2.max;
207 clock.m2++) {
208 for (clock.n = limit->n.min;
209 clock.n <= limit->n.max; clock.n++) {
210 for (clock.p1 = limit->p1.min;
211 clock.p1 <= limit->p1.max;
212 clock.p1++) {
213 int this_err;
214
215 psb_intel_clock(refclk, &clock);
216
217 if (!psb_intel_PLL_is_valid
218 (crtc, &clock))
219 continue;
220
221 this_err = abs(clock.dot - target);
222 if (this_err < err) {
223 *best_clock = clock;
224 err = this_err;
225 }
226 }
227 }
228 }
229 }
230
231 return err != target;
232}
233
234void psb_intel_wait_for_vblank(struct drm_device *dev)
235{
236 /* Wait for 20ms, i.e. one cycle at 50hz. */
237 mdelay(20);
238}
239
240static int psb_intel_pipe_set_base(struct drm_crtc *crtc,
241 int x, int y, struct drm_framebuffer *old_fb)
242{
243 struct drm_device *dev = crtc->dev;
244 struct drm_psb_private *dev_priv = dev->dev_private;
245 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
246 struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
247 int pipe = psb_intel_crtc->pipe;
248 const struct psb_offset *map = &dev_priv->regmap[pipe];
249 unsigned long start, offset;
250 u32 dspcntr;
251 int ret = 0;
252
253 if (!gma_power_begin(dev, true))
254 return 0;
255
256 /* no fb bound */
257 if (!crtc->fb) {
258 dev_dbg(dev->dev, "No FB bound\n");
259 goto psb_intel_pipe_cleaner;
260 }
261
262 /* We are displaying this buffer, make sure it is actually loaded
263 into the GTT */
264 ret = psb_gtt_pin(psbfb->gtt);
265 if (ret < 0)
266 goto psb_intel_pipe_set_base_exit;
267 start = psbfb->gtt->offset;
268
269 offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
270
271 REG_WRITE(map->stride, crtc->fb->pitches[0]);
272
273 dspcntr = REG_READ(map->cntr);
274 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
275
276 switch (crtc->fb->bits_per_pixel) {
277 case 8:
278 dspcntr |= DISPPLANE_8BPP;
279 break;
280 case 16:
281 if (crtc->fb->depth == 15)
282 dspcntr |= DISPPLANE_15_16BPP;
283 else
284 dspcntr |= DISPPLANE_16BPP;
285 break;
286 case 24:
287 case 32:
288 dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
289 break;
290 default:
291 dev_err(dev->dev, "Unknown color depth\n");
292 ret = -EINVAL;
293 psb_gtt_unpin(psbfb->gtt);
294 goto psb_intel_pipe_set_base_exit;
295 }
296 REG_WRITE(map->cntr, dspcntr);
297
298 REG_WRITE(map->base, start + offset);
299 REG_READ(map->base);
300
301psb_intel_pipe_cleaner:
302 /* If there was a previous display we can now unpin it */
303 if (old_fb)
304 psb_gtt_unpin(to_psb_fb(old_fb)->gtt);
305
306psb_intel_pipe_set_base_exit:
307 gma_power_end(dev);
308 return ret;
309}
310
311/**
312 * Sets the power management mode of the pipe and plane.
313 *
314 * This code should probably grow support for turning the cursor off and back
315 * on appropriately at the same time as we're turning the pipe off/on.
316 */
317static void psb_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
318{
319 struct drm_device *dev = crtc->dev;
320 struct drm_psb_private *dev_priv = dev->dev_private;
321 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
322 int pipe = psb_intel_crtc->pipe;
323 const struct psb_offset *map = &dev_priv->regmap[pipe];
324 u32 temp;
325
326 /* XXX: When our outputs are all unaware of DPMS modes other than off
327 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
328 */
329 switch (mode) {
330 case DRM_MODE_DPMS_ON:
331 case DRM_MODE_DPMS_STANDBY:
332 case DRM_MODE_DPMS_SUSPEND:
333 /* Enable the DPLL */
334 temp = REG_READ(map->dpll);
335 if ((temp & DPLL_VCO_ENABLE) == 0) {
336 REG_WRITE(map->dpll, temp);
337 REG_READ(map->dpll);
338 /* Wait for the clocks to stabilize. */
339 udelay(150);
340 REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
341 REG_READ(map->dpll);
342 /* Wait for the clocks to stabilize. */
343 udelay(150);
344 REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
345 REG_READ(map->dpll);
346 /* Wait for the clocks to stabilize. */
347 udelay(150);
348 }
349
350 /* Enable the pipe */
351 temp = REG_READ(map->conf);
352 if ((temp & PIPEACONF_ENABLE) == 0)
353 REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);
354
355 /* Enable the plane */
356 temp = REG_READ(map->cntr);
357 if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
358 REG_WRITE(map->cntr,
359 temp | DISPLAY_PLANE_ENABLE);
360 /* Flush the plane changes */
361 REG_WRITE(map->base, REG_READ(map->base));
362 }
363
364 psb_intel_crtc_load_lut(crtc);
365
366 /* Give the overlay scaler a chance to enable
367 * if it's on this pipe */
368 /* psb_intel_crtc_dpms_video(crtc, true); TODO */
369 break;
370 case DRM_MODE_DPMS_OFF:
371 /* Give the overlay scaler a chance to disable
372 * if it's on this pipe */
373 /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
374
375 /* Disable the VGA plane that we never use */
376 REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
377
378 /* Disable display plane */
379 temp = REG_READ(map->cntr);
380 if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
381 REG_WRITE(map->cntr,
382 temp & ~DISPLAY_PLANE_ENABLE);
383 /* Flush the plane changes */
384 REG_WRITE(map->base, REG_READ(map->base));
385 REG_READ(map->base);
386 }
387
388 /* Next, disable display pipes */
389 temp = REG_READ(map->conf);
390 if ((temp & PIPEACONF_ENABLE) != 0) {
391 REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
392 REG_READ(map->conf);
393 }
394
395 /* Wait for vblank for the disable to take effect. */
396 psb_intel_wait_for_vblank(dev);
397
398 temp = REG_READ(map->dpll);
399 if ((temp & DPLL_VCO_ENABLE) != 0) {
400 REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
401 REG_READ(map->dpll);
402 }
403
404 /* Wait for the clocks to turn off. */
405 udelay(150);
406 break;
407 }
408
409 /*Set FIFO Watermarks*/
410 REG_WRITE(DSPARB, 0x3F3E);
411}
412
413static void psb_intel_crtc_prepare(struct drm_crtc *crtc)
414{
415 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
416 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
417}
418
419static void psb_intel_crtc_commit(struct drm_crtc *crtc)
420{
421 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
422 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
423}
424
425void psb_intel_encoder_prepare(struct drm_encoder *encoder)
426{
427 struct drm_encoder_helper_funcs *encoder_funcs =
428 encoder->helper_private;
429 /* lvds has its own version of prepare see psb_intel_lvds_prepare */
430 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
431}
432
433void psb_intel_encoder_commit(struct drm_encoder *encoder)
434{
435 struct drm_encoder_helper_funcs *encoder_funcs =
436 encoder->helper_private;
437 /* lvds has its own version of commit see psb_intel_lvds_commit */
438 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
439}
440
441void psb_intel_encoder_destroy(struct drm_encoder *encoder)
442{
443 struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
444
445 drm_encoder_cleanup(encoder);
446 kfree(intel_encoder);
447}
448
449static bool psb_intel_crtc_mode_fixup(struct drm_crtc *crtc,
450 const struct drm_display_mode *mode,
451 struct drm_display_mode *adjusted_mode)
452{
453 return true;
454}
455
456
457/**
458 * Return the pipe currently connected to the panel fitter, 85 * Return the pipe currently connected to the panel fitter,
459 * or -1 if the panel fitter is not present or not in use 86 * or -1 if the panel fitter is not present or not in use
460 */ 87 */
@@ -479,17 +106,18 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
479{ 106{
480 struct drm_device *dev = crtc->dev; 107 struct drm_device *dev = crtc->dev;
481 struct drm_psb_private *dev_priv = dev->dev_private; 108 struct drm_psb_private *dev_priv = dev->dev_private;
482 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 109 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
483 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; 110 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
484 int pipe = psb_intel_crtc->pipe; 111 int pipe = gma_crtc->pipe;
485 const struct psb_offset *map = &dev_priv->regmap[pipe]; 112 const struct psb_offset *map = &dev_priv->regmap[pipe];
486 int refclk; 113 int refclk;
487 struct psb_intel_clock_t clock; 114 struct gma_clock_t clock;
488 u32 dpll = 0, fp = 0, dspcntr, pipeconf; 115 u32 dpll = 0, fp = 0, dspcntr, pipeconf;
489 bool ok, is_sdvo = false; 116 bool ok, is_sdvo = false;
490 bool is_lvds = false, is_tv = false; 117 bool is_lvds = false, is_tv = false;
491 struct drm_mode_config *mode_config = &dev->mode_config; 118 struct drm_mode_config *mode_config = &dev->mode_config;
492 struct drm_connector *connector; 119 struct drm_connector *connector;
120 const struct gma_limit_t *limit;
493 121
494 /* No scan out no play */ 122 /* No scan out no play */
495 if (crtc->fb == NULL) { 123 if (crtc->fb == NULL) {
@@ -498,14 +126,13 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
498 } 126 }
499 127
500 list_for_each_entry(connector, &mode_config->connector_list, head) { 128 list_for_each_entry(connector, &mode_config->connector_list, head) {
501 struct psb_intel_encoder *psb_intel_encoder = 129 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
502 psb_intel_attached_encoder(connector);
503 130
504 if (!connector->encoder 131 if (!connector->encoder
505 || connector->encoder->crtc != crtc) 132 || connector->encoder->crtc != crtc)
506 continue; 133 continue;
507 134
508 switch (psb_intel_encoder->type) { 135 switch (gma_encoder->type) {
509 case INTEL_OUTPUT_LVDS: 136 case INTEL_OUTPUT_LVDS:
510 is_lvds = true; 137 is_lvds = true;
511 break; 138 break;
@@ -520,10 +147,13 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
520 147
521 refclk = 96000; 148 refclk = 96000;
522 149
523 ok = psb_intel_find_best_PLL(crtc, adjusted_mode->clock, refclk, 150 limit = gma_crtc->clock_funcs->limit(crtc, refclk);
151
152 ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk,
524 &clock); 153 &clock);
525 if (!ok) { 154 if (!ok) {
526 dev_err(dev->dev, "Couldn't find PLL settings for mode!\n"); 155 DRM_ERROR("Couldn't find PLL settings for mode! target: %d, actual: %d",
156 adjusted_mode->clock, clock.dot);
527 return 0; 157 return 0;
528 } 158 }
529 159
@@ -661,368 +291,29 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
661 REG_WRITE(map->conf, pipeconf); 291 REG_WRITE(map->conf, pipeconf);
662 REG_READ(map->conf); 292 REG_READ(map->conf);
663 293
664 psb_intel_wait_for_vblank(dev); 294 gma_wait_for_vblank(dev);
665 295
666 REG_WRITE(map->cntr, dspcntr); 296 REG_WRITE(map->cntr, dspcntr);
667 297
668 /* Flush the plane changes */ 298 /* Flush the plane changes */
669 crtc_funcs->mode_set_base(crtc, x, y, old_fb); 299 crtc_funcs->mode_set_base(crtc, x, y, old_fb);
670 300
671 psb_intel_wait_for_vblank(dev); 301 gma_wait_for_vblank(dev);
672
673 return 0;
674}
675
676/** Loads the palette/gamma unit for the CRTC with the prepared values */
677void psb_intel_crtc_load_lut(struct drm_crtc *crtc)
678{
679 struct drm_device *dev = crtc->dev;
680 struct drm_psb_private *dev_priv = dev->dev_private;
681 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
682 const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
683 int palreg = map->palette;
684 int i;
685
686 /* The clocks have to be on to load the palette. */
687 if (!crtc->enabled)
688 return;
689
690 switch (psb_intel_crtc->pipe) {
691 case 0:
692 case 1:
693 break;
694 default:
695 dev_err(dev->dev, "Illegal Pipe Number.\n");
696 return;
697 }
698
699 if (gma_power_begin(dev, false)) {
700 for (i = 0; i < 256; i++) {
701 REG_WRITE(palreg + 4 * i,
702 ((psb_intel_crtc->lut_r[i] +
703 psb_intel_crtc->lut_adj[i]) << 16) |
704 ((psb_intel_crtc->lut_g[i] +
705 psb_intel_crtc->lut_adj[i]) << 8) |
706 (psb_intel_crtc->lut_b[i] +
707 psb_intel_crtc->lut_adj[i]));
708 }
709 gma_power_end(dev);
710 } else {
711 for (i = 0; i < 256; i++) {
712 dev_priv->regs.pipe[0].palette[i] =
713 ((psb_intel_crtc->lut_r[i] +
714 psb_intel_crtc->lut_adj[i]) << 16) |
715 ((psb_intel_crtc->lut_g[i] +
716 psb_intel_crtc->lut_adj[i]) << 8) |
717 (psb_intel_crtc->lut_b[i] +
718 psb_intel_crtc->lut_adj[i]);
719 }
720
721 }
722}
723
724/**
725 * Save HW states of giving crtc
726 */
727static void psb_intel_crtc_save(struct drm_crtc *crtc)
728{
729 struct drm_device *dev = crtc->dev;
730 struct drm_psb_private *dev_priv = dev->dev_private;
731 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
732 struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
733 const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
734 uint32_t paletteReg;
735 int i;
736
737 if (!crtc_state) {
738 dev_err(dev->dev, "No CRTC state found\n");
739 return;
740 }
741
742 crtc_state->saveDSPCNTR = REG_READ(map->cntr);
743 crtc_state->savePIPECONF = REG_READ(map->conf);
744 crtc_state->savePIPESRC = REG_READ(map->src);
745 crtc_state->saveFP0 = REG_READ(map->fp0);
746 crtc_state->saveFP1 = REG_READ(map->fp1);
747 crtc_state->saveDPLL = REG_READ(map->dpll);
748 crtc_state->saveHTOTAL = REG_READ(map->htotal);
749 crtc_state->saveHBLANK = REG_READ(map->hblank);
750 crtc_state->saveHSYNC = REG_READ(map->hsync);
751 crtc_state->saveVTOTAL = REG_READ(map->vtotal);
752 crtc_state->saveVBLANK = REG_READ(map->vblank);
753 crtc_state->saveVSYNC = REG_READ(map->vsync);
754 crtc_state->saveDSPSTRIDE = REG_READ(map->stride);
755
756 /*NOTE: DSPSIZE DSPPOS only for psb*/
757 crtc_state->saveDSPSIZE = REG_READ(map->size);
758 crtc_state->saveDSPPOS = REG_READ(map->pos);
759
760 crtc_state->saveDSPBASE = REG_READ(map->base);
761
762 paletteReg = map->palette;
763 for (i = 0; i < 256; ++i)
764 crtc_state->savePalette[i] = REG_READ(paletteReg + (i << 2));
765}
766
767/**
768 * Restore HW states of giving crtc
769 */
770static void psb_intel_crtc_restore(struct drm_crtc *crtc)
771{
772 struct drm_device *dev = crtc->dev;
773 struct drm_psb_private *dev_priv = dev->dev_private;
774 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
775 struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
776 const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
777 uint32_t paletteReg;
778 int i;
779
780 if (!crtc_state) {
781 dev_err(dev->dev, "No crtc state\n");
782 return;
783 }
784
785 if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
786 REG_WRITE(map->dpll,
787 crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
788 REG_READ(map->dpll);
789 udelay(150);
790 }
791
792 REG_WRITE(map->fp0, crtc_state->saveFP0);
793 REG_READ(map->fp0);
794
795 REG_WRITE(map->fp1, crtc_state->saveFP1);
796 REG_READ(map->fp1);
797
798 REG_WRITE(map->dpll, crtc_state->saveDPLL);
799 REG_READ(map->dpll);
800 udelay(150);
801
802 REG_WRITE(map->htotal, crtc_state->saveHTOTAL);
803 REG_WRITE(map->hblank, crtc_state->saveHBLANK);
804 REG_WRITE(map->hsync, crtc_state->saveHSYNC);
805 REG_WRITE(map->vtotal, crtc_state->saveVTOTAL);
806 REG_WRITE(map->vblank, crtc_state->saveVBLANK);
807 REG_WRITE(map->vsync, crtc_state->saveVSYNC);
808 REG_WRITE(map->stride, crtc_state->saveDSPSTRIDE);
809
810 REG_WRITE(map->size, crtc_state->saveDSPSIZE);
811 REG_WRITE(map->pos, crtc_state->saveDSPPOS);
812
813 REG_WRITE(map->src, crtc_state->savePIPESRC);
814 REG_WRITE(map->base, crtc_state->saveDSPBASE);
815 REG_WRITE(map->conf, crtc_state->savePIPECONF);
816
817 psb_intel_wait_for_vblank(dev);
818
819 REG_WRITE(map->cntr, crtc_state->saveDSPCNTR);
820 REG_WRITE(map->base, crtc_state->saveDSPBASE);
821
822 psb_intel_wait_for_vblank(dev);
823
824 paletteReg = map->palette;
825 for (i = 0; i < 256; ++i)
826 REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]);
827}
828
829static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
830 struct drm_file *file_priv,
831 uint32_t handle,
832 uint32_t width, uint32_t height)
833{
834 struct drm_device *dev = crtc->dev;
835 struct drm_psb_private *dev_priv = dev->dev_private;
836 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
837 int pipe = psb_intel_crtc->pipe;
838 uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
839 uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
840 uint32_t temp;
841 size_t addr = 0;
842 struct gtt_range *gt;
843 struct gtt_range *cursor_gt = psb_intel_crtc->cursor_gt;
844 struct drm_gem_object *obj;
845 void *tmp_dst, *tmp_src;
846 int ret = 0, i, cursor_pages;
847
848 /* if we want to turn of the cursor ignore width and height */
849 if (!handle) {
850 /* turn off the cursor */
851 temp = CURSOR_MODE_DISABLE;
852
853 if (gma_power_begin(dev, false)) {
854 REG_WRITE(control, temp);
855 REG_WRITE(base, 0);
856 gma_power_end(dev);
857 }
858
859 /* Unpin the old GEM object */
860 if (psb_intel_crtc->cursor_obj) {
861 gt = container_of(psb_intel_crtc->cursor_obj,
862 struct gtt_range, gem);
863 psb_gtt_unpin(gt);
864 drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
865 psb_intel_crtc->cursor_obj = NULL;
866 }
867
868 return 0;
869 }
870
871 /* Currently we only support 64x64 cursors */
872 if (width != 64 || height != 64) {
873 dev_dbg(dev->dev, "we currently only support 64x64 cursors\n");
874 return -EINVAL;
875 }
876
877 obj = drm_gem_object_lookup(dev, file_priv, handle);
878 if (!obj)
879 return -ENOENT;
880
881 if (obj->size < width * height * 4) {
882 dev_dbg(dev->dev, "buffer is to small\n");
883 ret = -ENOMEM;
884 goto unref_cursor;
885 }
886
887 gt = container_of(obj, struct gtt_range, gem);
888
889 /* Pin the memory into the GTT */
890 ret = psb_gtt_pin(gt);
891 if (ret) {
892 dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
893 goto unref_cursor;
894 }
895
896 if (dev_priv->ops->cursor_needs_phys) {
897 if (cursor_gt == NULL) {
898 dev_err(dev->dev, "No hardware cursor mem available");
899 ret = -ENOMEM;
900 goto unref_cursor;
901 }
902
903 /* Prevent overflow */
904 if (gt->npage > 4)
905 cursor_pages = 4;
906 else
907 cursor_pages = gt->npage;
908
909 /* Copy the cursor to cursor mem */
910 tmp_dst = dev_priv->vram_addr + cursor_gt->offset;
911 for (i = 0; i < cursor_pages; i++) {
912 tmp_src = kmap(gt->pages[i]);
913 memcpy(tmp_dst, tmp_src, PAGE_SIZE);
914 kunmap(gt->pages[i]);
915 tmp_dst += PAGE_SIZE;
916 }
917
918 addr = psb_intel_crtc->cursor_addr;
919 } else {
920 addr = gt->offset; /* Or resource.start ??? */
921 psb_intel_crtc->cursor_addr = addr;
922 }
923
924 temp = 0;
925 /* set the pipe for the cursor */
926 temp |= (pipe << 28);
927 temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
928
929 if (gma_power_begin(dev, false)) {
930 REG_WRITE(control, temp);
931 REG_WRITE(base, addr);
932 gma_power_end(dev);
933 }
934
935 /* unpin the old bo */
936 if (psb_intel_crtc->cursor_obj) {
937 gt = container_of(psb_intel_crtc->cursor_obj,
938 struct gtt_range, gem);
939 psb_gtt_unpin(gt);
940 drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
941 }
942
943 psb_intel_crtc->cursor_obj = obj;
944 return ret;
945
946unref_cursor:
947 drm_gem_object_unreference(obj);
948 return ret;
949}
950
951static int psb_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
952{
953 struct drm_device *dev = crtc->dev;
954 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
955 int pipe = psb_intel_crtc->pipe;
956 uint32_t temp = 0;
957 uint32_t addr;
958
959
960 if (x < 0) {
961 temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
962 x = -x;
963 }
964 if (y < 0) {
965 temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
966 y = -y;
967 }
968
969 temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
970 temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
971
972 addr = psb_intel_crtc->cursor_addr;
973 302
974 if (gma_power_begin(dev, false)) {
975 REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
976 REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, addr);
977 gma_power_end(dev);
978 }
979 return 0; 303 return 0;
980} 304}
981 305
982static void psb_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
983 u16 *green, u16 *blue, uint32_t type, uint32_t size)
984{
985 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
986 int i;
987
988 if (size != 256)
989 return;
990
991 for (i = 0; i < 256; i++) {
992 psb_intel_crtc->lut_r[i] = red[i] >> 8;
993 psb_intel_crtc->lut_g[i] = green[i] >> 8;
994 psb_intel_crtc->lut_b[i] = blue[i] >> 8;
995 }
996
997 psb_intel_crtc_load_lut(crtc);
998}
999
1000static int psb_crtc_set_config(struct drm_mode_set *set)
1001{
1002 int ret;
1003 struct drm_device *dev = set->crtc->dev;
1004 struct drm_psb_private *dev_priv = dev->dev_private;
1005
1006 if (!dev_priv->rpm_enabled)
1007 return drm_crtc_helper_set_config(set);
1008
1009 pm_runtime_forbid(&dev->pdev->dev);
1010 ret = drm_crtc_helper_set_config(set);
1011 pm_runtime_allow(&dev->pdev->dev);
1012 return ret;
1013}
1014
1015/* Returns the clock of the currently programmed mode of the given pipe. */ 306/* Returns the clock of the currently programmed mode of the given pipe. */
1016static int psb_intel_crtc_clock_get(struct drm_device *dev, 307static int psb_intel_crtc_clock_get(struct drm_device *dev,
1017 struct drm_crtc *crtc) 308 struct drm_crtc *crtc)
1018{ 309{
1019 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 310 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
1020 struct drm_psb_private *dev_priv = dev->dev_private; 311 struct drm_psb_private *dev_priv = dev->dev_private;
1021 int pipe = psb_intel_crtc->pipe; 312 int pipe = gma_crtc->pipe;
1022 const struct psb_offset *map = &dev_priv->regmap[pipe]; 313 const struct psb_offset *map = &dev_priv->regmap[pipe];
1023 u32 dpll; 314 u32 dpll;
1024 u32 fp; 315 u32 fp;
1025 struct psb_intel_clock_t clock; 316 struct gma_clock_t clock;
1026 bool is_lvds; 317 bool is_lvds;
1027 struct psb_pipe *p = &dev_priv->regs.pipe[pipe]; 318 struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
1028 319
@@ -1092,8 +383,8 @@ static int psb_intel_crtc_clock_get(struct drm_device *dev,
1092struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev, 383struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
1093 struct drm_crtc *crtc) 384 struct drm_crtc *crtc)
1094{ 385{
1095 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 386 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
1096 int pipe = psb_intel_crtc->pipe; 387 int pipe = gma_crtc->pipe;
1097 struct drm_display_mode *mode; 388 struct drm_display_mode *mode;
1098 int htot; 389 int htot;
1099 int hsync; 390 int hsync;
@@ -1136,58 +427,30 @@ struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
1136 return mode; 427 return mode;
1137} 428}
1138 429
1139static void psb_intel_crtc_destroy(struct drm_crtc *crtc)
1140{
1141 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
1142 struct gtt_range *gt;
1143
1144 /* Unpin the old GEM object */
1145 if (psb_intel_crtc->cursor_obj) {
1146 gt = container_of(psb_intel_crtc->cursor_obj,
1147 struct gtt_range, gem);
1148 psb_gtt_unpin(gt);
1149 drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
1150 psb_intel_crtc->cursor_obj = NULL;
1151 }
1152
1153 if (psb_intel_crtc->cursor_gt != NULL)
1154 psb_gtt_free_range(crtc->dev, psb_intel_crtc->cursor_gt);
1155 kfree(psb_intel_crtc->crtc_state);
1156 drm_crtc_cleanup(crtc);
1157 kfree(psb_intel_crtc);
1158}
1159
1160static void psb_intel_crtc_disable(struct drm_crtc *crtc)
1161{
1162 struct gtt_range *gt;
1163 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
1164
1165 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
1166
1167 if (crtc->fb) {
1168 gt = to_psb_fb(crtc->fb)->gtt;
1169 psb_gtt_unpin(gt);
1170 }
1171}
1172
1173const struct drm_crtc_helper_funcs psb_intel_helper_funcs = { 430const struct drm_crtc_helper_funcs psb_intel_helper_funcs = {
1174 .dpms = psb_intel_crtc_dpms, 431 .dpms = gma_crtc_dpms,
1175 .mode_fixup = psb_intel_crtc_mode_fixup, 432 .mode_fixup = gma_crtc_mode_fixup,
1176 .mode_set = psb_intel_crtc_mode_set, 433 .mode_set = psb_intel_crtc_mode_set,
1177 .mode_set_base = psb_intel_pipe_set_base, 434 .mode_set_base = gma_pipe_set_base,
1178 .prepare = psb_intel_crtc_prepare, 435 .prepare = gma_crtc_prepare,
1179 .commit = psb_intel_crtc_commit, 436 .commit = gma_crtc_commit,
1180 .disable = psb_intel_crtc_disable, 437 .disable = gma_crtc_disable,
1181}; 438};
1182 439
1183const struct drm_crtc_funcs psb_intel_crtc_funcs = { 440const struct drm_crtc_funcs psb_intel_crtc_funcs = {
1184 .save = psb_intel_crtc_save, 441 .save = gma_crtc_save,
1185 .restore = psb_intel_crtc_restore, 442 .restore = gma_crtc_restore,
1186 .cursor_set = psb_intel_crtc_cursor_set, 443 .cursor_set = gma_crtc_cursor_set,
1187 .cursor_move = psb_intel_crtc_cursor_move, 444 .cursor_move = gma_crtc_cursor_move,
1188 .gamma_set = psb_intel_crtc_gamma_set, 445 .gamma_set = gma_crtc_gamma_set,
1189 .set_config = psb_crtc_set_config, 446 .set_config = gma_crtc_set_config,
1190 .destroy = psb_intel_crtc_destroy, 447 .destroy = gma_crtc_destroy,
448};
449
450const struct gma_clock_funcs psb_clock_funcs = {
451 .clock = psb_intel_clock,
452 .limit = psb_intel_limit,
453 .pll_is_valid = gma_pll_is_valid,
1191}; 454};
1192 455
1193/* 456/*
@@ -1195,7 +458,7 @@ const struct drm_crtc_funcs psb_intel_crtc_funcs = {
1195 * to zero. This is a workaround for h/w defect on Oaktrail 458 * to zero. This is a workaround for h/w defect on Oaktrail
1196 */ 459 */
1197static void psb_intel_cursor_init(struct drm_device *dev, 460static void psb_intel_cursor_init(struct drm_device *dev,
1198 struct psb_intel_crtc *psb_intel_crtc) 461 struct gma_crtc *gma_crtc)
1199{ 462{
1200 struct drm_psb_private *dev_priv = dev->dev_private; 463 struct drm_psb_private *dev_priv = dev->dev_private;
1201 u32 control[3] = { CURACNTR, CURBCNTR, CURCCNTR }; 464 u32 control[3] = { CURACNTR, CURBCNTR, CURCCNTR };
@@ -1208,88 +471,87 @@ static void psb_intel_cursor_init(struct drm_device *dev,
1208 */ 471 */
1209 cursor_gt = psb_gtt_alloc_range(dev, 4 * PAGE_SIZE, "cursor", 1); 472 cursor_gt = psb_gtt_alloc_range(dev, 4 * PAGE_SIZE, "cursor", 1);
1210 if (!cursor_gt) { 473 if (!cursor_gt) {
1211 psb_intel_crtc->cursor_gt = NULL; 474 gma_crtc->cursor_gt = NULL;
1212 goto out; 475 goto out;
1213 } 476 }
1214 psb_intel_crtc->cursor_gt = cursor_gt; 477 gma_crtc->cursor_gt = cursor_gt;
1215 psb_intel_crtc->cursor_addr = dev_priv->stolen_base + 478 gma_crtc->cursor_addr = dev_priv->stolen_base +
1216 cursor_gt->offset; 479 cursor_gt->offset;
1217 } else { 480 } else {
1218 psb_intel_crtc->cursor_gt = NULL; 481 gma_crtc->cursor_gt = NULL;
1219 } 482 }
1220 483
1221out: 484out:
1222 REG_WRITE(control[psb_intel_crtc->pipe], 0); 485 REG_WRITE(control[gma_crtc->pipe], 0);
1223 REG_WRITE(base[psb_intel_crtc->pipe], 0); 486 REG_WRITE(base[gma_crtc->pipe], 0);
1224} 487}
1225 488
1226void psb_intel_crtc_init(struct drm_device *dev, int pipe, 489void psb_intel_crtc_init(struct drm_device *dev, int pipe,
1227 struct psb_intel_mode_device *mode_dev) 490 struct psb_intel_mode_device *mode_dev)
1228{ 491{
1229 struct drm_psb_private *dev_priv = dev->dev_private; 492 struct drm_psb_private *dev_priv = dev->dev_private;
1230 struct psb_intel_crtc *psb_intel_crtc; 493 struct gma_crtc *gma_crtc;
1231 int i; 494 int i;
1232 uint16_t *r_base, *g_base, *b_base; 495 uint16_t *r_base, *g_base, *b_base;
1233 496
1234 /* We allocate a extra array of drm_connector pointers 497 /* We allocate a extra array of drm_connector pointers
1235 * for fbdev after the crtc */ 498 * for fbdev after the crtc */
1236 psb_intel_crtc = 499 gma_crtc = kzalloc(sizeof(struct gma_crtc) +
1237 kzalloc(sizeof(struct psb_intel_crtc) + 500 (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)),
1238 (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), 501 GFP_KERNEL);
1239 GFP_KERNEL); 502 if (gma_crtc == NULL)
1240 if (psb_intel_crtc == NULL)
1241 return; 503 return;
1242 504
1243 psb_intel_crtc->crtc_state = 505 gma_crtc->crtc_state =
1244 kzalloc(sizeof(struct psb_intel_crtc_state), GFP_KERNEL); 506 kzalloc(sizeof(struct psb_intel_crtc_state), GFP_KERNEL);
1245 if (!psb_intel_crtc->crtc_state) { 507 if (!gma_crtc->crtc_state) {
1246 dev_err(dev->dev, "Crtc state error: No memory\n"); 508 dev_err(dev->dev, "Crtc state error: No memory\n");
1247 kfree(psb_intel_crtc); 509 kfree(gma_crtc);
1248 return; 510 return;
1249 } 511 }
1250 512
1251 /* Set the CRTC operations from the chip specific data */ 513 /* Set the CRTC operations from the chip specific data */
1252 drm_crtc_init(dev, &psb_intel_crtc->base, dev_priv->ops->crtc_funcs); 514 drm_crtc_init(dev, &gma_crtc->base, dev_priv->ops->crtc_funcs);
1253 515
1254 drm_mode_crtc_set_gamma_size(&psb_intel_crtc->base, 256); 516 /* Set the CRTC clock functions from chip specific data */
1255 psb_intel_crtc->pipe = pipe; 517 gma_crtc->clock_funcs = dev_priv->ops->clock_funcs;
1256 psb_intel_crtc->plane = pipe;
1257 518
1258 r_base = psb_intel_crtc->base.gamma_store; 519 drm_mode_crtc_set_gamma_size(&gma_crtc->base, 256);
520 gma_crtc->pipe = pipe;
521 gma_crtc->plane = pipe;
522
523 r_base = gma_crtc->base.gamma_store;
1259 g_base = r_base + 256; 524 g_base = r_base + 256;
1260 b_base = g_base + 256; 525 b_base = g_base + 256;
1261 for (i = 0; i < 256; i++) { 526 for (i = 0; i < 256; i++) {
1262 psb_intel_crtc->lut_r[i] = i; 527 gma_crtc->lut_r[i] = i;
1263 psb_intel_crtc->lut_g[i] = i; 528 gma_crtc->lut_g[i] = i;
1264 psb_intel_crtc->lut_b[i] = i; 529 gma_crtc->lut_b[i] = i;
1265 r_base[i] = i << 8; 530 r_base[i] = i << 8;
1266 g_base[i] = i << 8; 531 g_base[i] = i << 8;
1267 b_base[i] = i << 8; 532 b_base[i] = i << 8;
1268 533
1269 psb_intel_crtc->lut_adj[i] = 0; 534 gma_crtc->lut_adj[i] = 0;
1270 } 535 }
1271 536
1272 psb_intel_crtc->mode_dev = mode_dev; 537 gma_crtc->mode_dev = mode_dev;
1273 psb_intel_crtc->cursor_addr = 0; 538 gma_crtc->cursor_addr = 0;
1274 539
1275 drm_crtc_helper_add(&psb_intel_crtc->base, 540 drm_crtc_helper_add(&gma_crtc->base,
1276 dev_priv->ops->crtc_helper); 541 dev_priv->ops->crtc_helper);
1277 542
1278 /* Setup the array of drm_connector pointer array */ 543 /* Setup the array of drm_connector pointer array */
1279 psb_intel_crtc->mode_set.crtc = &psb_intel_crtc->base; 544 gma_crtc->mode_set.crtc = &gma_crtc->base;
1280 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) || 545 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
1281 dev_priv->plane_to_crtc_mapping[psb_intel_crtc->plane] != NULL); 546 dev_priv->plane_to_crtc_mapping[gma_crtc->plane] != NULL);
1282 dev_priv->plane_to_crtc_mapping[psb_intel_crtc->plane] = 547 dev_priv->plane_to_crtc_mapping[gma_crtc->plane] = &gma_crtc->base;
1283 &psb_intel_crtc->base; 548 dev_priv->pipe_to_crtc_mapping[gma_crtc->pipe] = &gma_crtc->base;
1284 dev_priv->pipe_to_crtc_mapping[psb_intel_crtc->pipe] = 549 gma_crtc->mode_set.connectors = (struct drm_connector **)(gma_crtc + 1);
1285 &psb_intel_crtc->base; 550 gma_crtc->mode_set.num_connectors = 0;
1286 psb_intel_crtc->mode_set.connectors = 551 psb_intel_cursor_init(dev, gma_crtc);
1287 (struct drm_connector **) (psb_intel_crtc + 1);
1288 psb_intel_crtc->mode_set.num_connectors = 0;
1289 psb_intel_cursor_init(dev, psb_intel_crtc);
1290 552
1291 /* Set to true so that the pipe is forced off on initial config. */ 553 /* Set to true so that the pipe is forced off on initial config. */
1292 psb_intel_crtc->active = true; 554 gma_crtc->active = true;
1293} 555}
1294 556
1295int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, 557int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
@@ -1298,7 +560,7 @@ int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
1298 struct drm_psb_private *dev_priv = dev->dev_private; 560 struct drm_psb_private *dev_priv = dev->dev_private;
1299 struct drm_psb_get_pipe_from_crtc_id_arg *pipe_from_crtc_id = data; 561 struct drm_psb_get_pipe_from_crtc_id_arg *pipe_from_crtc_id = data;
1300 struct drm_mode_object *drmmode_obj; 562 struct drm_mode_object *drmmode_obj;
1301 struct psb_intel_crtc *crtc; 563 struct gma_crtc *crtc;
1302 564
1303 if (!dev_priv) { 565 if (!dev_priv) {
1304 dev_err(dev->dev, "called with no initialization\n"); 566 dev_err(dev->dev, "called with no initialization\n");
@@ -1313,7 +575,7 @@ int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
1313 return -EINVAL; 575 return -EINVAL;
1314 } 576 }
1315 577
1316 crtc = to_psb_intel_crtc(obj_to_crtc(drmmode_obj)); 578 crtc = to_gma_crtc(obj_to_crtc(drmmode_obj));
1317 pipe_from_crtc_id->pipe = crtc->pipe; 579 pipe_from_crtc_id->pipe = crtc->pipe;
1318 580
1319 return 0; 581 return 0;
@@ -1324,14 +586,14 @@ struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
1324 struct drm_crtc *crtc = NULL; 586 struct drm_crtc *crtc = NULL;
1325 587
1326 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 588 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1327 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 589 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
1328 if (psb_intel_crtc->pipe == pipe) 590 if (gma_crtc->pipe == pipe)
1329 break; 591 break;
1330 } 592 }
1331 return crtc; 593 return crtc;
1332} 594}
1333 595
1334int psb_intel_connector_clones(struct drm_device *dev, int type_mask) 596int gma_connector_clones(struct drm_device *dev, int type_mask)
1335{ 597{
1336 int index_mask = 0; 598 int index_mask = 0;
1337 struct drm_connector *connector; 599 struct drm_connector *connector;
@@ -1339,30 +601,10 @@ int psb_intel_connector_clones(struct drm_device *dev, int type_mask)
1339 601
1340 list_for_each_entry(connector, &dev->mode_config.connector_list, 602 list_for_each_entry(connector, &dev->mode_config.connector_list,
1341 head) { 603 head) {
1342 struct psb_intel_encoder *psb_intel_encoder = 604 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
1343 psb_intel_attached_encoder(connector); 605 if (type_mask & (1 << gma_encoder->type))
1344 if (type_mask & (1 << psb_intel_encoder->type))
1345 index_mask |= (1 << entry); 606 index_mask |= (1 << entry);
1346 entry++; 607 entry++;
1347 } 608 }
1348 return index_mask; 609 return index_mask;
1349} 610}
1350
/* The current Intel driver doesn't take advantage of encoders;
 * always give back the encoder for the connector.
 */
1354struct drm_encoder *psb_intel_best_encoder(struct drm_connector *connector)
1355{
1356 struct psb_intel_encoder *psb_intel_encoder =
1357 psb_intel_attached_encoder(connector);
1358
1359 return &psb_intel_encoder->base;
1360}
1361
1362void psb_intel_connector_attach_encoder(struct psb_intel_connector *connector,
1363 struct psb_intel_encoder *encoder)
1364{
1365 connector->encoder = encoder;
1366 drm_mode_connector_attach_encoder(&connector->base,
1367 &encoder->base);
1368}
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index 4dcae421a58d..bde27fdb41bf 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -24,6 +24,7 @@
24#include <drm/drm_crtc.h> 24#include <drm/drm_crtc.h>
25#include <drm/drm_crtc_helper.h> 25#include <drm/drm_crtc_helper.h>
26#include <linux/gpio.h> 26#include <linux/gpio.h>
27#include "gma_display.h"
27 28
28/* 29/*
29 * Display related stuff 30 * Display related stuff
@@ -116,11 +117,11 @@ struct psb_intel_i2c_chan {
116 u8 slave_addr; 117 u8 slave_addr;
117}; 118};
118 119
119struct psb_intel_encoder { 120struct gma_encoder {
120 struct drm_encoder base; 121 struct drm_encoder base;
121 int type; 122 int type;
122 bool needs_tv_clock; 123 bool needs_tv_clock;
123 void (*hot_plug)(struct psb_intel_encoder *); 124 void (*hot_plug)(struct gma_encoder *);
124 int crtc_mask; 125 int crtc_mask;
125 int clone_mask; 126 int clone_mask;
126 u32 ddi_select; /* Channel info */ 127 u32 ddi_select; /* Channel info */
@@ -136,9 +137,9 @@ struct psb_intel_encoder {
136 struct psb_intel_i2c_chan *ddc_bus; 137 struct psb_intel_i2c_chan *ddc_bus;
137}; 138};
138 139
139struct psb_intel_connector { 140struct gma_connector {
140 struct drm_connector base; 141 struct drm_connector base;
141 struct psb_intel_encoder *encoder; 142 struct gma_encoder *encoder;
142}; 143};
143 144
144struct psb_intel_crtc_state { 145struct psb_intel_crtc_state {
@@ -161,7 +162,7 @@ struct psb_intel_crtc_state {
161 uint32_t savePalette[256]; 162 uint32_t savePalette[256];
162}; 163};
163 164
164struct psb_intel_crtc { 165struct gma_crtc {
165 struct drm_crtc base; 166 struct drm_crtc base;
166 int pipe; 167 int pipe;
167 int plane; 168 int plane;
@@ -188,14 +189,16 @@ struct psb_intel_crtc {
188 189
189 /* Saved Crtc HW states */ 190 /* Saved Crtc HW states */
190 struct psb_intel_crtc_state *crtc_state; 191 struct psb_intel_crtc_state *crtc_state;
192
193 const struct gma_clock_funcs *clock_funcs;
191}; 194};
192 195
193#define to_psb_intel_crtc(x) \ 196#define to_gma_crtc(x) \
194 container_of(x, struct psb_intel_crtc, base) 197 container_of(x, struct gma_crtc, base)
195#define to_psb_intel_connector(x) \ 198#define to_gma_connector(x) \
196 container_of(x, struct psb_intel_connector, base) 199 container_of(x, struct gma_connector, base)
197#define to_psb_intel_encoder(x) \ 200#define to_gma_encoder(x) \
198 container_of(x, struct psb_intel_encoder, base) 201 container_of(x, struct gma_encoder, base)
199#define to_psb_intel_framebuffer(x) \ 202#define to_psb_intel_framebuffer(x) \
200 container_of(x, struct psb_intel_framebuffer, base) 203 container_of(x, struct psb_intel_framebuffer, base)
201 204
@@ -223,27 +226,18 @@ extern void oaktrail_dsi_init(struct drm_device *dev,
223extern void mid_dsi_init(struct drm_device *dev, 226extern void mid_dsi_init(struct drm_device *dev,
224 struct psb_intel_mode_device *mode_dev, int dsi_num); 227 struct psb_intel_mode_device *mode_dev, int dsi_num);
225 228
226extern void psb_intel_crtc_load_lut(struct drm_crtc *crtc); 229extern struct drm_encoder *gma_best_encoder(struct drm_connector *connector);
227extern void psb_intel_encoder_prepare(struct drm_encoder *encoder); 230extern void gma_connector_attach_encoder(struct gma_connector *connector,
228extern void psb_intel_encoder_commit(struct drm_encoder *encoder); 231 struct gma_encoder *encoder);
229extern void psb_intel_encoder_destroy(struct drm_encoder *encoder);
230 232
231static inline struct psb_intel_encoder *psb_intel_attached_encoder( 233static inline struct gma_encoder *gma_attached_encoder(
232 struct drm_connector *connector) 234 struct drm_connector *connector)
233{ 235{
234 return to_psb_intel_connector(connector)->encoder; 236 return to_gma_connector(connector)->encoder;
235} 237}
236 238
237extern void psb_intel_connector_attach_encoder(
238 struct psb_intel_connector *connector,
239 struct psb_intel_encoder *encoder);
240
241extern struct drm_encoder *psb_intel_best_encoder(struct drm_connector
242 *connector);
243
244extern struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev, 239extern struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
245 struct drm_crtc *crtc); 240 struct drm_crtc *crtc);
246extern void psb_intel_wait_for_vblank(struct drm_device *dev);
247extern int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, 241extern int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
248 struct drm_file *file_priv); 242 struct drm_file *file_priv);
249extern struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, 243extern struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev,
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index 9fa5fa2e6192..32342f6990d9 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -267,10 +267,9 @@ static void psb_intel_lvds_save(struct drm_connector *connector)
267 struct drm_device *dev = connector->dev; 267 struct drm_device *dev = connector->dev;
268 struct drm_psb_private *dev_priv = 268 struct drm_psb_private *dev_priv =
269 (struct drm_psb_private *)dev->dev_private; 269 (struct drm_psb_private *)dev->dev_private;
270 struct psb_intel_encoder *psb_intel_encoder = 270 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
271 psb_intel_attached_encoder(connector);
272 struct psb_intel_lvds_priv *lvds_priv = 271 struct psb_intel_lvds_priv *lvds_priv =
273 (struct psb_intel_lvds_priv *)psb_intel_encoder->dev_priv; 272 (struct psb_intel_lvds_priv *)gma_encoder->dev_priv;
274 273
275 lvds_priv->savePP_ON = REG_READ(LVDSPP_ON); 274 lvds_priv->savePP_ON = REG_READ(LVDSPP_ON);
276 lvds_priv->savePP_OFF = REG_READ(LVDSPP_OFF); 275 lvds_priv->savePP_OFF = REG_READ(LVDSPP_OFF);
@@ -307,10 +306,9 @@ static void psb_intel_lvds_restore(struct drm_connector *connector)
307{ 306{
308 struct drm_device *dev = connector->dev; 307 struct drm_device *dev = connector->dev;
309 u32 pp_status; 308 u32 pp_status;
310 struct psb_intel_encoder *psb_intel_encoder = 309 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
311 psb_intel_attached_encoder(connector);
312 struct psb_intel_lvds_priv *lvds_priv = 310 struct psb_intel_lvds_priv *lvds_priv =
313 (struct psb_intel_lvds_priv *)psb_intel_encoder->dev_priv; 311 (struct psb_intel_lvds_priv *)gma_encoder->dev_priv;
314 312
315 dev_dbg(dev->dev, "(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n", 313 dev_dbg(dev->dev, "(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n",
316 lvds_priv->savePP_ON, 314 lvds_priv->savePP_ON,
@@ -349,12 +347,11 @@ int psb_intel_lvds_mode_valid(struct drm_connector *connector,
349 struct drm_display_mode *mode) 347 struct drm_display_mode *mode)
350{ 348{
351 struct drm_psb_private *dev_priv = connector->dev->dev_private; 349 struct drm_psb_private *dev_priv = connector->dev->dev_private;
352 struct psb_intel_encoder *psb_intel_encoder = 350 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
353 psb_intel_attached_encoder(connector);
354 struct drm_display_mode *fixed_mode = 351 struct drm_display_mode *fixed_mode =
355 dev_priv->mode_dev.panel_fixed_mode; 352 dev_priv->mode_dev.panel_fixed_mode;
356 353
357 if (psb_intel_encoder->type == INTEL_OUTPUT_MIPI2) 354 if (gma_encoder->type == INTEL_OUTPUT_MIPI2)
358 fixed_mode = dev_priv->mode_dev.panel_fixed_mode2; 355 fixed_mode = dev_priv->mode_dev.panel_fixed_mode2;
359 356
360 /* just in case */ 357 /* just in case */
@@ -381,22 +378,20 @@ bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
381 struct drm_device *dev = encoder->dev; 378 struct drm_device *dev = encoder->dev;
382 struct drm_psb_private *dev_priv = dev->dev_private; 379 struct drm_psb_private *dev_priv = dev->dev_private;
383 struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev; 380 struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
384 struct psb_intel_crtc *psb_intel_crtc = 381 struct gma_crtc *gma_crtc = to_gma_crtc(encoder->crtc);
385 to_psb_intel_crtc(encoder->crtc);
386 struct drm_encoder *tmp_encoder; 382 struct drm_encoder *tmp_encoder;
387 struct drm_display_mode *panel_fixed_mode = mode_dev->panel_fixed_mode; 383 struct drm_display_mode *panel_fixed_mode = mode_dev->panel_fixed_mode;
388 struct psb_intel_encoder *psb_intel_encoder = 384 struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
389 to_psb_intel_encoder(encoder);
390 385
391 if (psb_intel_encoder->type == INTEL_OUTPUT_MIPI2) 386 if (gma_encoder->type == INTEL_OUTPUT_MIPI2)
392 panel_fixed_mode = mode_dev->panel_fixed_mode2; 387 panel_fixed_mode = mode_dev->panel_fixed_mode2;
393 388
394 /* PSB requires the LVDS is on pipe B, MRST has only one pipe anyway */ 389 /* PSB requires the LVDS is on pipe B, MRST has only one pipe anyway */
395 if (!IS_MRST(dev) && psb_intel_crtc->pipe == 0) { 390 if (!IS_MRST(dev) && gma_crtc->pipe == 0) {
396 printk(KERN_ERR "Can't support LVDS on pipe A\n"); 391 printk(KERN_ERR "Can't support LVDS on pipe A\n");
397 return false; 392 return false;
398 } 393 }
399 if (IS_MRST(dev) && psb_intel_crtc->pipe != 0) { 394 if (IS_MRST(dev) && gma_crtc->pipe != 0) {
400 printk(KERN_ERR "Must use PIPE A\n"); 395 printk(KERN_ERR "Must use PIPE A\n");
401 return false; 396 return false;
402 } 397 }
@@ -525,9 +520,8 @@ static int psb_intel_lvds_get_modes(struct drm_connector *connector)
525 struct drm_device *dev = connector->dev; 520 struct drm_device *dev = connector->dev;
526 struct drm_psb_private *dev_priv = dev->dev_private; 521 struct drm_psb_private *dev_priv = dev->dev_private;
527 struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev; 522 struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
528 struct psb_intel_encoder *psb_intel_encoder = 523 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
529 psb_intel_attached_encoder(connector); 524 struct psb_intel_lvds_priv *lvds_priv = gma_encoder->dev_priv;
530 struct psb_intel_lvds_priv *lvds_priv = psb_intel_encoder->dev_priv;
531 int ret = 0; 525 int ret = 0;
532 526
533 if (!IS_MRST(dev)) 527 if (!IS_MRST(dev))
@@ -564,9 +558,8 @@ static int psb_intel_lvds_get_modes(struct drm_connector *connector)
564 */ 558 */
565void psb_intel_lvds_destroy(struct drm_connector *connector) 559void psb_intel_lvds_destroy(struct drm_connector *connector)
566{ 560{
567 struct psb_intel_encoder *psb_intel_encoder = 561 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
568 psb_intel_attached_encoder(connector); 562 struct psb_intel_lvds_priv *lvds_priv = gma_encoder->dev_priv;
569 struct psb_intel_lvds_priv *lvds_priv = psb_intel_encoder->dev_priv;
570 563
571 if (lvds_priv->ddc_bus) 564 if (lvds_priv->ddc_bus)
572 psb_intel_i2c_destroy(lvds_priv->ddc_bus); 565 psb_intel_i2c_destroy(lvds_priv->ddc_bus);
@@ -585,8 +578,7 @@ int psb_intel_lvds_set_property(struct drm_connector *connector,
585 return -1; 578 return -1;
586 579
587 if (!strcmp(property->name, "scaling mode")) { 580 if (!strcmp(property->name, "scaling mode")) {
588 struct psb_intel_crtc *crtc = 581 struct gma_crtc *crtc = to_gma_crtc(encoder->crtc);
589 to_psb_intel_crtc(encoder->crtc);
590 uint64_t curval; 582 uint64_t curval;
591 583
592 if (!crtc) 584 if (!crtc)
@@ -656,7 +648,7 @@ const struct drm_connector_helper_funcs
656 psb_intel_lvds_connector_helper_funcs = { 648 psb_intel_lvds_connector_helper_funcs = {
657 .get_modes = psb_intel_lvds_get_modes, 649 .get_modes = psb_intel_lvds_get_modes,
658 .mode_valid = psb_intel_lvds_mode_valid, 650 .mode_valid = psb_intel_lvds_mode_valid,
659 .best_encoder = psb_intel_best_encoder, 651 .best_encoder = gma_best_encoder,
660}; 652};
661 653
662const struct drm_connector_funcs psb_intel_lvds_connector_funcs = { 654const struct drm_connector_funcs psb_intel_lvds_connector_funcs = {
@@ -691,8 +683,8 @@ const struct drm_encoder_funcs psb_intel_lvds_enc_funcs = {
691void psb_intel_lvds_init(struct drm_device *dev, 683void psb_intel_lvds_init(struct drm_device *dev,
692 struct psb_intel_mode_device *mode_dev) 684 struct psb_intel_mode_device *mode_dev)
693{ 685{
694 struct psb_intel_encoder *psb_intel_encoder; 686 struct gma_encoder *gma_encoder;
695 struct psb_intel_connector *psb_intel_connector; 687 struct gma_connector *gma_connector;
696 struct psb_intel_lvds_priv *lvds_priv; 688 struct psb_intel_lvds_priv *lvds_priv;
697 struct drm_connector *connector; 689 struct drm_connector *connector;
698 struct drm_encoder *encoder; 690 struct drm_encoder *encoder;
@@ -702,17 +694,15 @@ void psb_intel_lvds_init(struct drm_device *dev,
702 u32 lvds; 694 u32 lvds;
703 int pipe; 695 int pipe;
704 696
705 psb_intel_encoder = 697 gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL);
706 kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL); 698 if (!gma_encoder) {
707 if (!psb_intel_encoder) { 699 dev_err(dev->dev, "gma_encoder allocation error\n");
708 dev_err(dev->dev, "psb_intel_encoder allocation error\n");
709 return; 700 return;
710 } 701 }
711 702
712 psb_intel_connector = 703 gma_connector = kzalloc(sizeof(struct gma_connector), GFP_KERNEL);
713 kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL); 704 if (!gma_connector) {
714 if (!psb_intel_connector) { 705 dev_err(dev->dev, "gma_connector allocation error\n");
715 dev_err(dev->dev, "psb_intel_connector allocation error\n");
716 goto failed_encoder; 706 goto failed_encoder;
717 } 707 }
718 708
@@ -722,10 +712,10 @@ void psb_intel_lvds_init(struct drm_device *dev,
722 goto failed_connector; 712 goto failed_connector;
723 } 713 }
724 714
725 psb_intel_encoder->dev_priv = lvds_priv; 715 gma_encoder->dev_priv = lvds_priv;
726 716
727 connector = &psb_intel_connector->base; 717 connector = &gma_connector->base;
728 encoder = &psb_intel_encoder->base; 718 encoder = &gma_encoder->base;
729 drm_connector_init(dev, connector, 719 drm_connector_init(dev, connector,
730 &psb_intel_lvds_connector_funcs, 720 &psb_intel_lvds_connector_funcs,
731 DRM_MODE_CONNECTOR_LVDS); 721 DRM_MODE_CONNECTOR_LVDS);
@@ -734,9 +724,8 @@ void psb_intel_lvds_init(struct drm_device *dev,
734 &psb_intel_lvds_enc_funcs, 724 &psb_intel_lvds_enc_funcs,
735 DRM_MODE_ENCODER_LVDS); 725 DRM_MODE_ENCODER_LVDS);
736 726
737 psb_intel_connector_attach_encoder(psb_intel_connector, 727 gma_connector_attach_encoder(gma_connector, gma_encoder);
738 psb_intel_encoder); 728 gma_encoder->type = INTEL_OUTPUT_LVDS;
739 psb_intel_encoder->type = INTEL_OUTPUT_LVDS;
740 729
741 drm_encoder_helper_add(encoder, &psb_intel_lvds_helper_funcs); 730 drm_encoder_helper_add(encoder, &psb_intel_lvds_helper_funcs);
742 drm_connector_helper_add(connector, 731 drm_connector_helper_add(connector,
@@ -851,8 +840,8 @@ failed_blc_i2c:
851 drm_encoder_cleanup(encoder); 840 drm_encoder_cleanup(encoder);
852 drm_connector_cleanup(connector); 841 drm_connector_cleanup(connector);
853failed_connector: 842failed_connector:
854 kfree(psb_intel_connector); 843 kfree(gma_connector);
855failed_encoder: 844failed_encoder:
856 kfree(psb_intel_encoder); 845 kfree(gma_encoder);
857} 846}
858 847
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index 19e36603b23b..77841a113617 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -65,7 +65,7 @@ static const char *tv_format_names[] = {
65#define TV_FORMAT_NUM (sizeof(tv_format_names) / sizeof(*tv_format_names)) 65#define TV_FORMAT_NUM (sizeof(tv_format_names) / sizeof(*tv_format_names))
66 66
67struct psb_intel_sdvo { 67struct psb_intel_sdvo {
68 struct psb_intel_encoder base; 68 struct gma_encoder base;
69 69
70 struct i2c_adapter *i2c; 70 struct i2c_adapter *i2c;
71 u8 slave_addr; 71 u8 slave_addr;
@@ -140,7 +140,7 @@ struct psb_intel_sdvo {
140}; 140};
141 141
142struct psb_intel_sdvo_connector { 142struct psb_intel_sdvo_connector {
143 struct psb_intel_connector base; 143 struct gma_connector base;
144 144
145 /* Mark the type of connector */ 145 /* Mark the type of connector */
146 uint16_t output_flag; 146 uint16_t output_flag;
@@ -200,13 +200,13 @@ static struct psb_intel_sdvo *to_psb_intel_sdvo(struct drm_encoder *encoder)
200 200
201static struct psb_intel_sdvo *intel_attached_sdvo(struct drm_connector *connector) 201static struct psb_intel_sdvo *intel_attached_sdvo(struct drm_connector *connector)
202{ 202{
203 return container_of(psb_intel_attached_encoder(connector), 203 return container_of(gma_attached_encoder(connector),
204 struct psb_intel_sdvo, base); 204 struct psb_intel_sdvo, base);
205} 205}
206 206
207static struct psb_intel_sdvo_connector *to_psb_intel_sdvo_connector(struct drm_connector *connector) 207static struct psb_intel_sdvo_connector *to_psb_intel_sdvo_connector(struct drm_connector *connector)
208{ 208{
209 return container_of(to_psb_intel_connector(connector), struct psb_intel_sdvo_connector, base); 209 return container_of(to_gma_connector(connector), struct psb_intel_sdvo_connector, base);
210} 210}
211 211
212static bool 212static bool
@@ -987,7 +987,7 @@ static void psb_intel_sdvo_mode_set(struct drm_encoder *encoder,
987{ 987{
988 struct drm_device *dev = encoder->dev; 988 struct drm_device *dev = encoder->dev;
989 struct drm_crtc *crtc = encoder->crtc; 989 struct drm_crtc *crtc = encoder->crtc;
990 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 990 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
991 struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder); 991 struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
992 u32 sdvox; 992 u32 sdvox;
993 struct psb_intel_sdvo_in_out_map in_out; 993 struct psb_intel_sdvo_in_out_map in_out;
@@ -1070,7 +1070,7 @@ static void psb_intel_sdvo_mode_set(struct drm_encoder *encoder,
1070 } 1070 }
1071 sdvox |= (9 << 19) | SDVO_BORDER_ENABLE; 1071 sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
1072 1072
1073 if (psb_intel_crtc->pipe == 1) 1073 if (gma_crtc->pipe == 1)
1074 sdvox |= SDVO_PIPE_B_SELECT; 1074 sdvox |= SDVO_PIPE_B_SELECT;
1075 if (psb_intel_sdvo->has_hdmi_audio) 1075 if (psb_intel_sdvo->has_hdmi_audio)
1076 sdvox |= SDVO_AUDIO_ENABLE; 1076 sdvox |= SDVO_AUDIO_ENABLE;
@@ -1121,7 +1121,7 @@ static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
1121 if ((temp & SDVO_ENABLE) == 0) 1121 if ((temp & SDVO_ENABLE) == 0)
1122 psb_intel_sdvo_write_sdvox(psb_intel_sdvo, temp | SDVO_ENABLE); 1122 psb_intel_sdvo_write_sdvox(psb_intel_sdvo, temp | SDVO_ENABLE);
1123 for (i = 0; i < 2; i++) 1123 for (i = 0; i < 2; i++)
1124 psb_intel_wait_for_vblank(dev); 1124 gma_wait_for_vblank(dev);
1125 1125
1126 status = psb_intel_sdvo_get_trained_inputs(psb_intel_sdvo, &input1, &input2); 1126 status = psb_intel_sdvo_get_trained_inputs(psb_intel_sdvo, &input1, &input2);
1127 /* Warn if the device reported failure to sync. 1127 /* Warn if the device reported failure to sync.
@@ -1836,10 +1836,8 @@ done:
1836static void psb_intel_sdvo_save(struct drm_connector *connector) 1836static void psb_intel_sdvo_save(struct drm_connector *connector)
1837{ 1837{
1838 struct drm_device *dev = connector->dev; 1838 struct drm_device *dev = connector->dev;
1839 struct psb_intel_encoder *psb_intel_encoder = 1839 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
1840 psb_intel_attached_encoder(connector); 1840 struct psb_intel_sdvo *sdvo = to_psb_intel_sdvo(&gma_encoder->base);
1841 struct psb_intel_sdvo *sdvo =
1842 to_psb_intel_sdvo(&psb_intel_encoder->base);
1843 1841
1844 sdvo->saveSDVO = REG_READ(sdvo->sdvo_reg); 1842 sdvo->saveSDVO = REG_READ(sdvo->sdvo_reg);
1845} 1843}
@@ -1847,8 +1845,7 @@ static void psb_intel_sdvo_save(struct drm_connector *connector)
1847static void psb_intel_sdvo_restore(struct drm_connector *connector) 1845static void psb_intel_sdvo_restore(struct drm_connector *connector)
1848{ 1846{
1849 struct drm_device *dev = connector->dev; 1847 struct drm_device *dev = connector->dev;
1850 struct drm_encoder *encoder = 1848 struct drm_encoder *encoder = &gma_attached_encoder(connector)->base;
1851 &psb_intel_attached_encoder(connector)->base;
1852 struct psb_intel_sdvo *sdvo = to_psb_intel_sdvo(encoder); 1849 struct psb_intel_sdvo *sdvo = to_psb_intel_sdvo(encoder);
1853 struct drm_crtc *crtc = encoder->crtc; 1850 struct drm_crtc *crtc = encoder->crtc;
1854 1851
@@ -1864,9 +1861,9 @@ static void psb_intel_sdvo_restore(struct drm_connector *connector)
1864static const struct drm_encoder_helper_funcs psb_intel_sdvo_helper_funcs = { 1861static const struct drm_encoder_helper_funcs psb_intel_sdvo_helper_funcs = {
1865 .dpms = psb_intel_sdvo_dpms, 1862 .dpms = psb_intel_sdvo_dpms,
1866 .mode_fixup = psb_intel_sdvo_mode_fixup, 1863 .mode_fixup = psb_intel_sdvo_mode_fixup,
1867 .prepare = psb_intel_encoder_prepare, 1864 .prepare = gma_encoder_prepare,
1868 .mode_set = psb_intel_sdvo_mode_set, 1865 .mode_set = psb_intel_sdvo_mode_set,
1869 .commit = psb_intel_encoder_commit, 1866 .commit = gma_encoder_commit,
1870}; 1867};
1871 1868
1872static const struct drm_connector_funcs psb_intel_sdvo_connector_funcs = { 1869static const struct drm_connector_funcs psb_intel_sdvo_connector_funcs = {
@@ -1882,7 +1879,7 @@ static const struct drm_connector_funcs psb_intel_sdvo_connector_funcs = {
1882static const struct drm_connector_helper_funcs psb_intel_sdvo_connector_helper_funcs = { 1879static const struct drm_connector_helper_funcs psb_intel_sdvo_connector_helper_funcs = {
1883 .get_modes = psb_intel_sdvo_get_modes, 1880 .get_modes = psb_intel_sdvo_get_modes,
1884 .mode_valid = psb_intel_sdvo_mode_valid, 1881 .mode_valid = psb_intel_sdvo_mode_valid,
1885 .best_encoder = psb_intel_best_encoder, 1882 .best_encoder = gma_best_encoder,
1886}; 1883};
1887 1884
1888static void psb_intel_sdvo_enc_destroy(struct drm_encoder *encoder) 1885static void psb_intel_sdvo_enc_destroy(struct drm_encoder *encoder)
@@ -1894,7 +1891,7 @@ static void psb_intel_sdvo_enc_destroy(struct drm_encoder *encoder)
1894 psb_intel_sdvo->sdvo_lvds_fixed_mode); 1891 psb_intel_sdvo->sdvo_lvds_fixed_mode);
1895 1892
1896 i2c_del_adapter(&psb_intel_sdvo->ddc); 1893 i2c_del_adapter(&psb_intel_sdvo->ddc);
1897 psb_intel_encoder_destroy(encoder); 1894 gma_encoder_destroy(encoder);
1898} 1895}
1899 1896
1900static const struct drm_encoder_funcs psb_intel_sdvo_enc_funcs = { 1897static const struct drm_encoder_funcs psb_intel_sdvo_enc_funcs = {
@@ -2055,7 +2052,7 @@ psb_intel_sdvo_connector_init(struct psb_intel_sdvo_connector *connector,
2055 connector->base.base.doublescan_allowed = 0; 2052 connector->base.base.doublescan_allowed = 0;
2056 connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB; 2053 connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
2057 2054
2058 psb_intel_connector_attach_encoder(&connector->base, &encoder->base); 2055 gma_connector_attach_encoder(&connector->base, &encoder->base);
2059 drm_sysfs_connector_add(&connector->base.base); 2056 drm_sysfs_connector_add(&connector->base.base);
2060} 2057}
2061 2058
@@ -2075,7 +2072,7 @@ psb_intel_sdvo_dvi_init(struct psb_intel_sdvo *psb_intel_sdvo, int device)
2075{ 2072{
2076 struct drm_encoder *encoder = &psb_intel_sdvo->base.base; 2073 struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
2077 struct drm_connector *connector; 2074 struct drm_connector *connector;
2078 struct psb_intel_connector *intel_connector; 2075 struct gma_connector *intel_connector;
2079 struct psb_intel_sdvo_connector *psb_intel_sdvo_connector; 2076 struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
2080 2077
2081 psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL); 2078 psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
@@ -2115,7 +2112,7 @@ psb_intel_sdvo_tv_init(struct psb_intel_sdvo *psb_intel_sdvo, int type)
2115{ 2112{
2116 struct drm_encoder *encoder = &psb_intel_sdvo->base.base; 2113 struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
2117 struct drm_connector *connector; 2114 struct drm_connector *connector;
2118 struct psb_intel_connector *intel_connector; 2115 struct gma_connector *intel_connector;
2119 struct psb_intel_sdvo_connector *psb_intel_sdvo_connector; 2116 struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
2120 2117
2121 psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL); 2118 psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
@@ -2154,7 +2151,7 @@ psb_intel_sdvo_analog_init(struct psb_intel_sdvo *psb_intel_sdvo, int device)
2154{ 2151{
2155 struct drm_encoder *encoder = &psb_intel_sdvo->base.base; 2152 struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
2156 struct drm_connector *connector; 2153 struct drm_connector *connector;
2157 struct psb_intel_connector *intel_connector; 2154 struct gma_connector *intel_connector;
2158 struct psb_intel_sdvo_connector *psb_intel_sdvo_connector; 2155 struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
2159 2156
2160 psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL); 2157 psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
@@ -2188,7 +2185,7 @@ psb_intel_sdvo_lvds_init(struct psb_intel_sdvo *psb_intel_sdvo, int device)
2188{ 2185{
2189 struct drm_encoder *encoder = &psb_intel_sdvo->base.base; 2186 struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
2190 struct drm_connector *connector; 2187 struct drm_connector *connector;
2191 struct psb_intel_connector *intel_connector; 2188 struct gma_connector *intel_connector;
2192 struct psb_intel_sdvo_connector *psb_intel_sdvo_connector; 2189 struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
2193 2190
2194 psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL); 2191 psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
@@ -2540,7 +2537,7 @@ psb_intel_sdvo_init_ddc_proxy(struct psb_intel_sdvo *sdvo,
2540bool psb_intel_sdvo_init(struct drm_device *dev, int sdvo_reg) 2537bool psb_intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
2541{ 2538{
2542 struct drm_psb_private *dev_priv = dev->dev_private; 2539 struct drm_psb_private *dev_priv = dev->dev_private;
2543 struct psb_intel_encoder *psb_intel_encoder; 2540 struct gma_encoder *gma_encoder;
2544 struct psb_intel_sdvo *psb_intel_sdvo; 2541 struct psb_intel_sdvo *psb_intel_sdvo;
2545 int i; 2542 int i;
2546 2543
@@ -2557,9 +2554,9 @@ bool psb_intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
2557 } 2554 }
2558 2555
2559 /* encoder type will be decided later */ 2556 /* encoder type will be decided later */
2560 psb_intel_encoder = &psb_intel_sdvo->base; 2557 gma_encoder = &psb_intel_sdvo->base;
2561 psb_intel_encoder->type = INTEL_OUTPUT_SDVO; 2558 gma_encoder->type = INTEL_OUTPUT_SDVO;
2562 drm_encoder_init(dev, &psb_intel_encoder->base, &psb_intel_sdvo_enc_funcs, 0); 2559 drm_encoder_init(dev, &gma_encoder->base, &psb_intel_sdvo_enc_funcs, 0);
2563 2560
2564 /* Read the regs to test if we can talk to the device */ 2561 /* Read the regs to test if we can talk to the device */
2565 for (i = 0; i < 0x40; i++) { 2562 for (i = 0; i < 0x40; i++) {
@@ -2577,7 +2574,7 @@ bool psb_intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
2577 else 2574 else
2578 dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS; 2575 dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
2579 2576
2580 drm_encoder_helper_add(&psb_intel_encoder->base, &psb_intel_sdvo_helper_funcs); 2577 drm_encoder_helper_add(&gma_encoder->base, &psb_intel_sdvo_helper_funcs);
2581 2578
2582 /* In default case sdvo lvds is false */ 2579 /* In default case sdvo lvds is false */
2583 if (!psb_intel_sdvo_get_capabilities(psb_intel_sdvo, &psb_intel_sdvo->caps)) 2580 if (!psb_intel_sdvo_get_capabilities(psb_intel_sdvo, &psb_intel_sdvo->caps))
@@ -2620,7 +2617,7 @@ bool psb_intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
2620 return true; 2617 return true;
2621 2618
2622err: 2619err:
2623 drm_encoder_cleanup(&psb_intel_encoder->base); 2620 drm_encoder_cleanup(&gma_encoder->base);
2624 i2c_del_adapter(&psb_intel_sdvo->ddc); 2621 i2c_del_adapter(&psb_intel_sdvo->ddc);
2625 kfree(psb_intel_sdvo); 2622 kfree(psb_intel_sdvo);
2626 2623
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index e68b58a1aaf9..c2bd711e86e9 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -23,7 +23,7 @@
23#include <drm/drm_crtc_helper.h> 23#include <drm/drm_crtc_helper.h>
24#include <drm/drm_encoder_slave.h> 24#include <drm/drm_encoder_slave.h>
25#include <drm/drm_edid.h> 25#include <drm/drm_edid.h>
26 26#include <drm/i2c/tda998x.h>
27 27
28#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__) 28#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
29 29
@@ -32,6 +32,11 @@ struct tda998x_priv {
32 uint16_t rev; 32 uint16_t rev;
33 uint8_t current_page; 33 uint8_t current_page;
34 int dpms; 34 int dpms;
35 bool is_hdmi_sink;
36 u8 vip_cntrl_0;
37 u8 vip_cntrl_1;
38 u8 vip_cntrl_2;
39 struct tda998x_encoder_params params;
35}; 40};
36 41
37#define to_tda998x_priv(x) ((struct tda998x_priv *)to_encoder_slave(x)->slave_priv) 42#define to_tda998x_priv(x) ((struct tda998x_priv *)to_encoder_slave(x)->slave_priv)
@@ -68,10 +73,13 @@ struct tda998x_priv {
68# define I2C_MASTER_DIS_MM (1 << 0) 73# define I2C_MASTER_DIS_MM (1 << 0)
69# define I2C_MASTER_DIS_FILT (1 << 1) 74# define I2C_MASTER_DIS_FILT (1 << 1)
70# define I2C_MASTER_APP_STRT_LAT (1 << 2) 75# define I2C_MASTER_APP_STRT_LAT (1 << 2)
76#define REG_FEAT_POWERDOWN REG(0x00, 0x0e) /* read/write */
77# define FEAT_POWERDOWN_SPDIF (1 << 3)
71#define REG_INT_FLAGS_0 REG(0x00, 0x0f) /* read/write */ 78#define REG_INT_FLAGS_0 REG(0x00, 0x0f) /* read/write */
72#define REG_INT_FLAGS_1 REG(0x00, 0x10) /* read/write */ 79#define REG_INT_FLAGS_1 REG(0x00, 0x10) /* read/write */
73#define REG_INT_FLAGS_2 REG(0x00, 0x11) /* read/write */ 80#define REG_INT_FLAGS_2 REG(0x00, 0x11) /* read/write */
74# define INT_FLAGS_2_EDID_BLK_RD (1 << 1) 81# define INT_FLAGS_2_EDID_BLK_RD (1 << 1)
82#define REG_ENA_ACLK REG(0x00, 0x16) /* read/write */
75#define REG_ENA_VP_0 REG(0x00, 0x18) /* read/write */ 83#define REG_ENA_VP_0 REG(0x00, 0x18) /* read/write */
76#define REG_ENA_VP_1 REG(0x00, 0x19) /* read/write */ 84#define REG_ENA_VP_1 REG(0x00, 0x19) /* read/write */
77#define REG_ENA_VP_2 REG(0x00, 0x1a) /* read/write */ 85#define REG_ENA_VP_2 REG(0x00, 0x1a) /* read/write */
@@ -110,6 +118,8 @@ struct tda998x_priv {
110#define REG_VIP_CNTRL_5 REG(0x00, 0x25) /* write */ 118#define REG_VIP_CNTRL_5 REG(0x00, 0x25) /* write */
111# define VIP_CNTRL_5_CKCASE (1 << 0) 119# define VIP_CNTRL_5_CKCASE (1 << 0)
112# define VIP_CNTRL_5_SP_CNT(x) (((x) & 3) << 1) 120# define VIP_CNTRL_5_SP_CNT(x) (((x) & 3) << 1)
121#define REG_MUX_AP REG(0x00, 0x26) /* read/write */
122#define REG_MUX_VP_VIP_OUT REG(0x00, 0x27) /* read/write */
113#define REG_MAT_CONTRL REG(0x00, 0x80) /* write */ 123#define REG_MAT_CONTRL REG(0x00, 0x80) /* write */
114# define MAT_CONTRL_MAT_SC(x) (((x) & 3) << 0) 124# define MAT_CONTRL_MAT_SC(x) (((x) & 3) << 0)
115# define MAT_CONTRL_MAT_BP (1 << 2) 125# define MAT_CONTRL_MAT_BP (1 << 2)
@@ -130,8 +140,12 @@ struct tda998x_priv {
130#define REG_VS_LINE_END_1_LSB REG(0x00, 0xae) /* write */ 140#define REG_VS_LINE_END_1_LSB REG(0x00, 0xae) /* write */
131#define REG_VS_PIX_END_1_MSB REG(0x00, 0xaf) /* write */ 141#define REG_VS_PIX_END_1_MSB REG(0x00, 0xaf) /* write */
132#define REG_VS_PIX_END_1_LSB REG(0x00, 0xb0) /* write */ 142#define REG_VS_PIX_END_1_LSB REG(0x00, 0xb0) /* write */
143#define REG_VS_LINE_STRT_2_MSB REG(0x00, 0xb1) /* write */
144#define REG_VS_LINE_STRT_2_LSB REG(0x00, 0xb2) /* write */
133#define REG_VS_PIX_STRT_2_MSB REG(0x00, 0xb3) /* write */ 145#define REG_VS_PIX_STRT_2_MSB REG(0x00, 0xb3) /* write */
134#define REG_VS_PIX_STRT_2_LSB REG(0x00, 0xb4) /* write */ 146#define REG_VS_PIX_STRT_2_LSB REG(0x00, 0xb4) /* write */
147#define REG_VS_LINE_END_2_MSB REG(0x00, 0xb5) /* write */
148#define REG_VS_LINE_END_2_LSB REG(0x00, 0xb6) /* write */
135#define REG_VS_PIX_END_2_MSB REG(0x00, 0xb7) /* write */ 149#define REG_VS_PIX_END_2_MSB REG(0x00, 0xb7) /* write */
136#define REG_VS_PIX_END_2_LSB REG(0x00, 0xb8) /* write */ 150#define REG_VS_PIX_END_2_LSB REG(0x00, 0xb8) /* write */
137#define REG_HS_PIX_START_MSB REG(0x00, 0xb9) /* write */ 151#define REG_HS_PIX_START_MSB REG(0x00, 0xb9) /* write */
@@ -142,21 +156,29 @@ struct tda998x_priv {
142#define REG_VWIN_START_1_LSB REG(0x00, 0xbe) /* write */ 156#define REG_VWIN_START_1_LSB REG(0x00, 0xbe) /* write */
143#define REG_VWIN_END_1_MSB REG(0x00, 0xbf) /* write */ 157#define REG_VWIN_END_1_MSB REG(0x00, 0xbf) /* write */
144#define REG_VWIN_END_1_LSB REG(0x00, 0xc0) /* write */ 158#define REG_VWIN_END_1_LSB REG(0x00, 0xc0) /* write */
159#define REG_VWIN_START_2_MSB REG(0x00, 0xc1) /* write */
160#define REG_VWIN_START_2_LSB REG(0x00, 0xc2) /* write */
161#define REG_VWIN_END_2_MSB REG(0x00, 0xc3) /* write */
162#define REG_VWIN_END_2_LSB REG(0x00, 0xc4) /* write */
145#define REG_DE_START_MSB REG(0x00, 0xc5) /* write */ 163#define REG_DE_START_MSB REG(0x00, 0xc5) /* write */
146#define REG_DE_START_LSB REG(0x00, 0xc6) /* write */ 164#define REG_DE_START_LSB REG(0x00, 0xc6) /* write */
147#define REG_DE_STOP_MSB REG(0x00, 0xc7) /* write */ 165#define REG_DE_STOP_MSB REG(0x00, 0xc7) /* write */
148#define REG_DE_STOP_LSB REG(0x00, 0xc8) /* write */ 166#define REG_DE_STOP_LSB REG(0x00, 0xc8) /* write */
149#define REG_TBG_CNTRL_0 REG(0x00, 0xca) /* write */ 167#define REG_TBG_CNTRL_0 REG(0x00, 0xca) /* write */
168# define TBG_CNTRL_0_TOP_TGL (1 << 0)
169# define TBG_CNTRL_0_TOP_SEL (1 << 1)
170# define TBG_CNTRL_0_DE_EXT (1 << 2)
171# define TBG_CNTRL_0_TOP_EXT (1 << 3)
150# define TBG_CNTRL_0_FRAME_DIS (1 << 5) 172# define TBG_CNTRL_0_FRAME_DIS (1 << 5)
151# define TBG_CNTRL_0_SYNC_MTHD (1 << 6) 173# define TBG_CNTRL_0_SYNC_MTHD (1 << 6)
152# define TBG_CNTRL_0_SYNC_ONCE (1 << 7) 174# define TBG_CNTRL_0_SYNC_ONCE (1 << 7)
153#define REG_TBG_CNTRL_1 REG(0x00, 0xcb) /* write */ 175#define REG_TBG_CNTRL_1 REG(0x00, 0xcb) /* write */
154# define TBG_CNTRL_1_VH_TGL_0 (1 << 0) 176# define TBG_CNTRL_1_H_TGL (1 << 0)
155# define TBG_CNTRL_1_VH_TGL_1 (1 << 1) 177# define TBG_CNTRL_1_V_TGL (1 << 1)
156# define TBG_CNTRL_1_VH_TGL_2 (1 << 2) 178# define TBG_CNTRL_1_TGL_EN (1 << 2)
157# define TBG_CNTRL_1_VHX_EXT_DE (1 << 3) 179# define TBG_CNTRL_1_X_EXT (1 << 3)
158# define TBG_CNTRL_1_VHX_EXT_HS (1 << 4) 180# define TBG_CNTRL_1_H_EXT (1 << 4)
159# define TBG_CNTRL_1_VHX_EXT_VS (1 << 5) 181# define TBG_CNTRL_1_V_EXT (1 << 5)
160# define TBG_CNTRL_1_DWIN_DIS (1 << 6) 182# define TBG_CNTRL_1_DWIN_DIS (1 << 6)
161#define REG_ENABLE_SPACE REG(0x00, 0xd6) /* write */ 183#define REG_ENABLE_SPACE REG(0x00, 0xd6) /* write */
162#define REG_HVF_CNTRL_0 REG(0x00, 0xe4) /* write */ 184#define REG_HVF_CNTRL_0 REG(0x00, 0xe4) /* write */
@@ -171,6 +193,12 @@ struct tda998x_priv {
171# define HVF_CNTRL_1_PAD(x) (((x) & 3) << 4) 193# define HVF_CNTRL_1_PAD(x) (((x) & 3) << 4)
172# define HVF_CNTRL_1_SEMI_PLANAR (1 << 6) 194# define HVF_CNTRL_1_SEMI_PLANAR (1 << 6)
173#define REG_RPT_CNTRL REG(0x00, 0xf0) /* write */ 195#define REG_RPT_CNTRL REG(0x00, 0xf0) /* write */
196#define REG_I2S_FORMAT REG(0x00, 0xfc) /* read/write */
197# define I2S_FORMAT(x) (((x) & 3) << 0)
198#define REG_AIP_CLKSEL REG(0x00, 0xfd) /* write */
199# define AIP_CLKSEL_FS(x) (((x) & 3) << 0)
200# define AIP_CLKSEL_CLK_POL(x) (((x) & 1) << 2)
201# define AIP_CLKSEL_AIP(x) (((x) & 7) << 3)
174 202
175 203
176/* Page 02h: PLL settings */ 204/* Page 02h: PLL settings */
@@ -194,6 +222,12 @@ struct tda998x_priv {
194#define REG_PLL_SCGR1 REG(0x02, 0x09) /* read/write */ 222#define REG_PLL_SCGR1 REG(0x02, 0x09) /* read/write */
195#define REG_PLL_SCGR2 REG(0x02, 0x0a) /* read/write */ 223#define REG_PLL_SCGR2 REG(0x02, 0x0a) /* read/write */
196#define REG_AUDIO_DIV REG(0x02, 0x0e) /* read/write */ 224#define REG_AUDIO_DIV REG(0x02, 0x0e) /* read/write */
225# define AUDIO_DIV_SERCLK_1 0
226# define AUDIO_DIV_SERCLK_2 1
227# define AUDIO_DIV_SERCLK_4 2
228# define AUDIO_DIV_SERCLK_8 3
229# define AUDIO_DIV_SERCLK_16 4
230# define AUDIO_DIV_SERCLK_32 5
197#define REG_SEL_CLK REG(0x02, 0x11) /* read/write */ 231#define REG_SEL_CLK REG(0x02, 0x11) /* read/write */
198# define SEL_CLK_SEL_CLK1 (1 << 0) 232# define SEL_CLK_SEL_CLK1 (1 << 0)
199# define SEL_CLK_SEL_VRF_CLK(x) (((x) & 3) << 1) 233# define SEL_CLK_SEL_VRF_CLK(x) (((x) & 3) << 1)
@@ -212,6 +246,11 @@ struct tda998x_priv {
212 246
213 247
214/* Page 10h: information frames and packets */ 248/* Page 10h: information frames and packets */
249#define REG_IF1_HB0 REG(0x10, 0x20) /* read/write */
250#define REG_IF2_HB0 REG(0x10, 0x40) /* read/write */
251#define REG_IF3_HB0 REG(0x10, 0x60) /* read/write */
252#define REG_IF4_HB0 REG(0x10, 0x80) /* read/write */
253#define REG_IF5_HB0 REG(0x10, 0xa0) /* read/write */
215 254
216 255
217/* Page 11h: audio settings and content info packets */ 256/* Page 11h: audio settings and content info packets */
@@ -221,14 +260,39 @@ struct tda998x_priv {
221# define AIP_CNTRL_0_LAYOUT (1 << 2) 260# define AIP_CNTRL_0_LAYOUT (1 << 2)
222# define AIP_CNTRL_0_ACR_MAN (1 << 5) 261# define AIP_CNTRL_0_ACR_MAN (1 << 5)
223# define AIP_CNTRL_0_RST_CTS (1 << 6) 262# define AIP_CNTRL_0_RST_CTS (1 << 6)
263#define REG_CA_I2S REG(0x11, 0x01) /* read/write */
264# define CA_I2S_CA_I2S(x) (((x) & 31) << 0)
265# define CA_I2S_HBR_CHSTAT (1 << 6)
266#define REG_LATENCY_RD REG(0x11, 0x04) /* read/write */
267#define REG_ACR_CTS_0 REG(0x11, 0x05) /* read/write */
268#define REG_ACR_CTS_1 REG(0x11, 0x06) /* read/write */
269#define REG_ACR_CTS_2 REG(0x11, 0x07) /* read/write */
270#define REG_ACR_N_0 REG(0x11, 0x08) /* read/write */
271#define REG_ACR_N_1 REG(0x11, 0x09) /* read/write */
272#define REG_ACR_N_2 REG(0x11, 0x0a) /* read/write */
273#define REG_CTS_N REG(0x11, 0x0c) /* read/write */
274# define CTS_N_K(x) (((x) & 7) << 0)
275# define CTS_N_M(x) (((x) & 3) << 4)
224#define REG_ENC_CNTRL REG(0x11, 0x0d) /* read/write */ 276#define REG_ENC_CNTRL REG(0x11, 0x0d) /* read/write */
225# define ENC_CNTRL_RST_ENC (1 << 0) 277# define ENC_CNTRL_RST_ENC (1 << 0)
226# define ENC_CNTRL_RST_SEL (1 << 1) 278# define ENC_CNTRL_RST_SEL (1 << 1)
227# define ENC_CNTRL_CTL_CODE(x) (((x) & 3) << 2) 279# define ENC_CNTRL_CTL_CODE(x) (((x) & 3) << 2)
280#define REG_DIP_FLAGS REG(0x11, 0x0e) /* read/write */
281# define DIP_FLAGS_ACR (1 << 0)
282# define DIP_FLAGS_GC (1 << 1)
283#define REG_DIP_IF_FLAGS REG(0x11, 0x0f) /* read/write */
284# define DIP_IF_FLAGS_IF1 (1 << 1)
285# define DIP_IF_FLAGS_IF2 (1 << 2)
286# define DIP_IF_FLAGS_IF3 (1 << 3)
287# define DIP_IF_FLAGS_IF4 (1 << 4)
288# define DIP_IF_FLAGS_IF5 (1 << 5)
289#define REG_CH_STAT_B(x) REG(0x11, 0x14 + (x)) /* read/write */
228 290
229 291
230/* Page 12h: HDCP and OTP */ 292/* Page 12h: HDCP and OTP */
231#define REG_TX3 REG(0x12, 0x9a) /* read/write */ 293#define REG_TX3 REG(0x12, 0x9a) /* read/write */
294#define REG_TX4 REG(0x12, 0x9b) /* read/write */
295# define TX4_PD_RAM (1 << 1)
232#define REG_TX33 REG(0x12, 0xb8) /* read/write */ 296#define REG_TX33 REG(0x12, 0xb8) /* read/write */
233# define TX33_HDMI (1 << 1) 297# define TX33_HDMI (1 << 1)
234 298
@@ -338,6 +402,23 @@ fail:
338 return ret; 402 return ret;
339} 403}
340 404
405static void
406reg_write_range(struct drm_encoder *encoder, uint16_t reg, uint8_t *p, int cnt)
407{
408 struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
409 uint8_t buf[cnt+1];
410 int ret;
411
412 buf[0] = REG2ADDR(reg);
413 memcpy(&buf[1], p, cnt);
414
415 set_page(encoder, reg);
416
417 ret = i2c_master_send(client, buf, cnt + 1);
418 if (ret < 0)
419 dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
420}
421
341static uint8_t 422static uint8_t
342reg_read(struct drm_encoder *encoder, uint16_t reg) 423reg_read(struct drm_encoder *encoder, uint16_t reg)
343{ 424{
@@ -406,13 +487,172 @@ tda998x_reset(struct drm_encoder *encoder)
406 reg_write(encoder, REG_SERIALIZER, 0x00); 487 reg_write(encoder, REG_SERIALIZER, 0x00);
407 reg_write(encoder, REG_BUFFER_OUT, 0x00); 488 reg_write(encoder, REG_BUFFER_OUT, 0x00);
408 reg_write(encoder, REG_PLL_SCG1, 0x00); 489 reg_write(encoder, REG_PLL_SCG1, 0x00);
409 reg_write(encoder, REG_AUDIO_DIV, 0x03); 490 reg_write(encoder, REG_AUDIO_DIV, AUDIO_DIV_SERCLK_8);
410 reg_write(encoder, REG_SEL_CLK, SEL_CLK_SEL_CLK1 | SEL_CLK_ENA_SC_CLK); 491 reg_write(encoder, REG_SEL_CLK, SEL_CLK_SEL_CLK1 | SEL_CLK_ENA_SC_CLK);
411 reg_write(encoder, REG_PLL_SCGN1, 0xfa); 492 reg_write(encoder, REG_PLL_SCGN1, 0xfa);
412 reg_write(encoder, REG_PLL_SCGN2, 0x00); 493 reg_write(encoder, REG_PLL_SCGN2, 0x00);
413 reg_write(encoder, REG_PLL_SCGR1, 0x5b); 494 reg_write(encoder, REG_PLL_SCGR1, 0x5b);
414 reg_write(encoder, REG_PLL_SCGR2, 0x00); 495 reg_write(encoder, REG_PLL_SCGR2, 0x00);
415 reg_write(encoder, REG_PLL_SCG2, 0x10); 496 reg_write(encoder, REG_PLL_SCG2, 0x10);
497
498 /* Write the default value MUX register */
499 reg_write(encoder, REG_MUX_VP_VIP_OUT, 0x24);
500}
501
502static uint8_t tda998x_cksum(uint8_t *buf, size_t bytes)
503{
504 uint8_t sum = 0;
505
506 while (bytes--)
507 sum += *buf++;
508 return (255 - sum) + 1;
509}
510
511#define HB(x) (x)
512#define PB(x) (HB(2) + 1 + (x))
513
514static void
515tda998x_write_if(struct drm_encoder *encoder, uint8_t bit, uint16_t addr,
516 uint8_t *buf, size_t size)
517{
518 buf[PB(0)] = tda998x_cksum(buf, size);
519
520 reg_clear(encoder, REG_DIP_IF_FLAGS, bit);
521 reg_write_range(encoder, addr, buf, size);
522 reg_set(encoder, REG_DIP_IF_FLAGS, bit);
523}
524
525static void
526tda998x_write_aif(struct drm_encoder *encoder, struct tda998x_encoder_params *p)
527{
528 uint8_t buf[PB(5) + 1];
529
530 buf[HB(0)] = 0x84;
531 buf[HB(1)] = 0x01;
532 buf[HB(2)] = 10;
533 buf[PB(0)] = 0;
534 buf[PB(1)] = p->audio_frame[1] & 0x07; /* CC */
535 buf[PB(2)] = p->audio_frame[2] & 0x1c; /* SF */
536 buf[PB(4)] = p->audio_frame[4];
537 buf[PB(5)] = p->audio_frame[5] & 0xf8; /* DM_INH + LSV */
538
539 tda998x_write_if(encoder, DIP_IF_FLAGS_IF4, REG_IF4_HB0, buf,
540 sizeof(buf));
541}
542
543static void
544tda998x_write_avi(struct drm_encoder *encoder, struct drm_display_mode *mode)
545{
546 uint8_t buf[PB(13) + 1];
547
548 memset(buf, 0, sizeof(buf));
549 buf[HB(0)] = 0x82;
550 buf[HB(1)] = 0x02;
551 buf[HB(2)] = 13;
552 buf[PB(4)] = drm_match_cea_mode(mode);
553
554 tda998x_write_if(encoder, DIP_IF_FLAGS_IF2, REG_IF2_HB0, buf,
555 sizeof(buf));
556}
557
558static void tda998x_audio_mute(struct drm_encoder *encoder, bool on)
559{
560 if (on) {
561 reg_set(encoder, REG_SOFTRESET, SOFTRESET_AUDIO);
562 reg_clear(encoder, REG_SOFTRESET, SOFTRESET_AUDIO);
563 reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
564 } else {
565 reg_clear(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
566 }
567}
568
569static void
570tda998x_configure_audio(struct drm_encoder *encoder,
571 struct drm_display_mode *mode, struct tda998x_encoder_params *p)
572{
573 uint8_t buf[6], clksel_aip, clksel_fs, ca_i2s, cts_n, adiv;
574 uint32_t n;
575
576 /* Enable audio ports */
577 reg_write(encoder, REG_ENA_AP, p->audio_cfg);
578 reg_write(encoder, REG_ENA_ACLK, p->audio_clk_cfg);
579
580 /* Set audio input source */
581 switch (p->audio_format) {
582 case AFMT_SPDIF:
583 reg_write(encoder, REG_MUX_AP, 0x40);
584 clksel_aip = AIP_CLKSEL_AIP(0);
585 /* FS64SPDIF */
586 clksel_fs = AIP_CLKSEL_FS(2);
587 cts_n = CTS_N_M(3) | CTS_N_K(3);
588 ca_i2s = 0;
589 break;
590
591 case AFMT_I2S:
592 reg_write(encoder, REG_MUX_AP, 0x64);
593 clksel_aip = AIP_CLKSEL_AIP(1);
594 /* ACLK */
595 clksel_fs = AIP_CLKSEL_FS(0);
596 cts_n = CTS_N_M(3) | CTS_N_K(3);
597 ca_i2s = CA_I2S_CA_I2S(0);
598 break;
599 }
600
601 reg_write(encoder, REG_AIP_CLKSEL, clksel_aip);
602 reg_clear(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_LAYOUT);
603
604 /* Enable automatic CTS generation */
605 reg_clear(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_ACR_MAN);
606 reg_write(encoder, REG_CTS_N, cts_n);
607
608 /*
609 * Audio input somehow depends on HDMI line rate which is
610 * related to pixclk. Testing showed that modes with pixclk
611 * >100MHz need a larger divider while <40MHz need the default.
612 * There is no detailed info in the datasheet, so we just
613 * assume 100MHz requires larger divider.
614 */
615 if (mode->clock > 100000)
616 adiv = AUDIO_DIV_SERCLK_16;
617 else
618 adiv = AUDIO_DIV_SERCLK_8;
619 reg_write(encoder, REG_AUDIO_DIV, adiv);
620
621 /*
622 * This is the approximate value of N, which happens to be
623 * the recommended values for non-coherent clocks.
624 */
625 n = 128 * p->audio_sample_rate / 1000;
626
627 /* Write the CTS and N values */
628 buf[0] = 0x44;
629 buf[1] = 0x42;
630 buf[2] = 0x01;
631 buf[3] = n;
632 buf[4] = n >> 8;
633 buf[5] = n >> 16;
634 reg_write_range(encoder, REG_ACR_CTS_0, buf, 6);
635
636 /* Set CTS clock reference */
637 reg_write(encoder, REG_AIP_CLKSEL, clksel_aip | clksel_fs);
638
639 /* Reset CTS generator */
640 reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_CTS);
641 reg_clear(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_CTS);
642
643 /* Write the channel status */
644 buf[0] = 0x04;
645 buf[1] = 0x00;
646 buf[2] = 0x00;
647 buf[3] = 0xf1;
648 reg_write_range(encoder, REG_CH_STAT_B(0), buf, 4);
649
650 tda998x_audio_mute(encoder, true);
651 mdelay(20);
652 tda998x_audio_mute(encoder, false);
653
654 /* Write the audio information packet */
655 tda998x_write_aif(encoder, p);
416} 656}
417 657
418/* DRM encoder functions */ 658/* DRM encoder functions */
@@ -420,6 +660,23 @@ tda998x_reset(struct drm_encoder *encoder)
420static void 660static void
421tda998x_encoder_set_config(struct drm_encoder *encoder, void *params) 661tda998x_encoder_set_config(struct drm_encoder *encoder, void *params)
422{ 662{
663 struct tda998x_priv *priv = to_tda998x_priv(encoder);
664 struct tda998x_encoder_params *p = params;
665
666 priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(p->swap_a) |
667 (p->mirr_a ? VIP_CNTRL_0_MIRR_A : 0) |
668 VIP_CNTRL_0_SWAP_B(p->swap_b) |
669 (p->mirr_b ? VIP_CNTRL_0_MIRR_B : 0);
670 priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(p->swap_c) |
671 (p->mirr_c ? VIP_CNTRL_1_MIRR_C : 0) |
672 VIP_CNTRL_1_SWAP_D(p->swap_d) |
673 (p->mirr_d ? VIP_CNTRL_1_MIRR_D : 0);
674 priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(p->swap_e) |
675 (p->mirr_e ? VIP_CNTRL_2_MIRR_E : 0) |
676 VIP_CNTRL_2_SWAP_F(p->swap_f) |
677 (p->mirr_f ? VIP_CNTRL_2_MIRR_F : 0);
678
679 priv->params = *p;
423} 680}
424 681
425static void 682static void
@@ -436,18 +693,14 @@ tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
436 693
437 switch (mode) { 694 switch (mode) {
438 case DRM_MODE_DPMS_ON: 695 case DRM_MODE_DPMS_ON:
439 /* enable audio and video ports */ 696 /* enable video ports, audio will be enabled later */
440 reg_write(encoder, REG_ENA_AP, 0xff);
441 reg_write(encoder, REG_ENA_VP_0, 0xff); 697 reg_write(encoder, REG_ENA_VP_0, 0xff);
442 reg_write(encoder, REG_ENA_VP_1, 0xff); 698 reg_write(encoder, REG_ENA_VP_1, 0xff);
443 reg_write(encoder, REG_ENA_VP_2, 0xff); 699 reg_write(encoder, REG_ENA_VP_2, 0xff);
444 /* set muxing after enabling ports: */ 700 /* set muxing after enabling ports: */
445 reg_write(encoder, REG_VIP_CNTRL_0, 701 reg_write(encoder, REG_VIP_CNTRL_0, priv->vip_cntrl_0);
446 VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3)); 702 reg_write(encoder, REG_VIP_CNTRL_1, priv->vip_cntrl_1);
447 reg_write(encoder, REG_VIP_CNTRL_1, 703 reg_write(encoder, REG_VIP_CNTRL_2, priv->vip_cntrl_2);
448 VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1));
449 reg_write(encoder, REG_VIP_CNTRL_2,
450 VIP_CNTRL_2_SWAP_E(4) | VIP_CNTRL_2_SWAP_F(5));
451 break; 704 break;
452 case DRM_MODE_DPMS_OFF: 705 case DRM_MODE_DPMS_OFF:
453 /* disable audio and video ports */ 706 /* disable audio and video ports */
@@ -494,43 +747,78 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
494 struct drm_display_mode *adjusted_mode) 747 struct drm_display_mode *adjusted_mode)
495{ 748{
496 struct tda998x_priv *priv = to_tda998x_priv(encoder); 749 struct tda998x_priv *priv = to_tda998x_priv(encoder);
497 uint16_t hs_start, hs_end, line_start, line_end; 750 uint16_t ref_pix, ref_line, n_pix, n_line;
498 uint16_t vwin_start, vwin_end, de_start, de_end; 751 uint16_t hs_pix_s, hs_pix_e;
499 uint16_t ref_pix, ref_line, pix_start2; 752 uint16_t vs1_pix_s, vs1_pix_e, vs1_line_s, vs1_line_e;
753 uint16_t vs2_pix_s, vs2_pix_e, vs2_line_s, vs2_line_e;
754 uint16_t vwin1_line_s, vwin1_line_e;
755 uint16_t vwin2_line_s, vwin2_line_e;
756 uint16_t de_pix_s, de_pix_e;
500 uint8_t reg, div, rep; 757 uint8_t reg, div, rep;
501 758
502 hs_start = mode->hsync_start - mode->hdisplay; 759 /*
503 hs_end = mode->hsync_end - mode->hdisplay; 760 * Internally TDA998x is using ITU-R BT.656 style sync but
504 line_start = 1; 761 * we get VESA style sync. TDA998x is using a reference pixel
505 line_end = 1 + mode->vsync_end - mode->vsync_start; 762 * relative to ITU to sync to the input frame and for output
506 vwin_start = mode->vtotal - mode->vsync_start; 763 * sync generation. Currently, we are using reference detection
507 vwin_end = vwin_start + mode->vdisplay; 764 * from HS/VS, i.e. REFPIX/REFLINE denote frame start sync point
508 de_start = mode->htotal - mode->hdisplay; 765 * which is position of rising VS with coincident rising HS.
509 de_end = mode->htotal; 766 *
510 767 * Now there is some issues to take care of:
511 pix_start2 = 0; 768 * - HDMI data islands require sync-before-active
512 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 769 * - TDA998x register values must be > 0 to be enabled
513 pix_start2 = (mode->htotal / 2) + hs_start; 770 * - REFLINE needs an additional offset of +1
514 771 * - REFPIX needs an addtional offset of +1 for UYUV and +3 for RGB
515 /* TODO how is this value calculated? It is 2 for all common 772 *
516 * formats in the tables in out of tree nxp driver (assuming 773 * So we add +1 to all horizontal and vertical register values,
517 * I've properly deciphered their byzantine table system) 774 * plus an additional +3 for REFPIX as we are using RGB input only.
518 */ 775 */
519 ref_line = 2; 776 n_pix = mode->htotal;
520 777 n_line = mode->vtotal;
521 /* this might changes for other color formats from the CRTC: */ 778
522 ref_pix = 3 + hs_start; 779 hs_pix_e = mode->hsync_end - mode->hdisplay;
780 hs_pix_s = mode->hsync_start - mode->hdisplay;
781 de_pix_e = mode->htotal;
782 de_pix_s = mode->htotal - mode->hdisplay;
783 ref_pix = 3 + hs_pix_s;
784
785 /*
786 * Attached LCD controllers may generate broken sync. Allow
787 * those to adjust the position of the rising VS edge by adding
788 * HSKEW to ref_pix.
789 */
790 if (adjusted_mode->flags & DRM_MODE_FLAG_HSKEW)
791 ref_pix += adjusted_mode->hskew;
792
793 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) == 0) {
794 ref_line = 1 + mode->vsync_start - mode->vdisplay;
795 vwin1_line_s = mode->vtotal - mode->vdisplay - 1;
796 vwin1_line_e = vwin1_line_s + mode->vdisplay;
797 vs1_pix_s = vs1_pix_e = hs_pix_s;
798 vs1_line_s = mode->vsync_start - mode->vdisplay;
799 vs1_line_e = vs1_line_s +
800 mode->vsync_end - mode->vsync_start;
801 vwin2_line_s = vwin2_line_e = 0;
802 vs2_pix_s = vs2_pix_e = 0;
803 vs2_line_s = vs2_line_e = 0;
804 } else {
805 ref_line = 1 + (mode->vsync_start - mode->vdisplay)/2;
806 vwin1_line_s = (mode->vtotal - mode->vdisplay)/2;
807 vwin1_line_e = vwin1_line_s + mode->vdisplay/2;
808 vs1_pix_s = vs1_pix_e = hs_pix_s;
809 vs1_line_s = (mode->vsync_start - mode->vdisplay)/2;
810 vs1_line_e = vs1_line_s +
811 (mode->vsync_end - mode->vsync_start)/2;
812 vwin2_line_s = vwin1_line_s + mode->vtotal/2;
813 vwin2_line_e = vwin2_line_s + mode->vdisplay/2;
814 vs2_pix_s = vs2_pix_e = hs_pix_s + mode->htotal/2;
815 vs2_line_s = vs1_line_s + mode->vtotal/2 ;
816 vs2_line_e = vs2_line_s +
817 (mode->vsync_end - mode->vsync_start)/2;
818 }
523 819
524 div = 148500 / mode->clock; 820 div = 148500 / mode->clock;
525 821
526 DBG("clock=%d, div=%u", mode->clock, div);
527 DBG("hs_start=%u, hs_end=%u, line_start=%u, line_end=%u",
528 hs_start, hs_end, line_start, line_end);
529 DBG("vwin_start=%u, vwin_end=%u, de_start=%u, de_end=%u",
530 vwin_start, vwin_end, de_start, de_end);
531 DBG("ref_line=%u, ref_pix=%u, pix_start2=%u",
532 ref_line, ref_pix, pix_start2);
533
534 /* mute the audio FIFO: */ 822 /* mute the audio FIFO: */
535 reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO); 823 reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
536 824
@@ -561,9 +849,6 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
561 reg_write(encoder, REG_PLL_SERIAL_2, PLL_SERIAL_2_SRL_NOSC(div) | 849 reg_write(encoder, REG_PLL_SERIAL_2, PLL_SERIAL_2_SRL_NOSC(div) |
562 PLL_SERIAL_2_SRL_PR(rep)); 850 PLL_SERIAL_2_SRL_PR(rep));
563 851
564 reg_write16(encoder, REG_VS_PIX_STRT_2_MSB, pix_start2);
565 reg_write16(encoder, REG_VS_PIX_END_2_MSB, pix_start2);
566
567 /* set color matrix bypass flag: */ 852 /* set color matrix bypass flag: */
568 reg_set(encoder, REG_MAT_CONTRL, MAT_CONTRL_MAT_BP); 853 reg_set(encoder, REG_MAT_CONTRL, MAT_CONTRL_MAT_BP);
569 854
@@ -572,47 +857,75 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
572 857
573 reg_clear(encoder, REG_TBG_CNTRL_0, TBG_CNTRL_0_SYNC_MTHD); 858 reg_clear(encoder, REG_TBG_CNTRL_0, TBG_CNTRL_0_SYNC_MTHD);
574 859
860 /*
861 * Sync on rising HSYNC/VSYNC
862 */
575 reg_write(encoder, REG_VIP_CNTRL_3, 0); 863 reg_write(encoder, REG_VIP_CNTRL_3, 0);
576 reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_SYNC_HS); 864 reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_SYNC_HS);
865
866 /*
867 * TDA19988 requires high-active sync at input stage,
868 * so invert low-active sync provided by master encoder here
869 */
870 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
871 reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_H_TGL);
577 if (mode->flags & DRM_MODE_FLAG_NVSYNC) 872 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
578 reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_V_TGL); 873 reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_V_TGL);
579 874
875 /*
876 * Always generate sync polarity relative to input sync and
877 * revert input stage toggled sync at output stage
878 */
879 reg = TBG_CNTRL_1_TGL_EN;
580 if (mode->flags & DRM_MODE_FLAG_NHSYNC) 880 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
581 reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_H_TGL); 881 reg |= TBG_CNTRL_1_H_TGL;
882 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
883 reg |= TBG_CNTRL_1_V_TGL;
884 reg_write(encoder, REG_TBG_CNTRL_1, reg);
582 885
583 reg_write(encoder, REG_VIDFORMAT, 0x00); 886 reg_write(encoder, REG_VIDFORMAT, 0x00);
584 reg_write16(encoder, REG_NPIX_MSB, mode->hdisplay - 1); 887 reg_write16(encoder, REG_REFPIX_MSB, ref_pix);
585 reg_write16(encoder, REG_NLINE_MSB, mode->vdisplay - 1); 888 reg_write16(encoder, REG_REFLINE_MSB, ref_line);
586 reg_write16(encoder, REG_VS_LINE_STRT_1_MSB, line_start); 889 reg_write16(encoder, REG_NPIX_MSB, n_pix);
587 reg_write16(encoder, REG_VS_LINE_END_1_MSB, line_end); 890 reg_write16(encoder, REG_NLINE_MSB, n_line);
588 reg_write16(encoder, REG_VS_PIX_STRT_1_MSB, hs_start); 891 reg_write16(encoder, REG_VS_LINE_STRT_1_MSB, vs1_line_s);
589 reg_write16(encoder, REG_VS_PIX_END_1_MSB, hs_start); 892 reg_write16(encoder, REG_VS_PIX_STRT_1_MSB, vs1_pix_s);
590 reg_write16(encoder, REG_HS_PIX_START_MSB, hs_start); 893 reg_write16(encoder, REG_VS_LINE_END_1_MSB, vs1_line_e);
591 reg_write16(encoder, REG_HS_PIX_STOP_MSB, hs_end); 894 reg_write16(encoder, REG_VS_PIX_END_1_MSB, vs1_pix_e);
592 reg_write16(encoder, REG_VWIN_START_1_MSB, vwin_start); 895 reg_write16(encoder, REG_VS_LINE_STRT_2_MSB, vs2_line_s);
593 reg_write16(encoder, REG_VWIN_END_1_MSB, vwin_end); 896 reg_write16(encoder, REG_VS_PIX_STRT_2_MSB, vs2_pix_s);
594 reg_write16(encoder, REG_DE_START_MSB, de_start); 897 reg_write16(encoder, REG_VS_LINE_END_2_MSB, vs2_line_e);
595 reg_write16(encoder, REG_DE_STOP_MSB, de_end); 898 reg_write16(encoder, REG_VS_PIX_END_2_MSB, vs2_pix_e);
899 reg_write16(encoder, REG_HS_PIX_START_MSB, hs_pix_s);
900 reg_write16(encoder, REG_HS_PIX_STOP_MSB, hs_pix_e);
901 reg_write16(encoder, REG_VWIN_START_1_MSB, vwin1_line_s);
902 reg_write16(encoder, REG_VWIN_END_1_MSB, vwin1_line_e);
903 reg_write16(encoder, REG_VWIN_START_2_MSB, vwin2_line_s);
904 reg_write16(encoder, REG_VWIN_END_2_MSB, vwin2_line_e);
905 reg_write16(encoder, REG_DE_START_MSB, de_pix_s);
906 reg_write16(encoder, REG_DE_STOP_MSB, de_pix_e);
596 907
597 if (priv->rev == TDA19988) { 908 if (priv->rev == TDA19988) {
598 /* let incoming pixels fill the active space (if any) */ 909 /* let incoming pixels fill the active space (if any) */
599 reg_write(encoder, REG_ENABLE_SPACE, 0x01); 910 reg_write(encoder, REG_ENABLE_SPACE, 0x01);
600 } 911 }
601 912
602 reg_write16(encoder, REG_REFPIX_MSB, ref_pix);
603 reg_write16(encoder, REG_REFLINE_MSB, ref_line);
604
605 reg = TBG_CNTRL_1_VHX_EXT_DE |
606 TBG_CNTRL_1_VHX_EXT_HS |
607 TBG_CNTRL_1_VHX_EXT_VS |
608 TBG_CNTRL_1_DWIN_DIS | /* HDCP off */
609 TBG_CNTRL_1_VH_TGL_2;
610 if (mode->flags & (DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC))
611 reg |= TBG_CNTRL_1_VH_TGL_0;
612 reg_set(encoder, REG_TBG_CNTRL_1, reg);
613
614 /* must be last register set: */ 913 /* must be last register set: */
615 reg_clear(encoder, REG_TBG_CNTRL_0, TBG_CNTRL_0_SYNC_ONCE); 914 reg_clear(encoder, REG_TBG_CNTRL_0, TBG_CNTRL_0_SYNC_ONCE);
915
916 /* Only setup the info frames if the sink is HDMI */
917 if (priv->is_hdmi_sink) {
918 /* We need to turn HDMI HDCP stuff on to get audio through */
919 reg_clear(encoder, REG_TBG_CNTRL_1, TBG_CNTRL_1_DWIN_DIS);
920 reg_write(encoder, REG_ENC_CNTRL, ENC_CNTRL_CTL_CODE(1));
921 reg_set(encoder, REG_TX33, TX33_HDMI);
922
923 tda998x_write_avi(encoder, adjusted_mode);
924
925 if (priv->params.audio_cfg)
926 tda998x_configure_audio(encoder, adjusted_mode,
927 &priv->params);
928 }
616} 929}
617 930
618static enum drm_connector_status 931static enum drm_connector_status
@@ -673,6 +986,7 @@ read_edid_block(struct drm_encoder *encoder, uint8_t *buf, int blk)
673static uint8_t * 986static uint8_t *
674do_get_edid(struct drm_encoder *encoder) 987do_get_edid(struct drm_encoder *encoder)
675{ 988{
989 struct tda998x_priv *priv = to_tda998x_priv(encoder);
676 int j = 0, valid_extensions = 0; 990 int j = 0, valid_extensions = 0;
677 uint8_t *block, *new; 991 uint8_t *block, *new;
678 bool print_bad_edid = drm_debug & DRM_UT_KMS; 992 bool print_bad_edid = drm_debug & DRM_UT_KMS;
@@ -680,6 +994,9 @@ do_get_edid(struct drm_encoder *encoder)
680 if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL) 994 if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
681 return NULL; 995 return NULL;
682 996
997 if (priv->rev == TDA19988)
998 reg_clear(encoder, REG_TX4, TX4_PD_RAM);
999
683 /* base block fetch */ 1000 /* base block fetch */
684 if (read_edid_block(encoder, block, 0)) 1001 if (read_edid_block(encoder, block, 0))
685 goto fail; 1002 goto fail;
@@ -689,7 +1006,7 @@ do_get_edid(struct drm_encoder *encoder)
689 1006
690 /* if there's no extensions, we're done */ 1007 /* if there's no extensions, we're done */
691 if (block[0x7e] == 0) 1008 if (block[0x7e] == 0)
692 return block; 1009 goto done;
693 1010
694 new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL); 1011 new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL);
695 if (!new) 1012 if (!new)
@@ -716,9 +1033,15 @@ do_get_edid(struct drm_encoder *encoder)
716 block = new; 1033 block = new;
717 } 1034 }
718 1035
1036done:
1037 if (priv->rev == TDA19988)
1038 reg_set(encoder, REG_TX4, TX4_PD_RAM);
1039
719 return block; 1040 return block;
720 1041
721fail: 1042fail:
1043 if (priv->rev == TDA19988)
1044 reg_set(encoder, REG_TX4, TX4_PD_RAM);
722 dev_warn(encoder->dev->dev, "failed to read EDID\n"); 1045 dev_warn(encoder->dev->dev, "failed to read EDID\n");
723 kfree(block); 1046 kfree(block);
724 return NULL; 1047 return NULL;
@@ -728,12 +1051,14 @@ static int
728tda998x_encoder_get_modes(struct drm_encoder *encoder, 1051tda998x_encoder_get_modes(struct drm_encoder *encoder,
729 struct drm_connector *connector) 1052 struct drm_connector *connector)
730{ 1053{
1054 struct tda998x_priv *priv = to_tda998x_priv(encoder);
731 struct edid *edid = (struct edid *)do_get_edid(encoder); 1055 struct edid *edid = (struct edid *)do_get_edid(encoder);
732 int n = 0; 1056 int n = 0;
733 1057
734 if (edid) { 1058 if (edid) {
735 drm_mode_connector_update_edid_property(connector, edid); 1059 drm_mode_connector_update_edid_property(connector, edid);
736 n = drm_add_edid_modes(connector, edid); 1060 n = drm_add_edid_modes(connector, edid);
1061 priv->is_hdmi_sink = drm_detect_hdmi_monitor(edid);
737 kfree(edid); 1062 kfree(edid);
738 } 1063 }
739 1064
@@ -807,6 +1132,10 @@ tda998x_encoder_init(struct i2c_client *client,
807 if (!priv) 1132 if (!priv)
808 return -ENOMEM; 1133 return -ENOMEM;
809 1134
1135 priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3);
1136 priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1);
1137 priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(4) | VIP_CNTRL_2_SWAP_F(5);
1138
810 priv->current_page = 0; 1139 priv->current_page = 0;
811 priv->cec = i2c_new_dummy(client->adapter, 0x34); 1140 priv->cec = i2c_new_dummy(client->adapter, 0x34);
812 priv->dpms = DRM_MODE_DPMS_OFF; 1141 priv->dpms = DRM_MODE_DPMS_OFF;
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index ada49eda489f..ab1892eb1074 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -113,7 +113,6 @@ static const struct file_operations i810_buffer_fops = {
113 .release = drm_release, 113 .release = drm_release,
114 .unlocked_ioctl = drm_ioctl, 114 .unlocked_ioctl = drm_ioctl,
115 .mmap = i810_mmap_buffers, 115 .mmap = i810_mmap_buffers,
116 .fasync = drm_fasync,
117#ifdef CONFIG_COMPAT 116#ifdef CONFIG_COMPAT
118 .compat_ioctl = drm_compat_ioctl, 117 .compat_ioctl = drm_compat_ioctl,
119#endif 118#endif
@@ -1241,7 +1240,7 @@ int i810_driver_dma_quiescent(struct drm_device *dev)
1241 return 0; 1240 return 0;
1242} 1241}
1243 1242
1244struct drm_ioctl_desc i810_ioctls[] = { 1243const struct drm_ioctl_desc i810_ioctls[] = {
1245 DRM_IOCTL_DEF_DRV(I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), 1244 DRM_IOCTL_DEF_DRV(I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
1246 DRM_IOCTL_DEF_DRV(I810_VERTEX, i810_dma_vertex, DRM_AUTH|DRM_UNLOCKED), 1245 DRM_IOCTL_DEF_DRV(I810_VERTEX, i810_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
1247 DRM_IOCTL_DEF_DRV(I810_CLEAR, i810_clear_bufs, DRM_AUTH|DRM_UNLOCKED), 1246 DRM_IOCTL_DEF_DRV(I810_CLEAR, i810_clear_bufs, DRM_AUTH|DRM_UNLOCKED),
diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
index 2e91fc3580b4..d8180d22cedd 100644
--- a/drivers/gpu/drm/i810/i810_drv.c
+++ b/drivers/gpu/drm/i810/i810_drv.c
@@ -49,7 +49,6 @@ static const struct file_operations i810_driver_fops = {
49 .unlocked_ioctl = drm_ioctl, 49 .unlocked_ioctl = drm_ioctl,
50 .mmap = drm_mmap, 50 .mmap = drm_mmap,
51 .poll = drm_poll, 51 .poll = drm_poll,
52 .fasync = drm_fasync,
53#ifdef CONFIG_COMPAT 52#ifdef CONFIG_COMPAT
54 .compat_ioctl = drm_compat_ioctl, 53 .compat_ioctl = drm_compat_ioctl,
55#endif 54#endif
@@ -58,7 +57,7 @@ static const struct file_operations i810_driver_fops = {
58 57
59static struct drm_driver driver = { 58static struct drm_driver driver = {
60 .driver_features = 59 .driver_features =
61 DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR | 60 DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
62 DRIVER_HAVE_DMA, 61 DRIVER_HAVE_DMA,
63 .dev_priv_size = sizeof(drm_i810_buf_priv_t), 62 .dev_priv_size = sizeof(drm_i810_buf_priv_t),
64 .load = i810_driver_load, 63 .load = i810_driver_load,
diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
index 6e0acad9e0f5..d4d16eddd651 100644
--- a/drivers/gpu/drm/i810/i810_drv.h
+++ b/drivers/gpu/drm/i810/i810_drv.h
@@ -125,7 +125,7 @@ extern void i810_driver_preclose(struct drm_device *dev,
125extern int i810_driver_device_is_agp(struct drm_device *dev); 125extern int i810_driver_device_is_agp(struct drm_device *dev);
126 126
127extern long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg); 127extern long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
128extern struct drm_ioctl_desc i810_ioctls[]; 128extern const struct drm_ioctl_desc i810_ioctls[];
129extern int i810_max_ioctl; 129extern int i810_max_ioctl;
130 130
131#define I810_BASE(reg) ((unsigned long) \ 131#define I810_BASE(reg) ((unsigned long) \
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 09e8ef910ec5..3e4e6073d171 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1296,7 +1296,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
1296 1296
1297 intel_register_dsm_handler(); 1297 intel_register_dsm_handler();
1298 1298
1299 ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops); 1299 ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false);
1300 if (ret) 1300 if (ret)
1301 goto cleanup_vga_client; 1301 goto cleanup_vga_client;
1302 1302
@@ -1658,7 +1658,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1658 if (INTEL_INFO(dev)->num_pipes) { 1658 if (INTEL_INFO(dev)->num_pipes) {
1659 /* Must be done after probing outputs */ 1659 /* Must be done after probing outputs */
1660 intel_opregion_init(dev); 1660 intel_opregion_init(dev);
1661 acpi_video_register_with_quirks(); 1661 acpi_video_register();
1662 } 1662 }
1663 1663
1664 if (IS_GEN5(dev)) 1664 if (IS_GEN5(dev))
@@ -1859,7 +1859,7 @@ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
1859 kfree(file_priv); 1859 kfree(file_priv);
1860} 1860}
1861 1861
1862struct drm_ioctl_desc i915_ioctls[] = { 1862const struct drm_ioctl_desc i915_ioctls[] = {
1863 DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1863 DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1864 DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH), 1864 DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
1865 DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH), 1865 DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index beb295634a49..735dd5625e9e 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1009,7 +1009,6 @@ static const struct file_operations i915_driver_fops = {
1009 .unlocked_ioctl = drm_ioctl, 1009 .unlocked_ioctl = drm_ioctl,
1010 .mmap = drm_gem_mmap, 1010 .mmap = drm_gem_mmap,
1011 .poll = drm_poll, 1011 .poll = drm_poll,
1012 .fasync = drm_fasync,
1013 .read = drm_read, 1012 .read = drm_read,
1014#ifdef CONFIG_COMPAT 1013#ifdef CONFIG_COMPAT
1015 .compat_ioctl = i915_compat_ioctl, 1014 .compat_ioctl = i915_compat_ioctl,
@@ -1022,7 +1021,7 @@ static struct drm_driver driver = {
1022 * deal with them for Intel hardware. 1021 * deal with them for Intel hardware.
1023 */ 1022 */
1024 .driver_features = 1023 .driver_features =
1025 DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/ 1024 DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
1026 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME, 1025 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME,
1027 .load = i915_driver_load, 1026 .load = i915_driver_load,
1028 .unload = i915_driver_unload, 1027 .unload = i915_driver_unload,
@@ -1053,7 +1052,7 @@ static struct drm_driver driver = {
1053 1052
1054 .dumb_create = i915_gem_dumb_create, 1053 .dumb_create = i915_gem_dumb_create,
1055 .dumb_map_offset = i915_gem_mmap_gtt, 1054 .dumb_map_offset = i915_gem_mmap_gtt,
1056 .dumb_destroy = i915_gem_dumb_destroy, 1055 .dumb_destroy = drm_gem_dumb_destroy,
1057 .ioctls = i915_ioctls, 1056 .ioctls = i915_ioctls,
1058 .fops = &i915_driver_fops, 1057 .fops = &i915_driver_fops,
1059 .name = DRIVER_NAME, 1058 .name = DRIVER_NAME,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 5f8a638c5145..f22c81d040c0 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -380,7 +380,8 @@ struct drm_i915_display_funcs {
380 void (*init_clock_gating)(struct drm_device *dev); 380 void (*init_clock_gating)(struct drm_device *dev);
381 int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc, 381 int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
382 struct drm_framebuffer *fb, 382 struct drm_framebuffer *fb,
383 struct drm_i915_gem_object *obj); 383 struct drm_i915_gem_object *obj,
384 uint32_t flags);
384 int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb, 385 int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
385 int x, int y); 386 int x, int y);
386 void (*hpd_irq_setup)(struct drm_device *dev); 387 void (*hpd_irq_setup)(struct drm_device *dev);
@@ -1687,7 +1688,7 @@ struct drm_i915_file_private {
1687#define INTEL_RC6p_ENABLE (1<<1) 1688#define INTEL_RC6p_ENABLE (1<<1)
1688#define INTEL_RC6pp_ENABLE (1<<2) 1689#define INTEL_RC6pp_ENABLE (1<<2)
1689 1690
1690extern struct drm_ioctl_desc i915_ioctls[]; 1691extern const struct drm_ioctl_desc i915_ioctls[];
1691extern int i915_max_ioctl; 1692extern int i915_max_ioctl;
1692extern unsigned int i915_fbpercrtc __always_unused; 1693extern unsigned int i915_fbpercrtc __always_unused;
1693extern int i915_panel_ignore_lid __read_mostly; 1694extern int i915_panel_ignore_lid __read_mostly;
@@ -1867,8 +1868,6 @@ int i915_gem_dumb_create(struct drm_file *file_priv,
1867 struct drm_mode_create_dumb *args); 1868 struct drm_mode_create_dumb *args);
1868int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev, 1869int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
1869 uint32_t handle, uint64_t *offset); 1870 uint32_t handle, uint64_t *offset);
1870int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
1871 uint32_t handle);
1872/** 1871/**
1873 * Returns true if seq1 is later than seq2. 1872 * Returns true if seq1 is later than seq2.
1874 */ 1873 */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 23c42567631e..2d1cb10d846f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -26,6 +26,7 @@
26 */ 26 */
27 27
28#include <drm/drmP.h> 28#include <drm/drmP.h>
29#include <drm/drm_vma_manager.h>
29#include <drm/i915_drm.h> 30#include <drm/i915_drm.h>
30#include "i915_drv.h" 31#include "i915_drv.h"
31#include "i915_trace.h" 32#include "i915_trace.h"
@@ -261,13 +262,6 @@ i915_gem_dumb_create(struct drm_file *file,
261 args->size, &args->handle); 262 args->size, &args->handle);
262} 263}
263 264
264int i915_gem_dumb_destroy(struct drm_file *file,
265 struct drm_device *dev,
266 uint32_t handle)
267{
268 return drm_gem_handle_delete(file, handle);
269}
270
271/** 265/**
272 * Creates a new mm object and returns a handle to it. 266 * Creates a new mm object and returns a handle to it.
273 */ 267 */
@@ -1443,11 +1437,7 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1443 if (!obj->fault_mappable) 1437 if (!obj->fault_mappable)
1444 return; 1438 return;
1445 1439
1446 if (obj->base.dev->dev_mapping) 1440 drm_vma_node_unmap(&obj->base.vma_node, obj->base.dev->dev_mapping);
1447 unmap_mapping_range(obj->base.dev->dev_mapping,
1448 (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
1449 obj->base.size, 1);
1450
1451 obj->fault_mappable = false; 1441 obj->fault_mappable = false;
1452} 1442}
1453 1443
@@ -1503,7 +1493,7 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
1503 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 1493 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1504 int ret; 1494 int ret;
1505 1495
1506 if (obj->base.map_list.map) 1496 if (drm_vma_node_has_offset(&obj->base.vma_node))
1507 return 0; 1497 return 0;
1508 1498
1509 dev_priv->mm.shrinker_no_lock_stealing = true; 1499 dev_priv->mm.shrinker_no_lock_stealing = true;
@@ -1534,9 +1524,6 @@ out:
1534 1524
1535static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj) 1525static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
1536{ 1526{
1537 if (!obj->base.map_list.map)
1538 return;
1539
1540 drm_gem_free_mmap_offset(&obj->base); 1527 drm_gem_free_mmap_offset(&obj->base);
1541} 1528}
1542 1529
@@ -1575,7 +1562,7 @@ i915_gem_mmap_gtt(struct drm_file *file,
1575 if (ret) 1562 if (ret)
1576 goto out; 1563 goto out;
1577 1564
1578 *offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT; 1565 *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
1579 1566
1580out: 1567out:
1581 drm_gem_object_unreference(&obj->base); 1568 drm_gem_object_unreference(&obj->base);
@@ -3178,7 +3165,8 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3178search_free: 3165search_free:
3179 ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node, 3166 ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
3180 size, alignment, 3167 size, alignment,
3181 obj->cache_level, 0, gtt_max); 3168 obj->cache_level, 0, gtt_max,
3169 DRM_MM_SEARCH_DEFAULT);
3182 if (ret) { 3170 if (ret) {
3183 ret = i915_gem_evict_something(dev, vm, size, alignment, 3171 ret = i915_gem_evict_something(dev, vm, size, alignment,
3184 obj->cache_level, 3172 obj->cache_level,
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 938eb341054c..e918b05fcbdd 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -103,17 +103,6 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
103 mutex_unlock(&obj->base.dev->struct_mutex); 103 mutex_unlock(&obj->base.dev->struct_mutex);
104} 104}
105 105
106static void i915_gem_dmabuf_release(struct dma_buf *dma_buf)
107{
108 struct drm_i915_gem_object *obj = dma_buf->priv;
109
110 if (obj->base.export_dma_buf == dma_buf) {
111 /* drop the reference on the export fd holds */
112 obj->base.export_dma_buf = NULL;
113 drm_gem_object_unreference_unlocked(&obj->base);
114 }
115}
116
117static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf) 106static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
118{ 107{
119 struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); 108 struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
@@ -224,7 +213,7 @@ static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size
224static const struct dma_buf_ops i915_dmabuf_ops = { 213static const struct dma_buf_ops i915_dmabuf_ops = {
225 .map_dma_buf = i915_gem_map_dma_buf, 214 .map_dma_buf = i915_gem_map_dma_buf,
226 .unmap_dma_buf = i915_gem_unmap_dma_buf, 215 .unmap_dma_buf = i915_gem_unmap_dma_buf,
227 .release = i915_gem_dmabuf_release, 216 .release = drm_gem_dmabuf_release,
228 .kmap = i915_gem_dmabuf_kmap, 217 .kmap = i915_gem_dmabuf_kmap,
229 .kmap_atomic = i915_gem_dmabuf_kmap_atomic, 218 .kmap_atomic = i915_gem_dmabuf_kmap_atomic,
230 .kunmap = i915_gem_dmabuf_kunmap, 219 .kunmap = i915_gem_dmabuf_kunmap,
@@ -300,12 +289,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
300 goto fail_detach; 289 goto fail_detach;
301 } 290 }
302 291
303 ret = drm_gem_private_object_init(dev, &obj->base, dma_buf->size); 292 drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
304 if (ret) {
305 i915_gem_object_free(obj);
306 goto fail_detach;
307 }
308
309 i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops); 293 i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
310 obj->base.import_attach = attach; 294 obj->base.import_attach = attach;
311 295
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 7f4c510a751b..9969d10b80f5 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -94,31 +94,36 @@ static int i915_setup_compression(struct drm_device *dev, int size)
94{ 94{
95 struct drm_i915_private *dev_priv = dev->dev_private; 95 struct drm_i915_private *dev_priv = dev->dev_private;
96 struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb); 96 struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
97 int ret;
97 98
98 /* Try to over-allocate to reduce reallocations and fragmentation */ 99 compressed_fb = kzalloc(sizeof(*compressed_fb), GFP_KERNEL);
99 compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen,
100 size <<= 1, 4096, 0);
101 if (!compressed_fb)
102 compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen,
103 size >>= 1, 4096, 0);
104 if (compressed_fb)
105 compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
106 if (!compressed_fb) 100 if (!compressed_fb)
107 goto err; 101 goto err_llb;
102
103 /* Try to over-allocate to reduce reallocations and fragmentation */
104 ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb,
105 size <<= 1, 4096, DRM_MM_SEARCH_DEFAULT);
106 if (ret)
107 ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb,
108 size >>= 1, 4096,
109 DRM_MM_SEARCH_DEFAULT);
110 if (ret)
111 goto err_llb;
108 112
109 if (HAS_PCH_SPLIT(dev)) 113 if (HAS_PCH_SPLIT(dev))
110 I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start); 114 I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
111 else if (IS_GM45(dev)) { 115 else if (IS_GM45(dev)) {
112 I915_WRITE(DPFC_CB_BASE, compressed_fb->start); 116 I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
113 } else { 117 } else {
114 compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen, 118 compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
115 4096, 4096, 0);
116 if (compressed_llb)
117 compressed_llb = drm_mm_get_block(compressed_llb,
118 4096, 4096);
119 if (!compressed_llb) 119 if (!compressed_llb)
120 goto err_fb; 120 goto err_fb;
121 121
122 ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_llb,
123 4096, 4096, DRM_MM_SEARCH_DEFAULT);
124 if (ret)
125 goto err_fb;
126
122 dev_priv->fbc.compressed_llb = compressed_llb; 127 dev_priv->fbc.compressed_llb = compressed_llb;
123 128
124 I915_WRITE(FBC_CFB_BASE, 129 I915_WRITE(FBC_CFB_BASE,
@@ -136,8 +141,10 @@ static int i915_setup_compression(struct drm_device *dev, int size)
136 return 0; 141 return 0;
137 142
138err_fb: 143err_fb:
139 drm_mm_put_block(compressed_fb); 144 kfree(compressed_llb);
140err: 145 drm_mm_remove_node(compressed_fb);
146err_llb:
147 kfree(compressed_fb);
141 pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size); 148 pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
142 return -ENOSPC; 149 return -ENOSPC;
143} 150}
@@ -165,11 +172,15 @@ void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
165 if (dev_priv->fbc.size == 0) 172 if (dev_priv->fbc.size == 0)
166 return; 173 return;
167 174
168 if (dev_priv->fbc.compressed_fb) 175 if (dev_priv->fbc.compressed_fb) {
169 drm_mm_put_block(dev_priv->fbc.compressed_fb); 176 drm_mm_remove_node(dev_priv->fbc.compressed_fb);
177 kfree(dev_priv->fbc.compressed_fb);
178 }
170 179
171 if (dev_priv->fbc.compressed_llb) 180 if (dev_priv->fbc.compressed_llb) {
172 drm_mm_put_block(dev_priv->fbc.compressed_llb); 181 drm_mm_remove_node(dev_priv->fbc.compressed_llb);
182 kfree(dev_priv->fbc.compressed_llb);
183 }
173 184
174 dev_priv->fbc.size = 0; 185 dev_priv->fbc.size = 0;
175} 186}
@@ -273,9 +284,7 @@ _i915_gem_object_create_stolen(struct drm_device *dev,
273 if (obj == NULL) 284 if (obj == NULL)
274 return NULL; 285 return NULL;
275 286
276 if (drm_gem_private_object_init(dev, &obj->base, stolen->size)) 287 drm_gem_private_object_init(dev, &obj->base, stolen->size);
277 goto cleanup;
278
279 i915_gem_object_init(obj, &i915_gem_object_stolen_ops); 288 i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
280 289
281 obj->pages = i915_pages_create_for_stolen(dev, 290 obj->pages = i915_pages_create_for_stolen(dev,
@@ -303,6 +312,7 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
303 struct drm_i915_private *dev_priv = dev->dev_private; 312 struct drm_i915_private *dev_priv = dev->dev_private;
304 struct drm_i915_gem_object *obj; 313 struct drm_i915_gem_object *obj;
305 struct drm_mm_node *stolen; 314 struct drm_mm_node *stolen;
315 int ret;
306 316
307 if (!drm_mm_initialized(&dev_priv->mm.stolen)) 317 if (!drm_mm_initialized(&dev_priv->mm.stolen))
308 return NULL; 318 return NULL;
@@ -311,17 +321,23 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
311 if (size == 0) 321 if (size == 0)
312 return NULL; 322 return NULL;
313 323
314 stolen = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0); 324 stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
315 if (stolen) 325 if (!stolen)
316 stolen = drm_mm_get_block(stolen, size, 4096); 326 return NULL;
317 if (stolen == NULL) 327
328 ret = drm_mm_insert_node(&dev_priv->mm.stolen, stolen, size,
329 4096, DRM_MM_SEARCH_DEFAULT);
330 if (ret) {
331 kfree(stolen);
318 return NULL; 332 return NULL;
333 }
319 334
320 obj = _i915_gem_object_create_stolen(dev, stolen); 335 obj = _i915_gem_object_create_stolen(dev, stolen);
321 if (obj) 336 if (obj)
322 return obj; 337 return obj;
323 338
324 drm_mm_put_block(stolen); 339 drm_mm_remove_node(stolen);
340 kfree(stolen);
325 return NULL; 341 return NULL;
326} 342}
327 343
@@ -367,7 +383,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
367 obj = _i915_gem_object_create_stolen(dev, stolen); 383 obj = _i915_gem_object_create_stolen(dev, stolen);
368 if (obj == NULL) { 384 if (obj == NULL) {
369 DRM_DEBUG_KMS("failed to allocate stolen object\n"); 385 DRM_DEBUG_KMS("failed to allocate stolen object\n");
370 drm_mm_put_block(stolen); 386 drm_mm_remove_node(stolen);
387 kfree(stolen);
371 return NULL; 388 return NULL;
372 } 389 }
373 390
@@ -406,7 +423,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
406err_vma: 423err_vma:
407 i915_gem_vma_destroy(vma); 424 i915_gem_vma_destroy(vma);
408err_out: 425err_out:
409 drm_mm_put_block(stolen); 426 drm_mm_remove_node(stolen);
427 kfree(stolen);
410 drm_gem_object_unreference(&obj->base); 428 drm_gem_object_unreference(&obj->base);
411 return NULL; 429 return NULL;
412} 430}
@@ -415,7 +433,8 @@ void
415i915_gem_object_release_stolen(struct drm_i915_gem_object *obj) 433i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
416{ 434{
417 if (obj->stolen) { 435 if (obj->stolen) {
418 drm_mm_put_block(obj->stolen); 436 drm_mm_remove_node(obj->stolen);
437 kfree(obj->stolen);
419 obj->stolen = NULL; 438 obj->stolen = NULL;
420 } 439 }
421} 440}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 76d965c38d7e..56708c64e68f 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -4160,6 +4160,8 @@
4160 _TRANSCODER(trans, HSW_VIDEO_DIP_CTL_A, HSW_VIDEO_DIP_CTL_B) 4160 _TRANSCODER(trans, HSW_VIDEO_DIP_CTL_A, HSW_VIDEO_DIP_CTL_B)
4161#define HSW_TVIDEO_DIP_AVI_DATA(trans) \ 4161#define HSW_TVIDEO_DIP_AVI_DATA(trans) \
4162 _TRANSCODER(trans, HSW_VIDEO_DIP_AVI_DATA_A, HSW_VIDEO_DIP_AVI_DATA_B) 4162 _TRANSCODER(trans, HSW_VIDEO_DIP_AVI_DATA_A, HSW_VIDEO_DIP_AVI_DATA_B)
4163#define HSW_TVIDEO_DIP_VS_DATA(trans) \
4164 _TRANSCODER(trans, HSW_VIDEO_DIP_VS_DATA_A, HSW_VIDEO_DIP_VS_DATA_B)
4163#define HSW_TVIDEO_DIP_SPD_DATA(trans) \ 4165#define HSW_TVIDEO_DIP_SPD_DATA(trans) \
4164 _TRANSCODER(trans, HSW_VIDEO_DIP_SPD_DATA_A, HSW_VIDEO_DIP_SPD_DATA_B) 4166 _TRANSCODER(trans, HSW_VIDEO_DIP_SPD_DATA_A, HSW_VIDEO_DIP_SPD_DATA_B)
4165#define HSW_TVIDEO_DIP_GCP(trans) \ 4167#define HSW_TVIDEO_DIP_GCP(trans) \
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index b4daa640a6d8..10c1db596387 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -7650,7 +7650,8 @@ inline static void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
7650static int intel_gen2_queue_flip(struct drm_device *dev, 7650static int intel_gen2_queue_flip(struct drm_device *dev,
7651 struct drm_crtc *crtc, 7651 struct drm_crtc *crtc,
7652 struct drm_framebuffer *fb, 7652 struct drm_framebuffer *fb,
7653 struct drm_i915_gem_object *obj) 7653 struct drm_i915_gem_object *obj,
7654 uint32_t flags)
7654{ 7655{
7655 struct drm_i915_private *dev_priv = dev->dev_private; 7656 struct drm_i915_private *dev_priv = dev->dev_private;
7656 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 7657 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -7694,7 +7695,8 @@ err:
7694static int intel_gen3_queue_flip(struct drm_device *dev, 7695static int intel_gen3_queue_flip(struct drm_device *dev,
7695 struct drm_crtc *crtc, 7696 struct drm_crtc *crtc,
7696 struct drm_framebuffer *fb, 7697 struct drm_framebuffer *fb,
7697 struct drm_i915_gem_object *obj) 7698 struct drm_i915_gem_object *obj,
7699 uint32_t flags)
7698{ 7700{
7699 struct drm_i915_private *dev_priv = dev->dev_private; 7701 struct drm_i915_private *dev_priv = dev->dev_private;
7700 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 7702 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -7735,7 +7737,8 @@ err:
7735static int intel_gen4_queue_flip(struct drm_device *dev, 7737static int intel_gen4_queue_flip(struct drm_device *dev,
7736 struct drm_crtc *crtc, 7738 struct drm_crtc *crtc,
7737 struct drm_framebuffer *fb, 7739 struct drm_framebuffer *fb,
7738 struct drm_i915_gem_object *obj) 7740 struct drm_i915_gem_object *obj,
7741 uint32_t flags)
7739{ 7742{
7740 struct drm_i915_private *dev_priv = dev->dev_private; 7743 struct drm_i915_private *dev_priv = dev->dev_private;
7741 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 7744 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -7783,7 +7786,8 @@ err:
7783static int intel_gen6_queue_flip(struct drm_device *dev, 7786static int intel_gen6_queue_flip(struct drm_device *dev,
7784 struct drm_crtc *crtc, 7787 struct drm_crtc *crtc,
7785 struct drm_framebuffer *fb, 7788 struct drm_framebuffer *fb,
7786 struct drm_i915_gem_object *obj) 7789 struct drm_i915_gem_object *obj,
7790 uint32_t flags)
7787{ 7791{
7788 struct drm_i915_private *dev_priv = dev->dev_private; 7792 struct drm_i915_private *dev_priv = dev->dev_private;
7789 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 7793 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -7833,7 +7837,8 @@ err:
7833static int intel_gen7_queue_flip(struct drm_device *dev, 7837static int intel_gen7_queue_flip(struct drm_device *dev,
7834 struct drm_crtc *crtc, 7838 struct drm_crtc *crtc,
7835 struct drm_framebuffer *fb, 7839 struct drm_framebuffer *fb,
7836 struct drm_i915_gem_object *obj) 7840 struct drm_i915_gem_object *obj,
7841 uint32_t flags)
7837{ 7842{
7838 struct drm_i915_private *dev_priv = dev->dev_private; 7843 struct drm_i915_private *dev_priv = dev->dev_private;
7839 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 7844 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -7883,14 +7888,16 @@ err:
7883static int intel_default_queue_flip(struct drm_device *dev, 7888static int intel_default_queue_flip(struct drm_device *dev,
7884 struct drm_crtc *crtc, 7889 struct drm_crtc *crtc,
7885 struct drm_framebuffer *fb, 7890 struct drm_framebuffer *fb,
7886 struct drm_i915_gem_object *obj) 7891 struct drm_i915_gem_object *obj,
7892 uint32_t flags)
7887{ 7893{
7888 return -ENODEV; 7894 return -ENODEV;
7889} 7895}
7890 7896
7891static int intel_crtc_page_flip(struct drm_crtc *crtc, 7897static int intel_crtc_page_flip(struct drm_crtc *crtc,
7892 struct drm_framebuffer *fb, 7898 struct drm_framebuffer *fb,
7893 struct drm_pending_vblank_event *event) 7899 struct drm_pending_vblank_event *event,
7900 uint32_t page_flip_flags)
7894{ 7901{
7895 struct drm_device *dev = crtc->dev; 7902 struct drm_device *dev = crtc->dev;
7896 struct drm_i915_private *dev_priv = dev->dev_private; 7903 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -7960,7 +7967,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
7960 atomic_inc(&intel_crtc->unpin_work_count); 7967 atomic_inc(&intel_crtc->unpin_work_count);
7961 intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); 7968 intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
7962 7969
7963 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj); 7970 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, page_flip_flags);
7964 if (ret) 7971 if (ret)
7965 goto cleanup_pending; 7972 goto cleanup_pending;
7966 7973
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 94179fdf61f5..4148cc85bf7f 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -74,6 +74,8 @@ static u32 g4x_infoframe_index(enum hdmi_infoframe_type type)
74 return VIDEO_DIP_SELECT_AVI; 74 return VIDEO_DIP_SELECT_AVI;
75 case HDMI_INFOFRAME_TYPE_SPD: 75 case HDMI_INFOFRAME_TYPE_SPD:
76 return VIDEO_DIP_SELECT_SPD; 76 return VIDEO_DIP_SELECT_SPD;
77 case HDMI_INFOFRAME_TYPE_VENDOR:
78 return VIDEO_DIP_SELECT_VENDOR;
77 default: 79 default:
78 DRM_DEBUG_DRIVER("unknown info frame type %d\n", type); 80 DRM_DEBUG_DRIVER("unknown info frame type %d\n", type);
79 return 0; 81 return 0;
@@ -87,6 +89,8 @@ static u32 g4x_infoframe_enable(enum hdmi_infoframe_type type)
87 return VIDEO_DIP_ENABLE_AVI; 89 return VIDEO_DIP_ENABLE_AVI;
88 case HDMI_INFOFRAME_TYPE_SPD: 90 case HDMI_INFOFRAME_TYPE_SPD:
89 return VIDEO_DIP_ENABLE_SPD; 91 return VIDEO_DIP_ENABLE_SPD;
92 case HDMI_INFOFRAME_TYPE_VENDOR:
93 return VIDEO_DIP_ENABLE_VENDOR;
90 default: 94 default:
91 DRM_DEBUG_DRIVER("unknown info frame type %d\n", type); 95 DRM_DEBUG_DRIVER("unknown info frame type %d\n", type);
92 return 0; 96 return 0;
@@ -100,6 +104,8 @@ static u32 hsw_infoframe_enable(enum hdmi_infoframe_type type)
100 return VIDEO_DIP_ENABLE_AVI_HSW; 104 return VIDEO_DIP_ENABLE_AVI_HSW;
101 case HDMI_INFOFRAME_TYPE_SPD: 105 case HDMI_INFOFRAME_TYPE_SPD:
102 return VIDEO_DIP_ENABLE_SPD_HSW; 106 return VIDEO_DIP_ENABLE_SPD_HSW;
107 case HDMI_INFOFRAME_TYPE_VENDOR:
108 return VIDEO_DIP_ENABLE_VS_HSW;
103 default: 109 default:
104 DRM_DEBUG_DRIVER("unknown info frame type %d\n", type); 110 DRM_DEBUG_DRIVER("unknown info frame type %d\n", type);
105 return 0; 111 return 0;
@@ -114,6 +120,8 @@ static u32 hsw_infoframe_data_reg(enum hdmi_infoframe_type type,
114 return HSW_TVIDEO_DIP_AVI_DATA(cpu_transcoder); 120 return HSW_TVIDEO_DIP_AVI_DATA(cpu_transcoder);
115 case HDMI_INFOFRAME_TYPE_SPD: 121 case HDMI_INFOFRAME_TYPE_SPD:
116 return HSW_TVIDEO_DIP_SPD_DATA(cpu_transcoder); 122 return HSW_TVIDEO_DIP_SPD_DATA(cpu_transcoder);
123 case HDMI_INFOFRAME_TYPE_VENDOR:
124 return HSW_TVIDEO_DIP_VS_DATA(cpu_transcoder);
117 default: 125 default:
118 DRM_DEBUG_DRIVER("unknown info frame type %d\n", type); 126 DRM_DEBUG_DRIVER("unknown info frame type %d\n", type);
119 return 0; 127 return 0;
@@ -392,6 +400,21 @@ static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
392 intel_write_infoframe(encoder, &frame); 400 intel_write_infoframe(encoder, &frame);
393} 401}
394 402
403static void
404intel_hdmi_set_hdmi_infoframe(struct drm_encoder *encoder,
405 struct drm_display_mode *adjusted_mode)
406{
407 union hdmi_infoframe frame;
408 int ret;
409
410 ret = drm_hdmi_vendor_infoframe_from_display_mode(&frame.vendor.hdmi,
411 adjusted_mode);
412 if (ret < 0)
413 return;
414
415 intel_write_infoframe(encoder, &frame);
416}
417
395static void g4x_set_infoframes(struct drm_encoder *encoder, 418static void g4x_set_infoframes(struct drm_encoder *encoder,
396 struct drm_display_mode *adjusted_mode) 419 struct drm_display_mode *adjusted_mode)
397{ 420{
@@ -454,6 +477,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
454 477
455 intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); 478 intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
456 intel_hdmi_set_spd_infoframe(encoder); 479 intel_hdmi_set_spd_infoframe(encoder);
480 intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
457} 481}
458 482
459static void ibx_set_infoframes(struct drm_encoder *encoder, 483static void ibx_set_infoframes(struct drm_encoder *encoder,
@@ -515,6 +539,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
515 539
516 intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); 540 intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
517 intel_hdmi_set_spd_infoframe(encoder); 541 intel_hdmi_set_spd_infoframe(encoder);
542 intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
518} 543}
519 544
520static void cpt_set_infoframes(struct drm_encoder *encoder, 545static void cpt_set_infoframes(struct drm_encoder *encoder,
@@ -550,6 +575,7 @@ static void cpt_set_infoframes(struct drm_encoder *encoder,
550 575
551 intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); 576 intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
552 intel_hdmi_set_spd_infoframe(encoder); 577 intel_hdmi_set_spd_infoframe(encoder);
578 intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
553} 579}
554 580
555static void vlv_set_infoframes(struct drm_encoder *encoder, 581static void vlv_set_infoframes(struct drm_encoder *encoder,
@@ -584,6 +610,7 @@ static void vlv_set_infoframes(struct drm_encoder *encoder,
584 610
585 intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); 611 intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
586 intel_hdmi_set_spd_infoframe(encoder); 612 intel_hdmi_set_spd_infoframe(encoder);
613 intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
587} 614}
588 615
589static void hsw_set_infoframes(struct drm_encoder *encoder, 616static void hsw_set_infoframes(struct drm_encoder *encoder,
@@ -611,6 +638,7 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
611 638
612 intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); 639 intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
613 intel_hdmi_set_spd_infoframe(encoder); 640 intel_hdmi_set_spd_infoframe(encoder);
641 intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
614} 642}
615 643
616static void intel_hdmi_mode_set(struct intel_encoder *encoder) 644static void intel_hdmi_mode_set(struct intel_encoder *encoder)
@@ -802,10 +830,22 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
802 } 830 }
803} 831}
804 832
833static int hdmi_portclock_limit(struct intel_hdmi *hdmi)
834{
835 struct drm_device *dev = intel_hdmi_to_dev(hdmi);
836
837 if (IS_G4X(dev))
838 return 165000;
839 else if (IS_HASWELL(dev))
840 return 300000;
841 else
842 return 225000;
843}
844
805static int intel_hdmi_mode_valid(struct drm_connector *connector, 845static int intel_hdmi_mode_valid(struct drm_connector *connector,
806 struct drm_display_mode *mode) 846 struct drm_display_mode *mode)
807{ 847{
808 if (mode->clock > 165000) 848 if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector)))
809 return MODE_CLOCK_HIGH; 849 return MODE_CLOCK_HIGH;
810 if (mode->clock < 20000) 850 if (mode->clock < 20000)
811 return MODE_CLOCK_LOW; 851 return MODE_CLOCK_LOW;
@@ -823,6 +863,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
823 struct drm_device *dev = encoder->base.dev; 863 struct drm_device *dev = encoder->base.dev;
824 struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode; 864 struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
825 int clock_12bpc = pipe_config->requested_mode.clock * 3 / 2; 865 int clock_12bpc = pipe_config->requested_mode.clock * 3 / 2;
866 int portclock_limit = hdmi_portclock_limit(intel_hdmi);
826 int desired_bpp; 867 int desired_bpp;
827 868
828 if (intel_hdmi->color_range_auto) { 869 if (intel_hdmi->color_range_auto) {
@@ -846,7 +887,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
846 * outputs. We also need to check that the higher clock still fits 887 * outputs. We also need to check that the higher clock still fits
847 * within limits. 888 * within limits.
848 */ 889 */
849 if (pipe_config->pipe_bpp > 8*3 && clock_12bpc <= 225000 890 if (pipe_config->pipe_bpp > 8*3 && clock_12bpc <= portclock_limit
850 && HAS_PCH_SPLIT(dev)) { 891 && HAS_PCH_SPLIT(dev)) {
851 DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n"); 892 DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
852 desired_bpp = 12*3; 893 desired_bpp = 12*3;
@@ -863,7 +904,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
863 pipe_config->pipe_bpp = desired_bpp; 904 pipe_config->pipe_bpp = desired_bpp;
864 } 905 }
865 906
866 if (adjusted_mode->clock > 225000) { 907 if (adjusted_mode->clock > portclock_limit) {
867 DRM_DEBUG_KMS("too high HDMI clock, rejecting mode\n"); 908 DRM_DEBUG_KMS("too high HDMI clock, rejecting mode\n");
868 return false; 909 return false;
869 } 910 }
diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c
index 17d0a637e4fb..6b1a87c8aac5 100644
--- a/drivers/gpu/drm/mga/mga_drv.c
+++ b/drivers/gpu/drm/mga/mga_drv.c
@@ -50,7 +50,6 @@ static const struct file_operations mga_driver_fops = {
50 .unlocked_ioctl = drm_ioctl, 50 .unlocked_ioctl = drm_ioctl,
51 .mmap = drm_mmap, 51 .mmap = drm_mmap,
52 .poll = drm_poll, 52 .poll = drm_poll,
53 .fasync = drm_fasync,
54#ifdef CONFIG_COMPAT 53#ifdef CONFIG_COMPAT
55 .compat_ioctl = mga_compat_ioctl, 54 .compat_ioctl = mga_compat_ioctl,
56#endif 55#endif
@@ -59,7 +58,7 @@ static const struct file_operations mga_driver_fops = {
59 58
60static struct drm_driver driver = { 59static struct drm_driver driver = {
61 .driver_features = 60 .driver_features =
62 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | 61 DRIVER_USE_AGP | DRIVER_PCI_DMA |
63 DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED, 62 DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
64 .dev_priv_size = sizeof(drm_mga_buf_priv_t), 63 .dev_priv_size = sizeof(drm_mga_buf_priv_t),
65 .load = mga_driver_load, 64 .load = mga_driver_load,
diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
index 54558a01969a..ca4bc54ea214 100644
--- a/drivers/gpu/drm/mga/mga_drv.h
+++ b/drivers/gpu/drm/mga/mga_drv.h
@@ -149,7 +149,7 @@ typedef struct drm_mga_private {
149 unsigned int agp_size; 149 unsigned int agp_size;
150} drm_mga_private_t; 150} drm_mga_private_t;
151 151
152extern struct drm_ioctl_desc mga_ioctls[]; 152extern const struct drm_ioctl_desc mga_ioctls[];
153extern int mga_max_ioctl; 153extern int mga_max_ioctl;
154 154
155 /* mga_dma.c */ 155 /* mga_dma.c */
diff --git a/drivers/gpu/drm/mga/mga_state.c b/drivers/gpu/drm/mga/mga_state.c
index 9c145143ad0f..37cc2fb4eadd 100644
--- a/drivers/gpu/drm/mga/mga_state.c
+++ b/drivers/gpu/drm/mga/mga_state.c
@@ -1083,7 +1083,7 @@ file_priv)
1083 return 0; 1083 return 0;
1084} 1084}
1085 1085
1086struct drm_ioctl_desc mga_ioctls[] = { 1086const struct drm_ioctl_desc mga_ioctls[] = {
1087 DRM_IOCTL_DEF_DRV(MGA_INIT, mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1087 DRM_IOCTL_DEF_DRV(MGA_INIT, mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1088 DRM_IOCTL_DEF_DRV(MGA_FLUSH, mga_dma_flush, DRM_AUTH), 1088 DRM_IOCTL_DEF_DRV(MGA_FLUSH, mga_dma_flush, DRM_AUTH),
1089 DRM_IOCTL_DEF_DRV(MGA_RESET, mga_dma_reset, DRM_AUTH), 1089 DRM_IOCTL_DEF_DRV(MGA_RESET, mga_dma_reset, DRM_AUTH),
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 122b571ccc7c..fcce7b2f8011 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -81,7 +81,6 @@ static const struct file_operations mgag200_driver_fops = {
81 .unlocked_ioctl = drm_ioctl, 81 .unlocked_ioctl = drm_ioctl,
82 .mmap = mgag200_mmap, 82 .mmap = mgag200_mmap,
83 .poll = drm_poll, 83 .poll = drm_poll,
84 .fasync = drm_fasync,
85#ifdef CONFIG_COMPAT 84#ifdef CONFIG_COMPAT
86 .compat_ioctl = drm_compat_ioctl, 85 .compat_ioctl = drm_compat_ioctl,
87#endif 86#endif
@@ -89,7 +88,7 @@ static const struct file_operations mgag200_driver_fops = {
89}; 88};
90 89
91static struct drm_driver driver = { 90static struct drm_driver driver = {
92 .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_USE_MTRR, 91 .driver_features = DRIVER_GEM | DRIVER_MODESET,
93 .load = mgag200_driver_load, 92 .load = mgag200_driver_load,
94 .unload = mgag200_driver_unload, 93 .unload = mgag200_driver_unload,
95 .fops = &mgag200_driver_fops, 94 .fops = &mgag200_driver_fops,
@@ -104,7 +103,7 @@ static struct drm_driver driver = {
104 .gem_free_object = mgag200_gem_free_object, 103 .gem_free_object = mgag200_gem_free_object,
105 .dumb_create = mgag200_dumb_create, 104 .dumb_create = mgag200_dumb_create,
106 .dumb_map_offset = mgag200_dumb_mmap_offset, 105 .dumb_map_offset = mgag200_dumb_mmap_offset,
107 .dumb_destroy = mgag200_dumb_destroy, 106 .dumb_destroy = drm_gem_dumb_destroy,
108}; 107};
109 108
110static struct pci_driver mgag200_pci_driver = { 109static struct pci_driver mgag200_pci_driver = {
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 12e2499d9352..baaae19332e2 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -264,9 +264,6 @@ int mgag200_gem_init_object(struct drm_gem_object *obj);
264int mgag200_dumb_create(struct drm_file *file, 264int mgag200_dumb_create(struct drm_file *file,
265 struct drm_device *dev, 265 struct drm_device *dev,
266 struct drm_mode_create_dumb *args); 266 struct drm_mode_create_dumb *args);
267int mgag200_dumb_destroy(struct drm_file *file,
268 struct drm_device *dev,
269 uint32_t handle);
270void mgag200_gem_free_object(struct drm_gem_object *obj); 267void mgag200_gem_free_object(struct drm_gem_object *obj);
271int 268int
272mgag200_dumb_mmap_offset(struct drm_file *file, 269mgag200_dumb_mmap_offset(struct drm_file *file,
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index 9fa5685baee0..0f8b861b10b3 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -310,13 +310,6 @@ int mgag200_dumb_create(struct drm_file *file,
310 return 0; 310 return 0;
311} 311}
312 312
313int mgag200_dumb_destroy(struct drm_file *file,
314 struct drm_device *dev,
315 uint32_t handle)
316{
317 return drm_gem_handle_delete(file, handle);
318}
319
320int mgag200_gem_init_object(struct drm_gem_object *obj) 313int mgag200_gem_init_object(struct drm_gem_object *obj)
321{ 314{
322 BUG(); 315 BUG();
@@ -349,7 +342,7 @@ void mgag200_gem_free_object(struct drm_gem_object *obj)
349 342
350static inline u64 mgag200_bo_mmap_offset(struct mgag200_bo *bo) 343static inline u64 mgag200_bo_mmap_offset(struct mgag200_bo *bo)
351{ 344{
352 return bo->bo.addr_space_offset; 345 return drm_vma_node_offset_addr(&bo->bo.vma_node);
353} 346}
354 347
355int 348int
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 3acb2b044c7b..fd4539d9ad2c 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -148,7 +148,9 @@ mgag200_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
148 148
149static int mgag200_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp) 149static int mgag200_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
150{ 150{
151 return 0; 151 struct mgag200_bo *mgabo = mgag200_bo(bo);
152
153 return drm_vma_node_verify_access(&mgabo->gem.vma_node, filp);
152} 154}
153 155
154static int mgag200_ttm_io_mem_reserve(struct ttm_bo_device *bdev, 156static int mgag200_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
@@ -321,7 +323,6 @@ int mgag200_bo_create(struct drm_device *dev, int size, int align,
321 return ret; 323 return ret;
322 } 324 }
323 325
324 mgabo->gem.driver_private = NULL;
325 mgabo->bo.bdev = &mdev->ttm.bdev; 326 mgabo->bo.bdev = &mdev->ttm.bdev;
326 327
327 mgag200_ttm_placement(mgabo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM); 328 mgag200_ttm_placement(mgabo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
new file mode 100644
index 000000000000..a06c19cc56f8
--- /dev/null
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -0,0 +1,34 @@
1
2config DRM_MSM
3 tristate "MSM DRM"
4 depends on DRM
5 depends on ARCH_MSM
6 depends on ARCH_MSM8960
7 select DRM_KMS_HELPER
8 select SHMEM
9 select TMPFS
10 default y
11 help
12 DRM/KMS driver for MSM/snapdragon.
13
14config DRM_MSM_FBDEV
15 bool "Enable legacy fbdev support for MSM modesetting driver"
16 depends on DRM_MSM
17 select FB_SYS_FILLRECT
18 select FB_SYS_COPYAREA
19 select FB_SYS_IMAGEBLIT
20 select FB_SYS_FOPS
21 default y
22 help
23 Choose this option if you have a need for the legacy fbdev
24 support. Note that this support also provide the linux console
25 support on top of the MSM modesetting driver.
26
27config DRM_MSM_REGISTER_LOGGING
28 bool "MSM DRM register logging"
29 depends on DRM_MSM
30 default n
31 help
32 Compile in support for logging register reads/writes in a format
33 that can be parsed by envytools demsm tool. If enabled, register
34 logging can be switched on via msm.reglog=y module param.
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
new file mode 100644
index 000000000000..439dfb5b417b
--- /dev/null
+++ b/drivers/gpu/drm/msm/Makefile
@@ -0,0 +1,30 @@
1ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/msm
2ifeq (, $(findstring -W,$(EXTRA_CFLAGS)))
3 ccflags-y += -Werror
4endif
5
6msm-y := \
7 adreno/adreno_gpu.o \
8 adreno/a3xx_gpu.o \
9 hdmi/hdmi.o \
10 hdmi/hdmi_connector.o \
11 hdmi/hdmi_i2c.o \
12 hdmi/hdmi_phy_8960.o \
13 hdmi/hdmi_phy_8x60.o \
14 mdp4/mdp4_crtc.o \
15 mdp4/mdp4_dtv_encoder.o \
16 mdp4/mdp4_format.o \
17 mdp4/mdp4_irq.o \
18 mdp4/mdp4_kms.o \
19 mdp4/mdp4_plane.o \
20 msm_connector.o \
21 msm_drv.o \
22 msm_fb.o \
23 msm_gem.o \
24 msm_gem_submit.o \
25 msm_gpu.o \
26 msm_ringbuffer.o
27
28msm-$(CONFIG_DRM_MSM_FBDEV) += msm_fbdev.o
29
30obj-$(CONFIG_DRM_MSM) += msm.o
diff --git a/drivers/gpu/drm/msm/NOTES b/drivers/gpu/drm/msm/NOTES
new file mode 100644
index 000000000000..e036f6c1db94
--- /dev/null
+++ b/drivers/gpu/drm/msm/NOTES
@@ -0,0 +1,69 @@
1NOTES about msm drm/kms driver:
2
3In the current snapdragon SoC's, we have (at least) 3 different
4display controller blocks at play:
5 + MDP3 - ?? seems to be what is on geeksphone peak device
6 + MDP4 - S3 (APQ8060, touchpad), S4-pro (APQ8064, nexus4 & ifc6410)
7 + MDSS - snapdragon 800
8
9(I don't have a completely clear picture on which display controller
10maps to which part #)
11
12Plus a handful of blocks around them for HDMI/DSI/etc output.
13
14And on gpu side of things:
15 + zero, one, or two 2d cores (z180)
16 + and either a2xx or a3xx 3d core.
17
18But, HDMI/DSI/etc blocks seem like they can be shared across multiple
19display controller blocks. And I for sure don't want to have to deal
20with N different kms devices from xf86-video-freedreno. Plus, it
21seems like we can do some clever tricks like use GPU to trigger
22pageflip after rendering completes (ie. have the kms/crtc code build
23up gpu cmdstream to update scanout and write FLUSH register after).
24
25So, the approach is one drm driver, with some modularity. Different
26'struct msm_kms' implementations, depending on display controller.
27And one or more 'struct msm_gpu' for the various different gpu sub-
28modules.
29
30(Second part is not implemented yet. So far this is just basic KMS
31driver, and not exposing any custom ioctls to userspace for now.)
32
33The kms module provides the plane, crtc, and encoder objects, and
34loads whatever connectors are appropriate.
35
36For MDP4, the mapping is:
37
38 plane -> PIPE{RGBn,VGn} \
39 crtc -> OVLP{n} + DMA{P,S,E} (??) |-> MDP "device"
40 encoder -> DTV/LCDC/DSI (within MDP4) /
41 connector -> HDMI/DSI/etc --> other device(s)
42
43Since the irq's that drm core mostly cares about are vblank/framedone,
44we'll let msm_mdp4_kms provide the irq install/uninstall/etc functions
45and treat the MDP4 block's irq as "the" irq. Even though the connectors
46may have their own irqs which they install themselves. For this reason
47the display controller is the "master" device.
48
49Each connector probably ends up being a separate device, just for the
50logistics of finding/mapping io region, irq, etc. Idealy we would
51have a better way than just stashing the platform device in a global
52(ie. like DT super-node.. but I don't have any snapdragon hw yet that
53is using DT).
54
55Note that so far I've not been able to get any docs on the hw, and it
56seems that access to such docs would prevent me from working on the
57freedreno gallium driver. So there may be some mistakes in register
58names (I had to invent a few, since no sufficient hint was given in
59the downstream android fbdev driver), bitfield sizes, etc. My current
60state of understanding the registers is given in the envytools rnndb
61files at:
62
63 https://github.com/freedreno/envytools/tree/master/rnndb
64 (the mdp4/hdmi/dsi directories)
65
66These files are used both for a parser tool (in the same tree) to
67parse logged register reads/writes (both from downstream android fbdev
68driver, and this driver with register logging enabled), as well as to
69generate the register level headers.
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
new file mode 100644
index 000000000000..35463864b959
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -0,0 +1,1438 @@
1#ifndef A2XX_XML
2#define A2XX_XML
3
4/* Autogenerated file, DO NOT EDIT manually!
5
6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng
8git clone git://0x04.net/rules-ng-ng
9
10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 30005 bytes, from 2013-07-19 21:30:48)
14- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36)
15- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9712 bytes, from 2013-05-26 15:22:37)
16- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51415 bytes, from 2013-08-03 14:26:05)
17
18Copyright (C) 2013 by the following authors:
19- Rob Clark <robdclark@gmail.com> (robclark)
20
21Permission is hereby granted, free of charge, to any person obtaining
22a copy of this software and associated documentation files (the
23"Software"), to deal in the Software without restriction, including
24without limitation the rights to use, copy, modify, merge, publish,
25distribute, sublicense, and/or sell copies of the Software, and to
26permit persons to whom the Software is furnished to do so, subject to
27the following conditions:
28
29The above copyright notice and this permission notice (including the
30next paragraph) shall be included in all copies or substantial
31portions of the Software.
32
33THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
34EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
35MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
36IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
37LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
38OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
39WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
40*/
41
42
43enum a2xx_rb_dither_type {
44 DITHER_PIXEL = 0,
45 DITHER_SUBPIXEL = 1,
46};
47
48enum a2xx_colorformatx {
49 COLORX_4_4_4_4 = 0,
50 COLORX_1_5_5_5 = 1,
51 COLORX_5_6_5 = 2,
52 COLORX_8 = 3,
53 COLORX_8_8 = 4,
54 COLORX_8_8_8_8 = 5,
55 COLORX_S8_8_8_8 = 6,
56 COLORX_16_FLOAT = 7,
57 COLORX_16_16_FLOAT = 8,
58 COLORX_16_16_16_16_FLOAT = 9,
59 COLORX_32_FLOAT = 10,
60 COLORX_32_32_FLOAT = 11,
61 COLORX_32_32_32_32_FLOAT = 12,
62 COLORX_2_3_3 = 13,
63 COLORX_8_8_8 = 14,
64};
65
66enum a2xx_sq_surfaceformat {
67 FMT_1_REVERSE = 0,
68 FMT_1 = 1,
69 FMT_8 = 2,
70 FMT_1_5_5_5 = 3,
71 FMT_5_6_5 = 4,
72 FMT_6_5_5 = 5,
73 FMT_8_8_8_8 = 6,
74 FMT_2_10_10_10 = 7,
75 FMT_8_A = 8,
76 FMT_8_B = 9,
77 FMT_8_8 = 10,
78 FMT_Cr_Y1_Cb_Y0 = 11,
79 FMT_Y1_Cr_Y0_Cb = 12,
80 FMT_5_5_5_1 = 13,
81 FMT_8_8_8_8_A = 14,
82 FMT_4_4_4_4 = 15,
83 FMT_10_11_11 = 16,
84 FMT_11_11_10 = 17,
85 FMT_DXT1 = 18,
86 FMT_DXT2_3 = 19,
87 FMT_DXT4_5 = 20,
88 FMT_24_8 = 22,
89 FMT_24_8_FLOAT = 23,
90 FMT_16 = 24,
91 FMT_16_16 = 25,
92 FMT_16_16_16_16 = 26,
93 FMT_16_EXPAND = 27,
94 FMT_16_16_EXPAND = 28,
95 FMT_16_16_16_16_EXPAND = 29,
96 FMT_16_FLOAT = 30,
97 FMT_16_16_FLOAT = 31,
98 FMT_16_16_16_16_FLOAT = 32,
99 FMT_32 = 33,
100 FMT_32_32 = 34,
101 FMT_32_32_32_32 = 35,
102 FMT_32_FLOAT = 36,
103 FMT_32_32_FLOAT = 37,
104 FMT_32_32_32_32_FLOAT = 38,
105 FMT_32_AS_8 = 39,
106 FMT_32_AS_8_8 = 40,
107 FMT_16_MPEG = 41,
108 FMT_16_16_MPEG = 42,
109 FMT_8_INTERLACED = 43,
110 FMT_32_AS_8_INTERLACED = 44,
111 FMT_32_AS_8_8_INTERLACED = 45,
112 FMT_16_INTERLACED = 46,
113 FMT_16_MPEG_INTERLACED = 47,
114 FMT_16_16_MPEG_INTERLACED = 48,
115 FMT_DXN = 49,
116 FMT_8_8_8_8_AS_16_16_16_16 = 50,
117 FMT_DXT1_AS_16_16_16_16 = 51,
118 FMT_DXT2_3_AS_16_16_16_16 = 52,
119 FMT_DXT4_5_AS_16_16_16_16 = 53,
120 FMT_2_10_10_10_AS_16_16_16_16 = 54,
121 FMT_10_11_11_AS_16_16_16_16 = 55,
122 FMT_11_11_10_AS_16_16_16_16 = 56,
123 FMT_32_32_32_FLOAT = 57,
124 FMT_DXT3A = 58,
125 FMT_DXT5A = 59,
126 FMT_CTX1 = 60,
127 FMT_DXT3A_AS_1_1_1_1 = 61,
128};
129
130enum a2xx_sq_ps_vtx_mode {
131 POSITION_1_VECTOR = 0,
132 POSITION_2_VECTORS_UNUSED = 1,
133 POSITION_2_VECTORS_SPRITE = 2,
134 POSITION_2_VECTORS_EDGE = 3,
135 POSITION_2_VECTORS_KILL = 4,
136 POSITION_2_VECTORS_SPRITE_KILL = 5,
137 POSITION_2_VECTORS_EDGE_KILL = 6,
138 MULTIPASS = 7,
139};
140
141enum a2xx_sq_sample_cntl {
142 CENTROIDS_ONLY = 0,
143 CENTERS_ONLY = 1,
144 CENTROIDS_AND_CENTERS = 2,
145};
146
147enum a2xx_dx_clip_space {
148 DXCLIP_OPENGL = 0,
149 DXCLIP_DIRECTX = 1,
150};
151
152enum a2xx_pa_su_sc_polymode {
153 POLY_DISABLED = 0,
154 POLY_DUALMODE = 1,
155};
156
157enum a2xx_rb_edram_mode {
158 EDRAM_NOP = 0,
159 COLOR_DEPTH = 4,
160 DEPTH_ONLY = 5,
161 EDRAM_COPY = 6,
162};
163
164enum a2xx_pa_sc_pattern_bit_order {
165 LITTLE = 0,
166 BIG = 1,
167};
168
169enum a2xx_pa_sc_auto_reset_cntl {
170 NEVER = 0,
171 EACH_PRIMITIVE = 1,
172 EACH_PACKET = 2,
173};
174
175enum a2xx_pa_pixcenter {
176 PIXCENTER_D3D = 0,
177 PIXCENTER_OGL = 1,
178};
179
180enum a2xx_pa_roundmode {
181 TRUNCATE = 0,
182 ROUND = 1,
183 ROUNDTOEVEN = 2,
184 ROUNDTOODD = 3,
185};
186
187enum a2xx_pa_quantmode {
188 ONE_SIXTEENTH = 0,
189 ONE_EIGTH = 1,
190 ONE_QUARTER = 2,
191 ONE_HALF = 3,
192 ONE = 4,
193};
194
195enum a2xx_rb_copy_sample_select {
196 SAMPLE_0 = 0,
197 SAMPLE_1 = 1,
198 SAMPLE_2 = 2,
199 SAMPLE_3 = 3,
200 SAMPLE_01 = 4,
201 SAMPLE_23 = 5,
202 SAMPLE_0123 = 6,
203};
204
205enum sq_tex_clamp {
206 SQ_TEX_WRAP = 0,
207 SQ_TEX_MIRROR = 1,
208 SQ_TEX_CLAMP_LAST_TEXEL = 2,
209 SQ_TEX_MIRROR_ONCE_LAST_TEXEL = 3,
210 SQ_TEX_CLAMP_HALF_BORDER = 4,
211 SQ_TEX_MIRROR_ONCE_HALF_BORDER = 5,
212 SQ_TEX_CLAMP_BORDER = 6,
213 SQ_TEX_MIRROR_ONCE_BORDER = 7,
214};
215
216enum sq_tex_swiz {
217 SQ_TEX_X = 0,
218 SQ_TEX_Y = 1,
219 SQ_TEX_Z = 2,
220 SQ_TEX_W = 3,
221 SQ_TEX_ZERO = 4,
222 SQ_TEX_ONE = 5,
223};
224
225enum sq_tex_filter {
226 SQ_TEX_FILTER_POINT = 0,
227 SQ_TEX_FILTER_BILINEAR = 1,
228 SQ_TEX_FILTER_BICUBIC = 2,
229};
230
231#define REG_A2XX_RBBM_PATCH_RELEASE 0x00000001
232
233#define REG_A2XX_RBBM_CNTL 0x0000003b
234
235#define REG_A2XX_RBBM_SOFT_RESET 0x0000003c
236
237#define REG_A2XX_CP_PFP_UCODE_ADDR 0x000000c0
238
239#define REG_A2XX_CP_PFP_UCODE_DATA 0x000000c1
240
241#define REG_A2XX_RBBM_PERFCOUNTER1_SELECT 0x00000395
242
243#define REG_A2XX_RBBM_PERFCOUNTER1_LO 0x00000397
244
245#define REG_A2XX_RBBM_PERFCOUNTER1_HI 0x00000398
246
247#define REG_A2XX_RBBM_DEBUG 0x0000039b
248
249#define REG_A2XX_RBBM_PM_OVERRIDE1 0x0000039c
250
251#define REG_A2XX_RBBM_PM_OVERRIDE2 0x0000039d
252
253#define REG_A2XX_RBBM_DEBUG_OUT 0x000003a0
254
255#define REG_A2XX_RBBM_DEBUG_CNTL 0x000003a1
256
257#define REG_A2XX_RBBM_READ_ERROR 0x000003b3
258
259#define REG_A2XX_RBBM_INT_CNTL 0x000003b4
260
261#define REG_A2XX_RBBM_INT_STATUS 0x000003b5
262
263#define REG_A2XX_RBBM_INT_ACK 0x000003b6
264
265#define REG_A2XX_MASTER_INT_SIGNAL 0x000003b7
266
267#define REG_A2XX_RBBM_PERIPHID1 0x000003f9
268
269#define REG_A2XX_RBBM_PERIPHID2 0x000003fa
270
271#define REG_A2XX_CP_PERFMON_CNTL 0x00000444
272
273#define REG_A2XX_CP_PERFCOUNTER_SELECT 0x00000445
274
275#define REG_A2XX_CP_PERFCOUNTER_LO 0x00000446
276
277#define REG_A2XX_CP_PERFCOUNTER_HI 0x00000447
278
279#define REG_A2XX_CP_ST_BASE 0x0000044d
280
281#define REG_A2XX_CP_ST_BUFSZ 0x0000044e
282
283#define REG_A2XX_CP_IB1_BASE 0x00000458
284
285#define REG_A2XX_CP_IB1_BUFSZ 0x00000459
286
287#define REG_A2XX_CP_IB2_BASE 0x0000045a
288
289#define REG_A2XX_CP_IB2_BUFSZ 0x0000045b
290
291#define REG_A2XX_CP_STAT 0x0000047f
292
293#define REG_A2XX_RBBM_STATUS 0x000005d0
294#define A2XX_RBBM_STATUS_CMDFIFO_AVAIL__MASK 0x0000001f
295#define A2XX_RBBM_STATUS_CMDFIFO_AVAIL__SHIFT 0
296static inline uint32_t A2XX_RBBM_STATUS_CMDFIFO_AVAIL(uint32_t val)
297{
298 return ((val) << A2XX_RBBM_STATUS_CMDFIFO_AVAIL__SHIFT) & A2XX_RBBM_STATUS_CMDFIFO_AVAIL__MASK;
299}
300#define A2XX_RBBM_STATUS_TC_BUSY 0x00000020
301#define A2XX_RBBM_STATUS_HIRQ_PENDING 0x00000100
302#define A2XX_RBBM_STATUS_CPRQ_PENDING 0x00000200
303#define A2XX_RBBM_STATUS_CFRQ_PENDING 0x00000400
304#define A2XX_RBBM_STATUS_PFRQ_PENDING 0x00000800
305#define A2XX_RBBM_STATUS_VGT_BUSY_NO_DMA 0x00001000
306#define A2XX_RBBM_STATUS_RBBM_WU_BUSY 0x00004000
307#define A2XX_RBBM_STATUS_CP_NRT_BUSY 0x00010000
308#define A2XX_RBBM_STATUS_MH_BUSY 0x00040000
309#define A2XX_RBBM_STATUS_MH_COHERENCY_BUSY 0x00080000
310#define A2XX_RBBM_STATUS_SX_BUSY 0x00200000
311#define A2XX_RBBM_STATUS_TPC_BUSY 0x00400000
312#define A2XX_RBBM_STATUS_SC_CNTX_BUSY 0x01000000
313#define A2XX_RBBM_STATUS_PA_BUSY 0x02000000
314#define A2XX_RBBM_STATUS_VGT_BUSY 0x04000000
315#define A2XX_RBBM_STATUS_SQ_CNTX17_BUSY 0x08000000
316#define A2XX_RBBM_STATUS_SQ_CNTX0_BUSY 0x10000000
317#define A2XX_RBBM_STATUS_RB_CNTX_BUSY 0x40000000
318#define A2XX_RBBM_STATUS_GUI_ACTIVE 0x80000000
319
320#define REG_A2XX_A220_VSC_BIN_SIZE 0x00000c01
321#define A2XX_A220_VSC_BIN_SIZE_WIDTH__MASK 0x0000001f
322#define A2XX_A220_VSC_BIN_SIZE_WIDTH__SHIFT 0
323static inline uint32_t A2XX_A220_VSC_BIN_SIZE_WIDTH(uint32_t val)
324{
325 return ((val >> 5) << A2XX_A220_VSC_BIN_SIZE_WIDTH__SHIFT) & A2XX_A220_VSC_BIN_SIZE_WIDTH__MASK;
326}
327#define A2XX_A220_VSC_BIN_SIZE_HEIGHT__MASK 0x000003e0
328#define A2XX_A220_VSC_BIN_SIZE_HEIGHT__SHIFT 5
329static inline uint32_t A2XX_A220_VSC_BIN_SIZE_HEIGHT(uint32_t val)
330{
331 return ((val >> 5) << A2XX_A220_VSC_BIN_SIZE_HEIGHT__SHIFT) & A2XX_A220_VSC_BIN_SIZE_HEIGHT__MASK;
332}
333
334static inline uint32_t REG_A2XX_VSC_PIPE(uint32_t i0) { return 0x00000c06 + 0x3*i0; }
335
336static inline uint32_t REG_A2XX_VSC_PIPE_CONFIG(uint32_t i0) { return 0x00000c06 + 0x3*i0; }
337
338static inline uint32_t REG_A2XX_VSC_PIPE_DATA_ADDRESS(uint32_t i0) { return 0x00000c07 + 0x3*i0; }
339
340static inline uint32_t REG_A2XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x00000c08 + 0x3*i0; }
341
342#define REG_A2XX_PC_DEBUG_CNTL 0x00000c38
343
344#define REG_A2XX_PC_DEBUG_DATA 0x00000c39
345
346#define REG_A2XX_PA_SC_VIZ_QUERY_STATUS 0x00000c44
347
348#define REG_A2XX_GRAS_DEBUG_CNTL 0x00000c80
349
350#define REG_A2XX_PA_SU_DEBUG_CNTL 0x00000c80
351
352#define REG_A2XX_GRAS_DEBUG_DATA 0x00000c81
353
354#define REG_A2XX_PA_SU_DEBUG_DATA 0x00000c81
355
356#define REG_A2XX_PA_SU_FACE_DATA 0x00000c86
357
358#define REG_A2XX_SQ_GPR_MANAGEMENT 0x00000d00
359
360#define REG_A2XX_SQ_FLOW_CONTROL 0x00000d01
361
362#define REG_A2XX_SQ_INST_STORE_MANAGMENT 0x00000d02
363
364#define REG_A2XX_SQ_DEBUG_MISC 0x00000d05
365
366#define REG_A2XX_SQ_INT_CNTL 0x00000d34
367
368#define REG_A2XX_SQ_INT_STATUS 0x00000d35
369
370#define REG_A2XX_SQ_INT_ACK 0x00000d36
371
372#define REG_A2XX_SQ_DEBUG_INPUT_FSM 0x00000dae
373
374#define REG_A2XX_SQ_DEBUG_CONST_MGR_FSM 0x00000daf
375
376#define REG_A2XX_SQ_DEBUG_TP_FSM 0x00000db0
377
378#define REG_A2XX_SQ_DEBUG_FSM_ALU_0 0x00000db1
379
380#define REG_A2XX_SQ_DEBUG_FSM_ALU_1 0x00000db2
381
382#define REG_A2XX_SQ_DEBUG_EXP_ALLOC 0x00000db3
383
384#define REG_A2XX_SQ_DEBUG_PTR_BUFF 0x00000db4
385
386#define REG_A2XX_SQ_DEBUG_GPR_VTX 0x00000db5
387
388#define REG_A2XX_SQ_DEBUG_GPR_PIX 0x00000db6
389
390#define REG_A2XX_SQ_DEBUG_TB_STATUS_SEL 0x00000db7
391
392#define REG_A2XX_SQ_DEBUG_VTX_TB_0 0x00000db8
393
394#define REG_A2XX_SQ_DEBUG_VTX_TB_1 0x00000db9
395
396#define REG_A2XX_SQ_DEBUG_VTX_TB_STATUS_REG 0x00000dba
397
398#define REG_A2XX_SQ_DEBUG_VTX_TB_STATE_MEM 0x00000dbb
399
400#define REG_A2XX_SQ_DEBUG_PIX_TB_0 0x00000dbc
401
402#define REG_A2XX_SQ_DEBUG_PIX_TB_STATUS_REG_0 0x00000dbd
403
404#define REG_A2XX_SQ_DEBUG_PIX_TB_STATUS_REG_1 0x00000dbe
405
406#define REG_A2XX_SQ_DEBUG_PIX_TB_STATUS_REG_2 0x00000dbf
407
408#define REG_A2XX_SQ_DEBUG_PIX_TB_STATUS_REG_3 0x00000dc0
409
410#define REG_A2XX_SQ_DEBUG_PIX_TB_STATE_MEM 0x00000dc1
411
412#define REG_A2XX_TC_CNTL_STATUS 0x00000e00
413#define A2XX_TC_CNTL_STATUS_L2_INVALIDATE 0x00000001
414
415#define REG_A2XX_TP0_CHICKEN 0x00000e1e
416
417#define REG_A2XX_RB_BC_CONTROL 0x00000f01
418#define A2XX_RB_BC_CONTROL_ACCUM_LINEAR_MODE_ENABLE 0x00000001
419#define A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT__MASK 0x00000006
420#define A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT__SHIFT 1
421static inline uint32_t A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT(uint32_t val)
422{
423 return ((val) << A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT__SHIFT) & A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT__MASK;
424}
425#define A2XX_RB_BC_CONTROL_DISABLE_EDRAM_CAM 0x00000008
426#define A2XX_RB_BC_CONTROL_DISABLE_EZ_FAST_CONTEXT_SWITCH 0x00000010
427#define A2XX_RB_BC_CONTROL_DISABLE_EZ_NULL_ZCMD_DROP 0x00000020
428#define A2XX_RB_BC_CONTROL_DISABLE_LZ_NULL_ZCMD_DROP 0x00000040
429#define A2XX_RB_BC_CONTROL_ENABLE_AZ_THROTTLE 0x00000080
430#define A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT__MASK 0x00001f00
431#define A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT__SHIFT 8
432static inline uint32_t A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT(uint32_t val)
433{
434 return ((val) << A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT__SHIFT) & A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT__MASK;
435}
436#define A2XX_RB_BC_CONTROL_ENABLE_CRC_UPDATE 0x00004000
437#define A2XX_RB_BC_CONTROL_CRC_MODE 0x00008000
438#define A2XX_RB_BC_CONTROL_DISABLE_SAMPLE_COUNTERS 0x00010000
439#define A2XX_RB_BC_CONTROL_DISABLE_ACCUM 0x00020000
440#define A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK__MASK 0x003c0000
441#define A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK__SHIFT 18
442static inline uint32_t A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK(uint32_t val)
443{
444 return ((val) << A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK__SHIFT) & A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK__MASK;
445}
446#define A2XX_RB_BC_CONTROL_LINEAR_PERFORMANCE_ENABLE 0x00400000
447#define A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT__MASK 0x07800000
448#define A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT__SHIFT 23
449static inline uint32_t A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT(uint32_t val)
450{
451 return ((val) << A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT__SHIFT) & A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT__MASK;
452}
453#define A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT__MASK 0x18000000
454#define A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT__SHIFT 27
455static inline uint32_t A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT(uint32_t val)
456{
457 return ((val) << A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT__SHIFT) & A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT__MASK;
458}
459#define A2XX_RB_BC_CONTROL_MEM_EXPORT_LINEAR_MODE_ENABLE 0x20000000
460#define A2XX_RB_BC_CONTROL_CRC_SYSTEM 0x40000000
461#define A2XX_RB_BC_CONTROL_RESERVED6 0x80000000
462
463#define REG_A2XX_RB_EDRAM_INFO 0x00000f02
464
465#define REG_A2XX_RB_DEBUG_CNTL 0x00000f26
466
467#define REG_A2XX_RB_DEBUG_DATA 0x00000f27
468
469#define REG_A2XX_RB_SURFACE_INFO 0x00002000
470
471#define REG_A2XX_RB_COLOR_INFO 0x00002001
472#define A2XX_RB_COLOR_INFO_FORMAT__MASK 0x0000000f
473#define A2XX_RB_COLOR_INFO_FORMAT__SHIFT 0
474static inline uint32_t A2XX_RB_COLOR_INFO_FORMAT(enum a2xx_colorformatx val)
475{
476 return ((val) << A2XX_RB_COLOR_INFO_FORMAT__SHIFT) & A2XX_RB_COLOR_INFO_FORMAT__MASK;
477}
478#define A2XX_RB_COLOR_INFO_ROUND_MODE__MASK 0x00000030
479#define A2XX_RB_COLOR_INFO_ROUND_MODE__SHIFT 4
480static inline uint32_t A2XX_RB_COLOR_INFO_ROUND_MODE(uint32_t val)
481{
482 return ((val) << A2XX_RB_COLOR_INFO_ROUND_MODE__SHIFT) & A2XX_RB_COLOR_INFO_ROUND_MODE__MASK;
483}
484#define A2XX_RB_COLOR_INFO_LINEAR 0x00000040
485#define A2XX_RB_COLOR_INFO_ENDIAN__MASK 0x00000180
486#define A2XX_RB_COLOR_INFO_ENDIAN__SHIFT 7
487static inline uint32_t A2XX_RB_COLOR_INFO_ENDIAN(uint32_t val)
488{
489 return ((val) << A2XX_RB_COLOR_INFO_ENDIAN__SHIFT) & A2XX_RB_COLOR_INFO_ENDIAN__MASK;
490}
491#define A2XX_RB_COLOR_INFO_SWAP__MASK 0x00000600
492#define A2XX_RB_COLOR_INFO_SWAP__SHIFT 9
493static inline uint32_t A2XX_RB_COLOR_INFO_SWAP(uint32_t val)
494{
495 return ((val) << A2XX_RB_COLOR_INFO_SWAP__SHIFT) & A2XX_RB_COLOR_INFO_SWAP__MASK;
496}
497#define A2XX_RB_COLOR_INFO_BASE__MASK 0xfffff000
498#define A2XX_RB_COLOR_INFO_BASE__SHIFT 12
499static inline uint32_t A2XX_RB_COLOR_INFO_BASE(uint32_t val)
500{
501 return ((val >> 10) << A2XX_RB_COLOR_INFO_BASE__SHIFT) & A2XX_RB_COLOR_INFO_BASE__MASK;
502}
503
504#define REG_A2XX_RB_DEPTH_INFO 0x00002002
505#define A2XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK 0x00000001
506#define A2XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT 0
507static inline uint32_t A2XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum adreno_rb_depth_format val)
508{
509 return ((val) << A2XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT) & A2XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK;
510}
511#define A2XX_RB_DEPTH_INFO_DEPTH_BASE__MASK 0xfffff000
512#define A2XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT 12
513static inline uint32_t A2XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val)
514{
515 return ((val >> 10) << A2XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A2XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
516}
517
518#define REG_A2XX_A225_RB_COLOR_INFO3 0x00002005
519
520#define REG_A2XX_COHER_DEST_BASE_0 0x00002006
521
522#define REG_A2XX_PA_SC_SCREEN_SCISSOR_TL 0x0000200e
523#define A2XX_PA_SC_SCREEN_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
524#define A2XX_PA_SC_SCREEN_SCISSOR_TL_X__MASK 0x00007fff
525#define A2XX_PA_SC_SCREEN_SCISSOR_TL_X__SHIFT 0
526static inline uint32_t A2XX_PA_SC_SCREEN_SCISSOR_TL_X(uint32_t val)
527{
528 return ((val) << A2XX_PA_SC_SCREEN_SCISSOR_TL_X__SHIFT) & A2XX_PA_SC_SCREEN_SCISSOR_TL_X__MASK;
529}
530#define A2XX_PA_SC_SCREEN_SCISSOR_TL_Y__MASK 0x7fff0000
531#define A2XX_PA_SC_SCREEN_SCISSOR_TL_Y__SHIFT 16
532static inline uint32_t A2XX_PA_SC_SCREEN_SCISSOR_TL_Y(uint32_t val)
533{
534 return ((val) << A2XX_PA_SC_SCREEN_SCISSOR_TL_Y__SHIFT) & A2XX_PA_SC_SCREEN_SCISSOR_TL_Y__MASK;
535}
536
537#define REG_A2XX_PA_SC_SCREEN_SCISSOR_BR 0x0000200f
538#define A2XX_PA_SC_SCREEN_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
539#define A2XX_PA_SC_SCREEN_SCISSOR_BR_X__MASK 0x00007fff
540#define A2XX_PA_SC_SCREEN_SCISSOR_BR_X__SHIFT 0
541static inline uint32_t A2XX_PA_SC_SCREEN_SCISSOR_BR_X(uint32_t val)
542{
543 return ((val) << A2XX_PA_SC_SCREEN_SCISSOR_BR_X__SHIFT) & A2XX_PA_SC_SCREEN_SCISSOR_BR_X__MASK;
544}
545#define A2XX_PA_SC_SCREEN_SCISSOR_BR_Y__MASK 0x7fff0000
546#define A2XX_PA_SC_SCREEN_SCISSOR_BR_Y__SHIFT 16
547static inline uint32_t A2XX_PA_SC_SCREEN_SCISSOR_BR_Y(uint32_t val)
548{
549 return ((val) << A2XX_PA_SC_SCREEN_SCISSOR_BR_Y__SHIFT) & A2XX_PA_SC_SCREEN_SCISSOR_BR_Y__MASK;
550}
551
552#define REG_A2XX_PA_SC_WINDOW_OFFSET 0x00002080
553#define A2XX_PA_SC_WINDOW_OFFSET_X__MASK 0x00007fff
554#define A2XX_PA_SC_WINDOW_OFFSET_X__SHIFT 0
555static inline uint32_t A2XX_PA_SC_WINDOW_OFFSET_X(int32_t val)
556{
557 return ((val) << A2XX_PA_SC_WINDOW_OFFSET_X__SHIFT) & A2XX_PA_SC_WINDOW_OFFSET_X__MASK;
558}
559#define A2XX_PA_SC_WINDOW_OFFSET_Y__MASK 0x7fff0000
560#define A2XX_PA_SC_WINDOW_OFFSET_Y__SHIFT 16
561static inline uint32_t A2XX_PA_SC_WINDOW_OFFSET_Y(int32_t val)
562{
563 return ((val) << A2XX_PA_SC_WINDOW_OFFSET_Y__SHIFT) & A2XX_PA_SC_WINDOW_OFFSET_Y__MASK;
564}
565#define A2XX_PA_SC_WINDOW_OFFSET_DISABLE 0x80000000
566
567#define REG_A2XX_PA_SC_WINDOW_SCISSOR_TL 0x00002081
568#define A2XX_PA_SC_WINDOW_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
569#define A2XX_PA_SC_WINDOW_SCISSOR_TL_X__MASK 0x00007fff
570#define A2XX_PA_SC_WINDOW_SCISSOR_TL_X__SHIFT 0
571static inline uint32_t A2XX_PA_SC_WINDOW_SCISSOR_TL_X(uint32_t val)
572{
573 return ((val) << A2XX_PA_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A2XX_PA_SC_WINDOW_SCISSOR_TL_X__MASK;
574}
575#define A2XX_PA_SC_WINDOW_SCISSOR_TL_Y__MASK 0x7fff0000
576#define A2XX_PA_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16
577static inline uint32_t A2XX_PA_SC_WINDOW_SCISSOR_TL_Y(uint32_t val)
578{
579 return ((val) << A2XX_PA_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A2XX_PA_SC_WINDOW_SCISSOR_TL_Y__MASK;
580}
581
582#define REG_A2XX_PA_SC_WINDOW_SCISSOR_BR 0x00002082
583#define A2XX_PA_SC_WINDOW_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
584#define A2XX_PA_SC_WINDOW_SCISSOR_BR_X__MASK 0x00007fff
585#define A2XX_PA_SC_WINDOW_SCISSOR_BR_X__SHIFT 0
586static inline uint32_t A2XX_PA_SC_WINDOW_SCISSOR_BR_X(uint32_t val)
587{
588 return ((val) << A2XX_PA_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A2XX_PA_SC_WINDOW_SCISSOR_BR_X__MASK;
589}
590#define A2XX_PA_SC_WINDOW_SCISSOR_BR_Y__MASK 0x7fff0000
591#define A2XX_PA_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16
592static inline uint32_t A2XX_PA_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
593{
594 return ((val) << A2XX_PA_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A2XX_PA_SC_WINDOW_SCISSOR_BR_Y__MASK;
595}
596
597#define REG_A2XX_UNKNOWN_2010 0x00002010
598
599#define REG_A2XX_VGT_MAX_VTX_INDX 0x00002100
600
601#define REG_A2XX_VGT_MIN_VTX_INDX 0x00002101
602
603#define REG_A2XX_VGT_INDX_OFFSET 0x00002102
604
605#define REG_A2XX_A225_PC_MULTI_PRIM_IB_RESET_INDX 0x00002103
606
607#define REG_A2XX_RB_COLOR_MASK 0x00002104
608#define A2XX_RB_COLOR_MASK_WRITE_RED 0x00000001
609#define A2XX_RB_COLOR_MASK_WRITE_GREEN 0x00000002
610#define A2XX_RB_COLOR_MASK_WRITE_BLUE 0x00000004
611#define A2XX_RB_COLOR_MASK_WRITE_ALPHA 0x00000008
612
613#define REG_A2XX_RB_BLEND_RED 0x00002105
614
615#define REG_A2XX_RB_BLEND_GREEN 0x00002106
616
617#define REG_A2XX_RB_BLEND_BLUE 0x00002107
618
619#define REG_A2XX_RB_BLEND_ALPHA 0x00002108
620
621#define REG_A2XX_RB_FOG_COLOR 0x00002109
622
623#define REG_A2XX_RB_STENCILREFMASK_BF 0x0000210c
624#define A2XX_RB_STENCILREFMASK_BF_STENCILREF__MASK 0x000000ff
625#define A2XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT 0
626static inline uint32_t A2XX_RB_STENCILREFMASK_BF_STENCILREF(uint32_t val)
627{
628 return ((val) << A2XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT) & A2XX_RB_STENCILREFMASK_BF_STENCILREF__MASK;
629}
630#define A2XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK 0x0000ff00
631#define A2XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT 8
632static inline uint32_t A2XX_RB_STENCILREFMASK_BF_STENCILMASK(uint32_t val)
633{
634 return ((val) << A2XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT) & A2XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK;
635}
636#define A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK 0x00ff0000
637#define A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT 16
638static inline uint32_t A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK(uint32_t val)
639{
640 return ((val) << A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT) & A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK;
641}
642
643#define REG_A2XX_RB_STENCILREFMASK 0x0000210d
644#define A2XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff
645#define A2XX_RB_STENCILREFMASK_STENCILREF__SHIFT 0
646static inline uint32_t A2XX_RB_STENCILREFMASK_STENCILREF(uint32_t val)
647{
648 return ((val) << A2XX_RB_STENCILREFMASK_STENCILREF__SHIFT) & A2XX_RB_STENCILREFMASK_STENCILREF__MASK;
649}
650#define A2XX_RB_STENCILREFMASK_STENCILMASK__MASK 0x0000ff00
651#define A2XX_RB_STENCILREFMASK_STENCILMASK__SHIFT 8
652static inline uint32_t A2XX_RB_STENCILREFMASK_STENCILMASK(uint32_t val)
653{
654 return ((val) << A2XX_RB_STENCILREFMASK_STENCILMASK__SHIFT) & A2XX_RB_STENCILREFMASK_STENCILMASK__MASK;
655}
656#define A2XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK 0x00ff0000
657#define A2XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT 16
658static inline uint32_t A2XX_RB_STENCILREFMASK_STENCILWRITEMASK(uint32_t val)
659{
660 return ((val) << A2XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT) & A2XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK;
661}
662
663#define REG_A2XX_RB_ALPHA_REF 0x0000210e
664
665#define REG_A2XX_PA_CL_VPORT_XSCALE 0x0000210f
666#define A2XX_PA_CL_VPORT_XSCALE__MASK 0xffffffff
667#define A2XX_PA_CL_VPORT_XSCALE__SHIFT 0
668static inline uint32_t A2XX_PA_CL_VPORT_XSCALE(float val)
669{
670 return ((fui(val)) << A2XX_PA_CL_VPORT_XSCALE__SHIFT) & A2XX_PA_CL_VPORT_XSCALE__MASK;
671}
672
673#define REG_A2XX_PA_CL_VPORT_XOFFSET 0x00002110
674#define A2XX_PA_CL_VPORT_XOFFSET__MASK 0xffffffff
675#define A2XX_PA_CL_VPORT_XOFFSET__SHIFT 0
676static inline uint32_t A2XX_PA_CL_VPORT_XOFFSET(float val)
677{
678 return ((fui(val)) << A2XX_PA_CL_VPORT_XOFFSET__SHIFT) & A2XX_PA_CL_VPORT_XOFFSET__MASK;
679}
680
681#define REG_A2XX_PA_CL_VPORT_YSCALE 0x00002111
682#define A2XX_PA_CL_VPORT_YSCALE__MASK 0xffffffff
683#define A2XX_PA_CL_VPORT_YSCALE__SHIFT 0
684static inline uint32_t A2XX_PA_CL_VPORT_YSCALE(float val)
685{
686 return ((fui(val)) << A2XX_PA_CL_VPORT_YSCALE__SHIFT) & A2XX_PA_CL_VPORT_YSCALE__MASK;
687}
688
689#define REG_A2XX_PA_CL_VPORT_YOFFSET 0x00002112
690#define A2XX_PA_CL_VPORT_YOFFSET__MASK 0xffffffff
691#define A2XX_PA_CL_VPORT_YOFFSET__SHIFT 0
692static inline uint32_t A2XX_PA_CL_VPORT_YOFFSET(float val)
693{
694 return ((fui(val)) << A2XX_PA_CL_VPORT_YOFFSET__SHIFT) & A2XX_PA_CL_VPORT_YOFFSET__MASK;
695}
696
697#define REG_A2XX_PA_CL_VPORT_ZSCALE 0x00002113
698#define A2XX_PA_CL_VPORT_ZSCALE__MASK 0xffffffff
699#define A2XX_PA_CL_VPORT_ZSCALE__SHIFT 0
700static inline uint32_t A2XX_PA_CL_VPORT_ZSCALE(float val)
701{
702 return ((fui(val)) << A2XX_PA_CL_VPORT_ZSCALE__SHIFT) & A2XX_PA_CL_VPORT_ZSCALE__MASK;
703}
704
705#define REG_A2XX_PA_CL_VPORT_ZOFFSET 0x00002114
706#define A2XX_PA_CL_VPORT_ZOFFSET__MASK 0xffffffff
707#define A2XX_PA_CL_VPORT_ZOFFSET__SHIFT 0
708static inline uint32_t A2XX_PA_CL_VPORT_ZOFFSET(float val)
709{
710 return ((fui(val)) << A2XX_PA_CL_VPORT_ZOFFSET__SHIFT) & A2XX_PA_CL_VPORT_ZOFFSET__MASK;
711}
712
713#define REG_A2XX_SQ_PROGRAM_CNTL 0x00002180
714#define A2XX_SQ_PROGRAM_CNTL_VS_REGS__MASK 0x000000ff
715#define A2XX_SQ_PROGRAM_CNTL_VS_REGS__SHIFT 0
716static inline uint32_t A2XX_SQ_PROGRAM_CNTL_VS_REGS(uint32_t val)
717{
718 return ((val) << A2XX_SQ_PROGRAM_CNTL_VS_REGS__SHIFT) & A2XX_SQ_PROGRAM_CNTL_VS_REGS__MASK;
719}
720#define A2XX_SQ_PROGRAM_CNTL_PS_REGS__MASK 0x0000ff00
721#define A2XX_SQ_PROGRAM_CNTL_PS_REGS__SHIFT 8
722static inline uint32_t A2XX_SQ_PROGRAM_CNTL_PS_REGS(uint32_t val)
723{
724 return ((val) << A2XX_SQ_PROGRAM_CNTL_PS_REGS__SHIFT) & A2XX_SQ_PROGRAM_CNTL_PS_REGS__MASK;
725}
726#define A2XX_SQ_PROGRAM_CNTL_VS_RESOURCE 0x00010000
727#define A2XX_SQ_PROGRAM_CNTL_PS_RESOURCE 0x00020000
728#define A2XX_SQ_PROGRAM_CNTL_PARAM_GEN 0x00040000
729#define A2XX_SQ_PROGRAM_CNTL_GEN_INDEX_PIX 0x00080000
730#define A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT__MASK 0x00f00000
731#define A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT__SHIFT 20
732static inline uint32_t A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT(uint32_t val)
733{
734 return ((val) << A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT__SHIFT) & A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT__MASK;
735}
736#define A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE__MASK 0x07000000
737#define A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE__SHIFT 24
738static inline uint32_t A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE(enum a2xx_sq_ps_vtx_mode val)
739{
740 return ((val) << A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE__SHIFT) & A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE__MASK;
741}
742#define A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE__MASK 0x78000000
743#define A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE__SHIFT 27
744static inline uint32_t A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE(uint32_t val)
745{
746 return ((val) << A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE__SHIFT) & A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE__MASK;
747}
748#define A2XX_SQ_PROGRAM_CNTL_GEN_INDEX_VTX 0x80000000
749
750#define REG_A2XX_SQ_CONTEXT_MISC 0x00002181
751#define A2XX_SQ_CONTEXT_MISC_INST_PRED_OPTIMIZE 0x00000001
752#define A2XX_SQ_CONTEXT_MISC_SC_OUTPUT_SCREEN_XY 0x00000002
753#define A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL__MASK 0x0000000c
754#define A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL__SHIFT 2
755static inline uint32_t A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL(enum a2xx_sq_sample_cntl val)
756{
757 return ((val) << A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL__SHIFT) & A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL__MASK;
758}
759#define A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS__MASK 0x0000ff00
760#define A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS__SHIFT 8
761static inline uint32_t A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS(uint32_t val)
762{
763 return ((val) << A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS__SHIFT) & A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS__MASK;
764}
765#define A2XX_SQ_CONTEXT_MISC_PERFCOUNTER_REF 0x00010000
766#define A2XX_SQ_CONTEXT_MISC_YEILD_OPTIMIZE 0x00020000
767#define A2XX_SQ_CONTEXT_MISC_TX_CACHE_SEL 0x00040000
768
769#define REG_A2XX_SQ_INTERPOLATOR_CNTL 0x00002182
770
771#define REG_A2XX_SQ_WRAPPING_0 0x00002183
772
773#define REG_A2XX_SQ_WRAPPING_1 0x00002184
774
775#define REG_A2XX_SQ_PS_PROGRAM 0x000021f6
776
777#define REG_A2XX_SQ_VS_PROGRAM 0x000021f7
778
779#define REG_A2XX_RB_DEPTHCONTROL 0x00002200
780#define A2XX_RB_DEPTHCONTROL_STENCIL_ENABLE 0x00000001
781#define A2XX_RB_DEPTHCONTROL_Z_ENABLE 0x00000002
782#define A2XX_RB_DEPTHCONTROL_Z_WRITE_ENABLE 0x00000004
783#define A2XX_RB_DEPTHCONTROL_EARLY_Z_ENABLE 0x00000008
784#define A2XX_RB_DEPTHCONTROL_ZFUNC__MASK 0x00000070
785#define A2XX_RB_DEPTHCONTROL_ZFUNC__SHIFT 4
786static inline uint32_t A2XX_RB_DEPTHCONTROL_ZFUNC(enum adreno_compare_func val)
787{
788 return ((val) << A2XX_RB_DEPTHCONTROL_ZFUNC__SHIFT) & A2XX_RB_DEPTHCONTROL_ZFUNC__MASK;
789}
790#define A2XX_RB_DEPTHCONTROL_BACKFACE_ENABLE 0x00000080
791#define A2XX_RB_DEPTHCONTROL_STENCILFUNC__MASK 0x00000700
792#define A2XX_RB_DEPTHCONTROL_STENCILFUNC__SHIFT 8
793static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILFUNC(enum adreno_compare_func val)
794{
795 return ((val) << A2XX_RB_DEPTHCONTROL_STENCILFUNC__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILFUNC__MASK;
796}
797#define A2XX_RB_DEPTHCONTROL_STENCILFAIL__MASK 0x00003800
798#define A2XX_RB_DEPTHCONTROL_STENCILFAIL__SHIFT 11
799static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILFAIL(enum adreno_stencil_op val)
800{
801 return ((val) << A2XX_RB_DEPTHCONTROL_STENCILFAIL__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILFAIL__MASK;
802}
803#define A2XX_RB_DEPTHCONTROL_STENCILZPASS__MASK 0x0001c000
804#define A2XX_RB_DEPTHCONTROL_STENCILZPASS__SHIFT 14
805static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILZPASS(enum adreno_stencil_op val)
806{
807 return ((val) << A2XX_RB_DEPTHCONTROL_STENCILZPASS__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILZPASS__MASK;
808}
809#define A2XX_RB_DEPTHCONTROL_STENCILZFAIL__MASK 0x000e0000
810#define A2XX_RB_DEPTHCONTROL_STENCILZFAIL__SHIFT 17
811static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILZFAIL(enum adreno_stencil_op val)
812{
813 return ((val) << A2XX_RB_DEPTHCONTROL_STENCILZFAIL__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILZFAIL__MASK;
814}
815#define A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF__MASK 0x00700000
816#define A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF__SHIFT 20
817static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF(enum adreno_compare_func val)
818{
819 return ((val) << A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF__MASK;
820}
821#define A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF__MASK 0x03800000
822#define A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF__SHIFT 23
823static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF(enum adreno_stencil_op val)
824{
825 return ((val) << A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF__MASK;
826}
827#define A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF__MASK 0x1c000000
828#define A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF__SHIFT 26
829static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF(enum adreno_stencil_op val)
830{
831 return ((val) << A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF__MASK;
832}
833#define A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF__MASK 0xe0000000
834#define A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF__SHIFT 29
835static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF(enum adreno_stencil_op val)
836{
837 return ((val) << A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF__MASK;
838}
839
/* RB_BLEND_CONTROL: per-channel blend state — separate src/dst factor and
 * combine-function fields for color and alpha.  (Autogenerated helpers.) */
#define REG_A2XX_RB_BLEND_CONTROL 0x00002201
#define A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND__MASK 0x0000001f
#define A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND__SHIFT 0
static inline uint32_t A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND(enum adreno_rb_blend_factor val)
{
	return ((val) << A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND__SHIFT) & A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND__MASK;
}
#define A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__MASK 0x000000e0
#define A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__SHIFT 5
static inline uint32_t A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN(enum adreno_rb_blend_opcode val)
{
	return ((val) << A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__SHIFT) & A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__MASK;
}
#define A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND__MASK 0x00001f00
#define A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND__SHIFT 8
static inline uint32_t A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND(enum adreno_rb_blend_factor val)
{
	return ((val) << A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND__SHIFT) & A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND__MASK;
}
#define A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND__MASK 0x001f0000
#define A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND__SHIFT 16
static inline uint32_t A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND(enum adreno_rb_blend_factor val)
{
	return ((val) << A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND__SHIFT) & A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND__MASK;
}
#define A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__MASK 0x00e00000
#define A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__SHIFT 21
static inline uint32_t A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN(enum adreno_rb_blend_opcode val)
{
	return ((val) << A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__SHIFT) & A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__MASK;
}
#define A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND__MASK 0x1f000000
#define A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND__SHIFT 24
static inline uint32_t A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND(enum adreno_rb_blend_factor val)
{
	return ((val) << A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND__SHIFT) & A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND__MASK;
}
#define A2XX_RB_BLEND_CONTROL_BLEND_FORCE_ENABLE 0x20000000
#define A2XX_RB_BLEND_CONTROL_BLEND_FORCE 0x40000000

/* RB_COLORCONTROL: alpha test, blend enable, ROP, dithering, and
 * alpha-to-mask sample offsets. */
#define REG_A2XX_RB_COLORCONTROL 0x00002202
#define A2XX_RB_COLORCONTROL_ALPHA_FUNC__MASK 0x00000007
#define A2XX_RB_COLORCONTROL_ALPHA_FUNC__SHIFT 0
static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_FUNC(enum adreno_compare_func val)
{
	return ((val) << A2XX_RB_COLORCONTROL_ALPHA_FUNC__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_FUNC__MASK;
}
#define A2XX_RB_COLORCONTROL_ALPHA_TEST_ENABLE 0x00000008
#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_ENABLE 0x00000010
#define A2XX_RB_COLORCONTROL_BLEND_DISABLE 0x00000020
#define A2XX_RB_COLORCONTROL_VOB_ENABLE 0x00000040
#define A2XX_RB_COLORCONTROL_VS_EXPORTS_FOG 0x00000080
#define A2XX_RB_COLORCONTROL_ROP_CODE__MASK 0x00000f00
#define A2XX_RB_COLORCONTROL_ROP_CODE__SHIFT 8
static inline uint32_t A2XX_RB_COLORCONTROL_ROP_CODE(uint32_t val)
{
	return ((val) << A2XX_RB_COLORCONTROL_ROP_CODE__SHIFT) & A2XX_RB_COLORCONTROL_ROP_CODE__MASK;
}
#define A2XX_RB_COLORCONTROL_DITHER_MODE__MASK 0x00003000
#define A2XX_RB_COLORCONTROL_DITHER_MODE__SHIFT 12
static inline uint32_t A2XX_RB_COLORCONTROL_DITHER_MODE(enum adreno_rb_dither_mode val)
{
	return ((val) << A2XX_RB_COLORCONTROL_DITHER_MODE__SHIFT) & A2XX_RB_COLORCONTROL_DITHER_MODE__MASK;
}
#define A2XX_RB_COLORCONTROL_DITHER_TYPE__MASK 0x0000c000
#define A2XX_RB_COLORCONTROL_DITHER_TYPE__SHIFT 14
static inline uint32_t A2XX_RB_COLORCONTROL_DITHER_TYPE(enum a2xx_rb_dither_type val)
{
	return ((val) << A2XX_RB_COLORCONTROL_DITHER_TYPE__SHIFT) & A2XX_RB_COLORCONTROL_DITHER_TYPE__MASK;
}
#define A2XX_RB_COLORCONTROL_PIXEL_FOG 0x00010000
/* Four 2-bit alpha-to-mask sample offsets, one per MSAA sample position
 * (presumably — semantics not derivable from this header; confirm against
 * the a2xx docs). */
#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0__MASK 0x03000000
#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0__SHIFT 24
static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0(uint32_t val)
{
	return ((val) << A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0__MASK;
}
#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1__MASK 0x0c000000
#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1__SHIFT 26
static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1(uint32_t val)
{
	return ((val) << A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1__MASK;
}
#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2__MASK 0x30000000
#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2__SHIFT 28
static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2(uint32_t val)
{
	return ((val) << A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2__MASK;
}
#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3__MASK 0xc0000000
#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3__SHIFT 30
static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3(uint32_t val)
{
	return ((val) << A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3__MASK;
}
935
/* VGT_CURRENT_BIN_ID_MAX: upper bound of the current tiling bin
 * (column/row) plus guard-band mask. */
#define REG_A2XX_VGT_CURRENT_BIN_ID_MAX 0x00002203
#define A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN__MASK 0x00000007
#define A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN__SHIFT 0
static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN(uint32_t val)
{
	return ((val) << A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN__MASK;
}
#define A2XX_VGT_CURRENT_BIN_ID_MAX_ROW__MASK 0x00000038
#define A2XX_VGT_CURRENT_BIN_ID_MAX_ROW__SHIFT 3
static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MAX_ROW(uint32_t val)
{
	return ((val) << A2XX_VGT_CURRENT_BIN_ID_MAX_ROW__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MAX_ROW__MASK;
}
#define A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK__MASK 0x000001c0
#define A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK__SHIFT 6
static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK(uint32_t val)
{
	return ((val) << A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK__MASK;
}

/* PA_CL_CLIP_CNTL: clip-stage enables (clip-space convention, NaN
 * retention, vertex-kill behavior). */
#define REG_A2XX_PA_CL_CLIP_CNTL 0x00002204
#define A2XX_PA_CL_CLIP_CNTL_CLIP_DISABLE 0x00010000
#define A2XX_PA_CL_CLIP_CNTL_BOUNDARY_EDGE_FLAG_ENA 0x00040000
#define A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF__MASK 0x00080000
#define A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF__SHIFT 19
static inline uint32_t A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF(enum a2xx_dx_clip_space val)
{
	return ((val) << A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF__SHIFT) & A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF__MASK;
}
#define A2XX_PA_CL_CLIP_CNTL_DIS_CLIP_ERR_DETECT 0x00100000
#define A2XX_PA_CL_CLIP_CNTL_VTX_KILL_OR 0x00200000
#define A2XX_PA_CL_CLIP_CNTL_XY_NAN_RETAIN 0x00400000
#define A2XX_PA_CL_CLIP_CNTL_Z_NAN_RETAIN 0x00800000
#define A2XX_PA_CL_CLIP_CNTL_W_NAN_RETAIN 0x01000000

/* PA_SU_SC_MODE_CNTL: setup/scan-converter mode — face culling, polygon
 * mode, per-face primitive type, polygon offset, MSAA and stipple enables. */
#define REG_A2XX_PA_SU_SC_MODE_CNTL 0x00002205
#define A2XX_PA_SU_SC_MODE_CNTL_CULL_FRONT 0x00000001
#define A2XX_PA_SU_SC_MODE_CNTL_CULL_BACK 0x00000002
#define A2XX_PA_SU_SC_MODE_CNTL_FACE 0x00000004
#define A2XX_PA_SU_SC_MODE_CNTL_POLYMODE__MASK 0x00000018
#define A2XX_PA_SU_SC_MODE_CNTL_POLYMODE__SHIFT 3
static inline uint32_t A2XX_PA_SU_SC_MODE_CNTL_POLYMODE(enum a2xx_pa_su_sc_polymode val)
{
	return ((val) << A2XX_PA_SU_SC_MODE_CNTL_POLYMODE__SHIFT) & A2XX_PA_SU_SC_MODE_CNTL_POLYMODE__MASK;
}
#define A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE__MASK 0x000000e0
#define A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE__SHIFT 5
static inline uint32_t A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE(enum adreno_pa_su_sc_draw val)
{
	return ((val) << A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE__SHIFT) & A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE__MASK;
}
#define A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE__MASK 0x00000700
#define A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE__SHIFT 8
static inline uint32_t A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE(enum adreno_pa_su_sc_draw val)
{
	return ((val) << A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE__SHIFT) & A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE__MASK;
}
#define A2XX_PA_SU_SC_MODE_CNTL_POLY_OFFSET_FRONT_ENABLE 0x00000800
#define A2XX_PA_SU_SC_MODE_CNTL_POLY_OFFSET_BACK_ENABLE 0x00001000
#define A2XX_PA_SU_SC_MODE_CNTL_POLY_OFFSET_PARA_ENABLE 0x00002000
#define A2XX_PA_SU_SC_MODE_CNTL_MSAA_ENABLE 0x00008000
#define A2XX_PA_SU_SC_MODE_CNTL_VTX_WINDOW_OFFSET_ENABLE 0x00010000
#define A2XX_PA_SU_SC_MODE_CNTL_LINE_STIPPLE_ENABLE 0x00040000
#define A2XX_PA_SU_SC_MODE_CNTL_PROVOKING_VTX_LAST 0x00080000
#define A2XX_PA_SU_SC_MODE_CNTL_PERSP_CORR_DIS 0x00100000
#define A2XX_PA_SU_SC_MODE_CNTL_MULTI_PRIM_IB_ENA 0x00200000
#define A2XX_PA_SU_SC_MODE_CNTL_QUAD_ORDER_ENABLE 0x00800000
#define A2XX_PA_SU_SC_MODE_CNTL_WAIT_RB_IDLE_ALL_TRI 0x02000000
#define A2XX_PA_SU_SC_MODE_CNTL_WAIT_RB_IDLE_FIRST_TRI_NEW_STATE 0x04000000
#define A2XX_PA_SU_SC_MODE_CNTL_CLAMPED_FACENESS 0x10000000
#define A2XX_PA_SU_SC_MODE_CNTL_ZERO_AREA_FACENESS 0x20000000
#define A2XX_PA_SU_SC_MODE_CNTL_FACE_KILL_ENABLE 0x40000000
#define A2XX_PA_SU_SC_MODE_CNTL_FACE_WRITE_ENABLE 0x80000000

/* PA_CL_VTE_CNTL: viewport transform enables per component, plus vertex
 * format flags. */
#define REG_A2XX_PA_CL_VTE_CNTL 0x00002206
#define A2XX_PA_CL_VTE_CNTL_VPORT_X_SCALE_ENA 0x00000001
#define A2XX_PA_CL_VTE_CNTL_VPORT_X_OFFSET_ENA 0x00000002
#define A2XX_PA_CL_VTE_CNTL_VPORT_Y_SCALE_ENA 0x00000004
#define A2XX_PA_CL_VTE_CNTL_VPORT_Y_OFFSET_ENA 0x00000008
#define A2XX_PA_CL_VTE_CNTL_VPORT_Z_SCALE_ENA 0x00000010
#define A2XX_PA_CL_VTE_CNTL_VPORT_Z_OFFSET_ENA 0x00000020
#define A2XX_PA_CL_VTE_CNTL_VTX_XY_FMT 0x00000100
#define A2XX_PA_CL_VTE_CNTL_VTX_Z_FMT 0x00000200
#define A2XX_PA_CL_VTE_CNTL_VTX_W0_FMT 0x00000400
#define A2XX_PA_CL_VTE_CNTL_PERFCOUNTER_REF 0x00000800
1021
/* VGT_CURRENT_BIN_ID_MIN: lower bound of the current tiling bin — same
 * field layout as VGT_CURRENT_BIN_ID_MAX above. */
#define REG_A2XX_VGT_CURRENT_BIN_ID_MIN 0x00002207
#define A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN__MASK 0x00000007
#define A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN__SHIFT 0
static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN(uint32_t val)
{
	return ((val) << A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN__MASK;
}
#define A2XX_VGT_CURRENT_BIN_ID_MIN_ROW__MASK 0x00000038
#define A2XX_VGT_CURRENT_BIN_ID_MIN_ROW__SHIFT 3
static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MIN_ROW(uint32_t val)
{
	return ((val) << A2XX_VGT_CURRENT_BIN_ID_MIN_ROW__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MIN_ROW__MASK;
}
#define A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK__MASK 0x000001c0
#define A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK__SHIFT 6
static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK(uint32_t val)
{
	return ((val) << A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK__MASK;
}

/* RB_MODECONTROL: EDRAM (GMEM) access mode for the render backend. */
#define REG_A2XX_RB_MODECONTROL 0x00002208
#define A2XX_RB_MODECONTROL_EDRAM_MODE__MASK 0x00000007
#define A2XX_RB_MODECONTROL_EDRAM_MODE__SHIFT 0
static inline uint32_t A2XX_RB_MODECONTROL_EDRAM_MODE(enum a2xx_rb_edram_mode val)
{
	return ((val) << A2XX_RB_MODECONTROL_EDRAM_MODE__SHIFT) & A2XX_RB_MODECONTROL_EDRAM_MODE__MASK;
}

#define REG_A2XX_A220_RB_LRZ_VSC_CONTROL 0x00002209

#define REG_A2XX_RB_SAMPLE_POS 0x0000220a

/* CLEAR_COLOR: fast-clear color, packed as four 8-bit RGBA channels. */
#define REG_A2XX_CLEAR_COLOR 0x0000220b
#define A2XX_CLEAR_COLOR_RED__MASK 0x000000ff
#define A2XX_CLEAR_COLOR_RED__SHIFT 0
static inline uint32_t A2XX_CLEAR_COLOR_RED(uint32_t val)
{
	return ((val) << A2XX_CLEAR_COLOR_RED__SHIFT) & A2XX_CLEAR_COLOR_RED__MASK;
}
#define A2XX_CLEAR_COLOR_GREEN__MASK 0x0000ff00
#define A2XX_CLEAR_COLOR_GREEN__SHIFT 8
static inline uint32_t A2XX_CLEAR_COLOR_GREEN(uint32_t val)
{
	return ((val) << A2XX_CLEAR_COLOR_GREEN__SHIFT) & A2XX_CLEAR_COLOR_GREEN__MASK;
}
#define A2XX_CLEAR_COLOR_BLUE__MASK 0x00ff0000
#define A2XX_CLEAR_COLOR_BLUE__SHIFT 16
static inline uint32_t A2XX_CLEAR_COLOR_BLUE(uint32_t val)
{
	return ((val) << A2XX_CLEAR_COLOR_BLUE__SHIFT) & A2XX_CLEAR_COLOR_BLUE__MASK;
}
#define A2XX_CLEAR_COLOR_ALPHA__MASK 0xff000000
#define A2XX_CLEAR_COLOR_ALPHA__SHIFT 24
static inline uint32_t A2XX_CLEAR_COLOR_ALPHA(uint32_t val)
{
	return ((val) << A2XX_CLEAR_COLOR_ALPHA__SHIFT) & A2XX_CLEAR_COLOR_ALPHA__MASK;
}
1079
#define REG_A2XX_A220_GRAS_CONTROL 0x00002210

/* PA_SU_POINT_SIZE / MINMAX / LINE_CNTL: point and line dimensions.  The
 * (uint32_t)(val * 8.0) conversion packs each dimension as unsigned
 * fixed-point with 3 fractional bits in a 16-bit field. */
#define REG_A2XX_PA_SU_POINT_SIZE 0x00002280
#define A2XX_PA_SU_POINT_SIZE_HEIGHT__MASK 0x0000ffff
#define A2XX_PA_SU_POINT_SIZE_HEIGHT__SHIFT 0
static inline uint32_t A2XX_PA_SU_POINT_SIZE_HEIGHT(float val)
{
	return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_POINT_SIZE_HEIGHT__SHIFT) & A2XX_PA_SU_POINT_SIZE_HEIGHT__MASK;
}
#define A2XX_PA_SU_POINT_SIZE_WIDTH__MASK 0xffff0000
#define A2XX_PA_SU_POINT_SIZE_WIDTH__SHIFT 16
static inline uint32_t A2XX_PA_SU_POINT_SIZE_WIDTH(float val)
{
	return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_POINT_SIZE_WIDTH__SHIFT) & A2XX_PA_SU_POINT_SIZE_WIDTH__MASK;
}

#define REG_A2XX_PA_SU_POINT_MINMAX 0x00002281
#define A2XX_PA_SU_POINT_MINMAX_MIN__MASK 0x0000ffff
#define A2XX_PA_SU_POINT_MINMAX_MIN__SHIFT 0
static inline uint32_t A2XX_PA_SU_POINT_MINMAX_MIN(float val)
{
	return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_POINT_MINMAX_MIN__SHIFT) & A2XX_PA_SU_POINT_MINMAX_MIN__MASK;
}
#define A2XX_PA_SU_POINT_MINMAX_MAX__MASK 0xffff0000
#define A2XX_PA_SU_POINT_MINMAX_MAX__SHIFT 16
static inline uint32_t A2XX_PA_SU_POINT_MINMAX_MAX(float val)
{
	return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_POINT_MINMAX_MAX__SHIFT) & A2XX_PA_SU_POINT_MINMAX_MAX__MASK;
}

#define REG_A2XX_PA_SU_LINE_CNTL 0x00002282
#define A2XX_PA_SU_LINE_CNTL_WIDTH__MASK 0x0000ffff
#define A2XX_PA_SU_LINE_CNTL_WIDTH__SHIFT 0
static inline uint32_t A2XX_PA_SU_LINE_CNTL_WIDTH(float val)
{
	return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_LINE_CNTL_WIDTH__SHIFT) & A2XX_PA_SU_LINE_CNTL_WIDTH__MASK;
}

/* PA_SC_LINE_STIPPLE: 16-bit stipple pattern, repeat count, bit order and
 * auto-reset behavior. */
#define REG_A2XX_PA_SC_LINE_STIPPLE 0x00002283
#define A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN__MASK 0x0000ffff
#define A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN__SHIFT 0
static inline uint32_t A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN(uint32_t val)
{
	return ((val) << A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN__SHIFT) & A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN__MASK;
}
#define A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT__MASK 0x00ff0000
#define A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT__SHIFT 16
static inline uint32_t A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT(uint32_t val)
{
	return ((val) << A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT__SHIFT) & A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT__MASK;
}
#define A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER__MASK 0x10000000
#define A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER__SHIFT 28
static inline uint32_t A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER(enum a2xx_pa_sc_pattern_bit_order val)
{
	return ((val) << A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER__SHIFT) & A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER__MASK;
}
#define A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL__MASK 0x60000000
#define A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL__SHIFT 29
static inline uint32_t A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL(enum a2xx_pa_sc_auto_reset_cntl val)
{
	return ((val) << A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL__SHIFT) & A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL__MASK;
}
1143
#define REG_A2XX_PA_SC_VIZ_QUERY 0x00002293

#define REG_A2XX_VGT_ENHANCE 0x00002294

/* PA_SC_LINE_CNTL: Bresenham line control and line-width expansion. */
#define REG_A2XX_PA_SC_LINE_CNTL 0x00002300
#define A2XX_PA_SC_LINE_CNTL_BRES_CNTL__MASK 0x0000ffff
#define A2XX_PA_SC_LINE_CNTL_BRES_CNTL__SHIFT 0
static inline uint32_t A2XX_PA_SC_LINE_CNTL_BRES_CNTL(uint32_t val)
{
	return ((val) << A2XX_PA_SC_LINE_CNTL_BRES_CNTL__SHIFT) & A2XX_PA_SC_LINE_CNTL_BRES_CNTL__MASK;
}
#define A2XX_PA_SC_LINE_CNTL_USE_BRES_CNTL 0x00000100
#define A2XX_PA_SC_LINE_CNTL_EXPAND_LINE_WIDTH 0x00000200
#define A2XX_PA_SC_LINE_CNTL_LAST_PIXEL 0x00000400

#define REG_A2XX_PA_SC_AA_CONFIG 0x00002301

/* PA_SU_VTX_CNTL: vertex snapping — pixel-center convention, rounding and
 * quantization modes. */
#define REG_A2XX_PA_SU_VTX_CNTL 0x00002302
#define A2XX_PA_SU_VTX_CNTL_PIX_CENTER__MASK 0x00000001
#define A2XX_PA_SU_VTX_CNTL_PIX_CENTER__SHIFT 0
static inline uint32_t A2XX_PA_SU_VTX_CNTL_PIX_CENTER(enum a2xx_pa_pixcenter val)
{
	return ((val) << A2XX_PA_SU_VTX_CNTL_PIX_CENTER__SHIFT) & A2XX_PA_SU_VTX_CNTL_PIX_CENTER__MASK;
}
#define A2XX_PA_SU_VTX_CNTL_ROUND_MODE__MASK 0x00000006
#define A2XX_PA_SU_VTX_CNTL_ROUND_MODE__SHIFT 1
static inline uint32_t A2XX_PA_SU_VTX_CNTL_ROUND_MODE(enum a2xx_pa_roundmode val)
{
	return ((val) << A2XX_PA_SU_VTX_CNTL_ROUND_MODE__SHIFT) & A2XX_PA_SU_VTX_CNTL_ROUND_MODE__MASK;
}
#define A2XX_PA_SU_VTX_CNTL_QUANT_MODE__MASK 0x00000380
#define A2XX_PA_SU_VTX_CNTL_QUANT_MODE__SHIFT 7
static inline uint32_t A2XX_PA_SU_VTX_CNTL_QUANT_MODE(enum a2xx_pa_quantmode val)
{
	return ((val) << A2XX_PA_SU_VTX_CNTL_QUANT_MODE__SHIFT) & A2XX_PA_SU_VTX_CNTL_QUANT_MODE__MASK;
}

/* PA_CL_GB_*_ADJ: guard-band clip/discard adjust registers.  Each takes a
 * raw 32-bit float via fui() (a float-to-uint bit-reinterpretation helper
 * defined elsewhere in the driver). */
#define REG_A2XX_PA_CL_GB_VERT_CLIP_ADJ 0x00002303
#define A2XX_PA_CL_GB_VERT_CLIP_ADJ__MASK 0xffffffff
#define A2XX_PA_CL_GB_VERT_CLIP_ADJ__SHIFT 0
static inline uint32_t A2XX_PA_CL_GB_VERT_CLIP_ADJ(float val)
{
	return ((fui(val)) << A2XX_PA_CL_GB_VERT_CLIP_ADJ__SHIFT) & A2XX_PA_CL_GB_VERT_CLIP_ADJ__MASK;
}

#define REG_A2XX_PA_CL_GB_VERT_DISC_ADJ 0x00002304
#define A2XX_PA_CL_GB_VERT_DISC_ADJ__MASK 0xffffffff
#define A2XX_PA_CL_GB_VERT_DISC_ADJ__SHIFT 0
static inline uint32_t A2XX_PA_CL_GB_VERT_DISC_ADJ(float val)
{
	return ((fui(val)) << A2XX_PA_CL_GB_VERT_DISC_ADJ__SHIFT) & A2XX_PA_CL_GB_VERT_DISC_ADJ__MASK;
}

#define REG_A2XX_PA_CL_GB_HORZ_CLIP_ADJ 0x00002305
#define A2XX_PA_CL_GB_HORZ_CLIP_ADJ__MASK 0xffffffff
#define A2XX_PA_CL_GB_HORZ_CLIP_ADJ__SHIFT 0
static inline uint32_t A2XX_PA_CL_GB_HORZ_CLIP_ADJ(float val)
{
	return ((fui(val)) << A2XX_PA_CL_GB_HORZ_CLIP_ADJ__SHIFT) & A2XX_PA_CL_GB_HORZ_CLIP_ADJ__MASK;
}

#define REG_A2XX_PA_CL_GB_HORZ_DISC_ADJ 0x00002306
#define A2XX_PA_CL_GB_HORZ_DISC_ADJ__MASK 0xffffffff
#define A2XX_PA_CL_GB_HORZ_DISC_ADJ__SHIFT 0
static inline uint32_t A2XX_PA_CL_GB_HORZ_DISC_ADJ(float val)
{
	return ((fui(val)) << A2XX_PA_CL_GB_HORZ_DISC_ADJ__SHIFT) & A2XX_PA_CL_GB_HORZ_DISC_ADJ__MASK;
}
1212
/* SQ_VS_CONST / SQ_PS_CONST: base and size of the vertex- and pixel-shader
 * constant windows (9-bit fields each). */
#define REG_A2XX_SQ_VS_CONST 0x00002307
#define A2XX_SQ_VS_CONST_BASE__MASK 0x000001ff
#define A2XX_SQ_VS_CONST_BASE__SHIFT 0
static inline uint32_t A2XX_SQ_VS_CONST_BASE(uint32_t val)
{
	return ((val) << A2XX_SQ_VS_CONST_BASE__SHIFT) & A2XX_SQ_VS_CONST_BASE__MASK;
}
#define A2XX_SQ_VS_CONST_SIZE__MASK 0x001ff000
#define A2XX_SQ_VS_CONST_SIZE__SHIFT 12
static inline uint32_t A2XX_SQ_VS_CONST_SIZE(uint32_t val)
{
	return ((val) << A2XX_SQ_VS_CONST_SIZE__SHIFT) & A2XX_SQ_VS_CONST_SIZE__MASK;
}

#define REG_A2XX_SQ_PS_CONST 0x00002308
#define A2XX_SQ_PS_CONST_BASE__MASK 0x000001ff
#define A2XX_SQ_PS_CONST_BASE__SHIFT 0
static inline uint32_t A2XX_SQ_PS_CONST_BASE(uint32_t val)
{
	return ((val) << A2XX_SQ_PS_CONST_BASE__SHIFT) & A2XX_SQ_PS_CONST_BASE__MASK;
}
#define A2XX_SQ_PS_CONST_SIZE__MASK 0x001ff000
#define A2XX_SQ_PS_CONST_SIZE__SHIFT 12
static inline uint32_t A2XX_SQ_PS_CONST_SIZE(uint32_t val)
{
	return ((val) << A2XX_SQ_PS_CONST_SIZE__SHIFT) & A2XX_SQ_PS_CONST_SIZE__MASK;
}

#define REG_A2XX_SQ_DEBUG_MISC_0 0x00002309

#define REG_A2XX_SQ_DEBUG_MISC_1 0x0000230a

#define REG_A2XX_PA_SC_AA_MASK 0x00002312

#define REG_A2XX_VGT_VERTEX_REUSE_BLOCK_CNTL 0x00002316

#define REG_A2XX_VGT_OUT_DEALLOC_CNTL 0x00002317

/* RB_COPY_CONTROL: resolve/copy (GMEM -> memory) control — sample select,
 * depth-clear enable, and per-channel clear mask. */
#define REG_A2XX_RB_COPY_CONTROL 0x00002318
#define A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT__MASK 0x00000007
#define A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT__SHIFT 0
static inline uint32_t A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT(enum a2xx_rb_copy_sample_select val)
{
	return ((val) << A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT__SHIFT) & A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT__MASK;
}
#define A2XX_RB_COPY_CONTROL_DEPTH_CLEAR_ENABLE 0x00000008
#define A2XX_RB_COPY_CONTROL_CLEAR_MASK__MASK 0x000000f0
#define A2XX_RB_COPY_CONTROL_CLEAR_MASK__SHIFT 4
static inline uint32_t A2XX_RB_COPY_CONTROL_CLEAR_MASK(uint32_t val)
{
	return ((val) << A2XX_RB_COPY_CONTROL_CLEAR_MASK__SHIFT) & A2XX_RB_COPY_CONTROL_CLEAR_MASK__MASK;
}

#define REG_A2XX_RB_COPY_DEST_BASE 0x00002319

/* RB_COPY_DEST_PITCH: destination pitch; val is divided by 32 (>> 5)
 * before packing, so the register is programmed in units of 32. */
#define REG_A2XX_RB_COPY_DEST_PITCH 0x0000231a
#define A2XX_RB_COPY_DEST_PITCH__MASK 0xffffffff
#define A2XX_RB_COPY_DEST_PITCH__SHIFT 0
static inline uint32_t A2XX_RB_COPY_DEST_PITCH(uint32_t val)
{
	return ((val >> 5) << A2XX_RB_COPY_DEST_PITCH__SHIFT) & A2XX_RB_COPY_DEST_PITCH__MASK;
}
1275
/* RB_COPY_DEST_INFO: resolve destination description — endianness, tiling
 * (LINEAR flag), color format, swap, dithering, and per-channel write
 * enables. */
#define REG_A2XX_RB_COPY_DEST_INFO 0x0000231b
#define A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN__MASK 0x00000007
#define A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN__SHIFT 0
static inline uint32_t A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN(enum adreno_rb_surface_endian val)
{
	return ((val) << A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN__SHIFT) & A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN__MASK;
}
#define A2XX_RB_COPY_DEST_INFO_LINEAR 0x00000008
#define A2XX_RB_COPY_DEST_INFO_FORMAT__MASK 0x000000f0
#define A2XX_RB_COPY_DEST_INFO_FORMAT__SHIFT 4
static inline uint32_t A2XX_RB_COPY_DEST_INFO_FORMAT(enum a2xx_colorformatx val)
{
	return ((val) << A2XX_RB_COPY_DEST_INFO_FORMAT__SHIFT) & A2XX_RB_COPY_DEST_INFO_FORMAT__MASK;
}
#define A2XX_RB_COPY_DEST_INFO_SWAP__MASK 0x00000300
#define A2XX_RB_COPY_DEST_INFO_SWAP__SHIFT 8
static inline uint32_t A2XX_RB_COPY_DEST_INFO_SWAP(uint32_t val)
{
	return ((val) << A2XX_RB_COPY_DEST_INFO_SWAP__SHIFT) & A2XX_RB_COPY_DEST_INFO_SWAP__MASK;
}
#define A2XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK 0x00000c00
#define A2XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT 10
static inline uint32_t A2XX_RB_COPY_DEST_INFO_DITHER_MODE(enum adreno_rb_dither_mode val)
{
	return ((val) << A2XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT) & A2XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK;
}
#define A2XX_RB_COPY_DEST_INFO_DITHER_TYPE__MASK 0x00003000
#define A2XX_RB_COPY_DEST_INFO_DITHER_TYPE__SHIFT 12
static inline uint32_t A2XX_RB_COPY_DEST_INFO_DITHER_TYPE(enum a2xx_rb_dither_type val)
{
	return ((val) << A2XX_RB_COPY_DEST_INFO_DITHER_TYPE__SHIFT) & A2XX_RB_COPY_DEST_INFO_DITHER_TYPE__MASK;
}
#define A2XX_RB_COPY_DEST_INFO_WRITE_RED 0x00004000
#define A2XX_RB_COPY_DEST_INFO_WRITE_GREEN 0x00008000
#define A2XX_RB_COPY_DEST_INFO_WRITE_BLUE 0x00010000
#define A2XX_RB_COPY_DEST_INFO_WRITE_ALPHA 0x00020000

/* RB_COPY_DEST_OFFSET: x/y offset into the resolve destination (13-bit
 * fields). */
#define REG_A2XX_RB_COPY_DEST_OFFSET 0x0000231c
#define A2XX_RB_COPY_DEST_OFFSET_X__MASK 0x00001fff
#define A2XX_RB_COPY_DEST_OFFSET_X__SHIFT 0
static inline uint32_t A2XX_RB_COPY_DEST_OFFSET_X(uint32_t val)
{
	return ((val) << A2XX_RB_COPY_DEST_OFFSET_X__SHIFT) & A2XX_RB_COPY_DEST_OFFSET_X__MASK;
}
#define A2XX_RB_COPY_DEST_OFFSET_Y__MASK 0x03ffe000
#define A2XX_RB_COPY_DEST_OFFSET_Y__SHIFT 13
static inline uint32_t A2XX_RB_COPY_DEST_OFFSET_Y(uint32_t val)
{
	return ((val) << A2XX_RB_COPY_DEST_OFFSET_Y__SHIFT) & A2XX_RB_COPY_DEST_OFFSET_Y__MASK;
}

#define REG_A2XX_RB_DEPTH_CLEAR 0x0000231d

#define REG_A2XX_RB_SAMPLE_COUNT_CTL 0x00002324

#define REG_A2XX_RB_COLOR_DEST_MASK 0x00002326

/* A225 user clip planes: UCP0X..UCP5W occupy 0x2340-0x2357. */
#define REG_A2XX_A225_GRAS_UCP0X 0x00002340

#define REG_A2XX_A225_GRAS_UCP5W 0x00002357

#define REG_A2XX_A225_GRAS_UCP_ENABLED 0x00002360

#define REG_A2XX_PA_SU_POLY_OFFSET_FRONT_SCALE 0x00002380

#define REG_A2XX_PA_SU_POLY_OFFSET_BACK_OFFSET 0x00002383

#define REG_A2XX_SQ_CONSTANT_0 0x00004000

#define REG_A2XX_SQ_FETCH_0 0x00004800

#define REG_A2XX_SQ_CF_BOOLEANS 0x00004900

#define REG_A2XX_SQ_CF_LOOP 0x00004908

#define REG_A2XX_COHER_SIZE_PM4 0x00000a29

#define REG_A2XX_COHER_BASE_PM4 0x00000a2a

#define REG_A2XX_COHER_STATUS_PM4 0x00000a2b
/* SQ_TEX_0..SQ_TEX_3: dwords of the texture state block (offsets are
 * relative within the state record, hence 0x0..0x3 rather than register
 * addresses).  Fields cover clamp modes, pitch, dimensions, swizzle and
 * filtering. */
#define REG_A2XX_SQ_TEX_0 0x00000000
#define A2XX_SQ_TEX_0_CLAMP_X__MASK 0x00001c00
#define A2XX_SQ_TEX_0_CLAMP_X__SHIFT 10
static inline uint32_t A2XX_SQ_TEX_0_CLAMP_X(enum sq_tex_clamp val)
{
	return ((val) << A2XX_SQ_TEX_0_CLAMP_X__SHIFT) & A2XX_SQ_TEX_0_CLAMP_X__MASK;
}
#define A2XX_SQ_TEX_0_CLAMP_Y__MASK 0x0000e000
#define A2XX_SQ_TEX_0_CLAMP_Y__SHIFT 13
static inline uint32_t A2XX_SQ_TEX_0_CLAMP_Y(enum sq_tex_clamp val)
{
	return ((val) << A2XX_SQ_TEX_0_CLAMP_Y__SHIFT) & A2XX_SQ_TEX_0_CLAMP_Y__MASK;
}
#define A2XX_SQ_TEX_0_CLAMP_Z__MASK 0x00070000
#define A2XX_SQ_TEX_0_CLAMP_Z__SHIFT 16
static inline uint32_t A2XX_SQ_TEX_0_CLAMP_Z(enum sq_tex_clamp val)
{
	return ((val) << A2XX_SQ_TEX_0_CLAMP_Z__SHIFT) & A2XX_SQ_TEX_0_CLAMP_Z__MASK;
}
/* Pitch is divided by 32 (>> 5) before packing — programmed in units of
 * 32, like RB_COPY_DEST_PITCH. */
#define A2XX_SQ_TEX_0_PITCH__MASK 0xffc00000
#define A2XX_SQ_TEX_0_PITCH__SHIFT 22
static inline uint32_t A2XX_SQ_TEX_0_PITCH(uint32_t val)
{
	return ((val >> 5) << A2XX_SQ_TEX_0_PITCH__SHIFT) & A2XX_SQ_TEX_0_PITCH__MASK;
}

#define REG_A2XX_SQ_TEX_1 0x00000001

#define REG_A2XX_SQ_TEX_2 0x00000002
#define A2XX_SQ_TEX_2_WIDTH__MASK 0x00001fff
#define A2XX_SQ_TEX_2_WIDTH__SHIFT 0
static inline uint32_t A2XX_SQ_TEX_2_WIDTH(uint32_t val)
{
	return ((val) << A2XX_SQ_TEX_2_WIDTH__SHIFT) & A2XX_SQ_TEX_2_WIDTH__MASK;
}
#define A2XX_SQ_TEX_2_HEIGHT__MASK 0x03ffe000
#define A2XX_SQ_TEX_2_HEIGHT__SHIFT 13
static inline uint32_t A2XX_SQ_TEX_2_HEIGHT(uint32_t val)
{
	return ((val) << A2XX_SQ_TEX_2_HEIGHT__SHIFT) & A2XX_SQ_TEX_2_HEIGHT__MASK;
}

#define REG_A2XX_SQ_TEX_3 0x00000003
#define A2XX_SQ_TEX_3_SWIZ_X__MASK 0x0000000e
#define A2XX_SQ_TEX_3_SWIZ_X__SHIFT 1
static inline uint32_t A2XX_SQ_TEX_3_SWIZ_X(enum sq_tex_swiz val)
{
	return ((val) << A2XX_SQ_TEX_3_SWIZ_X__SHIFT) & A2XX_SQ_TEX_3_SWIZ_X__MASK;
}
#define A2XX_SQ_TEX_3_SWIZ_Y__MASK 0x00000070
#define A2XX_SQ_TEX_3_SWIZ_Y__SHIFT 4
static inline uint32_t A2XX_SQ_TEX_3_SWIZ_Y(enum sq_tex_swiz val)
{
	return ((val) << A2XX_SQ_TEX_3_SWIZ_Y__SHIFT) & A2XX_SQ_TEX_3_SWIZ_Y__MASK;
}
#define A2XX_SQ_TEX_3_SWIZ_Z__MASK 0x00000380
#define A2XX_SQ_TEX_3_SWIZ_Z__SHIFT 7
static inline uint32_t A2XX_SQ_TEX_3_SWIZ_Z(enum sq_tex_swiz val)
{
	return ((val) << A2XX_SQ_TEX_3_SWIZ_Z__SHIFT) & A2XX_SQ_TEX_3_SWIZ_Z__MASK;
}
#define A2XX_SQ_TEX_3_SWIZ_W__MASK 0x00001c00
#define A2XX_SQ_TEX_3_SWIZ_W__SHIFT 10
static inline uint32_t A2XX_SQ_TEX_3_SWIZ_W(enum sq_tex_swiz val)
{
	return ((val) << A2XX_SQ_TEX_3_SWIZ_W__SHIFT) & A2XX_SQ_TEX_3_SWIZ_W__MASK;
}
#define A2XX_SQ_TEX_3_XY_MAG_FILTER__MASK 0x00180000
#define A2XX_SQ_TEX_3_XY_MAG_FILTER__SHIFT 19
static inline uint32_t A2XX_SQ_TEX_3_XY_MAG_FILTER(enum sq_tex_filter val)
{
	return ((val) << A2XX_SQ_TEX_3_XY_MAG_FILTER__SHIFT) & A2XX_SQ_TEX_3_XY_MAG_FILTER__MASK;
}
#define A2XX_SQ_TEX_3_XY_MIN_FILTER__MASK 0x00600000
#define A2XX_SQ_TEX_3_XY_MIN_FILTER__SHIFT 21
static inline uint32_t A2XX_SQ_TEX_3_XY_MIN_FILTER(enum sq_tex_filter val)
{
	return ((val) << A2XX_SQ_TEX_3_XY_MIN_FILTER__SHIFT) & A2XX_SQ_TEX_3_XY_MIN_FILTER__MASK;
}
1436
1437
1438#endif /* A2XX_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
new file mode 100644
index 000000000000..d183516067b4
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -0,0 +1,2193 @@
1#ifndef A3XX_XML
2#define A3XX_XML
3
4/* Autogenerated file, DO NOT EDIT manually!
5
6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng
8git clone git://0x04.net/rules-ng-ng
9
10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 30005 bytes, from 2013-07-19 21:30:48)
14- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36)
15- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9712 bytes, from 2013-05-26 15:22:37)
16- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51415 bytes, from 2013-08-03 14:26:05)
17
18Copyright (C) 2013 by the following authors:
19- Rob Clark <robdclark@gmail.com> (robclark)
20
21Permission is hereby granted, free of charge, to any person obtaining
22a copy of this software and associated documentation files (the
23"Software"), to deal in the Software without restriction, including
24without limitation the rights to use, copy, modify, merge, publish,
25distribute, sublicense, and/or sell copies of the Software, and to
26permit persons to whom the Software is furnished to do so, subject to
27the following conditions:
28
29The above copyright notice and this permission notice (including the
30next paragraph) shall be included in all copies or substantial
31portions of the Software.
32
33THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
34EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
35MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
36IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
37LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
38OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
39WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
40*/
41
42
43enum a3xx_render_mode {
44 RB_RENDERING_PASS = 0,
45 RB_TILING_PASS = 1,
46 RB_RESOLVE_PASS = 2,
47};
48
49enum a3xx_tile_mode {
50 LINEAR = 0,
51 TILE_32X32 = 2,
52};
53
54enum a3xx_threadmode {
55 MULTI = 0,
56 SINGLE = 1,
57};
58
59enum a3xx_instrbuffermode {
60 BUFFER = 1,
61};
62
63enum a3xx_threadsize {
64 TWO_QUADS = 0,
65 FOUR_QUADS = 1,
66};
67
68enum a3xx_state_block_id {
69 HLSQ_BLOCK_ID_TP_TEX = 2,
70 HLSQ_BLOCK_ID_TP_MIPMAP = 3,
71 HLSQ_BLOCK_ID_SP_VS = 4,
72 HLSQ_BLOCK_ID_SP_FS = 6,
73};
74
75enum a3xx_cache_opcode {
76 INVALIDATE = 1,
77};
78
79enum a3xx_vtx_fmt {
80 VFMT_FLOAT_32 = 0,
81 VFMT_FLOAT_32_32 = 1,
82 VFMT_FLOAT_32_32_32 = 2,
83 VFMT_FLOAT_32_32_32_32 = 3,
84 VFMT_FLOAT_16 = 4,
85 VFMT_FLOAT_16_16 = 5,
86 VFMT_FLOAT_16_16_16 = 6,
87 VFMT_FLOAT_16_16_16_16 = 7,
88 VFMT_FIXED_32 = 8,
89 VFMT_FIXED_32_32 = 9,
90 VFMT_FIXED_32_32_32 = 10,
91 VFMT_FIXED_32_32_32_32 = 11,
92 VFMT_SHORT_16 = 16,
93 VFMT_SHORT_16_16 = 17,
94 VFMT_SHORT_16_16_16 = 18,
95 VFMT_SHORT_16_16_16_16 = 19,
96 VFMT_USHORT_16 = 20,
97 VFMT_USHORT_16_16 = 21,
98 VFMT_USHORT_16_16_16 = 22,
99 VFMT_USHORT_16_16_16_16 = 23,
100 VFMT_NORM_SHORT_16 = 24,
101 VFMT_NORM_SHORT_16_16 = 25,
102 VFMT_NORM_SHORT_16_16_16 = 26,
103 VFMT_NORM_SHORT_16_16_16_16 = 27,
104 VFMT_NORM_USHORT_16 = 28,
105 VFMT_NORM_USHORT_16_16 = 29,
106 VFMT_NORM_USHORT_16_16_16 = 30,
107 VFMT_NORM_USHORT_16_16_16_16 = 31,
108 VFMT_UBYTE_8 = 40,
109 VFMT_UBYTE_8_8 = 41,
110 VFMT_UBYTE_8_8_8 = 42,
111 VFMT_UBYTE_8_8_8_8 = 43,
112 VFMT_NORM_UBYTE_8 = 44,
113 VFMT_NORM_UBYTE_8_8 = 45,
114 VFMT_NORM_UBYTE_8_8_8 = 46,
115 VFMT_NORM_UBYTE_8_8_8_8 = 47,
116 VFMT_BYTE_8 = 48,
117 VFMT_BYTE_8_8 = 49,
118 VFMT_BYTE_8_8_8 = 50,
119 VFMT_BYTE_8_8_8_8 = 51,
120 VFMT_NORM_BYTE_8 = 52,
121 VFMT_NORM_BYTE_8_8 = 53,
122 VFMT_NORM_BYTE_8_8_8 = 54,
123 VFMT_NORM_BYTE_8_8_8_8 = 55,
124 VFMT_UINT_10_10_10_2 = 60,
125 VFMT_NORM_UINT_10_10_10_2 = 61,
126 VFMT_INT_10_10_10_2 = 62,
127 VFMT_NORM_INT_10_10_10_2 = 63,
128};
129
130enum a3xx_tex_fmt {
131 TFMT_NORM_USHORT_565 = 4,
132 TFMT_NORM_USHORT_5551 = 6,
133 TFMT_NORM_USHORT_4444 = 7,
134 TFMT_NORM_UINT_X8Z24 = 10,
135 TFMT_NORM_UINT_NV12_UV_TILED = 17,
136 TFMT_NORM_UINT_NV12_Y_TILED = 19,
137 TFMT_NORM_UINT_NV12_UV = 21,
138 TFMT_NORM_UINT_NV12_Y = 23,
139 TFMT_NORM_UINT_I420_Y = 24,
140 TFMT_NORM_UINT_I420_U = 26,
141 TFMT_NORM_UINT_I420_V = 27,
142 TFMT_NORM_UINT_2_10_10_10 = 41,
143 TFMT_NORM_UINT_A8 = 44,
144 TFMT_NORM_UINT_L8_A8 = 47,
145 TFMT_NORM_UINT_8 = 48,
146 TFMT_NORM_UINT_8_8 = 49,
147 TFMT_NORM_UINT_8_8_8 = 50,
148 TFMT_NORM_UINT_8_8_8_8 = 51,
149 TFMT_FLOAT_16 = 64,
150 TFMT_FLOAT_16_16 = 65,
151 TFMT_FLOAT_16_16_16_16 = 67,
152 TFMT_FLOAT_32 = 84,
153 TFMT_FLOAT_32_32 = 85,
154 TFMT_FLOAT_32_32_32_32 = 87,
155};
156
157enum a3xx_tex_fetchsize {
158 TFETCH_DISABLE = 0,
159 TFETCH_1_BYTE = 1,
160 TFETCH_2_BYTE = 2,
161 TFETCH_4_BYTE = 3,
162 TFETCH_8_BYTE = 4,
163 TFETCH_16_BYTE = 5,
164};
165
166enum a3xx_color_fmt {
167 RB_R8G8B8_UNORM = 4,
168 RB_R8G8B8A8_UNORM = 8,
169 RB_Z16_UNORM = 12,
170 RB_A8_UNORM = 20,
171};
172
173enum a3xx_color_swap {
174 WZYX = 0,
175 WXYZ = 1,
176 ZYXW = 2,
177 XYZW = 3,
178};
179
180enum a3xx_msaa_samples {
181 MSAA_ONE = 0,
182 MSAA_TWO = 1,
183 MSAA_FOUR = 2,
184};
185
186enum a3xx_sp_perfcounter_select {
187 SP_FS_CFLOW_INSTRUCTIONS = 12,
188 SP_FS_FULL_ALU_INSTRUCTIONS = 14,
189 SP0_ICL1_MISSES = 26,
190 SP_ALU_ACTIVE_CYCLES = 29,
191};
192
193enum adreno_rb_copy_control_mode {
194 RB_COPY_RESOLVE = 1,
195 RB_COPY_DEPTH_STENCIL = 5,
196};
197
198enum a3xx_tex_filter {
199 A3XX_TEX_NEAREST = 0,
200 A3XX_TEX_LINEAR = 1,
201};
202
203enum a3xx_tex_clamp {
204 A3XX_TEX_REPEAT = 0,
205 A3XX_TEX_CLAMP_TO_EDGE = 1,
206 A3XX_TEX_MIRROR_REPEAT = 2,
207 A3XX_TEX_CLAMP_NONE = 3,
208};
209
210enum a3xx_tex_swiz {
211 A3XX_TEX_X = 0,
212 A3XX_TEX_Y = 1,
213 A3XX_TEX_Z = 2,
214 A3XX_TEX_W = 3,
215 A3XX_TEX_ZERO = 4,
216 A3XX_TEX_ONE = 5,
217};
218
219enum a3xx_tex_type {
220 A3XX_TEX_1D = 0,
221 A3XX_TEX_2D = 1,
222 A3XX_TEX_CUBE = 2,
223 A3XX_TEX_3D = 3,
224};
225
226#define A3XX_INT0_RBBM_GPU_IDLE 0x00000001
227#define A3XX_INT0_RBBM_AHB_ERROR 0x00000002
228#define A3XX_INT0_RBBM_REG_TIMEOUT 0x00000004
229#define A3XX_INT0_RBBM_ME_MS_TIMEOUT 0x00000008
230#define A3XX_INT0_RBBM_PFP_MS_TIMEOUT 0x00000010
231#define A3XX_INT0_RBBM_ATB_BUS_OVERFLOW 0x00000020
232#define A3XX_INT0_VFD_ERROR 0x00000040
233#define A3XX_INT0_CP_SW_INT 0x00000080
234#define A3XX_INT0_CP_T0_PACKET_IN_IB 0x00000100
235#define A3XX_INT0_CP_OPCODE_ERROR 0x00000200
236#define A3XX_INT0_CP_RESERVED_BIT_ERROR 0x00000400
237#define A3XX_INT0_CP_HW_FAULT 0x00000800
238#define A3XX_INT0_CP_DMA 0x00001000
239#define A3XX_INT0_CP_IB2_INT 0x00002000
240#define A3XX_INT0_CP_IB1_INT 0x00004000
241#define A3XX_INT0_CP_RB_INT 0x00008000
242#define A3XX_INT0_CP_REG_PROTECT_FAULT 0x00010000
243#define A3XX_INT0_CP_RB_DONE_TS 0x00020000
244#define A3XX_INT0_CP_VS_DONE_TS 0x00040000
245#define A3XX_INT0_CP_PS_DONE_TS 0x00080000
246#define A3XX_INT0_CACHE_FLUSH_TS 0x00100000
247#define A3XX_INT0_CP_AHB_ERROR_HALT 0x00200000
248#define A3XX_INT0_MISC_HANG_DETECT 0x01000000
249#define A3XX_INT0_UCHE_OOB_ACCESS 0x02000000
250#define REG_A3XX_RBBM_HW_VERSION 0x00000000
251
252#define REG_A3XX_RBBM_HW_RELEASE 0x00000001
253
254#define REG_A3XX_RBBM_HW_CONFIGURATION 0x00000002
255
256#define REG_A3XX_RBBM_CLOCK_CTL 0x00000010
257
258#define REG_A3XX_RBBM_SP_HYST_CNT 0x00000012
259
260#define REG_A3XX_RBBM_SW_RESET_CMD 0x00000018
261
262#define REG_A3XX_RBBM_AHB_CTL0 0x00000020
263
264#define REG_A3XX_RBBM_AHB_CTL1 0x00000021
265
266#define REG_A3XX_RBBM_AHB_CMD 0x00000022
267
268#define REG_A3XX_RBBM_AHB_ERROR_STATUS 0x00000027
269
270#define REG_A3XX_RBBM_GPR0_CTL 0x0000002e
271
272#define REG_A3XX_RBBM_STATUS 0x00000030
273#define A3XX_RBBM_STATUS_HI_BUSY 0x00000001
274#define A3XX_RBBM_STATUS_CP_ME_BUSY 0x00000002
275#define A3XX_RBBM_STATUS_CP_PFP_BUSY 0x00000004
276#define A3XX_RBBM_STATUS_CP_NRT_BUSY 0x00004000
277#define A3XX_RBBM_STATUS_VBIF_BUSY 0x00008000
278#define A3XX_RBBM_STATUS_TSE_BUSY 0x00010000
279#define A3XX_RBBM_STATUS_RAS_BUSY 0x00020000
280#define A3XX_RBBM_STATUS_RB_BUSY 0x00040000
281#define A3XX_RBBM_STATUS_PC_DCALL_BUSY 0x00080000
282#define A3XX_RBBM_STATUS_PC_VSD_BUSY 0x00100000
283#define A3XX_RBBM_STATUS_VFD_BUSY 0x00200000
284#define A3XX_RBBM_STATUS_VPC_BUSY 0x00400000
285#define A3XX_RBBM_STATUS_UCHE_BUSY 0x00800000
286#define A3XX_RBBM_STATUS_SP_BUSY 0x01000000
287#define A3XX_RBBM_STATUS_TPL1_BUSY 0x02000000
288#define A3XX_RBBM_STATUS_MARB_BUSY 0x04000000
289#define A3XX_RBBM_STATUS_VSC_BUSY 0x08000000
290#define A3XX_RBBM_STATUS_ARB_BUSY 0x10000000
291#define A3XX_RBBM_STATUS_HLSQ_BUSY 0x20000000
292#define A3XX_RBBM_STATUS_GPU_BUSY_NOHC 0x40000000
293#define A3XX_RBBM_STATUS_GPU_BUSY 0x80000000
294
295#define REG_A3XX_RBBM_WAIT_IDLE_CLOCKS_CTL 0x00000033
296
297#define REG_A3XX_RBBM_INTERFACE_HANG_INT_CTL 0x00000050
298
299#define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL0 0x00000051
300
301#define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL1 0x00000054
302
303#define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL2 0x00000057
304
305#define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL3 0x0000005a
306
307#define REG_A3XX_RBBM_INT_CLEAR_CMD 0x00000061
308
309#define REG_A3XX_RBBM_INT_0_MASK 0x00000063
310
311#define REG_A3XX_RBBM_INT_0_STATUS 0x00000064
312
313#define REG_A3XX_RBBM_PERFCTR_CTL 0x00000080
314
315#define REG_A3XX_RBBM_PERFCTR_LOAD_CMD0 0x00000081
316
317#define REG_A3XX_RBBM_PERFCTR_LOAD_CMD1 0x00000082
318
319#define REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_LO 0x00000084
320
321#define REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_HI 0x00000085
322
323#define REG_A3XX_RBBM_PERFCOUNTER0_SELECT 0x00000086
324
325#define REG_A3XX_RBBM_PERFCOUNTER1_SELECT 0x00000087
326
327#define REG_A3XX_RBBM_GPU_BUSY_MASKED 0x00000088
328
329#define REG_A3XX_RBBM_PERFCTR_CP_0_LO 0x00000090
330
331#define REG_A3XX_RBBM_PERFCTR_CP_0_HI 0x00000091
332
333#define REG_A3XX_RBBM_PERFCTR_RBBM_0_LO 0x00000092
334
335#define REG_A3XX_RBBM_PERFCTR_RBBM_0_HI 0x00000093
336
337#define REG_A3XX_RBBM_PERFCTR_RBBM_1_LO 0x00000094
338
339#define REG_A3XX_RBBM_PERFCTR_RBBM_1_HI 0x00000095
340
341#define REG_A3XX_RBBM_PERFCTR_PC_0_LO 0x00000096
342
343#define REG_A3XX_RBBM_PERFCTR_PC_0_HI 0x00000097
344
345#define REG_A3XX_RBBM_PERFCTR_PC_1_LO 0x00000098
346
347#define REG_A3XX_RBBM_PERFCTR_PC_1_HI 0x00000099
348
349#define REG_A3XX_RBBM_PERFCTR_PC_2_LO 0x0000009a
350
351#define REG_A3XX_RBBM_PERFCTR_PC_2_HI 0x0000009b
352
353#define REG_A3XX_RBBM_PERFCTR_PC_3_LO 0x0000009c
354
355#define REG_A3XX_RBBM_PERFCTR_PC_3_HI 0x0000009d
356
357#define REG_A3XX_RBBM_PERFCTR_VFD_0_LO 0x0000009e
358
359#define REG_A3XX_RBBM_PERFCTR_VFD_0_HI 0x0000009f
360
361#define REG_A3XX_RBBM_PERFCTR_VFD_1_LO 0x000000a0
362
363#define REG_A3XX_RBBM_PERFCTR_VFD_1_HI 0x000000a1
364
365#define REG_A3XX_RBBM_PERFCTR_HLSQ_0_LO 0x000000a2
366
367#define REG_A3XX_RBBM_PERFCTR_HLSQ_0_HI 0x000000a3
368
369#define REG_A3XX_RBBM_PERFCTR_HLSQ_1_LO 0x000000a4
370
371#define REG_A3XX_RBBM_PERFCTR_HLSQ_1_HI 0x000000a5
372
373#define REG_A3XX_RBBM_PERFCTR_HLSQ_2_LO 0x000000a6
374
375#define REG_A3XX_RBBM_PERFCTR_HLSQ_2_HI 0x000000a7
376
377#define REG_A3XX_RBBM_PERFCTR_HLSQ_3_LO 0x000000a8
378
379#define REG_A3XX_RBBM_PERFCTR_HLSQ_3_HI 0x000000a9
380
381#define REG_A3XX_RBBM_PERFCTR_HLSQ_4_LO 0x000000aa
382
383#define REG_A3XX_RBBM_PERFCTR_HLSQ_4_HI 0x000000ab
384
385#define REG_A3XX_RBBM_PERFCTR_HLSQ_5_LO 0x000000ac
386
387#define REG_A3XX_RBBM_PERFCTR_HLSQ_5_HI 0x000000ad
388
389#define REG_A3XX_RBBM_PERFCTR_VPC_0_LO 0x000000ae
390
391#define REG_A3XX_RBBM_PERFCTR_VPC_0_HI 0x000000af
392
393#define REG_A3XX_RBBM_PERFCTR_VPC_1_LO 0x000000b0
394
395#define REG_A3XX_RBBM_PERFCTR_VPC_1_HI 0x000000b1
396
397#define REG_A3XX_RBBM_PERFCTR_TSE_0_LO 0x000000b2
398
399#define REG_A3XX_RBBM_PERFCTR_TSE_0_HI 0x000000b3
400
401#define REG_A3XX_RBBM_PERFCTR_TSE_1_LO 0x000000b4
402
403#define REG_A3XX_RBBM_PERFCTR_TSE_1_HI 0x000000b5
404
405#define REG_A3XX_RBBM_PERFCTR_RAS_0_LO 0x000000b6
406
407#define REG_A3XX_RBBM_PERFCTR_RAS_0_HI 0x000000b7
408
409#define REG_A3XX_RBBM_PERFCTR_RAS_1_LO 0x000000b8
410
411#define REG_A3XX_RBBM_PERFCTR_RAS_1_HI 0x000000b9
412
413#define REG_A3XX_RBBM_PERFCTR_UCHE_0_LO 0x000000ba
414
415#define REG_A3XX_RBBM_PERFCTR_UCHE_0_HI 0x000000bb
416
417#define REG_A3XX_RBBM_PERFCTR_UCHE_1_LO 0x000000bc
418
419#define REG_A3XX_RBBM_PERFCTR_UCHE_1_HI 0x000000bd
420
421#define REG_A3XX_RBBM_PERFCTR_UCHE_2_LO 0x000000be
422
423#define REG_A3XX_RBBM_PERFCTR_UCHE_2_HI 0x000000bf
424
425#define REG_A3XX_RBBM_PERFCTR_UCHE_3_LO 0x000000c0
426
427#define REG_A3XX_RBBM_PERFCTR_UCHE_3_HI 0x000000c1
428
429#define REG_A3XX_RBBM_PERFCTR_UCHE_4_LO 0x000000c2
430
431#define REG_A3XX_RBBM_PERFCTR_UCHE_4_HI 0x000000c3
432
433#define REG_A3XX_RBBM_PERFCTR_UCHE_5_LO 0x000000c4
434
435#define REG_A3XX_RBBM_PERFCTR_UCHE_5_HI 0x000000c5
436
437#define REG_A3XX_RBBM_PERFCTR_TP_0_LO 0x000000c6
438
439#define REG_A3XX_RBBM_PERFCTR_TP_0_HI 0x000000c7
440
441#define REG_A3XX_RBBM_PERFCTR_TP_1_LO 0x000000c8
442
443#define REG_A3XX_RBBM_PERFCTR_TP_1_HI 0x000000c9
444
445#define REG_A3XX_RBBM_PERFCTR_TP_2_LO 0x000000ca
446
447#define REG_A3XX_RBBM_PERFCTR_TP_2_HI 0x000000cb
448
449#define REG_A3XX_RBBM_PERFCTR_TP_3_LO 0x000000cc
450
451#define REG_A3XX_RBBM_PERFCTR_TP_3_HI 0x000000cd
452
453#define REG_A3XX_RBBM_PERFCTR_TP_4_LO 0x000000ce
454
455#define REG_A3XX_RBBM_PERFCTR_TP_4_HI 0x000000cf
456
457#define REG_A3XX_RBBM_PERFCTR_TP_5_LO 0x000000d0
458
459#define REG_A3XX_RBBM_PERFCTR_TP_5_HI 0x000000d1
460
461#define REG_A3XX_RBBM_PERFCTR_SP_0_LO 0x000000d2
462
463#define REG_A3XX_RBBM_PERFCTR_SP_0_HI 0x000000d3
464
465#define REG_A3XX_RBBM_PERFCTR_SP_1_LO 0x000000d4
466
467#define REG_A3XX_RBBM_PERFCTR_SP_1_HI 0x000000d5
468
469#define REG_A3XX_RBBM_PERFCTR_SP_2_LO 0x000000d6
470
471#define REG_A3XX_RBBM_PERFCTR_SP_2_HI 0x000000d7
472
473#define REG_A3XX_RBBM_PERFCTR_SP_3_LO 0x000000d8
474
475#define REG_A3XX_RBBM_PERFCTR_SP_3_HI 0x000000d9
476
477#define REG_A3XX_RBBM_PERFCTR_SP_4_LO 0x000000da
478
479#define REG_A3XX_RBBM_PERFCTR_SP_4_HI 0x000000db
480
481#define REG_A3XX_RBBM_PERFCTR_SP_5_LO 0x000000dc
482
483#define REG_A3XX_RBBM_PERFCTR_SP_5_HI 0x000000dd
484
485#define REG_A3XX_RBBM_PERFCTR_SP_6_LO 0x000000de
486
487#define REG_A3XX_RBBM_PERFCTR_SP_6_HI 0x000000df
488
489#define REG_A3XX_RBBM_PERFCTR_SP_7_LO 0x000000e0
490
491#define REG_A3XX_RBBM_PERFCTR_SP_7_HI 0x000000e1
492
493#define REG_A3XX_RBBM_PERFCTR_RB_0_LO 0x000000e2
494
495#define REG_A3XX_RBBM_PERFCTR_RB_0_HI 0x000000e3
496
497#define REG_A3XX_RBBM_PERFCTR_RB_1_LO 0x000000e4
498
499#define REG_A3XX_RBBM_PERFCTR_RB_1_HI 0x000000e5
500
501#define REG_A3XX_RBBM_PERFCTR_PWR_0_LO 0x000000ea
502
503#define REG_A3XX_RBBM_PERFCTR_PWR_0_HI 0x000000eb
504
505#define REG_A3XX_RBBM_PERFCTR_PWR_1_LO 0x000000ec
506
507#define REG_A3XX_RBBM_PERFCTR_PWR_1_HI 0x000000ed
508
509#define REG_A3XX_RBBM_RBBM_CTL 0x00000100
510
511#define REG_A3XX_RBBM_DEBUG_BUS_CTL 0x00000111
512
513#define REG_A3XX_RBBM_DEBUG_BUS_DATA_STATUS 0x00000112
514
515#define REG_A3XX_CP_PFP_UCODE_ADDR 0x000001c9
516
517#define REG_A3XX_CP_PFP_UCODE_DATA 0x000001ca
518
519#define REG_A3XX_CP_ROQ_ADDR 0x000001cc
520
521#define REG_A3XX_CP_ROQ_DATA 0x000001cd
522
523#define REG_A3XX_CP_MERCIU_ADDR 0x000001d1
524
525#define REG_A3XX_CP_MERCIU_DATA 0x000001d2
526
527#define REG_A3XX_CP_MERCIU_DATA2 0x000001d3
528
529#define REG_A3XX_CP_MEQ_ADDR 0x000001da
530
531#define REG_A3XX_CP_MEQ_DATA 0x000001db
532
533#define REG_A3XX_CP_PERFCOUNTER_SELECT 0x00000445
534
535#define REG_A3XX_CP_HW_FAULT 0x0000045c
536
537#define REG_A3XX_CP_PROTECT_CTRL 0x0000045e
538
539#define REG_A3XX_CP_PROTECT_STATUS 0x0000045f
540
541static inline uint32_t REG_A3XX_CP_PROTECT(uint32_t i0) { return 0x00000460 + 0x1*i0; }
542
543static inline uint32_t REG_A3XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000460 + 0x1*i0; }
544
545#define REG_A3XX_CP_AHB_FAULT 0x0000054d
546
547#define REG_A3XX_GRAS_CL_CLIP_CNTL 0x00002040
548#define A3XX_GRAS_CL_CLIP_CNTL_IJ_PERSP_CENTER 0x00001000
549#define A3XX_GRAS_CL_CLIP_CNTL_CLIP_DISABLE 0x00010000
550#define A3XX_GRAS_CL_CLIP_CNTL_ZFAR_CLIP_DISABLE 0x00020000
551#define A3XX_GRAS_CL_CLIP_CNTL_VP_CLIP_CODE_IGNORE 0x00080000
552#define A3XX_GRAS_CL_CLIP_CNTL_VP_XFORM_DISABLE 0x00100000
553#define A3XX_GRAS_CL_CLIP_CNTL_PERSP_DIVISION_DISABLE 0x00200000
554
555#define REG_A3XX_GRAS_CL_GB_CLIP_ADJ 0x00002044
556#define A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__MASK 0x000003ff
557#define A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__SHIFT 0
558static inline uint32_t A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ(uint32_t val)
559{
560 return ((val) << A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__SHIFT) & A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__MASK;
561}
562#define A3XX_GRAS_CL_GB_CLIP_ADJ_VERT__MASK 0x000ffc00
563#define A3XX_GRAS_CL_GB_CLIP_ADJ_VERT__SHIFT 10
564static inline uint32_t A3XX_GRAS_CL_GB_CLIP_ADJ_VERT(uint32_t val)
565{
566 return ((val) << A3XX_GRAS_CL_GB_CLIP_ADJ_VERT__SHIFT) & A3XX_GRAS_CL_GB_CLIP_ADJ_VERT__MASK;
567}
568
569#define REG_A3XX_GRAS_CL_VPORT_XOFFSET 0x00002048
570#define A3XX_GRAS_CL_VPORT_XOFFSET__MASK 0xffffffff
571#define A3XX_GRAS_CL_VPORT_XOFFSET__SHIFT 0
572static inline uint32_t A3XX_GRAS_CL_VPORT_XOFFSET(float val)
573{
574 return ((fui(val)) << A3XX_GRAS_CL_VPORT_XOFFSET__SHIFT) & A3XX_GRAS_CL_VPORT_XOFFSET__MASK;
575}
576
577#define REG_A3XX_GRAS_CL_VPORT_XSCALE 0x00002049
578#define A3XX_GRAS_CL_VPORT_XSCALE__MASK 0xffffffff
579#define A3XX_GRAS_CL_VPORT_XSCALE__SHIFT 0
580static inline uint32_t A3XX_GRAS_CL_VPORT_XSCALE(float val)
581{
582 return ((fui(val)) << A3XX_GRAS_CL_VPORT_XSCALE__SHIFT) & A3XX_GRAS_CL_VPORT_XSCALE__MASK;
583}
584
585#define REG_A3XX_GRAS_CL_VPORT_YOFFSET 0x0000204a
586#define A3XX_GRAS_CL_VPORT_YOFFSET__MASK 0xffffffff
587#define A3XX_GRAS_CL_VPORT_YOFFSET__SHIFT 0
588static inline uint32_t A3XX_GRAS_CL_VPORT_YOFFSET(float val)
589{
590 return ((fui(val)) << A3XX_GRAS_CL_VPORT_YOFFSET__SHIFT) & A3XX_GRAS_CL_VPORT_YOFFSET__MASK;
591}
592
593#define REG_A3XX_GRAS_CL_VPORT_YSCALE 0x0000204b
594#define A3XX_GRAS_CL_VPORT_YSCALE__MASK 0xffffffff
595#define A3XX_GRAS_CL_VPORT_YSCALE__SHIFT 0
596static inline uint32_t A3XX_GRAS_CL_VPORT_YSCALE(float val)
597{
598 return ((fui(val)) << A3XX_GRAS_CL_VPORT_YSCALE__SHIFT) & A3XX_GRAS_CL_VPORT_YSCALE__MASK;
599}
600
601#define REG_A3XX_GRAS_CL_VPORT_ZOFFSET 0x0000204c
602#define A3XX_GRAS_CL_VPORT_ZOFFSET__MASK 0xffffffff
603#define A3XX_GRAS_CL_VPORT_ZOFFSET__SHIFT 0
604static inline uint32_t A3XX_GRAS_CL_VPORT_ZOFFSET(float val)
605{
606 return ((fui(val)) << A3XX_GRAS_CL_VPORT_ZOFFSET__SHIFT) & A3XX_GRAS_CL_VPORT_ZOFFSET__MASK;
607}
608
609#define REG_A3XX_GRAS_CL_VPORT_ZSCALE 0x0000204d
610#define A3XX_GRAS_CL_VPORT_ZSCALE__MASK 0xffffffff
611#define A3XX_GRAS_CL_VPORT_ZSCALE__SHIFT 0
612static inline uint32_t A3XX_GRAS_CL_VPORT_ZSCALE(float val)
613{
614 return ((fui(val)) << A3XX_GRAS_CL_VPORT_ZSCALE__SHIFT) & A3XX_GRAS_CL_VPORT_ZSCALE__MASK;
615}
616
617#define REG_A3XX_GRAS_SU_POINT_MINMAX 0x00002068
618
619#define REG_A3XX_GRAS_SU_POINT_SIZE 0x00002069
620
621#define REG_A3XX_GRAS_SU_POLY_OFFSET_SCALE 0x0000206c
622#define A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__MASK 0x00ffffff
623#define A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__SHIFT 0
624static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL(float val)
625{
626 return ((((uint32_t)(val * 40.0))) << A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__MASK;
627}
628
629#define REG_A3XX_GRAS_SU_POLY_OFFSET_OFFSET 0x0000206d
630#define A3XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK 0xffffffff
631#define A3XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT 0
632static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
633{
634 return ((((uint32_t)(val * 44.0))) << A3XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
635}
636
637#define REG_A3XX_GRAS_SU_MODE_CONTROL 0x00002070
638#define A3XX_GRAS_SU_MODE_CONTROL_CULL_FRONT 0x00000001
639#define A3XX_GRAS_SU_MODE_CONTROL_CULL_BACK 0x00000002
640#define A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK 0x000007fc
641#define A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT 2
642static inline uint32_t A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH(uint32_t val)
643{
644 return ((val) << A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT) & A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK;
645}
646#define A3XX_GRAS_SU_MODE_CONTROL_POLY_OFFSET 0x00000800
647
648#define REG_A3XX_GRAS_SC_CONTROL 0x00002072
649#define A3XX_GRAS_SC_CONTROL_RENDER_MODE__MASK 0x000000f0
650#define A3XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT 4
651static inline uint32_t A3XX_GRAS_SC_CONTROL_RENDER_MODE(enum a3xx_render_mode val)
652{
653 return ((val) << A3XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT) & A3XX_GRAS_SC_CONTROL_RENDER_MODE__MASK;
654}
655#define A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK 0x00000f00
656#define A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT 8
657static inline uint32_t A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES(enum a3xx_msaa_samples val)
658{
659 return ((val) << A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT) & A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK;
660}
661#define A3XX_GRAS_SC_CONTROL_RASTER_MODE__MASK 0x0000f000
662#define A3XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT 12
663static inline uint32_t A3XX_GRAS_SC_CONTROL_RASTER_MODE(uint32_t val)
664{
665 return ((val) << A3XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT) & A3XX_GRAS_SC_CONTROL_RASTER_MODE__MASK;
666}
667
668#define REG_A3XX_GRAS_SC_SCREEN_SCISSOR_TL 0x00002074
669#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
670#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK 0x00007fff
671#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT 0
672static inline uint32_t A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X(uint32_t val)
673{
674 return ((val) << A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT) & A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK;
675}
676#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK 0x7fff0000
677#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT 16
678static inline uint32_t A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y(uint32_t val)
679{
680 return ((val) << A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT) & A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK;
681}
682
683#define REG_A3XX_GRAS_SC_SCREEN_SCISSOR_BR 0x00002075
684#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
685#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK 0x00007fff
686#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT 0
687static inline uint32_t A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X(uint32_t val)
688{
689 return ((val) << A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT) & A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK;
690}
691#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK 0x7fff0000
692#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT 16
693static inline uint32_t A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y(uint32_t val)
694{
695 return ((val) << A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT) & A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK;
696}
697
698#define REG_A3XX_GRAS_SC_WINDOW_SCISSOR_TL 0x00002079
699#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
700#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK 0x00007fff
701#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT 0
702static inline uint32_t A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X(uint32_t val)
703{
704 return ((val) << A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK;
705}
706#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK 0x7fff0000
707#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16
708static inline uint32_t A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(uint32_t val)
709{
710 return ((val) << A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK;
711}
712
713#define REG_A3XX_GRAS_SC_WINDOW_SCISSOR_BR 0x0000207a
714#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
715#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK 0x00007fff
716#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT 0
717static inline uint32_t A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X(uint32_t val)
718{
719 return ((val) << A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK;
720}
721#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK 0x7fff0000
722#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16
723static inline uint32_t A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
724{
725 return ((val) << A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK;
726}
727
728#define REG_A3XX_RB_MODE_CONTROL 0x000020c0
729#define A3XX_RB_MODE_CONTROL_GMEM_BYPASS 0x00000080
730#define A3XX_RB_MODE_CONTROL_RENDER_MODE__MASK 0x00000700
731#define A3XX_RB_MODE_CONTROL_RENDER_MODE__SHIFT 8
732static inline uint32_t A3XX_RB_MODE_CONTROL_RENDER_MODE(enum a3xx_render_mode val)
733{
734 return ((val) << A3XX_RB_MODE_CONTROL_RENDER_MODE__SHIFT) & A3XX_RB_MODE_CONTROL_RENDER_MODE__MASK;
735}
736#define A3XX_RB_MODE_CONTROL_MARB_CACHE_SPLIT_MODE 0x00008000
737#define A3XX_RB_MODE_CONTROL_PACKER_TIMER_ENABLE 0x00010000
738
739#define REG_A3XX_RB_RENDER_CONTROL 0x000020c1
740#define A3XX_RB_RENDER_CONTROL_BIN_WIDTH__MASK 0x00000ff0
741#define A3XX_RB_RENDER_CONTROL_BIN_WIDTH__SHIFT 4
742static inline uint32_t A3XX_RB_RENDER_CONTROL_BIN_WIDTH(uint32_t val)
743{
744 return ((val >> 5) << A3XX_RB_RENDER_CONTROL_BIN_WIDTH__SHIFT) & A3XX_RB_RENDER_CONTROL_BIN_WIDTH__MASK;
745}
746#define A3XX_RB_RENDER_CONTROL_DISABLE_COLOR_PIPE 0x00001000
747#define A3XX_RB_RENDER_CONTROL_ENABLE_GMEM 0x00002000
748#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__MASK 0x07000000
749#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__SHIFT 24
750static inline uint32_t A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare_func val)
751{
752 return ((val) << A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__SHIFT) & A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__MASK;
753}
754
755#define REG_A3XX_RB_MSAA_CONTROL 0x000020c2
756#define A3XX_RB_MSAA_CONTROL_DISABLE 0x00000400
757#define A3XX_RB_MSAA_CONTROL_SAMPLES__MASK 0x0000f000
758#define A3XX_RB_MSAA_CONTROL_SAMPLES__SHIFT 12
759static inline uint32_t A3XX_RB_MSAA_CONTROL_SAMPLES(enum a3xx_msaa_samples val)
760{
761 return ((val) << A3XX_RB_MSAA_CONTROL_SAMPLES__SHIFT) & A3XX_RB_MSAA_CONTROL_SAMPLES__MASK;
762}
763#define A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__MASK 0xffff0000
764#define A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__SHIFT 16
765static inline uint32_t A3XX_RB_MSAA_CONTROL_SAMPLE_MASK(uint32_t val)
766{
767 return ((val) << A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__SHIFT) & A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__MASK;
768}
769
770#define REG_A3XX_UNKNOWN_20C3 0x000020c3
771
772static inline uint32_t REG_A3XX_RB_MRT(uint32_t i0) { return 0x000020c4 + 0x4*i0; }
773
774static inline uint32_t REG_A3XX_RB_MRT_CONTROL(uint32_t i0) { return 0x000020c4 + 0x4*i0; }
775#define A3XX_RB_MRT_CONTROL_READ_DEST_ENABLE 0x00000008
776#define A3XX_RB_MRT_CONTROL_BLEND 0x00000010
777#define A3XX_RB_MRT_CONTROL_BLEND2 0x00000020
778#define A3XX_RB_MRT_CONTROL_ROP_CODE__MASK 0x00000f00
779#define A3XX_RB_MRT_CONTROL_ROP_CODE__SHIFT 8
780static inline uint32_t A3XX_RB_MRT_CONTROL_ROP_CODE(uint32_t val)
781{
782 return ((val) << A3XX_RB_MRT_CONTROL_ROP_CODE__SHIFT) & A3XX_RB_MRT_CONTROL_ROP_CODE__MASK;
783}
784#define A3XX_RB_MRT_CONTROL_DITHER_MODE__MASK 0x00003000
785#define A3XX_RB_MRT_CONTROL_DITHER_MODE__SHIFT 12
786static inline uint32_t A3XX_RB_MRT_CONTROL_DITHER_MODE(enum adreno_rb_dither_mode val)
787{
788 return ((val) << A3XX_RB_MRT_CONTROL_DITHER_MODE__SHIFT) & A3XX_RB_MRT_CONTROL_DITHER_MODE__MASK;
789}
790#define A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK 0x0f000000
791#define A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT 24
792static inline uint32_t A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val)
793{
794 return ((val) << A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT) & A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK;
795}
796
797static inline uint32_t REG_A3XX_RB_MRT_BUF_INFO(uint32_t i0) { return 0x000020c5 + 0x4*i0; }
798#define A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK 0x0000003f
799#define A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT 0
800static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT(enum a3xx_color_fmt val)
801{
802 return ((val) << A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK;
803}
804#define A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK 0x000000c0
805#define A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT 6
806static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(enum a3xx_tile_mode val)
807{
808 return ((val) << A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK;
809}
810#define A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK 0x00000c00
811#define A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT 10
812static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
813{
814 return ((val) << A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK;
815}
816#define A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK 0xfffe0000
817#define A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT 17
818static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH(uint32_t val)
819{
820 return ((val >> 5) << A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK;
821}
822
823static inline uint32_t REG_A3XX_RB_MRT_BUF_BASE(uint32_t i0) { return 0x000020c6 + 0x4*i0; }
824#define A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__MASK 0xfffffff0
825#define A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__SHIFT 4
826static inline uint32_t A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE(uint32_t val)
827{
828 return ((val >> 5) << A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__SHIFT) & A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__MASK;
829}
830
831static inline uint32_t REG_A3XX_RB_MRT_BLEND_CONTROL(uint32_t i0) { return 0x000020c7 + 0x4*i0; }
832#define A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK 0x0000001f
/*
 * A3XX_RB_MRT_BLEND_CONTROL bitfield packers (per-render-target blend
 * state).  Each inline shifts its enum value into position and masks it
 * so the fields can be OR'd together into one register word.
 * NOTE(review): this header appears machine-generated (rules-ng-ng
 * style) -- confirm before hand-editing values.
 */
#define A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT 0
static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_blend_factor val)
{
	return ((val) << A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK;
}
/* RGB blend op, bits 5..7 */
#define A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0
#define A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5
static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum adreno_rb_blend_opcode val)
{
	return ((val) << A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK;
}
/* RGB destination factor, bits 8..12 */
#define A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK 0x00001f00
#define A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT 8
static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(enum adreno_rb_blend_factor val)
{
	return ((val) << A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK;
}
/* Alpha source factor, bits 16..20 */
#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK 0x001f0000
#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT 16
static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb_blend_factor val)
{
	return ((val) << A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK;
}
/* Alpha blend op, bits 21..23 */
#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000
#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21
static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum adreno_rb_blend_opcode val)
{
	return ((val) << A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK;
}
/* Alpha destination factor, bits 24..28 */
#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK 0x1f000000
#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT 24
static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_rb_blend_factor val)
{
	return ((val) << A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK;
}
/* Single-bit flag, bit 29 */
#define A3XX_RB_MRT_BLEND_CONTROL_CLAMP_ENABLE 0x20000000
869
/*
 * Blend-constant color registers.  Each channel carries the value twice:
 * an 8-bit UINT form in bits 0..7 and a 16-bit float form in bits 16..31.
 * util_float_to_half() is defined elsewhere -- presumably an IEEE-754
 * single-to-half conversion; confirm against the helper's definition.
 */
#define REG_A3XX_RB_BLEND_RED 0x000020e4
#define A3XX_RB_BLEND_RED_UINT__MASK 0x000000ff
#define A3XX_RB_BLEND_RED_UINT__SHIFT 0
static inline uint32_t A3XX_RB_BLEND_RED_UINT(uint32_t val)
{
	return ((val) << A3XX_RB_BLEND_RED_UINT__SHIFT) & A3XX_RB_BLEND_RED_UINT__MASK;
}
#define A3XX_RB_BLEND_RED_FLOAT__MASK 0xffff0000
#define A3XX_RB_BLEND_RED_FLOAT__SHIFT 16
static inline uint32_t A3XX_RB_BLEND_RED_FLOAT(float val)
{
	return ((util_float_to_half(val)) << A3XX_RB_BLEND_RED_FLOAT__SHIFT) & A3XX_RB_BLEND_RED_FLOAT__MASK;
}

#define REG_A3XX_RB_BLEND_GREEN 0x000020e5
#define A3XX_RB_BLEND_GREEN_UINT__MASK 0x000000ff
#define A3XX_RB_BLEND_GREEN_UINT__SHIFT 0
static inline uint32_t A3XX_RB_BLEND_GREEN_UINT(uint32_t val)
{
	return ((val) << A3XX_RB_BLEND_GREEN_UINT__SHIFT) & A3XX_RB_BLEND_GREEN_UINT__MASK;
}
#define A3XX_RB_BLEND_GREEN_FLOAT__MASK 0xffff0000
#define A3XX_RB_BLEND_GREEN_FLOAT__SHIFT 16
static inline uint32_t A3XX_RB_BLEND_GREEN_FLOAT(float val)
{
	return ((util_float_to_half(val)) << A3XX_RB_BLEND_GREEN_FLOAT__SHIFT) & A3XX_RB_BLEND_GREEN_FLOAT__MASK;
}

#define REG_A3XX_RB_BLEND_BLUE 0x000020e6
#define A3XX_RB_BLEND_BLUE_UINT__MASK 0x000000ff
#define A3XX_RB_BLEND_BLUE_UINT__SHIFT 0
static inline uint32_t A3XX_RB_BLEND_BLUE_UINT(uint32_t val)
{
	return ((val) << A3XX_RB_BLEND_BLUE_UINT__SHIFT) & A3XX_RB_BLEND_BLUE_UINT__MASK;
}
#define A3XX_RB_BLEND_BLUE_FLOAT__MASK 0xffff0000
#define A3XX_RB_BLEND_BLUE_FLOAT__SHIFT 16
static inline uint32_t A3XX_RB_BLEND_BLUE_FLOAT(float val)
{
	return ((util_float_to_half(val)) << A3XX_RB_BLEND_BLUE_FLOAT__SHIFT) & A3XX_RB_BLEND_BLUE_FLOAT__MASK;
}

#define REG_A3XX_RB_BLEND_ALPHA 0x000020e7
#define A3XX_RB_BLEND_ALPHA_UINT__MASK 0x000000ff
#define A3XX_RB_BLEND_ALPHA_UINT__SHIFT 0
static inline uint32_t A3XX_RB_BLEND_ALPHA_UINT(uint32_t val)
{
	return ((val) << A3XX_RB_BLEND_ALPHA_UINT__SHIFT) & A3XX_RB_BLEND_ALPHA_UINT__MASK;
}
#define A3XX_RB_BLEND_ALPHA_FLOAT__MASK 0xffff0000
#define A3XX_RB_BLEND_ALPHA_FLOAT__SHIFT 16
static inline uint32_t A3XX_RB_BLEND_ALPHA_FLOAT(float val)
{
	return ((util_float_to_half(val)) << A3XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A3XX_RB_BLEND_ALPHA_FLOAT__MASK;
}

/* Registers with unidentified function; names encode only the offset. */
#define REG_A3XX_UNKNOWN_20E8 0x000020e8

#define REG_A3XX_UNKNOWN_20E9 0x000020e9

#define REG_A3XX_UNKNOWN_20EA 0x000020ea

#define REG_A3XX_UNKNOWN_20EB 0x000020eb
933
/* RB_COPY_CONTROL: controls resolve/copy from GMEM to memory. */
#define REG_A3XX_RB_COPY_CONTROL 0x000020ec
#define A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK 0x00000003
#define A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__SHIFT 0
static inline uint32_t A3XX_RB_COPY_CONTROL_MSAA_RESOLVE(enum a3xx_msaa_samples val)
{
	return ((val) << A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__SHIFT) & A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK;
}
#define A3XX_RB_COPY_CONTROL_MODE__MASK 0x00000070
#define A3XX_RB_COPY_CONTROL_MODE__SHIFT 4
static inline uint32_t A3XX_RB_COPY_CONTROL_MODE(enum adreno_rb_copy_control_mode val)
{
	return ((val) << A3XX_RB_COPY_CONTROL_MODE__SHIFT) & A3XX_RB_COPY_CONTROL_MODE__MASK;
}
#define A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK 0xfffffc00
#define A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT 10
static inline uint32_t A3XX_RB_COPY_CONTROL_GMEM_BASE(uint32_t val)
{
	/* The field stores bits 10+ of the address: the >> 10 then << 10
	 * drops the low bits, i.e. the base must be 1KiB-aligned. */
	return ((val >> 10) << A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT) & A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK;
}
953
/* RB_COPY_DEST_BASE: destination address of the GMEM resolve/copy. */
#define REG_A3XX_RB_COPY_DEST_BASE 0x000020ed
#define A3XX_RB_COPY_DEST_BASE_BASE__MASK 0xfffffff0
#define A3XX_RB_COPY_DEST_BASE_BASE__SHIFT 4
static inline uint32_t A3XX_RB_COPY_DEST_BASE_BASE(uint32_t v)
{
	/* Address is stored pre-divided by 32 (>> 5), then placed at bit 4
	 * -- keeping the generated encoding exactly as-is. */
	uint32_t bits = (v >> 5) << A3XX_RB_COPY_DEST_BASE_BASE__SHIFT;

	return bits & A3XX_RB_COPY_DEST_BASE_BASE__MASK;
}

/* RB_COPY_DEST_PITCH: destination pitch, stored in 32-byte units. */
#define REG_A3XX_RB_COPY_DEST_PITCH 0x000020ee
#define A3XX_RB_COPY_DEST_PITCH_PITCH__MASK 0xffffffff
#define A3XX_RB_COPY_DEST_PITCH_PITCH__SHIFT 0
static inline uint32_t A3XX_RB_COPY_DEST_PITCH_PITCH(uint32_t v)
{
	uint32_t bits = (v >> 5) << A3XX_RB_COPY_DEST_PITCH_PITCH__SHIFT;

	return bits & A3XX_RB_COPY_DEST_PITCH_PITCH__MASK;
}
969
/* RB_COPY_DEST_INFO: format/layout description of the copy destination. */
#define REG_A3XX_RB_COPY_DEST_INFO 0x000020ef
#define A3XX_RB_COPY_DEST_INFO_TILE__MASK 0x00000003
#define A3XX_RB_COPY_DEST_INFO_TILE__SHIFT 0
static inline uint32_t A3XX_RB_COPY_DEST_INFO_TILE(enum a3xx_tile_mode val)
{
	return ((val) << A3XX_RB_COPY_DEST_INFO_TILE__SHIFT) & A3XX_RB_COPY_DEST_INFO_TILE__MASK;
}
#define A3XX_RB_COPY_DEST_INFO_FORMAT__MASK 0x000000fc
#define A3XX_RB_COPY_DEST_INFO_FORMAT__SHIFT 2
static inline uint32_t A3XX_RB_COPY_DEST_INFO_FORMAT(enum a3xx_color_fmt val)
{
	return ((val) << A3XX_RB_COPY_DEST_INFO_FORMAT__SHIFT) & A3XX_RB_COPY_DEST_INFO_FORMAT__MASK;
}
#define A3XX_RB_COPY_DEST_INFO_SWAP__MASK 0x00000300
#define A3XX_RB_COPY_DEST_INFO_SWAP__SHIFT 8
static inline uint32_t A3XX_RB_COPY_DEST_INFO_SWAP(enum a3xx_color_swap val)
{
	return ((val) << A3XX_RB_COPY_DEST_INFO_SWAP__SHIFT) & A3XX_RB_COPY_DEST_INFO_SWAP__MASK;
}
/* 4-bit per-component write enable, bits 14..17 */
#define A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__MASK 0x0003c000
#define A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__SHIFT 14
static inline uint32_t A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE(uint32_t val)
{
	return ((val) << A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__SHIFT) & A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__MASK;
}
#define A3XX_RB_COPY_DEST_INFO_ENDIAN__MASK 0x001c0000
#define A3XX_RB_COPY_DEST_INFO_ENDIAN__SHIFT 18
static inline uint32_t A3XX_RB_COPY_DEST_INFO_ENDIAN(enum adreno_rb_surface_endian val)
{
	return ((val) << A3XX_RB_COPY_DEST_INFO_ENDIAN__SHIFT) & A3XX_RB_COPY_DEST_INFO_ENDIAN__MASK;
}
1001
/* RB_DEPTH_CONTROL: depth test enable bits plus the compare function. */
#define REG_A3XX_RB_DEPTH_CONTROL 0x00002100
#define A3XX_RB_DEPTH_CONTROL_Z_ENABLE 0x00000002
#define A3XX_RB_DEPTH_CONTROL_Z_WRITE_ENABLE 0x00000004
#define A3XX_RB_DEPTH_CONTROL_EARLY_Z_ENABLE 0x00000008
#define A3XX_RB_DEPTH_CONTROL_ZFUNC__MASK 0x00000070
#define A3XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT 4
static inline uint32_t A3XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val)
{
	return ((val) << A3XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT) & A3XX_RB_DEPTH_CONTROL_ZFUNC__MASK;
}
#define A3XX_RB_DEPTH_CONTROL_BF_ENABLE 0x00000080
#define A3XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE 0x80000000

#define REG_A3XX_UNKNOWN_2101 0x00002101

/* RB_DEPTH_INFO: depth buffer format and GMEM base address. */
#define REG_A3XX_RB_DEPTH_INFO 0x00002102
#define A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK 0x00000001
#define A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT 0
static inline uint32_t A3XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum adreno_rb_depth_format val)
{
	return ((val) << A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT) & A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK;
}
#define A3XX_RB_DEPTH_INFO_DEPTH_BASE__MASK 0xfffff800
#define A3XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT 11
static inline uint32_t A3XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val)
{
	/* Base is stored >> 10 at bit 11 -- the asymmetric shift pair comes
	 * from the generated register description; do not "simplify". */
	return ((val >> 10) << A3XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A3XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
}

/* RB_DEPTH_PITCH: pitch stored in units of 8 (>> 3). */
#define REG_A3XX_RB_DEPTH_PITCH 0x00002103
#define A3XX_RB_DEPTH_PITCH__MASK 0xffffffff
#define A3XX_RB_DEPTH_PITCH__SHIFT 0
static inline uint32_t A3XX_RB_DEPTH_PITCH(uint32_t val)
{
	return ((val >> 3) << A3XX_RB_DEPTH_PITCH__SHIFT) & A3XX_RB_DEPTH_PITCH__MASK;
}
1038
/*
 * RB_STENCIL_CONTROL: stencil function and ops for front faces, with a
 * parallel *_BF set of fields for back faces.
 */
#define REG_A3XX_RB_STENCIL_CONTROL 0x00002104
#define A3XX_RB_STENCIL_CONTROL_STENCIL_ENABLE 0x00000001
#define A3XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF 0x00000004
#define A3XX_RB_STENCIL_CONTROL_FUNC__MASK 0x00000700
#define A3XX_RB_STENCIL_CONTROL_FUNC__SHIFT 8
static inline uint32_t A3XX_RB_STENCIL_CONTROL_FUNC(enum adreno_compare_func val)
{
	return ((val) << A3XX_RB_STENCIL_CONTROL_FUNC__SHIFT) & A3XX_RB_STENCIL_CONTROL_FUNC__MASK;
}
#define A3XX_RB_STENCIL_CONTROL_FAIL__MASK 0x00003800
#define A3XX_RB_STENCIL_CONTROL_FAIL__SHIFT 11
static inline uint32_t A3XX_RB_STENCIL_CONTROL_FAIL(enum adreno_stencil_op val)
{
	return ((val) << A3XX_RB_STENCIL_CONTROL_FAIL__SHIFT) & A3XX_RB_STENCIL_CONTROL_FAIL__MASK;
}
#define A3XX_RB_STENCIL_CONTROL_ZPASS__MASK 0x0001c000
#define A3XX_RB_STENCIL_CONTROL_ZPASS__SHIFT 14
static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZPASS(enum adreno_stencil_op val)
{
	return ((val) << A3XX_RB_STENCIL_CONTROL_ZPASS__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZPASS__MASK;
}
#define A3XX_RB_STENCIL_CONTROL_ZFAIL__MASK 0x000e0000
#define A3XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT 17
static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZFAIL(enum adreno_stencil_op val)
{
	return ((val) << A3XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZFAIL__MASK;
}
/* Back-face (BF) variants of the four fields above, bits 20..31 */
#define A3XX_RB_STENCIL_CONTROL_FUNC_BF__MASK 0x00700000
#define A3XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT 20
static inline uint32_t A3XX_RB_STENCIL_CONTROL_FUNC_BF(enum adreno_compare_func val)
{
	return ((val) << A3XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_FUNC_BF__MASK;
}
#define A3XX_RB_STENCIL_CONTROL_FAIL_BF__MASK 0x03800000
#define A3XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT 23
static inline uint32_t A3XX_RB_STENCIL_CONTROL_FAIL_BF(enum adreno_stencil_op val)
{
	return ((val) << A3XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_FAIL_BF__MASK;
}
#define A3XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK 0x1c000000
#define A3XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT 26
static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZPASS_BF(enum adreno_stencil_op val)
{
	return ((val) << A3XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK;
}
#define A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK 0xe0000000
#define A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT 29
static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op val)
{
	return ((val) << A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK;
}

/* Registers with unidentified function. */
#define REG_A3XX_UNKNOWN_2105 0x00002105

#define REG_A3XX_UNKNOWN_2106 0x00002106

#define REG_A3XX_UNKNOWN_2107 0x00002107
1096
/*
 * RB_STENCILREFMASK / RB_STENCILREFMASK_BF: stencil reference value,
 * compare mask and write mask, one register per face orientation.
 */
#define REG_A3XX_RB_STENCILREFMASK 0x00002108
#define A3XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff
#define A3XX_RB_STENCILREFMASK_STENCILREF__SHIFT 0
static inline uint32_t A3XX_RB_STENCILREFMASK_STENCILREF(uint32_t v)
{
	uint32_t bits = v << A3XX_RB_STENCILREFMASK_STENCILREF__SHIFT;

	return bits & A3XX_RB_STENCILREFMASK_STENCILREF__MASK;
}
#define A3XX_RB_STENCILREFMASK_STENCILMASK__MASK 0x0000ff00
#define A3XX_RB_STENCILREFMASK_STENCILMASK__SHIFT 8
static inline uint32_t A3XX_RB_STENCILREFMASK_STENCILMASK(uint32_t v)
{
	uint32_t bits = v << A3XX_RB_STENCILREFMASK_STENCILMASK__SHIFT;

	return bits & A3XX_RB_STENCILREFMASK_STENCILMASK__MASK;
}
#define A3XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK 0x00ff0000
#define A3XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT 16
static inline uint32_t A3XX_RB_STENCILREFMASK_STENCILWRITEMASK(uint32_t v)
{
	uint32_t bits = v << A3XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT;

	return bits & A3XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK;
}

/* Back-face twin of the register above; identical field layout. */
#define REG_A3XX_RB_STENCILREFMASK_BF 0x00002109
#define A3XX_RB_STENCILREFMASK_BF_STENCILREF__MASK 0x000000ff
#define A3XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT 0
static inline uint32_t A3XX_RB_STENCILREFMASK_BF_STENCILREF(uint32_t v)
{
	uint32_t bits = v << A3XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT;

	return bits & A3XX_RB_STENCILREFMASK_BF_STENCILREF__MASK;
}
#define A3XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK 0x0000ff00
#define A3XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT 8
static inline uint32_t A3XX_RB_STENCILREFMASK_BF_STENCILMASK(uint32_t v)
{
	uint32_t bits = v << A3XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT;

	return bits & A3XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK;
}
#define A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK 0x00ff0000
#define A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT 16
static inline uint32_t A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK(uint32_t v)
{
	uint32_t bits = v << A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT;

	return bits & A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK;
}
1136
/* PA_SC_WINDOW_OFFSET: window X/Y offset packed as two 16-bit halves. */
#define REG_A3XX_PA_SC_WINDOW_OFFSET 0x0000210e
#define A3XX_PA_SC_WINDOW_OFFSET_X__MASK 0x0000ffff
#define A3XX_PA_SC_WINDOW_OFFSET_X__SHIFT 0
static inline uint32_t A3XX_PA_SC_WINDOW_OFFSET_X(uint32_t v)
{
	uint32_t bits = v << A3XX_PA_SC_WINDOW_OFFSET_X__SHIFT;

	return bits & A3XX_PA_SC_WINDOW_OFFSET_X__MASK;
}
#define A3XX_PA_SC_WINDOW_OFFSET_Y__MASK 0xffff0000
#define A3XX_PA_SC_WINDOW_OFFSET_Y__SHIFT 16
static inline uint32_t A3XX_PA_SC_WINDOW_OFFSET_Y(uint32_t v)
{
	uint32_t bits = v << A3XX_PA_SC_WINDOW_OFFSET_Y__SHIFT;

	return bits & A3XX_PA_SC_WINDOW_OFFSET_Y__MASK;
}

/* Primitive-control registers (no field helpers generated). */
#define REG_A3XX_PC_VSTREAM_CONTROL 0x000021e4

#define REG_A3XX_PC_VERTEX_REUSE_BLOCK_CNTL 0x000021ea
1154
/* PC_PRIM_VTX_CNTL: primitive assembly / vertex control. */
#define REG_A3XX_PC_PRIM_VTX_CNTL 0x000021ec
#define A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC__MASK 0x0000001f
#define A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC__SHIFT 0
static inline uint32_t A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC(uint32_t val)
{
	return ((val) << A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC__SHIFT) & A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC__MASK;
}
/* Polygon mode primitive type, front then back faces */
#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE__MASK 0x000000e0
#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE__SHIFT 5
static inline uint32_t A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE(enum adreno_pa_su_sc_draw val)
{
	return ((val) << A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE__SHIFT) & A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE__MASK;
}
#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__MASK 0x00000700
#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__SHIFT 8
static inline uint32_t A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE(enum adreno_pa_su_sc_draw val)
{
	return ((val) << A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__SHIFT) & A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__MASK;
}
#define A3XX_PC_PRIM_VTX_CNTL_PROVOKING_VTX_LAST 0x02000000

#define REG_A3XX_PC_RESTART_INDEX 0x000021ed
1177
/* HLSQ_CONTROL_0..3: high-level sequencer (shader scheduling) control. */
#define REG_A3XX_HLSQ_CONTROL_0_REG 0x00002200
/* Single-bit field: only bit 0 of the enum value survives the mask. */
#define A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK 0x00000010
#define A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT 4
static inline uint32_t A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE(enum a3xx_threadsize val)
{
	return ((val) << A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT) & A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK;
}
#define A3XX_HLSQ_CONTROL_0_REG_FSSUPERTHREADENABLE 0x00000040
#define A3XX_HLSQ_CONTROL_0_REG_SPSHADERRESTART 0x00000200
#define A3XX_HLSQ_CONTROL_0_REG_RESERVED2 0x00000400
#define A3XX_HLSQ_CONTROL_0_REG_CHUNKDISABLE 0x04000000
#define A3XX_HLSQ_CONTROL_0_REG_CONSTSWITCHMODE 0x08000000
#define A3XX_HLSQ_CONTROL_0_REG_LAZYUPDATEDISABLE 0x10000000
#define A3XX_HLSQ_CONTROL_0_REG_SPCONSTFULLUPDATE 0x20000000
#define A3XX_HLSQ_CONTROL_0_REG_TPFULLUPDATE 0x40000000
#define A3XX_HLSQ_CONTROL_0_REG_SINGLECONTEXT 0x80000000

#define REG_A3XX_HLSQ_CONTROL_1_REG 0x00002201
/* Single-bit field, same pattern as FSTHREADSIZE above. */
#define A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK 0x00000040
#define A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT 6
static inline uint32_t A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE(enum a3xx_threadsize val)
{
	return ((val) << A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT) & A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK;
}
#define A3XX_HLSQ_CONTROL_1_REG_VSSUPERTHREADENABLE 0x00000100
#define A3XX_HLSQ_CONTROL_1_REG_RESERVED1 0x00000200

#define REG_A3XX_HLSQ_CONTROL_2_REG 0x00002202
#define A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK 0xfc000000
#define A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__SHIFT 26
static inline uint32_t A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD(uint32_t val)
{
	return ((val) << A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__SHIFT) & A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK;
}

#define REG_A3XX_HLSQ_CONTROL_3_REG 0x00002203
1214
/*
 * HLSQ_VS/FS_CONTROL_REG: per-stage constant length, constant start
 * offset and instruction length.  HLSQ_CONST_*PRESV_RANGE_REG: reserved
 * constant entry ranges per stage.
 */
#define REG_A3XX_HLSQ_VS_CONTROL_REG 0x00002204
#define A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK 0x00000fff
#define A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT 0
static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH(uint32_t v)
{
	uint32_t bits = v << A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT;

	return bits & A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK;
}
#define A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__MASK 0x00fff000
#define A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT 12
static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET(uint32_t v)
{
	uint32_t bits = v << A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT;

	return bits & A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__MASK;
}
#define A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
#define A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__SHIFT 24
static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH(uint32_t v)
{
	uint32_t bits = v << A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__SHIFT;

	return bits & A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__MASK;
}

#define REG_A3XX_HLSQ_FS_CONTROL_REG 0x00002205
#define A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK 0x00000fff
#define A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT 0
static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH(uint32_t v)
{
	uint32_t bits = v << A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT;

	return bits & A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK;
}
#define A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__MASK 0x00fff000
#define A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT 12
static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET(uint32_t v)
{
	uint32_t bits = v << A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT;

	return bits & A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__MASK;
}
#define A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
#define A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__SHIFT 24
static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH(uint32_t v)
{
	uint32_t bits = v << A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__SHIFT;

	return bits & A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__MASK;
}

#define REG_A3XX_HLSQ_CONST_VSPRESV_RANGE_REG 0x00002206
#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__MASK 0x0000ffff
#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__SHIFT 0
static inline uint32_t A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY(uint32_t v)
{
	uint32_t bits = v << A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__SHIFT;

	return bits & A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__MASK;
}
#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__MASK 0xffff0000
#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__SHIFT 16
static inline uint32_t A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY(uint32_t v)
{
	uint32_t bits = v << A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__SHIFT;

	return bits & A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__MASK;
}

#define REG_A3XX_HLSQ_CONST_FSPRESV_RANGE_REG 0x00002207
#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__MASK 0x0000ffff
#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__SHIFT 0
static inline uint32_t A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY(uint32_t v)
{
	uint32_t bits = v << A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__SHIFT;

	return bits & A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__MASK;
}
#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__MASK 0xffff0000
#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__SHIFT 16
static inline uint32_t A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY(uint32_t v)
{
	uint32_t bits = v << A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__SHIFT;

	return bits & A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__MASK;
}
1282
/* HLSQ compute (OpenCL NDRange / kernel) registers -- offsets only. */
#define REG_A3XX_HLSQ_CL_NDRANGE_0_REG 0x0000220a

#define REG_A3XX_HLSQ_CL_NDRANGE_1_REG 0x0000220b

#define REG_A3XX_HLSQ_CL_NDRANGE_2_REG 0x0000220c

#define REG_A3XX_HLSQ_CL_CONTROL_0_REG 0x00002211

#define REG_A3XX_HLSQ_CL_CONTROL_1_REG 0x00002212

#define REG_A3XX_HLSQ_CL_KERNEL_CONST_REG 0x00002214

#define REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG 0x00002215

#define REG_A3XX_HLSQ_CL_KERNEL_GROUP_Z_REG 0x00002217

#define REG_A3XX_HLSQ_CL_WG_OFFSET_REG 0x0000221a
1300
/* VFD_CONTROL_0: vertex fetch decoder sizing / instruction counts. */
#define REG_A3XX_VFD_CONTROL_0 0x00002240
#define A3XX_VFD_CONTROL_0_TOTALATTRTOVS__MASK 0x0003ffff
#define A3XX_VFD_CONTROL_0_TOTALATTRTOVS__SHIFT 0
static inline uint32_t A3XX_VFD_CONTROL_0_TOTALATTRTOVS(uint32_t v)
{
	uint32_t bits = v << A3XX_VFD_CONTROL_0_TOTALATTRTOVS__SHIFT;

	return bits & A3XX_VFD_CONTROL_0_TOTALATTRTOVS__MASK;
}
#define A3XX_VFD_CONTROL_0_PACKETSIZE__MASK 0x003c0000
#define A3XX_VFD_CONTROL_0_PACKETSIZE__SHIFT 18
static inline uint32_t A3XX_VFD_CONTROL_0_PACKETSIZE(uint32_t v)
{
	uint32_t bits = v << A3XX_VFD_CONTROL_0_PACKETSIZE__SHIFT;

	return bits & A3XX_VFD_CONTROL_0_PACKETSIZE__MASK;
}
#define A3XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK 0x07c00000
#define A3XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT 22
static inline uint32_t A3XX_VFD_CONTROL_0_STRMDECINSTRCNT(uint32_t v)
{
	uint32_t bits = v << A3XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT;

	return bits & A3XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK;
}
#define A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__MASK 0xf8000000
#define A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__SHIFT 27
static inline uint32_t A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT(uint32_t v)
{
	uint32_t bits = v << A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__SHIFT;

	return bits & A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__MASK;
}
1326
/* VFD_CONTROL_1: storage size plus regids for vertex/instance indices. */
#define REG_A3XX_VFD_CONTROL_1 0x00002241
#define A3XX_VFD_CONTROL_1_MAXSTORAGE__MASK 0x0000ffff
#define A3XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT 0
static inline uint32_t A3XX_VFD_CONTROL_1_MAXSTORAGE(uint32_t v)
{
	uint32_t bits = v << A3XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT;

	return bits & A3XX_VFD_CONTROL_1_MAXSTORAGE__MASK;
}
#define A3XX_VFD_CONTROL_1_REGID4VTX__MASK 0x00ff0000
#define A3XX_VFD_CONTROL_1_REGID4VTX__SHIFT 16
static inline uint32_t A3XX_VFD_CONTROL_1_REGID4VTX(uint32_t v)
{
	uint32_t bits = v << A3XX_VFD_CONTROL_1_REGID4VTX__SHIFT;

	return bits & A3XX_VFD_CONTROL_1_REGID4VTX__MASK;
}
#define A3XX_VFD_CONTROL_1_REGID4INST__MASK 0xff000000
#define A3XX_VFD_CONTROL_1_REGID4INST__SHIFT 24
static inline uint32_t A3XX_VFD_CONTROL_1_REGID4INST(uint32_t v)
{
	uint32_t bits = v << A3XX_VFD_CONTROL_1_REGID4INST__SHIFT;

	return bits & A3XX_VFD_CONTROL_1_REGID4INST__MASK;
}

/* Index range / offset registers -- offsets only. */
#define REG_A3XX_VFD_INDEX_MIN 0x00002242

#define REG_A3XX_VFD_INDEX_MAX 0x00002243

#define REG_A3XX_VFD_INSTANCEID_OFFSET 0x00002244

#define REG_A3XX_VFD_INDEX_OFFSET 0x00002245
1354
/*
 * VFD_FETCH[i]: array of fetch-instruction register pairs starting at
 * 0x2246 with stride 2; INSTR_0 holds the packed fields, INSTR_1 the
 * buffer address.
 */
static inline uint32_t REG_A3XX_VFD_FETCH(uint32_t i0) { return 0x00002246 + i0 * 0x2; }

static inline uint32_t REG_A3XX_VFD_FETCH_INSTR_0(uint32_t i0) { return 0x00002246 + i0 * 0x2; }
#define A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__MASK 0x0000007f
#define A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__SHIFT 0
static inline uint32_t A3XX_VFD_FETCH_INSTR_0_FETCHSIZE(uint32_t v)
{
	uint32_t bits = v << A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__SHIFT;

	return bits & A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__MASK;
}
#define A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK 0x0001ff80
#define A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__SHIFT 7
static inline uint32_t A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE(uint32_t v)
{
	uint32_t bits = v << A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__SHIFT;

	return bits & A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK;
}
#define A3XX_VFD_FETCH_INSTR_0_SWITCHNEXT 0x00020000
#define A3XX_VFD_FETCH_INSTR_0_INDEXCODE__MASK 0x00fc0000
#define A3XX_VFD_FETCH_INSTR_0_INDEXCODE__SHIFT 18
static inline uint32_t A3XX_VFD_FETCH_INSTR_0_INDEXCODE(uint32_t v)
{
	uint32_t bits = v << A3XX_VFD_FETCH_INSTR_0_INDEXCODE__SHIFT;

	return bits & A3XX_VFD_FETCH_INSTR_0_INDEXCODE__MASK;
}
#define A3XX_VFD_FETCH_INSTR_0_STEPRATE__MASK 0xff000000
#define A3XX_VFD_FETCH_INSTR_0_STEPRATE__SHIFT 24
static inline uint32_t A3XX_VFD_FETCH_INSTR_0_STEPRATE(uint32_t v)
{
	uint32_t bits = v << A3XX_VFD_FETCH_INSTR_0_STEPRATE__SHIFT;

	return bits & A3XX_VFD_FETCH_INSTR_0_STEPRATE__MASK;
}

static inline uint32_t REG_A3XX_VFD_FETCH_INSTR_1(uint32_t i0) { return 0x00002247 + i0 * 0x2; }
1385
/* VFD_DECODE[i]: array at 0x2266, stride 1; one decode instr per attr. */
static inline uint32_t REG_A3XX_VFD_DECODE(uint32_t i0) { return 0x00002266 + 0x1*i0; }

static inline uint32_t REG_A3XX_VFD_DECODE_INSTR(uint32_t i0) { return 0x00002266 + 0x1*i0; }
#define A3XX_VFD_DECODE_INSTR_WRITEMASK__MASK 0x0000000f
#define A3XX_VFD_DECODE_INSTR_WRITEMASK__SHIFT 0
static inline uint32_t A3XX_VFD_DECODE_INSTR_WRITEMASK(uint32_t val)
{
	return ((val) << A3XX_VFD_DECODE_INSTR_WRITEMASK__SHIFT) & A3XX_VFD_DECODE_INSTR_WRITEMASK__MASK;
}
#define A3XX_VFD_DECODE_INSTR_CONSTFILL 0x00000010
#define A3XX_VFD_DECODE_INSTR_FORMAT__MASK 0x00000fc0
#define A3XX_VFD_DECODE_INSTR_FORMAT__SHIFT 6
static inline uint32_t A3XX_VFD_DECODE_INSTR_FORMAT(enum a3xx_vtx_fmt val)
{
	return ((val) << A3XX_VFD_DECODE_INSTR_FORMAT__SHIFT) & A3XX_VFD_DECODE_INSTR_FORMAT__MASK;
}
#define A3XX_VFD_DECODE_INSTR_REGID__MASK 0x000ff000
#define A3XX_VFD_DECODE_INSTR_REGID__SHIFT 12
static inline uint32_t A3XX_VFD_DECODE_INSTR_REGID(uint32_t val)
{
	return ((val) << A3XX_VFD_DECODE_INSTR_REGID__SHIFT) & A3XX_VFD_DECODE_INSTR_REGID__MASK;
}
#define A3XX_VFD_DECODE_INSTR_SHIFTCNT__MASK 0x1f000000
#define A3XX_VFD_DECODE_INSTR_SHIFTCNT__SHIFT 24
static inline uint32_t A3XX_VFD_DECODE_INSTR_SHIFTCNT(uint32_t val)
{
	return ((val) << A3XX_VFD_DECODE_INSTR_SHIFTCNT__SHIFT) & A3XX_VFD_DECODE_INSTR_SHIFTCNT__MASK;
}
#define A3XX_VFD_DECODE_INSTR_LASTCOMPVALID 0x20000000
#define A3XX_VFD_DECODE_INSTR_SWITCHNEXT 0x40000000
1416
/* VFD_VS_THREADING_THRESHOLD: VS thread launch thresholds. */
#define REG_A3XX_VFD_VS_THREADING_THRESHOLD 0x0000227e
#define A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD__MASK 0x0000000f
#define A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD__SHIFT 0
static inline uint32_t A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD(uint32_t v)
{
	uint32_t bits = v << A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD__SHIFT;

	return bits & A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD__MASK;
}
#define A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT__MASK 0x0000ff00
#define A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT__SHIFT 8
static inline uint32_t A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT(uint32_t v)
{
	uint32_t bits = v << A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT__SHIFT;

	return bits & A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT__MASK;
}
1430
/* VPC_ATTR: varying-parameter-cache attribute count / assignment. */
#define REG_A3XX_VPC_ATTR 0x00002280
#define A3XX_VPC_ATTR_TOTALATTR__MASK 0x00000fff
#define A3XX_VPC_ATTR_TOTALATTR__SHIFT 0
static inline uint32_t A3XX_VPC_ATTR_TOTALATTR(uint32_t v)
{
	uint32_t bits = v << A3XX_VPC_ATTR_TOTALATTR__SHIFT;

	return bits & A3XX_VPC_ATTR_TOTALATTR__MASK;
}
#define A3XX_VPC_ATTR_THRDASSIGN__MASK 0x0ffff000
#define A3XX_VPC_ATTR_THRDASSIGN__SHIFT 12
static inline uint32_t A3XX_VPC_ATTR_THRDASSIGN(uint32_t v)
{
	uint32_t bits = v << A3XX_VPC_ATTR_THRDASSIGN__SHIFT;

	return bits & A3XX_VPC_ATTR_THRDASSIGN__MASK;
}
#define A3XX_VPC_ATTR_LMSIZE__MASK 0xf0000000
#define A3XX_VPC_ATTR_LMSIZE__SHIFT 28
static inline uint32_t A3XX_VPC_ATTR_LMSIZE(uint32_t v)
{
	uint32_t bits = v << A3XX_VPC_ATTR_LMSIZE__SHIFT;

	return bits & A3XX_VPC_ATTR_LMSIZE__MASK;
}
1450
/* VPC_PACK plus the varying interpolation/replication register arrays. */
#define REG_A3XX_VPC_PACK 0x00002281
#define A3XX_VPC_PACK_NUMFPNONPOSVAR__MASK 0x0000ff00
#define A3XX_VPC_PACK_NUMFPNONPOSVAR__SHIFT 8
static inline uint32_t A3XX_VPC_PACK_NUMFPNONPOSVAR(uint32_t v)
{
	uint32_t bits = v << A3XX_VPC_PACK_NUMFPNONPOSVAR__SHIFT;

	return bits & A3XX_VPC_PACK_NUMFPNONPOSVAR__MASK;
}
#define A3XX_VPC_PACK_NUMNONPOSVSVAR__MASK 0x00ff0000
#define A3XX_VPC_PACK_NUMNONPOSVSVAR__SHIFT 16
static inline uint32_t A3XX_VPC_PACK_NUMNONPOSVSVAR(uint32_t v)
{
	uint32_t bits = v << A3XX_VPC_PACK_NUMNONPOSVSVAR__SHIFT;

	return bits & A3XX_VPC_PACK_NUMNONPOSVSVAR__MASK;
}

/* VPC_VARYING_INTERP[i] at 0x2282 and VPC_VARYING_PS_REPL[i] at 0x2286,
 * both stride 1. */
static inline uint32_t REG_A3XX_VPC_VARYING_INTERP(uint32_t i0) { return 0x00002282 + i0; }

static inline uint32_t REG_A3XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x00002282 + i0; }

static inline uint32_t REG_A3XX_VPC_VARYING_PS_REPL(uint32_t i0) { return 0x00002286 + i0; }

static inline uint32_t REG_A3XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0x00002286 + i0; }

#define REG_A3XX_VPC_VARY_CYLWRAP_ENABLE_0 0x0000228a

#define REG_A3XX_VPC_VARY_CYLWRAP_ENABLE_1 0x0000228b
1476
/* SP_SP_CTRL_REG: shader processor global control. */
#define REG_A3XX_SP_SP_CTRL_REG 0x000022c0
#define A3XX_SP_SP_CTRL_REG_RESOLVE 0x00010000
#define A3XX_SP_SP_CTRL_REG_CONSTMODE__MASK 0x000c0000
#define A3XX_SP_SP_CTRL_REG_CONSTMODE__SHIFT 18
static inline uint32_t A3XX_SP_SP_CTRL_REG_CONSTMODE(uint32_t v)
{
	uint32_t bits = v << A3XX_SP_SP_CTRL_REG_CONSTMODE__SHIFT;

	return bits & A3XX_SP_SP_CTRL_REG_CONSTMODE__MASK;
}
#define A3XX_SP_SP_CTRL_REG_SLEEPMODE__MASK 0x00300000
#define A3XX_SP_SP_CTRL_REG_SLEEPMODE__SHIFT 20
static inline uint32_t A3XX_SP_SP_CTRL_REG_SLEEPMODE(uint32_t v)
{
	uint32_t bits = v << A3XX_SP_SP_CTRL_REG_SLEEPMODE__SHIFT;

	return bits & A3XX_SP_SP_CTRL_REG_SLEEPMODE__MASK;
}
#define A3XX_SP_SP_CTRL_REG_LOMODE__MASK 0x00c00000
#define A3XX_SP_SP_CTRL_REG_LOMODE__SHIFT 22
static inline uint32_t A3XX_SP_SP_CTRL_REG_LOMODE(uint32_t v)
{
	uint32_t bits = v << A3XX_SP_SP_CTRL_REG_LOMODE__SHIFT;

	return bits & A3XX_SP_SP_CTRL_REG_LOMODE__MASK;
}
1497
/* SP_VS_CTRL_REG0: vertex shader execution mode and register footprint. */
#define REG_A3XX_SP_VS_CTRL_REG0 0x000022c4
#define A3XX_SP_VS_CTRL_REG0_THREADMODE__MASK 0x00000001
#define A3XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT 0
static inline uint32_t A3XX_SP_VS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
{
	return ((val) << A3XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT) & A3XX_SP_VS_CTRL_REG0_THREADMODE__MASK;
}
#define A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__MASK 0x00000002
#define A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__SHIFT 1
static inline uint32_t A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE(enum a3xx_instrbuffermode val)
{
	return ((val) << A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__SHIFT) & A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__MASK;
}
#define A3XX_SP_VS_CTRL_REG0_CACHEINVALID 0x00000004
/* Half- and full-precision register footprints, bits 4..9 and 10..17 */
#define A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
#define A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
static inline uint32_t A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
{
	return ((val) << A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
}
#define A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0003fc00
#define A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
static inline uint32_t A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
{
	return ((val) << A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
}
#define A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__MASK 0x000c0000
#define A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__SHIFT 18
static inline uint32_t A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP(uint32_t val)
{
	return ((val) << A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__SHIFT) & A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__MASK;
}
/* Single-bit field: only bit 0 of the enum value survives the mask. */
#define A3XX_SP_VS_CTRL_REG0_THREADSIZE__MASK 0x00100000
#define A3XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT 20
static inline uint32_t A3XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
{
	return ((val) << A3XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT) & A3XX_SP_VS_CTRL_REG0_THREADSIZE__MASK;
}
#define A3XX_SP_VS_CTRL_REG0_SUPERTHREADMODE 0x00200000
#define A3XX_SP_VS_CTRL_REG0_PIXLODENABLE 0x00400000
#define A3XX_SP_VS_CTRL_REG0_LENGTH__MASK 0xff000000
#define A3XX_SP_VS_CTRL_REG0_LENGTH__SHIFT 24
static inline uint32_t A3XX_SP_VS_CTRL_REG0_LENGTH(uint32_t val)
{
	return ((val) << A3XX_SP_VS_CTRL_REG0_LENGTH__SHIFT) & A3XX_SP_VS_CTRL_REG0_LENGTH__MASK;
}
1544
/* SP_VS_CTRL_REG1: vertex-shader shader-processor control word 1.
 * The field builders below shift a value into place and then mask it,
 * so inputs wider than the field are truncated rather than corrupting
 * neighbouring bits.
 */
#define REG_A3XX_SP_VS_CTRL_REG1 0x000022c5
#define A3XX_SP_VS_CTRL_REG1_CONSTLENGTH__MASK 0x000003ff
#define A3XX_SP_VS_CTRL_REG1_CONSTLENGTH__SHIFT 0
/* Constant length, bits [9:0]. */
static inline uint32_t A3XX_SP_VS_CTRL_REG1_CONSTLENGTH(uint32_t val)
{
	uint32_t shifted = val << A3XX_SP_VS_CTRL_REG1_CONSTLENGTH__SHIFT;

	return shifted & A3XX_SP_VS_CTRL_REG1_CONSTLENGTH__MASK;
}
#define A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT__MASK 0x000ffc00
#define A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT__SHIFT 10
/* Constant footprint, bits [19:10]. */
static inline uint32_t A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT(uint32_t val)
{
	uint32_t shifted = val << A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT__SHIFT;

	return shifted & A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT__MASK;
}
#define A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__MASK 0x3f000000
#define A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__SHIFT 24
/* Initial outstanding count, bits [29:24]. */
static inline uint32_t A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING(uint32_t val)
{
	uint32_t shifted = val << A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__SHIFT;

	return shifted & A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__MASK;
}
1564
/* SP_VS_PARAM_REG: vertex-shader parameter register.
 * Builders place register IDs / counts into their bit-fields; the mask
 * applied after the shift truncates oversized inputs.
 */
#define REG_A3XX_SP_VS_PARAM_REG 0x000022c6
#define A3XX_SP_VS_PARAM_REG_POSREGID__MASK 0x000000ff
#define A3XX_SP_VS_PARAM_REG_POSREGID__SHIFT 0
/* Position output register id, bits [7:0]. */
static inline uint32_t A3XX_SP_VS_PARAM_REG_POSREGID(uint32_t val)
{
	uint32_t field = val << A3XX_SP_VS_PARAM_REG_POSREGID__SHIFT;

	return field & A3XX_SP_VS_PARAM_REG_POSREGID__MASK;
}
#define A3XX_SP_VS_PARAM_REG_PSIZEREGID__MASK 0x0000ff00
#define A3XX_SP_VS_PARAM_REG_PSIZEREGID__SHIFT 8
/* Point-size output register id, bits [15:8]. */
static inline uint32_t A3XX_SP_VS_PARAM_REG_PSIZEREGID(uint32_t val)
{
	uint32_t field = val << A3XX_SP_VS_PARAM_REG_PSIZEREGID__SHIFT;

	return field & A3XX_SP_VS_PARAM_REG_PSIZEREGID__MASK;
}
#define A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK 0xfff00000
#define A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__SHIFT 20
/* Total vertex-shader output varying count, bits [31:20]. */
static inline uint32_t A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR(uint32_t val)
{
	uint32_t field = val << A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__SHIFT;

	return field & A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK;
}
1584
/* SP_VS_OUT[i]: per-output descriptor array (two outputs A/B per word).
 * REG_A3XX_*(i0) helpers compute the register offset for array index i0;
 * stride is one dword per entry.
 */
static inline uint32_t REG_A3XX_SP_VS_OUT(uint32_t i0) { return 0x000022c7 + 0x1*i0; }

static inline uint32_t REG_A3XX_SP_VS_OUT_REG(uint32_t i0) { return 0x000022c7 + 0x1*i0; }
#define A3XX_SP_VS_OUT_REG_A_REGID__MASK 0x000001ff
#define A3XX_SP_VS_OUT_REG_A_REGID__SHIFT 0
static inline uint32_t A3XX_SP_VS_OUT_REG_A_REGID(uint32_t val)
{
	return ((val) << A3XX_SP_VS_OUT_REG_A_REGID__SHIFT) & A3XX_SP_VS_OUT_REG_A_REGID__MASK;
}
#define A3XX_SP_VS_OUT_REG_A_COMPMASK__MASK 0x00001e00
#define A3XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT 9
static inline uint32_t A3XX_SP_VS_OUT_REG_A_COMPMASK(uint32_t val)
{
	return ((val) << A3XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT) & A3XX_SP_VS_OUT_REG_A_COMPMASK__MASK;
}
#define A3XX_SP_VS_OUT_REG_B_REGID__MASK 0x01ff0000
#define A3XX_SP_VS_OUT_REG_B_REGID__SHIFT 16
static inline uint32_t A3XX_SP_VS_OUT_REG_B_REGID(uint32_t val)
{
	return ((val) << A3XX_SP_VS_OUT_REG_B_REGID__SHIFT) & A3XX_SP_VS_OUT_REG_B_REGID__MASK;
}
#define A3XX_SP_VS_OUT_REG_B_COMPMASK__MASK 0x1e000000
#define A3XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT 25
static inline uint32_t A3XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val)
{
	return ((val) << A3XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT) & A3XX_SP_VS_OUT_REG_B_COMPMASK__MASK;
}

/* SP_VS_VPC_DST[i]: four 8-bit output locations packed per dword. */
static inline uint32_t REG_A3XX_SP_VS_VPC_DST(uint32_t i0) { return 0x000022d0 + 0x1*i0; }

static inline uint32_t REG_A3XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x000022d0 + 0x1*i0; }
#define A3XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
#define A3XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT 0
static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC0(uint32_t val)
{
	return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK;
}
#define A3XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
#define A3XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT 8
static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC1(uint32_t val)
{
	return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK;
}
#define A3XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
#define A3XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT 16
static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC2(uint32_t val)
{
	return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK;
}
#define A3XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
#define A3XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT 24
static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val)
{
	return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK;
}

/* SP_VS_OBJ_OFFSET_REG: constant/shader object offsets (upper bits only). */
#define REG_A3XX_SP_VS_OBJ_OFFSET_REG 0x000022d4
#define A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
#define A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
{
	return ((val) << A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
}
#define A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
#define A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
{
	return ((val) << A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
}

/* Plain register addresses (no sub-fields described). */
#define REG_A3XX_SP_VS_OBJ_START_REG 0x000022d5

#define REG_A3XX_SP_VS_PVT_MEM_CTRL_REG 0x000022d6

#define REG_A3XX_SP_VS_PVT_MEM_ADDR_REG 0x000022d7

#define REG_A3XX_SP_VS_PVT_MEM_SIZE_REG 0x000022d8

#define REG_A3XX_SP_VS_LENGTH_REG 0x000022df
/* SHADERLENGTH spans the whole dword, so the helper is effectively identity. */
#define A3XX_SP_VS_LENGTH_REG_SHADERLENGTH__MASK 0xffffffff
#define A3XX_SP_VS_LENGTH_REG_SHADERLENGTH__SHIFT 0
static inline uint32_t A3XX_SP_VS_LENGTH_REG_SHADERLENGTH(uint32_t val)
{
	return ((val) << A3XX_SP_VS_LENGTH_REG_SHADERLENGTH__SHIFT) & A3XX_SP_VS_LENGTH_REG_SHADERLENGTH__MASK;
}
1670
/* SP_FS_CTRL_REG0: fragment-shader shader-processor control word 0.
 * Field layout mirrors SP_VS_CTRL_REG0; helpers mask after shifting so
 * oversized inputs are truncated.
 */
#define REG_A3XX_SP_FS_CTRL_REG0 0x000022e0
#define A3XX_SP_FS_CTRL_REG0_THREADMODE__MASK 0x00000001
#define A3XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT 0
static inline uint32_t A3XX_SP_FS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
{
	return ((val) << A3XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT) & A3XX_SP_FS_CTRL_REG0_THREADMODE__MASK;
}
#define A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__MASK 0x00000002
#define A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__SHIFT 1
static inline uint32_t A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE(enum a3xx_instrbuffermode val)
{
	return ((val) << A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__SHIFT) & A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__MASK;
}
/* Single-bit flag. */
#define A3XX_SP_FS_CTRL_REG0_CACHEINVALID 0x00000004
#define A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
#define A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
static inline uint32_t A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
{
	return ((val) << A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
}
#define A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0003fc00
#define A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
static inline uint32_t A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
{
	return ((val) << A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
}
#define A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__MASK 0x000c0000
#define A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__SHIFT 18
static inline uint32_t A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP(uint32_t val)
{
	return ((val) << A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__SHIFT) & A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__MASK;
}
#define A3XX_SP_FS_CTRL_REG0_THREADSIZE__MASK 0x00100000
#define A3XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT 20
static inline uint32_t A3XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
{
	return ((val) << A3XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT) & A3XX_SP_FS_CTRL_REG0_THREADSIZE__MASK;
}
#define A3XX_SP_FS_CTRL_REG0_SUPERTHREADMODE 0x00200000
#define A3XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x00400000
/* LENGTH occupies the top byte [31:24]. */
#define A3XX_SP_FS_CTRL_REG0_LENGTH__MASK 0xff000000
#define A3XX_SP_FS_CTRL_REG0_LENGTH__SHIFT 24
static inline uint32_t A3XX_SP_FS_CTRL_REG0_LENGTH(uint32_t val)
{
	return ((val) << A3XX_SP_FS_CTRL_REG0_LENGTH__SHIFT) & A3XX_SP_FS_CTRL_REG0_LENGTH__MASK;
}
1717
/* SP_FS_CTRL_REG1: fragment-shader shader-processor control word 1.
 * Builders shift the value into position, then mask, so inputs wider
 * than the field are truncated.
 */
#define REG_A3XX_SP_FS_CTRL_REG1 0x000022e1
#define A3XX_SP_FS_CTRL_REG1_CONSTLENGTH__MASK 0x000003ff
#define A3XX_SP_FS_CTRL_REG1_CONSTLENGTH__SHIFT 0
/* Constant length, bits [9:0]. */
static inline uint32_t A3XX_SP_FS_CTRL_REG1_CONSTLENGTH(uint32_t val)
{
	uint32_t bits = val << A3XX_SP_FS_CTRL_REG1_CONSTLENGTH__SHIFT;

	return bits & A3XX_SP_FS_CTRL_REG1_CONSTLENGTH__MASK;
}
#define A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT__MASK 0x000ffc00
#define A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT__SHIFT 10
/* Constant footprint, bits [19:10]. */
static inline uint32_t A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT(uint32_t val)
{
	uint32_t bits = val << A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT__SHIFT;

	return bits & A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT__MASK;
}
#define A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__MASK 0x00f00000
#define A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__SHIFT 20
/* Initial outstanding count, bits [23:20]. */
static inline uint32_t A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING(uint32_t val)
{
	uint32_t bits = val << A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__SHIFT;

	return bits & A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__MASK;
}
#define A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__MASK 0x3f000000
#define A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__SHIFT 24
/* Half-precision varying offset, bits [29:24]. */
static inline uint32_t A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET(uint32_t val)
{
	uint32_t bits = val << A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__SHIFT;

	return bits & A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__MASK;
}
1743
/* SP_FS_OBJ_OFFSET_REG: fragment-shader constant/shader object offsets. */
#define REG_A3XX_SP_FS_OBJ_OFFSET_REG 0x000022e2
#define A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
#define A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
{
	return ((val) << A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
}
#define A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
#define A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
{
	return ((val) << A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
}

/* Plain register addresses (no sub-fields described). */
#define REG_A3XX_SP_FS_OBJ_START_REG 0x000022e3

#define REG_A3XX_SP_FS_PVT_MEM_CTRL_REG 0x000022e4

#define REG_A3XX_SP_FS_PVT_MEM_ADDR_REG 0x000022e5

#define REG_A3XX_SP_FS_PVT_MEM_SIZE_REG 0x000022e6

#define REG_A3XX_SP_FS_FLAT_SHAD_MODE_REG_0 0x000022e8

#define REG_A3XX_SP_FS_FLAT_SHAD_MODE_REG_1 0x000022e9

#define REG_A3XX_SP_FS_OUTPUT_REG 0x000022ec

/* SP_FS_MRT[i]: per-render-target register id, stride one dword. */
static inline uint32_t REG_A3XX_SP_FS_MRT(uint32_t i0) { return 0x000022f0 + 0x1*i0; }

static inline uint32_t REG_A3XX_SP_FS_MRT_REG(uint32_t i0) { return 0x000022f0 + 0x1*i0; }
#define A3XX_SP_FS_MRT_REG_REGID__MASK 0x000000ff
#define A3XX_SP_FS_MRT_REG_REGID__SHIFT 0
static inline uint32_t A3XX_SP_FS_MRT_REG_REGID(uint32_t val)
{
	return ((val) << A3XX_SP_FS_MRT_REG_REGID__SHIFT) & A3XX_SP_FS_MRT_REG_REGID__MASK;
}
#define A3XX_SP_FS_MRT_REG_HALF_PRECISION 0x00000100

/* SP_FS_IMAGE_OUTPUT[i]: per-render-target colour format, stride one dword. */
static inline uint32_t REG_A3XX_SP_FS_IMAGE_OUTPUT(uint32_t i0) { return 0x000022f4 + 0x1*i0; }

static inline uint32_t REG_A3XX_SP_FS_IMAGE_OUTPUT_REG(uint32_t i0) { return 0x000022f4 + 0x1*i0; }
#define A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT__MASK 0x0000003f
#define A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT__SHIFT 0
static inline uint32_t A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT(enum a3xx_color_fmt val)
{
	return ((val) << A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT__SHIFT) & A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT__MASK;
}

#define REG_A3XX_SP_FS_LENGTH_REG 0x000022ff
/* SHADERLENGTH spans the whole dword, so the helper is effectively identity. */
#define A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__MASK 0xffffffff
#define A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__SHIFT 0
static inline uint32_t A3XX_SP_FS_LENGTH_REG_SHADERLENGTH(uint32_t val)
{
	return ((val) << A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__SHIFT) & A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__MASK;
}
1800
/* TPL1_TP_VS_TEX_OFFSET: vertex-shader texture-pipe state offsets. */
#define REG_A3XX_TPL1_TP_VS_TEX_OFFSET 0x00002340
#define A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__MASK 0x000000ff
#define A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__SHIFT 0
static inline uint32_t A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET(uint32_t val)
{
	return ((val) << A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__SHIFT) & A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__MASK;
}
#define A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET__MASK 0x0000ff00
#define A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET__SHIFT 8
static inline uint32_t A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET(uint32_t val)
{
	return ((val) << A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET__SHIFT) & A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET__MASK;
}
#define A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR__MASK 0xffff0000
#define A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR__SHIFT 16
static inline uint32_t A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR(uint32_t val)
{
	return ((val) << A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR__SHIFT) & A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR__MASK;
}

#define REG_A3XX_TPL1_TP_VS_BORDER_COLOR_BASE_ADDR 0x00002341

/* TPL1_TP_FS_TEX_OFFSET: fragment-shader texture-pipe state offsets
 * (same field layout as the VS variant above).
 */
#define REG_A3XX_TPL1_TP_FS_TEX_OFFSET 0x00002342
#define A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET__MASK 0x000000ff
#define A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET__SHIFT 0
static inline uint32_t A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET(uint32_t val)
{
	return ((val) << A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET__SHIFT) & A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET__MASK;
}
#define A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET__MASK 0x0000ff00
#define A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET__SHIFT 8
static inline uint32_t A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET(uint32_t val)
{
	return ((val) << A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET__SHIFT) & A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET__MASK;
}
#define A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR__MASK 0xffff0000
#define A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR__SHIFT 16
static inline uint32_t A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR(uint32_t val)
{
	return ((val) << A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR__SHIFT) & A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR__MASK;
}

#define REG_A3XX_TPL1_TP_FS_BORDER_COLOR_BASE_ADDR 0x00002343

/* VBIF (bus interface) registers: plain addresses, no field helpers. */
#define REG_A3XX_VBIF_CLKON 0x00003001

#define REG_A3XX_VBIF_FIXED_SORT_EN 0x0000300c

#define REG_A3XX_VBIF_FIXED_SORT_SEL0 0x0000300d

#define REG_A3XX_VBIF_FIXED_SORT_SEL1 0x0000300e

#define REG_A3XX_VBIF_ABIT_SORT 0x0000301c

#define REG_A3XX_VBIF_ABIT_SORT_CONF 0x0000301d

#define REG_A3XX_VBIF_GATE_OFF_WRREQ_EN 0x0000302a

#define REG_A3XX_VBIF_IN_RD_LIM_CONF0 0x0000302c

#define REG_A3XX_VBIF_IN_RD_LIM_CONF1 0x0000302d

#define REG_A3XX_VBIF_IN_WR_LIM_CONF0 0x00003030

#define REG_A3XX_VBIF_IN_WR_LIM_CONF1 0x00003031

#define REG_A3XX_VBIF_OUT_RD_LIM_CONF0 0x00003034

#define REG_A3XX_VBIF_OUT_WR_LIM_CONF0 0x00003035

#define REG_A3XX_VBIF_DDR_OUT_MAX_BURST 0x00003036

#define REG_A3XX_VBIF_ARB_CTL 0x0000303c

#define REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB 0x00003049

#define REG_A3XX_VBIF_OUT_AXI_AMEMTYPE_CONF0 0x00003058

#define REG_A3XX_VBIF_OUT_AXI_AOOO_EN 0x0000305e

#define REG_A3XX_VBIF_OUT_AXI_AOOO 0x0000305f
1882
/* VSC_BIN_SIZE: visibility-stream bin dimensions.
 * Unlike the other field builders in this file, WIDTH/HEIGHT first
 * divide the input by 32 (val >> 5) before placing it in the field,
 * i.e. the register stores the bin size in 32-pixel units.
 */
#define REG_A3XX_VSC_BIN_SIZE 0x00000c01
#define A3XX_VSC_BIN_SIZE_WIDTH__MASK 0x0000001f
#define A3XX_VSC_BIN_SIZE_WIDTH__SHIFT 0
static inline uint32_t A3XX_VSC_BIN_SIZE_WIDTH(uint32_t val)
{
	return ((val >> 5) << A3XX_VSC_BIN_SIZE_WIDTH__SHIFT) & A3XX_VSC_BIN_SIZE_WIDTH__MASK;
}
#define A3XX_VSC_BIN_SIZE_HEIGHT__MASK 0x000003e0
#define A3XX_VSC_BIN_SIZE_HEIGHT__SHIFT 5
static inline uint32_t A3XX_VSC_BIN_SIZE_HEIGHT(uint32_t val)
{
	return ((val >> 5) << A3XX_VSC_BIN_SIZE_HEIGHT__SHIFT) & A3XX_VSC_BIN_SIZE_HEIGHT__MASK;
}

#define REG_A3XX_VSC_SIZE_ADDRESS 0x00000c02

/* VSC_PIPE[i]: visibility-stream pipe state; three dwords per pipe
 * (CONFIG at +0, DATA_ADDRESS at +1, DATA_LENGTH at +2).
 */
static inline uint32_t REG_A3XX_VSC_PIPE(uint32_t i0) { return 0x00000c06 + 0x3*i0; }

static inline uint32_t REG_A3XX_VSC_PIPE_CONFIG(uint32_t i0) { return 0x00000c06 + 0x3*i0; }
#define A3XX_VSC_PIPE_CONFIG_X__MASK 0x000003ff
#define A3XX_VSC_PIPE_CONFIG_X__SHIFT 0
static inline uint32_t A3XX_VSC_PIPE_CONFIG_X(uint32_t val)
{
	return ((val) << A3XX_VSC_PIPE_CONFIG_X__SHIFT) & A3XX_VSC_PIPE_CONFIG_X__MASK;
}
#define A3XX_VSC_PIPE_CONFIG_Y__MASK 0x000ffc00
#define A3XX_VSC_PIPE_CONFIG_Y__SHIFT 10
static inline uint32_t A3XX_VSC_PIPE_CONFIG_Y(uint32_t val)
{
	return ((val) << A3XX_VSC_PIPE_CONFIG_Y__SHIFT) & A3XX_VSC_PIPE_CONFIG_Y__MASK;
}
#define A3XX_VSC_PIPE_CONFIG_W__MASK 0x00f00000
#define A3XX_VSC_PIPE_CONFIG_W__SHIFT 20
static inline uint32_t A3XX_VSC_PIPE_CONFIG_W(uint32_t val)
{
	return ((val) << A3XX_VSC_PIPE_CONFIG_W__SHIFT) & A3XX_VSC_PIPE_CONFIG_W__MASK;
}
#define A3XX_VSC_PIPE_CONFIG_H__MASK 0x0f000000
#define A3XX_VSC_PIPE_CONFIG_H__SHIFT 24
static inline uint32_t A3XX_VSC_PIPE_CONFIG_H(uint32_t val)
{
	return ((val) << A3XX_VSC_PIPE_CONFIG_H__SHIFT) & A3XX_VSC_PIPE_CONFIG_H__MASK;
}

static inline uint32_t REG_A3XX_VSC_PIPE_DATA_ADDRESS(uint32_t i0) { return 0x00000c07 + 0x3*i0; }

static inline uint32_t REG_A3XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x00000c08 + 0x3*i0; }
1930
/* Register whose function is not described in the register database. */
#define REG_A3XX_UNKNOWN_0C3D 0x00000c3d

/* PC (primitive control) performance-counter select registers. */
#define REG_A3XX_PC_PERFCOUNTER0_SELECT 0x00000c48

#define REG_A3XX_PC_PERFCOUNTER1_SELECT 0x00000c49

#define REG_A3XX_PC_PERFCOUNTER2_SELECT 0x00000c4a

#define REG_A3XX_PC_PERFCOUNTER3_SELECT 0x00000c4b

#define REG_A3XX_UNKNOWN_0C81 0x00000c81

/* GRAS (rasterizer) performance-counter select registers. */
#define REG_A3XX_GRAS_PERFCOUNTER0_SELECT 0x00000c88

#define REG_A3XX_GRAS_PERFCOUNTER1_SELECT 0x00000c89

#define REG_A3XX_GRAS_PERFCOUNTER2_SELECT 0x00000c8a

#define REG_A3XX_GRAS_PERFCOUNTER3_SELECT 0x00000c8b

/* GRAS_CL_USER_PLANE[i]: user clip planes, four dwords (X/Y/Z/W) each. */
static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE(uint32_t i0) { return 0x00000ca0 + 0x4*i0; }

static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_X(uint32_t i0) { return 0x00000ca0 + 0x4*i0; }

static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_Y(uint32_t i0) { return 0x00000ca1 + 0x4*i0; }

static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_Z(uint32_t i0) { return 0x00000ca2 + 0x4*i0; }

static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_W(uint32_t i0) { return 0x00000ca3 + 0x4*i0; }

/* RB (render backend) registers. */
#define REG_A3XX_RB_GMEM_BASE_ADDR 0x00000cc0

#define REG_A3XX_RB_PERFCOUNTER0_SELECT 0x00000cc6

#define REG_A3XX_RB_PERFCOUNTER1_SELECT 0x00000cc7

/* RB_WINDOW_SIZE: render window dimensions (width [13:0], height [27:14]). */
#define REG_A3XX_RB_WINDOW_SIZE 0x00000ce0
#define A3XX_RB_WINDOW_SIZE_WIDTH__MASK 0x00003fff
#define A3XX_RB_WINDOW_SIZE_WIDTH__SHIFT 0
static inline uint32_t A3XX_RB_WINDOW_SIZE_WIDTH(uint32_t val)
{
	return ((val) << A3XX_RB_WINDOW_SIZE_WIDTH__SHIFT) & A3XX_RB_WINDOW_SIZE_WIDTH__MASK;
}
#define A3XX_RB_WINDOW_SIZE_HEIGHT__MASK 0x0fffc000
#define A3XX_RB_WINDOW_SIZE_HEIGHT__SHIFT 14
static inline uint32_t A3XX_RB_WINDOW_SIZE_HEIGHT(uint32_t val)
{
	return ((val) << A3XX_RB_WINDOW_SIZE_HEIGHT__SHIFT) & A3XX_RB_WINDOW_SIZE_HEIGHT__MASK;
}
1980
/* HLSQ (high-level sequencer) performance-counter select registers. */
#define REG_A3XX_HLSQ_PERFCOUNTER0_SELECT 0x00000e00

#define REG_A3XX_HLSQ_PERFCOUNTER1_SELECT 0x00000e01

#define REG_A3XX_HLSQ_PERFCOUNTER2_SELECT 0x00000e02

#define REG_A3XX_HLSQ_PERFCOUNTER3_SELECT 0x00000e03

#define REG_A3XX_HLSQ_PERFCOUNTER4_SELECT 0x00000e04

#define REG_A3XX_HLSQ_PERFCOUNTER5_SELECT 0x00000e05

#define REG_A3XX_UNKNOWN_0E43 0x00000e43

/* VFD (vertex fetch/decode) performance-counter selects. */
#define REG_A3XX_VFD_PERFCOUNTER0_SELECT 0x00000e44

#define REG_A3XX_VFD_PERFCOUNTER1_SELECT 0x00000e45

/* VPC (varying/position cache) debug and perf registers. */
#define REG_A3XX_VPC_VPC_DEBUG_RAM_SEL 0x00000e61

#define REG_A3XX_VPC_VPC_DEBUG_RAM_READ 0x00000e62

#define REG_A3XX_VPC_PERFCOUNTER0_SELECT 0x00000e64

#define REG_A3XX_VPC_PERFCOUNTER1_SELECT 0x00000e65

/* UCHE (unified cache) control and perf registers. */
#define REG_A3XX_UCHE_CACHE_MODE_CONTROL_REG 0x00000e82

#define REG_A3XX_UCHE_PERFCOUNTER0_SELECT 0x00000e84

#define REG_A3XX_UCHE_PERFCOUNTER1_SELECT 0x00000e85

#define REG_A3XX_UCHE_PERFCOUNTER2_SELECT 0x00000e86

#define REG_A3XX_UCHE_PERFCOUNTER3_SELECT 0x00000e87

#define REG_A3XX_UCHE_PERFCOUNTER4_SELECT 0x00000e88

#define REG_A3XX_UCHE_PERFCOUNTER5_SELECT 0x00000e89

/* UCHE cache invalidate: INVALIDATE0 holds the range-start address,
 * INVALIDATE1 holds the range-end address plus the opcode and the
 * entire-cache flag.
 */
#define REG_A3XX_UCHE_CACHE_INVALIDATE0_REG 0x00000ea0
#define A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR__MASK 0x0fffffff
#define A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR__SHIFT 0
static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR(uint32_t val)
{
	return ((val) << A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR__SHIFT) & A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR__MASK;
}

#define REG_A3XX_UCHE_CACHE_INVALIDATE1_REG 0x00000ea1
#define A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR__MASK 0x0fffffff
#define A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR__SHIFT 0
static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR(uint32_t val)
{
	return ((val) << A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR__SHIFT) & A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR__MASK;
}
#define A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE__MASK 0x30000000
#define A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE__SHIFT 28
static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE(enum a3xx_cache_opcode val)
{
	return ((val) << A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE__SHIFT) & A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE__MASK;
}
#define A3XX_UCHE_CACHE_INVALIDATE1_REG_ENTIRE_CACHE 0x80000000

/* SP (shader processor) performance-counter selects. */
#define REG_A3XX_SP_PERFCOUNTER0_SELECT 0x00000ec4

#define REG_A3XX_SP_PERFCOUNTER1_SELECT 0x00000ec5

#define REG_A3XX_SP_PERFCOUNTER2_SELECT 0x00000ec6

#define REG_A3XX_SP_PERFCOUNTER3_SELECT 0x00000ec7

#define REG_A3XX_SP_PERFCOUNTER4_SELECT 0x00000ec8

#define REG_A3XX_SP_PERFCOUNTER5_SELECT 0x00000ec9

#define REG_A3XX_SP_PERFCOUNTER6_SELECT 0x00000eca

#define REG_A3XX_SP_PERFCOUNTER7_SELECT 0x00000ecb

#define REG_A3XX_UNKNOWN_0EE0 0x00000ee0

#define REG_A3XX_UNKNOWN_0F03 0x00000f03

/* TP (texture pipe) performance-counter selects. */
#define REG_A3XX_TP_PERFCOUNTER0_SELECT 0x00000f04

#define REG_A3XX_TP_PERFCOUNTER1_SELECT 0x00000f05

#define REG_A3XX_TP_PERFCOUNTER2_SELECT 0x00000f06

#define REG_A3XX_TP_PERFCOUNTER3_SELECT 0x00000f07

#define REG_A3XX_TP_PERFCOUNTER4_SELECT 0x00000f08

#define REG_A3XX_TP_PERFCOUNTER5_SELECT 0x00000f09
2075
/* TEX_SAMP_0/TEX_CONST_0: dword offsets WITHIN a sampler / texture state
 * object (hence the 0x0-based addresses), not GPU register addresses.
 */
#define REG_A3XX_TEX_SAMP_0 0x00000000
#define A3XX_TEX_SAMP_0_XY_MAG__MASK 0x0000000c
#define A3XX_TEX_SAMP_0_XY_MAG__SHIFT 2
static inline uint32_t A3XX_TEX_SAMP_0_XY_MAG(enum a3xx_tex_filter val)
{
	return ((val) << A3XX_TEX_SAMP_0_XY_MAG__SHIFT) & A3XX_TEX_SAMP_0_XY_MAG__MASK;
}
#define A3XX_TEX_SAMP_0_XY_MIN__MASK 0x00000030
#define A3XX_TEX_SAMP_0_XY_MIN__SHIFT 4
static inline uint32_t A3XX_TEX_SAMP_0_XY_MIN(enum a3xx_tex_filter val)
{
	return ((val) << A3XX_TEX_SAMP_0_XY_MIN__SHIFT) & A3XX_TEX_SAMP_0_XY_MIN__MASK;
}
#define A3XX_TEX_SAMP_0_WRAP_S__MASK 0x000001c0
#define A3XX_TEX_SAMP_0_WRAP_S__SHIFT 6
static inline uint32_t A3XX_TEX_SAMP_0_WRAP_S(enum a3xx_tex_clamp val)
{
	return ((val) << A3XX_TEX_SAMP_0_WRAP_S__SHIFT) & A3XX_TEX_SAMP_0_WRAP_S__MASK;
}
#define A3XX_TEX_SAMP_0_WRAP_T__MASK 0x00000e00
#define A3XX_TEX_SAMP_0_WRAP_T__SHIFT 9
static inline uint32_t A3XX_TEX_SAMP_0_WRAP_T(enum a3xx_tex_clamp val)
{
	return ((val) << A3XX_TEX_SAMP_0_WRAP_T__SHIFT) & A3XX_TEX_SAMP_0_WRAP_T__MASK;
}
#define A3XX_TEX_SAMP_0_WRAP_R__MASK 0x00007000
#define A3XX_TEX_SAMP_0_WRAP_R__SHIFT 12
static inline uint32_t A3XX_TEX_SAMP_0_WRAP_R(enum a3xx_tex_clamp val)
{
	return ((val) << A3XX_TEX_SAMP_0_WRAP_R__SHIFT) & A3XX_TEX_SAMP_0_WRAP_R__MASK;
}
#define A3XX_TEX_SAMP_0_UNNORM_COORDS 0x80000000

#define REG_A3XX_TEX_SAMP_1 0x00000001

#define REG_A3XX_TEX_CONST_0 0x00000000
#define A3XX_TEX_CONST_0_TILED 0x00000001
#define A3XX_TEX_CONST_0_SWIZ_X__MASK 0x00000070
#define A3XX_TEX_CONST_0_SWIZ_X__SHIFT 4
static inline uint32_t A3XX_TEX_CONST_0_SWIZ_X(enum a3xx_tex_swiz val)
{
	return ((val) << A3XX_TEX_CONST_0_SWIZ_X__SHIFT) & A3XX_TEX_CONST_0_SWIZ_X__MASK;
}
#define A3XX_TEX_CONST_0_SWIZ_Y__MASK 0x00000380
#define A3XX_TEX_CONST_0_SWIZ_Y__SHIFT 7
static inline uint32_t A3XX_TEX_CONST_0_SWIZ_Y(enum a3xx_tex_swiz val)
{
	return ((val) << A3XX_TEX_CONST_0_SWIZ_Y__SHIFT) & A3XX_TEX_CONST_0_SWIZ_Y__MASK;
}
#define A3XX_TEX_CONST_0_SWIZ_Z__MASK 0x00001c00
#define A3XX_TEX_CONST_0_SWIZ_Z__SHIFT 10
static inline uint32_t A3XX_TEX_CONST_0_SWIZ_Z(enum a3xx_tex_swiz val)
{
	return ((val) << A3XX_TEX_CONST_0_SWIZ_Z__SHIFT) & A3XX_TEX_CONST_0_SWIZ_Z__MASK;
}
#define A3XX_TEX_CONST_0_SWIZ_W__MASK 0x0000e000
#define A3XX_TEX_CONST_0_SWIZ_W__SHIFT 13
static inline uint32_t A3XX_TEX_CONST_0_SWIZ_W(enum a3xx_tex_swiz val)
{
	return ((val) << A3XX_TEX_CONST_0_SWIZ_W__SHIFT) & A3XX_TEX_CONST_0_SWIZ_W__MASK;
}
#define A3XX_TEX_CONST_0_FMT__MASK 0x1fc00000
#define A3XX_TEX_CONST_0_FMT__SHIFT 22
static inline uint32_t A3XX_TEX_CONST_0_FMT(enum a3xx_tex_fmt val)
{
	return ((val) << A3XX_TEX_CONST_0_FMT__SHIFT) & A3XX_TEX_CONST_0_FMT__MASK;
}
#define A3XX_TEX_CONST_0_TYPE__MASK 0xc0000000
#define A3XX_TEX_CONST_0_TYPE__SHIFT 30
static inline uint32_t A3XX_TEX_CONST_0_TYPE(enum a3xx_tex_type val)
{
	return ((val) << A3XX_TEX_CONST_0_TYPE__SHIFT) & A3XX_TEX_CONST_0_TYPE__MASK;
}
2149
/* TEX_CONST_1..3: remaining dwords of the texture state object. */
#define REG_A3XX_TEX_CONST_1 0x00000001
#define A3XX_TEX_CONST_1_HEIGHT__MASK 0x00003fff
#define A3XX_TEX_CONST_1_HEIGHT__SHIFT 0
static inline uint32_t A3XX_TEX_CONST_1_HEIGHT(uint32_t val)
{
	return ((val) << A3XX_TEX_CONST_1_HEIGHT__SHIFT) & A3XX_TEX_CONST_1_HEIGHT__MASK;
}
#define A3XX_TEX_CONST_1_WIDTH__MASK 0x0fffc000
#define A3XX_TEX_CONST_1_WIDTH__SHIFT 14
static inline uint32_t A3XX_TEX_CONST_1_WIDTH(uint32_t val)
{
	return ((val) << A3XX_TEX_CONST_1_WIDTH__SHIFT) & A3XX_TEX_CONST_1_WIDTH__MASK;
}
#define A3XX_TEX_CONST_1_FETCHSIZE__MASK 0xf0000000
#define A3XX_TEX_CONST_1_FETCHSIZE__SHIFT 28
static inline uint32_t A3XX_TEX_CONST_1_FETCHSIZE(enum a3xx_tex_fetchsize val)
{
	return ((val) << A3XX_TEX_CONST_1_FETCHSIZE__SHIFT) & A3XX_TEX_CONST_1_FETCHSIZE__MASK;
}

#define REG_A3XX_TEX_CONST_2 0x00000002
#define A3XX_TEX_CONST_2_INDX__MASK 0x000000ff
#define A3XX_TEX_CONST_2_INDX__SHIFT 0
static inline uint32_t A3XX_TEX_CONST_2_INDX(uint32_t val)
{
	return ((val) << A3XX_TEX_CONST_2_INDX__SHIFT) & A3XX_TEX_CONST_2_INDX__MASK;
}
#define A3XX_TEX_CONST_2_PITCH__MASK 0x3ffff000
#define A3XX_TEX_CONST_2_PITCH__SHIFT 12
static inline uint32_t A3XX_TEX_CONST_2_PITCH(uint32_t val)
{
	return ((val) << A3XX_TEX_CONST_2_PITCH__SHIFT) & A3XX_TEX_CONST_2_PITCH__MASK;
}
#define A3XX_TEX_CONST_2_SWAP__MASK 0xc0000000
#define A3XX_TEX_CONST_2_SWAP__SHIFT 30
static inline uint32_t A3XX_TEX_CONST_2_SWAP(enum a3xx_color_swap val)
{
	return ((val) << A3XX_TEX_CONST_2_SWAP__SHIFT) & A3XX_TEX_CONST_2_SWAP__MASK;
}

/* Fourth dword: no sub-fields described. */
#define REG_A3XX_TEX_CONST_3 0x00000003
2191
2192
2193#endif /* A3XX_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
new file mode 100644
index 000000000000..035bd13dc8bd
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -0,0 +1,502 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "a3xx_gpu.h"
19
20#define A3XX_INT0_MASK \
21 (A3XX_INT0_RBBM_AHB_ERROR | \
22 A3XX_INT0_RBBM_ATB_BUS_OVERFLOW | \
23 A3XX_INT0_CP_T0_PACKET_IN_IB | \
24 A3XX_INT0_CP_OPCODE_ERROR | \
25 A3XX_INT0_CP_RESERVED_BIT_ERROR | \
26 A3XX_INT0_CP_HW_FAULT | \
27 A3XX_INT0_CP_IB1_INT | \
28 A3XX_INT0_CP_IB2_INT | \
29 A3XX_INT0_CP_RB_INT | \
30 A3XX_INT0_CP_REG_PROTECT_FAULT | \
31 A3XX_INT0_CP_AHB_ERROR_HALT | \
32 A3XX_INT0_UCHE_OOB_ACCESS)
33
34static struct platform_device *a3xx_pdev;
35
36static void a3xx_me_init(struct msm_gpu *gpu)
37{
38 struct msm_ringbuffer *ring = gpu->rb;
39
40 OUT_PKT3(ring, CP_ME_INIT, 17);
41 OUT_RING(ring, 0x000003f7);
42 OUT_RING(ring, 0x00000000);
43 OUT_RING(ring, 0x00000000);
44 OUT_RING(ring, 0x00000000);
45 OUT_RING(ring, 0x00000080);
46 OUT_RING(ring, 0x00000100);
47 OUT_RING(ring, 0x00000180);
48 OUT_RING(ring, 0x00006600);
49 OUT_RING(ring, 0x00000150);
50 OUT_RING(ring, 0x0000014e);
51 OUT_RING(ring, 0x00000154);
52 OUT_RING(ring, 0x00000001);
53 OUT_RING(ring, 0x00000000);
54 OUT_RING(ring, 0x00000000);
55 OUT_RING(ring, 0x00000000);
56 OUT_RING(ring, 0x00000000);
57 OUT_RING(ring, 0x00000000);
58
59 gpu->funcs->flush(gpu);
60 gpu->funcs->idle(gpu);
61}
62
63static int a3xx_hw_init(struct msm_gpu *gpu)
64{
65 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
66 uint32_t *ptr, len;
67 int i, ret;
68
69 DBG("%s", gpu->name);
70
71 if (adreno_is_a305(adreno_gpu)) {
72 /* Set up 16 deep read/write request queues: */
73 gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010);
74 gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF1, 0x10101010);
75 gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x10101010);
76 gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x10101010);
77 gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
78 gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x10101010);
79 gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF1, 0x10101010);
80 /* Enable WR-REQ: */
81 gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x0000ff);
82 /* Set up round robin arbitration between both AXI ports: */
83 gpu_write(gpu, REG_A3XX_VBIF_ARB_CTL, 0x00000030);
84 /* Set up AOOO: */
85 gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003c);
86 gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003c003c);
87
88 } else if (adreno_is_a320(adreno_gpu)) {
89 /* Set up 16 deep read/write request queues: */
90 gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010);
91 gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF1, 0x10101010);
92 gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x10101010);
93 gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x10101010);
94 gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
95 gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x10101010);
96 gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF1, 0x10101010);
97 /* Enable WR-REQ: */
98 gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x0000ff);
99 /* Set up round robin arbitration between both AXI ports: */
100 gpu_write(gpu, REG_A3XX_VBIF_ARB_CTL, 0x00000030);
101 /* Set up AOOO: */
102 gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003c);
103 gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003c003c);
104 /* Enable 1K sort: */
105 gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x000000ff);
106 gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4);
107
108 } else if (adreno_is_a330(adreno_gpu)) {
109 /* Set up 16 deep read/write request queues: */
110 gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x18181818);
111 gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF1, 0x18181818);
112 gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x18181818);
113 gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x18181818);
114 gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
115 gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x18181818);
116 gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF1, 0x18181818);
117 /* Enable WR-REQ: */
118 gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x00003f);
119 /* Set up round robin arbitration between both AXI ports: */
120 gpu_write(gpu, REG_A3XX_VBIF_ARB_CTL, 0x00000030);
121 /* Set up VBIF_ROUND_ROBIN_QOS_ARB: */
122 gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0001);
123 /* Set up AOOO: */
124 gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000ffff);
125 gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0xffffffff);
126 /* Enable 1K sort: */
127 gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x0001ffff);
128 gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4);
129 /* Disable VBIF clock gating. This is to enable AXI running
130 * higher frequency than GPU:
131 */
132 gpu_write(gpu, REG_A3XX_VBIF_CLKON, 0x00000001);
133
134 } else {
135 BUG();
136 }
137
138 /* Make all blocks contribute to the GPU BUSY perf counter: */
139 gpu_write(gpu, REG_A3XX_RBBM_GPU_BUSY_MASKED, 0xffffffff);
140
141 /* Tune the hystersis counters for SP and CP idle detection: */
142 gpu_write(gpu, REG_A3XX_RBBM_SP_HYST_CNT, 0x10);
143 gpu_write(gpu, REG_A3XX_RBBM_WAIT_IDLE_CLOCKS_CTL, 0x10);
144
145 /* Enable the RBBM error reporting bits. This lets us get
146 * useful information on failure:
147 */
148 gpu_write(gpu, REG_A3XX_RBBM_AHB_CTL0, 0x00000001);
149
150 /* Enable AHB error reporting: */
151 gpu_write(gpu, REG_A3XX_RBBM_AHB_CTL1, 0xa6ffffff);
152
153 /* Turn on the power counters: */
154 gpu_write(gpu, REG_A3XX_RBBM_RBBM_CTL, 0x00030000);
155
156 /* Turn on hang detection - this spews a lot of useful information
157 * into the RBBM registers on a hang:
158 */
159 gpu_write(gpu, REG_A3XX_RBBM_INTERFACE_HANG_INT_CTL, 0x00010fff);
160
161 /* Enable 64-byte cacheline size. HW Default is 32-byte (0x000000E0): */
162 gpu_write(gpu, REG_A3XX_UCHE_CACHE_MODE_CONTROL_REG, 0x00000001);
163
164 /* Enable Clock gating: */
165 gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbfffffff);
166
167 /* Set the OCMEM base address for A330 */
168//TODO:
169// if (adreno_is_a330(adreno_gpu)) {
170// gpu_write(gpu, REG_A3XX_RB_GMEM_BASE_ADDR,
171// (unsigned int)(a3xx_gpu->ocmem_base >> 14));
172// }
173
174 /* Turn on performance counters: */
175 gpu_write(gpu, REG_A3XX_RBBM_PERFCTR_CTL, 0x01);
176
177 /* Set SP perfcounter 7 to count SP_FS_FULL_ALU_INSTRUCTIONS
178 * we will use this to augment our hang detection:
179 */
180 gpu_write(gpu, REG_A3XX_SP_PERFCOUNTER7_SELECT,
181 SP_FS_FULL_ALU_INSTRUCTIONS);
182
183 gpu_write(gpu, REG_A3XX_RBBM_INT_0_MASK, A3XX_INT0_MASK);
184
185 ret = adreno_hw_init(gpu);
186 if (ret)
187 return ret;
188
189 /* setup access protection: */
190 gpu_write(gpu, REG_A3XX_CP_PROTECT_CTRL, 0x00000007);
191
192 /* RBBM registers */
193 gpu_write(gpu, REG_A3XX_CP_PROTECT(0), 0x63000040);
194 gpu_write(gpu, REG_A3XX_CP_PROTECT(1), 0x62000080);
195 gpu_write(gpu, REG_A3XX_CP_PROTECT(2), 0x600000cc);
196 gpu_write(gpu, REG_A3XX_CP_PROTECT(3), 0x60000108);
197 gpu_write(gpu, REG_A3XX_CP_PROTECT(4), 0x64000140);
198 gpu_write(gpu, REG_A3XX_CP_PROTECT(5), 0x66000400);
199
200 /* CP registers */
201 gpu_write(gpu, REG_A3XX_CP_PROTECT(6), 0x65000700);
202 gpu_write(gpu, REG_A3XX_CP_PROTECT(7), 0x610007d8);
203 gpu_write(gpu, REG_A3XX_CP_PROTECT(8), 0x620007e0);
204 gpu_write(gpu, REG_A3XX_CP_PROTECT(9), 0x61001178);
205 gpu_write(gpu, REG_A3XX_CP_PROTECT(10), 0x64001180);
206
207 /* RB registers */
208 gpu_write(gpu, REG_A3XX_CP_PROTECT(11), 0x60003300);
209
210 /* VBIF registers */
211 gpu_write(gpu, REG_A3XX_CP_PROTECT(12), 0x6b00c000);
212
213 /* NOTE: PM4/micro-engine firmware registers look to be the same
214 * for a2xx and a3xx.. we could possibly push that part down to
215 * adreno_gpu base class. Or push both PM4 and PFP but
216 * parameterize the pfp ucode addr/data registers..
217 */
218
219 /* Load PM4: */
220 ptr = (uint32_t *)(adreno_gpu->pm4->data);
221 len = adreno_gpu->pm4->size / 4;
222 DBG("loading PM4 ucode version: %u", ptr[0]);
223
224 gpu_write(gpu, REG_AXXX_CP_DEBUG,
225 AXXX_CP_DEBUG_DYNAMIC_CLK_DISABLE |
226 AXXX_CP_DEBUG_MIU_128BIT_WRITE_ENABLE);
227 gpu_write(gpu, REG_AXXX_CP_ME_RAM_WADDR, 0);
228 for (i = 1; i < len; i++)
229 gpu_write(gpu, REG_AXXX_CP_ME_RAM_DATA, ptr[i]);
230
231 /* Load PFP: */
232 ptr = (uint32_t *)(adreno_gpu->pfp->data);
233 len = adreno_gpu->pfp->size / 4;
234 DBG("loading PFP ucode version: %u", ptr[0]);
235
236 gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_ADDR, 0);
237 for (i = 1; i < len; i++)
238 gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_DATA, ptr[i]);
239
240 /* CP ROQ queue sizes (bytes) - RB:16, ST:16, IB1:32, IB2:64 */
241 if (adreno_is_a305(adreno_gpu) || adreno_is_a320(adreno_gpu))
242 gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS,
243 AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START(2) |
244 AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START(6) |
245 AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START(14));
246
247
248 /* clear ME_HALT to start micro engine */
249 gpu_write(gpu, REG_AXXX_CP_ME_CNTL, 0);
250
251 a3xx_me_init(gpu);
252
253 return 0;
254}
255
256static void a3xx_destroy(struct msm_gpu *gpu)
257{
258 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
259 struct a3xx_gpu *a3xx_gpu = to_a3xx_gpu(adreno_gpu);
260
261 DBG("%s", gpu->name);
262
263 adreno_gpu_cleanup(adreno_gpu);
264 put_device(&a3xx_gpu->pdev->dev);
265 kfree(a3xx_gpu);
266}
267
268static void a3xx_idle(struct msm_gpu *gpu)
269{
270 unsigned long t;
271
272 /* wait for ringbuffer to drain: */
273 adreno_idle(gpu);
274
275 t = jiffies + ADRENO_IDLE_TIMEOUT;
276
277 /* then wait for GPU to finish: */
278 do {
279 uint32_t rbbm_status = gpu_read(gpu, REG_A3XX_RBBM_STATUS);
280 if (!(rbbm_status & A3XX_RBBM_STATUS_GPU_BUSY))
281 return;
282 } while(time_before(jiffies, t));
283
284 DRM_ERROR("timeout waiting for %s to idle!\n", gpu->name);
285
286 /* TODO maybe we need to reset GPU here to recover from hang? */
287}
288
289static irqreturn_t a3xx_irq(struct msm_gpu *gpu)
290{
291 uint32_t status;
292
293 status = gpu_read(gpu, REG_A3XX_RBBM_INT_0_STATUS);
294 DBG("%s: %08x", gpu->name, status);
295
296 // TODO
297
298 gpu_write(gpu, REG_A3XX_RBBM_INT_CLEAR_CMD, status);
299
300 msm_gpu_retire(gpu);
301
302 return IRQ_HANDLED;
303}
304
305#ifdef CONFIG_DEBUG_FS
/* Debugfs register-dump ranges, consumed by a3xx_show(): consecutive
 * pairs of (start, end) dword offsets, both ends inclusive. */
static const unsigned int a3xx_registers[] = {
	0x0000, 0x0002, 0x0010, 0x0012, 0x0018, 0x0018, 0x0020, 0x0027,
	0x0029, 0x002b, 0x002e, 0x0033, 0x0040, 0x0042, 0x0050, 0x005c,
	0x0060, 0x006c, 0x0080, 0x0082, 0x0084, 0x0088, 0x0090, 0x00e5,
	0x00ea, 0x00ed, 0x0100, 0x0100, 0x0110, 0x0123, 0x01c0, 0x01c1,
	0x01c3, 0x01c5, 0x01c7, 0x01c7, 0x01d5, 0x01d9, 0x01dc, 0x01dd,
	0x01ea, 0x01ea, 0x01ee, 0x01f1, 0x01f5, 0x01f5, 0x01fc, 0x01ff,
	0x0440, 0x0440, 0x0443, 0x0443, 0x0445, 0x0445, 0x044d, 0x044f,
	0x0452, 0x0452, 0x0454, 0x046f, 0x047c, 0x047c, 0x047f, 0x047f,
	0x0578, 0x057f, 0x0600, 0x0602, 0x0605, 0x0607, 0x060a, 0x060e,
	0x0612, 0x0614, 0x0c01, 0x0c02, 0x0c06, 0x0c1d, 0x0c3d, 0x0c3f,
	0x0c48, 0x0c4b, 0x0c80, 0x0c80, 0x0c88, 0x0c8b, 0x0ca0, 0x0cb7,
	0x0cc0, 0x0cc1, 0x0cc6, 0x0cc7, 0x0ce4, 0x0ce5, 0x0e00, 0x0e05,
	0x0e0c, 0x0e0c, 0x0e22, 0x0e23, 0x0e41, 0x0e45, 0x0e64, 0x0e65,
	0x0e80, 0x0e82, 0x0e84, 0x0e89, 0x0ea0, 0x0ea1, 0x0ea4, 0x0ea7,
	0x0ec4, 0x0ecb, 0x0ee0, 0x0ee0, 0x0f00, 0x0f01, 0x0f03, 0x0f09,
	0x2040, 0x2040, 0x2044, 0x2044, 0x2048, 0x204d, 0x2068, 0x2069,
	0x206c, 0x206d, 0x2070, 0x2070, 0x2072, 0x2072, 0x2074, 0x2075,
	0x2079, 0x207a, 0x20c0, 0x20d3, 0x20e4, 0x20ef, 0x2100, 0x2109,
	0x210c, 0x210c, 0x210e, 0x210e, 0x2110, 0x2111, 0x2114, 0x2115,
	0x21e4, 0x21e4, 0x21ea, 0x21ea, 0x21ec, 0x21ed, 0x21f0, 0x21f0,
	0x2200, 0x2212, 0x2214, 0x2217, 0x221a, 0x221a, 0x2240, 0x227e,
	0x2280, 0x228b, 0x22c0, 0x22c0, 0x22c4, 0x22ce, 0x22d0, 0x22d8,
	0x22df, 0x22e6, 0x22e8, 0x22e9, 0x22ec, 0x22ec, 0x22f0, 0x22f7,
	0x22ff, 0x22ff, 0x2340, 0x2343, 0x2348, 0x2349, 0x2350, 0x2356,
	0x2360, 0x2360, 0x2440, 0x2440, 0x2444, 0x2444, 0x2448, 0x244d,
	0x2468, 0x2469, 0x246c, 0x246d, 0x2470, 0x2470, 0x2472, 0x2472,
	0x2474, 0x2475, 0x2479, 0x247a, 0x24c0, 0x24d3, 0x24e4, 0x24ef,
	0x2500, 0x2509, 0x250c, 0x250c, 0x250e, 0x250e, 0x2510, 0x2511,
	0x2514, 0x2515, 0x25e4, 0x25e4, 0x25ea, 0x25ea, 0x25ec, 0x25ed,
	0x25f0, 0x25f0, 0x2600, 0x2612, 0x2614, 0x2617, 0x261a, 0x261a,
	0x2640, 0x267e, 0x2680, 0x268b, 0x26c0, 0x26c0, 0x26c4, 0x26ce,
	0x26d0, 0x26d8, 0x26df, 0x26e6, 0x26e8, 0x26e9, 0x26ec, 0x26ec,
	0x26f0, 0x26f7, 0x26ff, 0x26ff, 0x2740, 0x2743, 0x2748, 0x2749,
	0x2750, 0x2756, 0x2760, 0x2760, 0x300c, 0x300e, 0x301c, 0x301d,
	0x302a, 0x302a, 0x302c, 0x302d, 0x3030, 0x3031, 0x3034, 0x3036,
	0x303c, 0x303c, 0x305e, 0x305f,
};
344
345static void a3xx_show(struct msm_gpu *gpu, struct seq_file *m)
346{
347 int i;
348
349 adreno_show(gpu, m);
350 seq_printf(m, "status: %08x\n",
351 gpu_read(gpu, REG_A3XX_RBBM_STATUS));
352
353 /* dump these out in a form that can be parsed by demsm: */
354 seq_printf(m, "IO:region %s 00000000 00020000\n", gpu->name);
355 for (i = 0; i < ARRAY_SIZE(a3xx_registers); i += 2) {
356 uint32_t start = a3xx_registers[i];
357 uint32_t end = a3xx_registers[i+1];
358 uint32_t addr;
359
360 for (addr = start; addr <= end; addr++) {
361 uint32_t val = gpu_read(gpu, addr);
362 seq_printf(m, "IO:R %08x %08x\n", addr<<2, val);
363 }
364 }
365}
366#endif
367
/* a3xx vtable: only hw_init/idle/irq/destroy (and debugfs show) are
 * chip-specific; everything else delegates to the shared adreno/msm
 * implementations. */
static const struct adreno_gpu_funcs funcs = {
	.base = {
		.get_param = adreno_get_param,
		.hw_init = a3xx_hw_init,
		.pm_suspend = msm_gpu_pm_suspend,
		.pm_resume = msm_gpu_pm_resume,
		.recover = adreno_recover,
		.last_fence = adreno_last_fence,
		.submit = adreno_submit,
		.flush = adreno_flush,
		.idle = a3xx_idle,
		.irq = a3xx_irq,
		.destroy = a3xx_destroy,
#ifdef CONFIG_DEBUG_FS
		.show = a3xx_show,
#endif
	},
};
386
387struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
388{
389 struct a3xx_gpu *a3xx_gpu = NULL;
390 struct msm_gpu *gpu;
391 struct platform_device *pdev = a3xx_pdev;
392 struct adreno_platform_config *config;
393 int ret;
394
395 if (!pdev) {
396 dev_err(dev->dev, "no a3xx device\n");
397 ret = -ENXIO;
398 goto fail;
399 }
400
401 config = pdev->dev.platform_data;
402
403 a3xx_gpu = kzalloc(sizeof(*a3xx_gpu), GFP_KERNEL);
404 if (!a3xx_gpu) {
405 ret = -ENOMEM;
406 goto fail;
407 }
408
409 gpu = &a3xx_gpu->base.base;
410
411 get_device(&pdev->dev);
412 a3xx_gpu->pdev = pdev;
413
414 gpu->fast_rate = config->fast_rate;
415 gpu->slow_rate = config->slow_rate;
416 gpu->bus_freq = config->bus_freq;
417
418 DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u",
419 gpu->fast_rate, gpu->slow_rate, gpu->bus_freq);
420
421 ret = adreno_gpu_init(dev, pdev, &a3xx_gpu->base,
422 &funcs, config->rev);
423 if (ret)
424 goto fail;
425
426 return &a3xx_gpu->base.base;
427
428fail:
429 if (a3xx_gpu)
430 a3xx_destroy(&a3xx_gpu->base.base);
431
432 return ERR_PTR(ret);
433}
434
435/*
436 * The a3xx device:
437 */
438
439static int a3xx_probe(struct platform_device *pdev)
440{
441 static struct adreno_platform_config config = {};
442#ifdef CONFIG_OF
443 /* TODO */
444#else
445 uint32_t version = socinfo_get_version();
446 if (cpu_is_apq8064ab()) {
447 config.fast_rate = 450000000;
448 config.slow_rate = 27000000;
449 config.bus_freq = 4;
450 config.rev = ADRENO_REV(3, 2, 1, 0);
451 } else if (cpu_is_apq8064() || cpu_is_msm8960ab()) {
452 config.fast_rate = 400000000;
453 config.slow_rate = 27000000;
454 config.bus_freq = 4;
455
456 if (SOCINFO_VERSION_MAJOR(version) == 2)
457 config.rev = ADRENO_REV(3, 2, 0, 2);
458 else if ((SOCINFO_VERSION_MAJOR(version) == 1) &&
459 (SOCINFO_VERSION_MINOR(version) == 1))
460 config.rev = ADRENO_REV(3, 2, 0, 1);
461 else
462 config.rev = ADRENO_REV(3, 2, 0, 0);
463
464 } else if (cpu_is_msm8930()) {
465 config.fast_rate = 400000000;
466 config.slow_rate = 27000000;
467 config.bus_freq = 3;
468
469 if ((SOCINFO_VERSION_MAJOR(version) == 1) &&
470 (SOCINFO_VERSION_MINOR(version) == 2))
471 config.rev = ADRENO_REV(3, 0, 5, 2);
472 else
473 config.rev = ADRENO_REV(3, 0, 5, 0);
474
475 }
476#endif
477 pdev->dev.platform_data = &config;
478 a3xx_pdev = pdev;
479 return 0;
480}
481
/* Platform remove: forget the stashed device so a later a3xx_gpu_init()
 * fails cleanly with -ENXIO instead of using a dead pointer. */
static int a3xx_remove(struct platform_device *pdev)
{
	a3xx_pdev = NULL;
	return 0;
}
487
/* Matches the downstream "kgsl-3d0" platform device name for the GPU node. */
static struct platform_driver a3xx_driver = {
	.probe = a3xx_probe,
	.remove = a3xx_remove,
	.driver.name = "kgsl-3d0",
};
493
/* Register the a3xx platform driver (driver-init path; caller not
 * visible in this file). */
void __init a3xx_register(void)
{
	platform_driver_register(&a3xx_driver);
}
498
/* Unregister the a3xx platform driver on module exit. */
void __exit a3xx_unregister(void)
{
	platform_driver_unregister(&a3xx_driver);
}
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.h b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
new file mode 100644
index 000000000000..32c398c2d00a
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
@@ -0,0 +1,30 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __A3XX_GPU_H__
19#define __A3XX_GPU_H__
20
21#include "adreno_gpu.h"
22#include "a3xx.xml.h"
23
/* a3xx-specific GPU object: wraps the generic adreno_gpu state and keeps
 * the backing platform device (a device reference is held while the GPU
 * object exists -- see a3xx_gpu.c). */
struct a3xx_gpu {
	struct adreno_gpu base;
	struct platform_device *pdev;
};
/* Downcast an adreno_gpu to its a3xx container; only valid for a3xx. */
#define to_a3xx_gpu(x) container_of(x, struct a3xx_gpu, base)
29
30#endif /* __A3XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
new file mode 100644
index 000000000000..61979d458ac0
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -0,0 +1,432 @@
1#ifndef ADRENO_COMMON_XML
2#define ADRENO_COMMON_XML
3
4/* Autogenerated file, DO NOT EDIT manually!
5
6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng
8git clone git://0x04.net/rules-ng-ng
9
10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 30005 bytes, from 2013-07-19 21:30:48)
14- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36)
15- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9712 bytes, from 2013-05-26 15:22:37)
16- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51415 bytes, from 2013-08-03 14:26:05)
17
18Copyright (C) 2013 by the following authors:
19- Rob Clark <robdclark@gmail.com> (robclark)
20
21Permission is hereby granted, free of charge, to any person obtaining
22a copy of this software and associated documentation files (the
23"Software"), to deal in the Software without restriction, including
24without limitation the rights to use, copy, modify, merge, publish,
25distribute, sublicense, and/or sell copies of the Software, and to
26permit persons to whom the Software is furnished to do so, subject to
27the following conditions:
28
29The above copyright notice and this permission notice (including the
30next paragraph) shall be included in all copies or substantial
31portions of the Software.
32
33THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
34EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
35MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
36IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
37LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
38OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
39WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
40*/
41
42
43enum adreno_pa_su_sc_draw {
44 PC_DRAW_POINTS = 0,
45 PC_DRAW_LINES = 1,
46 PC_DRAW_TRIANGLES = 2,
47};
48
49enum adreno_compare_func {
50 FUNC_NEVER = 0,
51 FUNC_LESS = 1,
52 FUNC_EQUAL = 2,
53 FUNC_LEQUAL = 3,
54 FUNC_GREATER = 4,
55 FUNC_NOTEQUAL = 5,
56 FUNC_GEQUAL = 6,
57 FUNC_ALWAYS = 7,
58};
59
60enum adreno_stencil_op {
61 STENCIL_KEEP = 0,
62 STENCIL_ZERO = 1,
63 STENCIL_REPLACE = 2,
64 STENCIL_INCR_CLAMP = 3,
65 STENCIL_DECR_CLAMP = 4,
66 STENCIL_INVERT = 5,
67 STENCIL_INCR_WRAP = 6,
68 STENCIL_DECR_WRAP = 7,
69};
70
71enum adreno_rb_blend_factor {
72 FACTOR_ZERO = 0,
73 FACTOR_ONE = 1,
74 FACTOR_SRC_COLOR = 4,
75 FACTOR_ONE_MINUS_SRC_COLOR = 5,
76 FACTOR_SRC_ALPHA = 6,
77 FACTOR_ONE_MINUS_SRC_ALPHA = 7,
78 FACTOR_DST_COLOR = 8,
79 FACTOR_ONE_MINUS_DST_COLOR = 9,
80 FACTOR_DST_ALPHA = 10,
81 FACTOR_ONE_MINUS_DST_ALPHA = 11,
82 FACTOR_CONSTANT_COLOR = 12,
83 FACTOR_ONE_MINUS_CONSTANT_COLOR = 13,
84 FACTOR_CONSTANT_ALPHA = 14,
85 FACTOR_ONE_MINUS_CONSTANT_ALPHA = 15,
86 FACTOR_SRC_ALPHA_SATURATE = 16,
87};
88
89enum adreno_rb_blend_opcode {
90 BLEND_DST_PLUS_SRC = 0,
91 BLEND_SRC_MINUS_DST = 1,
92 BLEND_MIN_DST_SRC = 2,
93 BLEND_MAX_DST_SRC = 3,
94 BLEND_DST_MINUS_SRC = 4,
95 BLEND_DST_PLUS_SRC_BIAS = 5,
96};
97
98enum adreno_rb_surface_endian {
99 ENDIAN_NONE = 0,
100 ENDIAN_8IN16 = 1,
101 ENDIAN_8IN32 = 2,
102 ENDIAN_16IN32 = 3,
103 ENDIAN_8IN64 = 4,
104 ENDIAN_8IN128 = 5,
105};
106
107enum adreno_rb_dither_mode {
108 DITHER_DISABLE = 0,
109 DITHER_ALWAYS = 1,
110 DITHER_IF_ALPHA_OFF = 2,
111};
112
113enum adreno_rb_depth_format {
114 DEPTHX_16 = 0,
115 DEPTHX_24_8 = 1,
116};
117
118enum adreno_mmu_clnt_beh {
119 BEH_NEVR = 0,
120 BEH_TRAN_RNG = 1,
121 BEH_TRAN_FLT = 2,
122};
123
124#define REG_AXXX_MH_MMU_CONFIG 0x00000040
125#define AXXX_MH_MMU_CONFIG_MMU_ENABLE 0x00000001
126#define AXXX_MH_MMU_CONFIG_SPLIT_MODE_ENABLE 0x00000002
127#define AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__MASK 0x00000030
128#define AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__SHIFT 4
129static inline uint32_t AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
130{
131 return ((val) << AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__MASK;
132}
133#define AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__MASK 0x000000c0
134#define AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__SHIFT 6
135static inline uint32_t AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
136{
137 return ((val) << AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__MASK;
138}
139#define AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__MASK 0x00000300
140#define AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__SHIFT 8
141static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
142{
143 return ((val) << AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__MASK;
144}
145#define AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__MASK 0x00000c00
146#define AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__SHIFT 10
147static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
148{
149 return ((val) << AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__MASK;
150}
151#define AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__MASK 0x00003000
152#define AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__SHIFT 12
153static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
154{
155 return ((val) << AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__MASK;
156}
157#define AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__MASK 0x0000c000
158#define AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__SHIFT 14
159static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
160{
161 return ((val) << AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__MASK;
162}
163#define AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__MASK 0x00030000
164#define AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__SHIFT 16
165static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
166{
167 return ((val) << AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__MASK;
168}
169#define AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__MASK 0x000c0000
170#define AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__SHIFT 18
171static inline uint32_t AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
172{
173 return ((val) << AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__MASK;
174}
175#define AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__MASK 0x00300000
176#define AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__SHIFT 20
177static inline uint32_t AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
178{
179 return ((val) << AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__MASK;
180}
181#define AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__MASK 0x00c00000
182#define AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__SHIFT 22
183static inline uint32_t AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
184{
185 return ((val) << AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__MASK;
186}
187#define AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__MASK 0x03000000
188#define AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__SHIFT 24
189static inline uint32_t AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
190{
191 return ((val) << AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__MASK;
192}
193
194#define REG_AXXX_MH_MMU_VA_RANGE 0x00000041
195
196#define REG_AXXX_MH_MMU_PT_BASE 0x00000042
197
198#define REG_AXXX_MH_MMU_PAGE_FAULT 0x00000043
199
200#define REG_AXXX_MH_MMU_TRAN_ERROR 0x00000044
201
202#define REG_AXXX_MH_MMU_INVALIDATE 0x00000045
203
204#define REG_AXXX_MH_MMU_MPU_BASE 0x00000046
205
206#define REG_AXXX_MH_MMU_MPU_END 0x00000047
207
208#define REG_AXXX_CP_RB_BASE 0x000001c0
209
210#define REG_AXXX_CP_RB_CNTL 0x000001c1
211#define AXXX_CP_RB_CNTL_BUFSZ__MASK 0x0000003f
212#define AXXX_CP_RB_CNTL_BUFSZ__SHIFT 0
213static inline uint32_t AXXX_CP_RB_CNTL_BUFSZ(uint32_t val)
214{
215 return ((val) << AXXX_CP_RB_CNTL_BUFSZ__SHIFT) & AXXX_CP_RB_CNTL_BUFSZ__MASK;
216}
217#define AXXX_CP_RB_CNTL_BLKSZ__MASK 0x00003f00
218#define AXXX_CP_RB_CNTL_BLKSZ__SHIFT 8
219static inline uint32_t AXXX_CP_RB_CNTL_BLKSZ(uint32_t val)
220{
221 return ((val) << AXXX_CP_RB_CNTL_BLKSZ__SHIFT) & AXXX_CP_RB_CNTL_BLKSZ__MASK;
222}
223#define AXXX_CP_RB_CNTL_BUF_SWAP__MASK 0x00030000
224#define AXXX_CP_RB_CNTL_BUF_SWAP__SHIFT 16
225static inline uint32_t AXXX_CP_RB_CNTL_BUF_SWAP(uint32_t val)
226{
227 return ((val) << AXXX_CP_RB_CNTL_BUF_SWAP__SHIFT) & AXXX_CP_RB_CNTL_BUF_SWAP__MASK;
228}
229#define AXXX_CP_RB_CNTL_POLL_EN 0x00100000
230#define AXXX_CP_RB_CNTL_NO_UPDATE 0x08000000
231#define AXXX_CP_RB_CNTL_RPTR_WR_EN 0x80000000
232
233#define REG_AXXX_CP_RB_RPTR_ADDR 0x000001c3
234#define AXXX_CP_RB_RPTR_ADDR_SWAP__MASK 0x00000003
235#define AXXX_CP_RB_RPTR_ADDR_SWAP__SHIFT 0
236static inline uint32_t AXXX_CP_RB_RPTR_ADDR_SWAP(uint32_t val)
237{
238 return ((val) << AXXX_CP_RB_RPTR_ADDR_SWAP__SHIFT) & AXXX_CP_RB_RPTR_ADDR_SWAP__MASK;
239}
240#define AXXX_CP_RB_RPTR_ADDR_ADDR__MASK 0xfffffffc
241#define AXXX_CP_RB_RPTR_ADDR_ADDR__SHIFT 2
242static inline uint32_t AXXX_CP_RB_RPTR_ADDR_ADDR(uint32_t val)
243{
244 return ((val >> 2) << AXXX_CP_RB_RPTR_ADDR_ADDR__SHIFT) & AXXX_CP_RB_RPTR_ADDR_ADDR__MASK;
245}
246
247#define REG_AXXX_CP_RB_RPTR 0x000001c4
248
249#define REG_AXXX_CP_RB_WPTR 0x000001c5
250
251#define REG_AXXX_CP_RB_WPTR_DELAY 0x000001c6
252
253#define REG_AXXX_CP_RB_RPTR_WR 0x000001c7
254
255#define REG_AXXX_CP_RB_WPTR_BASE 0x000001c8
256
257#define REG_AXXX_CP_QUEUE_THRESHOLDS 0x000001d5
258#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START__MASK 0x0000000f
259#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START__SHIFT 0
260static inline uint32_t AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START(uint32_t val)
261{
262 return ((val) << AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START__SHIFT) & AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START__MASK;
263}
264#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START__MASK 0x00000f00
265#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START__SHIFT 8
266static inline uint32_t AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START(uint32_t val)
267{
268 return ((val) << AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START__SHIFT) & AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START__MASK;
269}
270#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START__MASK 0x000f0000
271#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START__SHIFT 16
272static inline uint32_t AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START(uint32_t val)
273{
274 return ((val) << AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START__SHIFT) & AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START__MASK;
275}
276
277#define REG_AXXX_CP_MEQ_THRESHOLDS 0x000001d6
278
279#define REG_AXXX_CP_CSQ_AVAIL 0x000001d7
280#define AXXX_CP_CSQ_AVAIL_RING__MASK 0x0000007f
281#define AXXX_CP_CSQ_AVAIL_RING__SHIFT 0
282static inline uint32_t AXXX_CP_CSQ_AVAIL_RING(uint32_t val)
283{
284 return ((val) << AXXX_CP_CSQ_AVAIL_RING__SHIFT) & AXXX_CP_CSQ_AVAIL_RING__MASK;
285}
286#define AXXX_CP_CSQ_AVAIL_IB1__MASK 0x00007f00
287#define AXXX_CP_CSQ_AVAIL_IB1__SHIFT 8
288static inline uint32_t AXXX_CP_CSQ_AVAIL_IB1(uint32_t val)
289{
290 return ((val) << AXXX_CP_CSQ_AVAIL_IB1__SHIFT) & AXXX_CP_CSQ_AVAIL_IB1__MASK;
291}
292#define AXXX_CP_CSQ_AVAIL_IB2__MASK 0x007f0000
293#define AXXX_CP_CSQ_AVAIL_IB2__SHIFT 16
294static inline uint32_t AXXX_CP_CSQ_AVAIL_IB2(uint32_t val)
295{
296 return ((val) << AXXX_CP_CSQ_AVAIL_IB2__SHIFT) & AXXX_CP_CSQ_AVAIL_IB2__MASK;
297}
298
299#define REG_AXXX_CP_STQ_AVAIL 0x000001d8
300#define AXXX_CP_STQ_AVAIL_ST__MASK 0x0000007f
301#define AXXX_CP_STQ_AVAIL_ST__SHIFT 0
302static inline uint32_t AXXX_CP_STQ_AVAIL_ST(uint32_t val)
303{
304 return ((val) << AXXX_CP_STQ_AVAIL_ST__SHIFT) & AXXX_CP_STQ_AVAIL_ST__MASK;
305}
306
307#define REG_AXXX_CP_MEQ_AVAIL 0x000001d9
308#define AXXX_CP_MEQ_AVAIL_MEQ__MASK 0x0000001f
309#define AXXX_CP_MEQ_AVAIL_MEQ__SHIFT 0
310static inline uint32_t AXXX_CP_MEQ_AVAIL_MEQ(uint32_t val)
311{
312 return ((val) << AXXX_CP_MEQ_AVAIL_MEQ__SHIFT) & AXXX_CP_MEQ_AVAIL_MEQ__MASK;
313}
314
315#define REG_AXXX_SCRATCH_UMSK 0x000001dc
316#define AXXX_SCRATCH_UMSK_UMSK__MASK 0x000000ff
317#define AXXX_SCRATCH_UMSK_UMSK__SHIFT 0
318static inline uint32_t AXXX_SCRATCH_UMSK_UMSK(uint32_t val)
319{
320 return ((val) << AXXX_SCRATCH_UMSK_UMSK__SHIFT) & AXXX_SCRATCH_UMSK_UMSK__MASK;
321}
322#define AXXX_SCRATCH_UMSK_SWAP__MASK 0x00030000
323#define AXXX_SCRATCH_UMSK_SWAP__SHIFT 16
324static inline uint32_t AXXX_SCRATCH_UMSK_SWAP(uint32_t val)
325{
326 return ((val) << AXXX_SCRATCH_UMSK_SWAP__SHIFT) & AXXX_SCRATCH_UMSK_SWAP__MASK;
327}
328
329#define REG_AXXX_SCRATCH_ADDR 0x000001dd
330
331#define REG_AXXX_CP_ME_RDADDR 0x000001ea
332
333#define REG_AXXX_CP_STATE_DEBUG_INDEX 0x000001ec
334
335#define REG_AXXX_CP_STATE_DEBUG_DATA 0x000001ed
336
337#define REG_AXXX_CP_INT_CNTL 0x000001f2
338
339#define REG_AXXX_CP_INT_STATUS 0x000001f3
340
341#define REG_AXXX_CP_INT_ACK 0x000001f4
342
343#define REG_AXXX_CP_ME_CNTL 0x000001f6
344
345#define REG_AXXX_CP_ME_STATUS 0x000001f7
346
347#define REG_AXXX_CP_ME_RAM_WADDR 0x000001f8
348
349#define REG_AXXX_CP_ME_RAM_RADDR 0x000001f9
350
351#define REG_AXXX_CP_ME_RAM_DATA 0x000001fa
352
353#define REG_AXXX_CP_DEBUG 0x000001fc
354#define AXXX_CP_DEBUG_PREDICATE_DISABLE 0x00800000
355#define AXXX_CP_DEBUG_PROG_END_PTR_ENABLE 0x01000000
356#define AXXX_CP_DEBUG_MIU_128BIT_WRITE_ENABLE 0x02000000
357#define AXXX_CP_DEBUG_PREFETCH_PASS_NOPS 0x04000000
358#define AXXX_CP_DEBUG_DYNAMIC_CLK_DISABLE 0x08000000
359#define AXXX_CP_DEBUG_PREFETCH_MATCH_DISABLE 0x10000000
360#define AXXX_CP_DEBUG_SIMPLE_ME_FLOW_CONTROL 0x40000000
361#define AXXX_CP_DEBUG_MIU_WRITE_PACK_DISABLE 0x80000000
362
363#define REG_AXXX_CP_CSQ_RB_STAT 0x000001fd
364#define AXXX_CP_CSQ_RB_STAT_RPTR__MASK 0x0000007f
365#define AXXX_CP_CSQ_RB_STAT_RPTR__SHIFT 0
366static inline uint32_t AXXX_CP_CSQ_RB_STAT_RPTR(uint32_t val)
367{
368 return ((val) << AXXX_CP_CSQ_RB_STAT_RPTR__SHIFT) & AXXX_CP_CSQ_RB_STAT_RPTR__MASK;
369}
370#define AXXX_CP_CSQ_RB_STAT_WPTR__MASK 0x007f0000
371#define AXXX_CP_CSQ_RB_STAT_WPTR__SHIFT 16
372static inline uint32_t AXXX_CP_CSQ_RB_STAT_WPTR(uint32_t val)
373{
374 return ((val) << AXXX_CP_CSQ_RB_STAT_WPTR__SHIFT) & AXXX_CP_CSQ_RB_STAT_WPTR__MASK;
375}
376
377#define REG_AXXX_CP_CSQ_IB1_STAT 0x000001fe
378#define AXXX_CP_CSQ_IB1_STAT_RPTR__MASK 0x0000007f
379#define AXXX_CP_CSQ_IB1_STAT_RPTR__SHIFT 0
380static inline uint32_t AXXX_CP_CSQ_IB1_STAT_RPTR(uint32_t val)
381{
382 return ((val) << AXXX_CP_CSQ_IB1_STAT_RPTR__SHIFT) & AXXX_CP_CSQ_IB1_STAT_RPTR__MASK;
383}
384#define AXXX_CP_CSQ_IB1_STAT_WPTR__MASK 0x007f0000
385#define AXXX_CP_CSQ_IB1_STAT_WPTR__SHIFT 16
386static inline uint32_t AXXX_CP_CSQ_IB1_STAT_WPTR(uint32_t val)
387{
388 return ((val) << AXXX_CP_CSQ_IB1_STAT_WPTR__SHIFT) & AXXX_CP_CSQ_IB1_STAT_WPTR__MASK;
389}
390
391#define REG_AXXX_CP_CSQ_IB2_STAT 0x000001ff
392#define AXXX_CP_CSQ_IB2_STAT_RPTR__MASK 0x0000007f
393#define AXXX_CP_CSQ_IB2_STAT_RPTR__SHIFT 0
394static inline uint32_t AXXX_CP_CSQ_IB2_STAT_RPTR(uint32_t val)
395{
396 return ((val) << AXXX_CP_CSQ_IB2_STAT_RPTR__SHIFT) & AXXX_CP_CSQ_IB2_STAT_RPTR__MASK;
397}
398#define AXXX_CP_CSQ_IB2_STAT_WPTR__MASK 0x007f0000
399#define AXXX_CP_CSQ_IB2_STAT_WPTR__SHIFT 16
400static inline uint32_t AXXX_CP_CSQ_IB2_STAT_WPTR(uint32_t val)
401{
402 return ((val) << AXXX_CP_CSQ_IB2_STAT_WPTR__SHIFT) & AXXX_CP_CSQ_IB2_STAT_WPTR__MASK;
403}
404
405#define REG_AXXX_CP_SCRATCH_REG0 0x00000578
406
407#define REG_AXXX_CP_SCRATCH_REG1 0x00000579
408
409#define REG_AXXX_CP_SCRATCH_REG2 0x0000057a
410
411#define REG_AXXX_CP_SCRATCH_REG3 0x0000057b
412
413#define REG_AXXX_CP_SCRATCH_REG4 0x0000057c
414
415#define REG_AXXX_CP_SCRATCH_REG5 0x0000057d
416
417#define REG_AXXX_CP_SCRATCH_REG6 0x0000057e
418
419#define REG_AXXX_CP_SCRATCH_REG7 0x0000057f
420
421#define REG_AXXX_CP_ME_CF_EVENT_SRC 0x0000060a
422
423#define REG_AXXX_CP_ME_CF_EVENT_ADDR 0x0000060b
424
425#define REG_AXXX_CP_ME_CF_EVENT_DATA 0x0000060c
426
427#define REG_AXXX_CP_ME_NRT_ADDR 0x0000060d
428
429#define REG_AXXX_CP_ME_NRT_DATA 0x0000060e
430
431
432#endif /* ADRENO_COMMON_XML */
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
new file mode 100644
index 000000000000..a60584763b61
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -0,0 +1,370 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "adreno_gpu.h"
19#include "msm_gem.h"
20
/* Table entry describing one supported Adreno GPU revision. */
struct adreno_info {
	struct adreno_rev rev;		/* hw revision matched against (fields may be ANY_ID) */
	uint32_t revn;			/* numeric revision name, eg. 320 for A320 */
	const char *name;		/* human readable name */
	const char *pm4fw, *pfpfw;	/* PM4 and PFP microcode filenames */
	uint32_t gmem;			/* size of on-chip GMEM */
};

/* wildcard value for any field of adreno_rev in a gpulist entry: */
#define ANY_ID 0xff

/* Known GPUs, matched against the probed revision in adreno_gpu_init(): */
static const struct adreno_info gpulist[] = {
	{
		.rev   = ADRENO_REV(3, 0, 5, ANY_ID),
		.revn  = 305,
		.name  = "A305",
		.pm4fw = "a300_pm4.fw",
		.pfpfw = "a300_pfp.fw",
		.gmem  = SZ_256K,
	}, {
		.rev   = ADRENO_REV(3, 2, ANY_ID, ANY_ID),
		.revn  = 320,
		.name  = "A320",
		.pm4fw = "a300_pm4.fw",
		.pfpfw = "a300_pfp.fw",
		.gmem  = SZ_512K,
	}, {
		.rev   = ADRENO_REV(3, 3, 0, 0),
		.revn  = 330,
		.name  = "A330",
		.pm4fw = "a330_pm4.fw",
		.pfpfw = "a330_pfp.fw",
		.gmem  = SZ_1M,
	},
};

/* ringbuffer size and CP prefetch block size (dwords): */
#define RB_SIZE    SZ_32K
#define RB_BLKSIZE 16
58
59int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
60{
61 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
62
63 switch (param) {
64 case MSM_PARAM_GPU_ID:
65 *value = adreno_gpu->info->revn;
66 return 0;
67 case MSM_PARAM_GMEM_SIZE:
68 *value = adreno_gpu->info->gmem;
69 return 0;
70 default:
71 DBG("%s: invalid param: %u", gpu->name, param);
72 return -EINVAL;
73 }
74}
75
/* iova (GPU address) of one member of the memptrs writeback buffer, for
 * programming into hw registers / CP packets: */
#define rbmemptr(adreno_gpu, member)  \
	((adreno_gpu)->memptrs_iova + offsetof(struct adreno_rbmemptrs, member))
78
/* Generation-independent part of hw init: program the CP's view of the
 * ringbuffer and the memptrs writeback addresses.  Called from the
 * per-generation hw_init after power/clocks are up.  Always returns 0.
 */
int adreno_hw_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);

	DBG("%s", gpu->name);

	/* Setup REG_CP_RB_CNTL: */
	gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
			/* size is log2(quad-words): */
			AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) |
			AXXX_CP_RB_CNTL_BLKSZ(RB_BLKSIZE));

	/* Setup ringbuffer address, and where the CP writes back its
	 * read pointer (memptrs->rptr, polled by adreno_idle/wait_ring): */
	gpu_write(gpu, REG_AXXX_CP_RB_BASE, gpu->rb_iova);
	gpu_write(gpu, REG_AXXX_CP_RB_RPTR_ADDR, rbmemptr(adreno_gpu, rptr));

	/* Setup scratch/timestamp: fence seqno is written back here
	 * (see the SCRATCH_REG2 write in adreno_submit()): */
	gpu_write(gpu, REG_AXXX_SCRATCH_ADDR, rbmemptr(adreno_gpu, fence));

	/* NOTE(review): 0x1 presumably enables writeback for scratch reg 0
	 * only -- confirm against hw docs. */
	gpu_write(gpu, REG_AXXX_SCRATCH_UMSK, 0x1);

	return 0;
}
102
/* Current write offset into the ringbuffer, as an element offset from the
 * ring start.  NOTE(review): assumes ring->cur/->start are dword pointers
 * so the difference is in dwords -- confirm in msm_ringbuffer. */
static uint32_t get_wptr(struct msm_ringbuffer *ring)
{
	return ring->cur - ring->start;
}
107
108uint32_t adreno_last_fence(struct msm_gpu *gpu)
109{
110 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
111 return adreno_gpu->memptrs->fence;
112}
113
/* Attempt to recover from a GPU hang: power-cycle, reset the ringbuffer,
 * and discard in-flight work by fast-forwarding the completed fence seqno
 * to the last submitted fence (so waiters are released).
 */
void adreno_recover(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct drm_device *dev = gpu->dev;
	int ret;

	gpu->funcs->pm_suspend(gpu);

	/* reset ringbuffer: */
	gpu->rb->cur = gpu->rb->start;

	/* reset completed fence seqno, just discard anything pending: */
	adreno_gpu->memptrs->fence = gpu->submitted_fence;

	gpu->funcs->pm_resume(gpu);
	ret = gpu->funcs->hw_init(gpu);
	if (ret) {
		dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
		/* hmm, oh well? */
	}
}
135
/* Write a validated submit into the ringbuffer: IB packets for each cmd
 * buffer, followed by fence writeback and an interrupt, then kick the CP.
 * Always returns 0.
 */
int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
		struct msm_file_private *ctx)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct msm_drm_private *priv = gpu->dev->dev_private;
	struct msm_ringbuffer *ring = gpu->rb;
	unsigned i, ibs = 0;

	for (i = 0; i < submit->nr_cmds; i++) {
		switch (submit->cmd[i].type) {
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
			/* ignore IB-targets */
			break;
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
			/* ignore if there has not been a ctx switch: */
			if (priv->lastctx == ctx)
				break;
			/* fall-thru: a ctx switch happened, emit the restore IB */
		case MSM_SUBMIT_CMD_BUF:
			OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFD, 2);
			OUT_RING(ring, submit->cmd[i].iova);
			OUT_RING(ring, submit->cmd[i].size);
			ibs++;
			break;
		}
	}

	/* on a320, at least, we seem to need to pad things out to an
	 * even number of qwords to avoid issue w/ CP hanging on wrap-
	 * around:
	 */
	if (ibs % 2)
		OUT_PKT2(ring);

	/* write fence seqno to SCRATCH_REG2 (writeback configured in
	 * adreno_hw_init()): */
	OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
	OUT_RING(ring, submit->fence);

	if (adreno_is_a3xx(adreno_gpu)) {
		/* Flush HLSQ lazy updates to make sure there is nothing
		 * pending for indirect loads after the timestamp has
		 * passed:
		 */
		OUT_PKT3(ring, CP_EVENT_WRITE, 1);
		OUT_RING(ring, HLSQ_FLUSH);

		OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
		OUT_RING(ring, 0x00000000);
	}

	/* have the CP write the fence seqno to memptrs->fence when the
	 * work before this point has landed: */
	OUT_PKT3(ring, CP_EVENT_WRITE, 3);
	OUT_RING(ring, CACHE_FLUSH_TS);
	OUT_RING(ring, rbmemptr(adreno_gpu, fence));
	OUT_RING(ring, submit->fence);

	/* we could maybe be clever and only CP_COND_EXEC the interrupt: */
	OUT_PKT3(ring, CP_INTERRUPT, 1);
	OUT_RING(ring, 0x80000000);

#if 0
	if (adreno_is_a3xx(adreno_gpu)) {
		/* Dummy set-constant to trigger context rollover */
		OUT_PKT3(ring, CP_SET_CONSTANT, 2);
		OUT_RING(ring, CP_REG(REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG));
		OUT_RING(ring, 0x00000000);
	}
#endif

	gpu->funcs->flush(gpu);

	return 0;
}
206
/* Kick the CP: publish the current ring write offset to the WPTR register
 * so the GPU starts consuming newly written packets.
 */
void adreno_flush(struct msm_gpu *gpu)
{
	uint32_t wptr = get_wptr(gpu->rb);

	/* ensure writes to ringbuffer have hit system memory: */
	mb();

	gpu_write(gpu, REG_AXXX_CP_RB_WPTR, wptr);
}
216
217void adreno_idle(struct msm_gpu *gpu)
218{
219 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
220 uint32_t rptr, wptr = get_wptr(gpu->rb);
221 unsigned long t;
222
223 t = jiffies + ADRENO_IDLE_TIMEOUT;
224
225 /* then wait for CP to drain ringbuffer: */
226 do {
227 rptr = adreno_gpu->memptrs->rptr;
228 if (rptr == wptr)
229 return;
230 } while(time_before(jiffies, t));
231
232 DRM_ERROR("timeout waiting for %s to drain ringbuffer!\n", gpu->name);
233
234 /* TODO maybe we need to reset GPU here to recover from hang? */
235}
236
#ifdef CONFIG_DEBUG_FS
/* debugfs dump: revision, fence progress, and ring read/write pointers
 * (both the CP-written-back values and our sw wptr).
 */
void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);

	seq_printf(m, "revision: %d (%d.%d.%d.%d)\n",
			adreno_gpu->info->revn, adreno_gpu->rev.core,
			adreno_gpu->rev.major, adreno_gpu->rev.minor,
			adreno_gpu->rev.patchid);

	/* completed vs. last-submitted fence seqno: */
	seq_printf(m, "fence:    %d/%d\n", adreno_gpu->memptrs->fence,
			gpu->submitted_fence);
	seq_printf(m, "rptr:     %d\n", adreno_gpu->memptrs->rptr);
	seq_printf(m, "wptr:     %d\n", adreno_gpu->memptrs->wptr);
	seq_printf(m, "rb wptr:  %d\n", get_wptr(gpu->rb));
}
#endif
254
255void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords)
256{
257 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
258 uint32_t freedwords;
259 do {
260 uint32_t size = gpu->rb->size / 4;
261 uint32_t wptr = get_wptr(gpu->rb);
262 uint32_t rptr = adreno_gpu->memptrs->rptr;
263 freedwords = (rptr + (size - 1) - wptr) % size;
264 } while(freedwords < ndwords);
265}
266
/* IOMMU context names attached for GPU access in adreno_gpu_init(): */
static const char *iommu_ports[] = {
		"gfx3d_user", "gfx3d_priv",
		"gfx3d1_user", "gfx3d1_priv",
};
271
272static inline bool _rev_match(uint8_t entry, uint8_t id)
273{
274 return (entry == ANY_ID) || (entry == id);
275}
276
/* Common Adreno init: identify the GPU from its probed revision, load
 * microcode, init the base msm_gpu, attach the IOMMU, and allocate the
 * memptrs writeback buffer.  Returns 0 or a negative errno.
 *
 * NOTE(review): on the later error paths, already-acquired resources
 * (firmware, base gpu, memptrs bo) are not released here -- presumably
 * the caller is expected to invoke adreno_gpu_cleanup(); verify at the
 * call sites.
 */
int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
		struct adreno_rev rev)
{
	int i, ret;

	/* identify gpu: */
	for (i = 0; i < ARRAY_SIZE(gpulist); i++) {
		const struct adreno_info *info = &gpulist[i];
		if (_rev_match(info->rev.core, rev.core) &&
				_rev_match(info->rev.major, rev.major) &&
				_rev_match(info->rev.minor, rev.minor) &&
				_rev_match(info->rev.patchid, rev.patchid)) {
			gpu->info = info;
			gpu->revn = info->revn;
			break;
		}
	}

	if (i == ARRAY_SIZE(gpulist)) {
		dev_err(drm->dev, "Unknown GPU revision: %u.%u.%u.%u\n",
				rev.core, rev.major, rev.minor, rev.patchid);
		return -ENXIO;
	}

	DBG("Found GPU: %s (%u.%u.%u.%u)", gpu->info->name,
			rev.core, rev.major, rev.minor, rev.patchid);

	gpu->funcs = funcs;
	gpu->rev = rev;

	/* load PM4 and PFP microcode: */
	ret = request_firmware(&gpu->pm4, gpu->info->pm4fw, drm->dev);
	if (ret) {
		dev_err(drm->dev, "failed to load %s PM4 firmware: %d\n",
				gpu->info->pm4fw, ret);
		return ret;
	}

	ret = request_firmware(&gpu->pfp, gpu->info->pfpfw, drm->dev);
	if (ret) {
		dev_err(drm->dev, "failed to load %s PFP firmware: %d\n",
				gpu->info->pfpfw, ret);
		return ret;
	}

	ret = msm_gpu_init(drm, pdev, &gpu->base, &funcs->base,
			gpu->info->name, "kgsl_3d0_reg_memory", "kgsl_3d0_irq",
			RB_SIZE);
	if (ret)
		return ret;

	ret = msm_iommu_attach(drm, gpu->base.iommu,
			iommu_ports, ARRAY_SIZE(iommu_ports));
	if (ret)
		return ret;

	/* small uncached buffer the CP writes rptr/fence back into: */
	gpu->memptrs_bo = msm_gem_new(drm, sizeof(*gpu->memptrs),
			MSM_BO_UNCACHED);
	if (IS_ERR(gpu->memptrs_bo)) {
		ret = PTR_ERR(gpu->memptrs_bo);
		gpu->memptrs_bo = NULL;
		dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
		return ret;
	}

	gpu->memptrs = msm_gem_vaddr_locked(gpu->memptrs_bo);
	if (!gpu->memptrs) {
		dev_err(drm->dev, "could not vmap memptrs\n");
		return -ENOMEM;
	}

	ret = msm_gem_get_iova_locked(gpu->memptrs_bo, gpu->base.id,
			&gpu->memptrs_iova);
	if (ret) {
		dev_err(drm->dev, "could not map memptrs: %d\n", ret);
		return ret;
	}

	return 0;
}
357
358void adreno_gpu_cleanup(struct adreno_gpu *gpu)
359{
360 if (gpu->memptrs_bo) {
361 if (gpu->memptrs_iova)
362 msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
363 drm_gem_object_unreference(gpu->memptrs_bo);
364 }
365 if (gpu->pm4)
366 release_firmware(gpu->pm4);
367 if (gpu->pfp)
368 release_firmware(gpu->pfp);
369 msm_gpu_cleanup(&gpu->base);
370}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
new file mode 100644
index 000000000000..f73abfba7c22
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -0,0 +1,141 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __ADRENO_GPU_H__
19#define __ADRENO_GPU_H__
20
21#include <linux/firmware.h>
22
23#include "msm_gpu.h"
24
25#include "adreno_common.xml.h"
26#include "adreno_pm4.xml.h"
27
/* Adreno hardware revision, eg. 3.2.0.x for an A320. */
struct adreno_rev {
	uint8_t  core;
	uint8_t  major;
	uint8_t  minor;
	uint8_t  patchid;
};

/* compound-literal convenience constructor (fields may be ANY_ID): */
#define ADRENO_REV(core, major, minor, patchid) \
	((struct adreno_rev){ core, major, minor, patchid })
37
/* Adreno-specific gpu funcs; currently just wraps the base vtable. */
struct adreno_gpu_funcs {
	struct msm_gpu_funcs base;
};

struct adreno_info;

/* Layout of the small uncached buffer the CP writes back into; fields
 * are volatile because the GPU updates them behind the CPU's back. */
struct adreno_rbmemptrs {
	volatile uint32_t rptr;		/* CP's ring read pointer */
	volatile uint32_t wptr;
	volatile uint32_t fence;	/* last completed fence seqno */
};

struct adreno_gpu {
	struct msm_gpu base;
	struct adreno_rev rev;		/* probed hw revision */
	const struct adreno_info *info;	/* matching gpulist entry */
	uint32_t revn;  /* numeric revision name */
	const struct adreno_gpu_funcs *funcs;

	/* firmware: */
	const struct firmware *pm4, *pfp;

	/* ringbuffer rptr/wptr: */
	// TODO should this be in msm_ringbuffer?  I think it would be
	// different for z180..
	struct adreno_rbmemptrs *memptrs;
	struct drm_gem_object *memptrs_bo;	/* backing bo for memptrs */
	uint32_t memptrs_iova;			/* GPU address of memptrs */
};
#define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)

/* platform config data (ie. from DT, or pdata) */
struct adreno_platform_config {
	struct adreno_rev rev;
	uint32_t fast_rate, slow_rate, bus_freq;	/* clk rates */
};

/* timeout used when waiting for the GPU to drain the ring, in ms: */
#define ADRENO_IDLE_TIMEOUT (20 * 1000)
76
/* Generation/revision predicates, keyed off the numeric revn (eg. 320): */

static inline bool adreno_is_a3xx(struct adreno_gpu *gpu)
{
	return (gpu->revn >= 300) && (gpu->revn < 400);
}

static inline bool adreno_is_a305(struct adreno_gpu *gpu)
{
	return gpu->revn == 305;
}

static inline bool adreno_is_a320(struct adreno_gpu *gpu)
{
	return gpu->revn == 320;
}

static inline bool adreno_is_a330(struct adreno_gpu *gpu)
{
	return gpu->revn == 330;
}
96
97int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
98int adreno_hw_init(struct msm_gpu *gpu);
99uint32_t adreno_last_fence(struct msm_gpu *gpu);
100void adreno_recover(struct msm_gpu *gpu);
101int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
102 struct msm_file_private *ctx);
103void adreno_flush(struct msm_gpu *gpu);
104void adreno_idle(struct msm_gpu *gpu);
105#ifdef CONFIG_DEBUG_FS
106void adreno_show(struct msm_gpu *gpu, struct seq_file *m);
107#endif
108void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords);
109
110int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
111 struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
112 struct adreno_rev rev);
113void adreno_gpu_cleanup(struct adreno_gpu *gpu);
114
115
116/* ringbuffer helpers (the parts that are adreno specific) */
117
/* Emit a type-0 packet header: the CP writes the next 'cnt' dwords to
 * consecutive registers starting at 'regindx'.  Waits for cnt+1 dwords
 * of ring space (payload plus this header). */
static inline void
OUT_PKT0(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
{
	adreno_wait_ring(ring->gpu, cnt+1);
	OUT_RING(ring, CP_TYPE0_PKT | ((cnt-1) << 16) | (regindx & 0x7FFF));
}

/* no-op packet: */
static inline void
OUT_PKT2(struct msm_ringbuffer *ring)
{
	adreno_wait_ring(ring->gpu, 1);
	OUT_RING(ring, CP_TYPE2_PKT);
}

/* Emit a type-3 packet header: 'opcode' (CP_*) with 'cnt' payload dwords
 * to follow.  Waits for cnt+1 dwords of ring space. */
static inline void
OUT_PKT3(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
{
	adreno_wait_ring(ring->gpu, cnt+1);
	OUT_RING(ring, CP_TYPE3_PKT | ((cnt-1) << 16) | ((opcode & 0xFF) << 8));
}
139
140
141#endif /* __ADRENO_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
new file mode 100644
index 000000000000..94c13f418e75
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -0,0 +1,254 @@
1#ifndef ADRENO_PM4_XML
2#define ADRENO_PM4_XML
3
4/* Autogenerated file, DO NOT EDIT manually!
5
6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng
8git clone git://0x04.net/rules-ng-ng
9
10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 30005 bytes, from 2013-07-19 21:30:48)
14- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36)
15- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9712 bytes, from 2013-05-26 15:22:37)
16- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51415 bytes, from 2013-08-03 14:26:05)
17
18Copyright (C) 2013 by the following authors:
19- Rob Clark <robdclark@gmail.com> (robclark)
20
21Permission is hereby granted, free of charge, to any person obtaining
22a copy of this software and associated documentation files (the
23"Software"), to deal in the Software without restriction, including
24without limitation the rights to use, copy, modify, merge, publish,
25distribute, sublicense, and/or sell copies of the Software, and to
26permit persons to whom the Software is furnished to do so, subject to
27the following conditions:
28
29The above copyright notice and this permission notice (including the
30next paragraph) shall be included in all copies or substantial
31portions of the Software.
32
33THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
34EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
35MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
36IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
37LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
38OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
39WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
40*/
41
42
/* Event codes for CP_EVENT_WRITE packets (autogenerated; note HLSQ_FLUSH
 * and VIZQUERY_START intentionally share the value 7 -- the meaning
 * differs per generation). */
enum vgt_event_type {
	VS_DEALLOC = 0,
	PS_DEALLOC = 1,
	VS_DONE_TS = 2,
	PS_DONE_TS = 3,
	CACHE_FLUSH_TS = 4,
	CONTEXT_DONE = 5,
	CACHE_FLUSH = 6,
	HLSQ_FLUSH = 7,
	VIZQUERY_START = 7,
	VIZQUERY_END = 8,
	SC_WAIT_WC = 9,
	RST_PIX_CNT = 13,
	RST_VTX_CNT = 14,
	TILE_FLUSH = 15,
	CACHE_FLUSH_AND_INV_TS_EVENT = 20,
	ZPASS_DONE = 21,
	CACHE_FLUSH_AND_INV_EVENT = 22,
	PERFCOUNTER_START = 23,
	PERFCOUNTER_STOP = 24,
	VS_FETCH_DONE = 27,
	FACENESS_FLUSH = 28,
};
66
/* Draw-initiator enums for CP_DRAW_INDX* packets (autogenerated).  Note
 * INDEX_SIZE_IGN/INDEX_SIZE_16_BIT/INDEX_SIZE_INVALID all alias 0. */
enum pc_di_primtype {
	DI_PT_NONE = 0,
	DI_PT_POINTLIST = 1,
	DI_PT_LINELIST = 2,
	DI_PT_LINESTRIP = 3,
	DI_PT_TRILIST = 4,
	DI_PT_TRIFAN = 5,
	DI_PT_TRISTRIP = 6,
	DI_PT_RECTLIST = 8,
	DI_PT_QUADLIST = 13,
	DI_PT_QUADSTRIP = 14,
	DI_PT_POLYGON = 15,
	DI_PT_2D_COPY_RECT_LIST_V0 = 16,
	DI_PT_2D_COPY_RECT_LIST_V1 = 17,
	DI_PT_2D_COPY_RECT_LIST_V2 = 18,
	DI_PT_2D_COPY_RECT_LIST_V3 = 19,
	DI_PT_2D_FILL_RECT_LIST = 20,
	DI_PT_2D_LINE_STRIP = 21,
	DI_PT_2D_TRI_STRIP = 22,
};

enum pc_di_src_sel {
	DI_SRC_SEL_DMA = 0,
	DI_SRC_SEL_IMMEDIATE = 1,
	DI_SRC_SEL_AUTO_INDEX = 2,
	DI_SRC_SEL_RESERVED = 3,
};

enum pc_di_index_size {
	INDEX_SIZE_IGN = 0,
	INDEX_SIZE_16_BIT = 0,
	INDEX_SIZE_32_BIT = 1,
	INDEX_SIZE_8_BIT = 2,
	INDEX_SIZE_INVALID = 0,
};

enum pc_di_vis_cull_mode {
	IGNORE_VISIBILITY = 0,
};
106
/* PM4 packet type, encoded in the top two bits of the packet header: */
enum adreno_pm4_packet_type {
	CP_TYPE0_PKT = 0,
	CP_TYPE1_PKT = 0x40000000,
	CP_TYPE2_PKT = 0x80000000,
	CP_TYPE3_PKT = 0xc0000000,
};
113
/* Type-3 packet opcodes (autogenerated).  Note some intentional aliases:
 * CP_INDIRECT_BUFFER == CP_INDIRECT_BUFFER_PFE (63) and
 * CP_SET_BIN_BASE_OFFSET == CP_SET_DRAW_INIT_FLAGS (75); "CP_WAT_REG_GTE"
 * spelling comes from the upstream rnndb database. */
enum adreno_pm4_type3_packets {
	CP_ME_INIT = 72,
	CP_NOP = 16,
	CP_INDIRECT_BUFFER = 63,
	CP_INDIRECT_BUFFER_PFD = 55,
	CP_WAIT_FOR_IDLE = 38,
	CP_WAIT_REG_MEM = 60,
	CP_WAIT_REG_EQ = 82,
	CP_WAT_REG_GTE = 83,
	CP_WAIT_UNTIL_READ = 92,
	CP_WAIT_IB_PFD_COMPLETE = 93,
	CP_REG_RMW = 33,
	CP_SET_BIN_DATA = 47,
	CP_REG_TO_MEM = 62,
	CP_MEM_WRITE = 61,
	CP_MEM_WRITE_CNTR = 79,
	CP_COND_EXEC = 68,
	CP_COND_WRITE = 69,
	CP_EVENT_WRITE = 70,
	CP_EVENT_WRITE_SHD = 88,
	CP_EVENT_WRITE_CFL = 89,
	CP_EVENT_WRITE_ZPD = 91,
	CP_RUN_OPENCL = 49,
	CP_DRAW_INDX = 34,
	CP_DRAW_INDX_2 = 54,
	CP_DRAW_INDX_BIN = 52,
	CP_DRAW_INDX_2_BIN = 53,
	CP_VIZ_QUERY = 35,
	CP_SET_STATE = 37,
	CP_SET_CONSTANT = 45,
	CP_IM_LOAD = 39,
	CP_IM_LOAD_IMMEDIATE = 43,
	CP_LOAD_CONSTANT_CONTEXT = 46,
	CP_INVALIDATE_STATE = 59,
	CP_SET_SHADER_BASES = 74,
	CP_SET_BIN_MASK = 80,
	CP_SET_BIN_SELECT = 81,
	CP_CONTEXT_UPDATE = 94,
	CP_INTERRUPT = 64,
	CP_IM_STORE = 44,
	CP_SET_BIN_BASE_OFFSET = 75,
	CP_SET_DRAW_INIT_FLAGS = 75,
	CP_SET_PROTECTED_MODE = 95,
	CP_LOAD_STATE = 48,
	CP_COND_INDIRECT_BUFFER_PFE = 58,
	CP_COND_INDIRECT_BUFFER_PFD = 50,
	CP_INDIRECT_BUFFER_PFE = 63,
	CP_SET_BIN = 76,
};
163
/* Destination/type/source selectors for CP_LOAD_STATE packets: */
enum adreno_state_block {
	SB_VERT_TEX = 0,
	SB_VERT_MIPADDR = 1,
	SB_FRAG_TEX = 2,
	SB_FRAG_MIPADDR = 3,
	SB_VERT_SHADER = 4,
	SB_FRAG_SHADER = 6,
};

enum adreno_state_type {
	ST_SHADER = 0,
	ST_CONSTANTS = 1,
};

enum adreno_state_src {
	SS_DIRECT = 0,
	SS_INDIRECT = 4,
};
182
/* Field-packing helpers for the two dwords of a CP_LOAD_STATE payload
 * (autogenerated). */
#define REG_CP_LOAD_STATE_0				0x00000000
#define CP_LOAD_STATE_0_DST_OFF__MASK			0x0000ffff
#define CP_LOAD_STATE_0_DST_OFF__SHIFT			0
static inline uint32_t CP_LOAD_STATE_0_DST_OFF(uint32_t val)
{
	return ((val) << CP_LOAD_STATE_0_DST_OFF__SHIFT) & CP_LOAD_STATE_0_DST_OFF__MASK;
}
#define CP_LOAD_STATE_0_STATE_SRC__MASK			0x00070000
#define CP_LOAD_STATE_0_STATE_SRC__SHIFT		16
static inline uint32_t CP_LOAD_STATE_0_STATE_SRC(enum adreno_state_src val)
{
	return ((val) << CP_LOAD_STATE_0_STATE_SRC__SHIFT) & CP_LOAD_STATE_0_STATE_SRC__MASK;
}
#define CP_LOAD_STATE_0_STATE_BLOCK__MASK		0x00380000
#define CP_LOAD_STATE_0_STATE_BLOCK__SHIFT		19
static inline uint32_t CP_LOAD_STATE_0_STATE_BLOCK(enum adreno_state_block val)
{
	return ((val) << CP_LOAD_STATE_0_STATE_BLOCK__SHIFT) & CP_LOAD_STATE_0_STATE_BLOCK__MASK;
}
#define CP_LOAD_STATE_0_NUM_UNIT__MASK			0x7fc00000
#define CP_LOAD_STATE_0_NUM_UNIT__SHIFT			22
static inline uint32_t CP_LOAD_STATE_0_NUM_UNIT(uint32_t val)
{
	return ((val) << CP_LOAD_STATE_0_NUM_UNIT__SHIFT) & CP_LOAD_STATE_0_NUM_UNIT__MASK;
}

#define REG_CP_LOAD_STATE_1				0x00000001
#define CP_LOAD_STATE_1_STATE_TYPE__MASK		0x00000003
#define CP_LOAD_STATE_1_STATE_TYPE__SHIFT		0
static inline uint32_t CP_LOAD_STATE_1_STATE_TYPE(enum adreno_state_type val)
{
	return ((val) << CP_LOAD_STATE_1_STATE_TYPE__SHIFT) & CP_LOAD_STATE_1_STATE_TYPE__MASK;
}
#define CP_LOAD_STATE_1_EXT_SRC_ADDR__MASK		0xfffffffc
#define CP_LOAD_STATE_1_EXT_SRC_ADDR__SHIFT		2
static inline uint32_t CP_LOAD_STATE_1_EXT_SRC_ADDR(uint32_t val)
{
	/* drops the low 2 bits -- address is expected dword-aligned */
	return ((val >> 2) << CP_LOAD_STATE_1_EXT_SRC_ADDR__SHIFT) & CP_LOAD_STATE_1_EXT_SRC_ADDR__MASK;
}
222
/* Field-packing helpers for CP_SET_BIN payload: bin rectangle corners
 * (x1,y1)..(x2,y2), 16 bits each (autogenerated). */
#define REG_CP_SET_BIN_0				0x00000000

#define REG_CP_SET_BIN_1				0x00000001
#define CP_SET_BIN_1_X1__MASK				0x0000ffff
#define CP_SET_BIN_1_X1__SHIFT				0
static inline uint32_t CP_SET_BIN_1_X1(uint32_t val)
{
	return ((val) << CP_SET_BIN_1_X1__SHIFT) & CP_SET_BIN_1_X1__MASK;
}
#define CP_SET_BIN_1_Y1__MASK				0xffff0000
#define CP_SET_BIN_1_Y1__SHIFT				16
static inline uint32_t CP_SET_BIN_1_Y1(uint32_t val)
{
	return ((val) << CP_SET_BIN_1_Y1__SHIFT) & CP_SET_BIN_1_Y1__MASK;
}

#define REG_CP_SET_BIN_2				0x00000002
#define CP_SET_BIN_2_X2__MASK				0x0000ffff
#define CP_SET_BIN_2_X2__SHIFT				0
static inline uint32_t CP_SET_BIN_2_X2(uint32_t val)
{
	return ((val) << CP_SET_BIN_2_X2__SHIFT) & CP_SET_BIN_2_X2__MASK;
}
#define CP_SET_BIN_2_Y2__MASK				0xffff0000
#define CP_SET_BIN_2_Y2__SHIFT				16
static inline uint32_t CP_SET_BIN_2_Y2(uint32_t val)
{
	return ((val) << CP_SET_BIN_2_Y2__SHIFT) & CP_SET_BIN_2_Y2__MASK;
}


#endif /* ADRENO_PM4_XML */
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
new file mode 100644
index 000000000000..6f8396be431d
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -0,0 +1,502 @@
1#ifndef DSI_XML
2#define DSI_XML
3
4/* Autogenerated file, DO NOT EDIT manually!
5
6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng
8git clone git://0x04.net/rules-ng-ng
9
10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36)
14- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
15- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
17- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
18- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15)
19
20Copyright (C) 2013 by the following authors:
21- Rob Clark <robdclark@gmail.com> (robclark)
22
23Permission is hereby granted, free of charge, to any person obtaining
24a copy of this software and associated documentation files (the
25"Software"), to deal in the Software without restriction, including
26without limitation the rights to use, copy, modify, merge, publish,
27distribute, sublicense, and/or sell copies of the Software, and to
28permit persons to whom the Software is furnished to do so, subject to
29the following conditions:
30
31The above copyright notice and this permission notice (including the
32next paragraph) shall be included in all copies or substantial
33portions of the Software.
34
35THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
36EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
37MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
38IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
39LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
40OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
41WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
42*/
43
44
/* DSI controller enums (autogenerated from rnndb dsi.xml): */
enum dsi_traffic_mode {
	NON_BURST_SYNCH_PULSE = 0,
	NON_BURST_SYNCH_EVENT = 1,
	BURST_MODE = 2,
};

enum dsi_dst_format {
	DST_FORMAT_RGB565 = 0,
	DST_FORMAT_RGB666 = 1,
	DST_FORMAT_RGB666_LOOSE = 2,
	DST_FORMAT_RGB888 = 3,
};

enum dsi_rgb_swap {
	SWAP_RGB = 0,
	SWAP_RBG = 1,
	SWAP_BGR = 2,
	SWAP_BRG = 3,
	SWAP_GRB = 4,
	SWAP_GBR = 5,
};

enum dsi_cmd_trigger {
	TRIGGER_NONE = 0,
	TRIGGER_TE = 2,
	TRIGGER_SW = 4,
	TRIGGER_SW_SEOF = 5,
	TRIGGER_SW_TE = 6,
};
74
/* IRQ status bits and their mask (enable) bits, plus the main control
 * and status registers (autogenerated): */
#define DSI_IRQ_CMD_DMA_DONE				0x00000001
#define DSI_IRQ_MASK_CMD_DMA_DONE			0x00000002
#define DSI_IRQ_CMD_MDP_DONE				0x00000100
#define DSI_IRQ_MASK_CMD_MDP_DONE			0x00000200
#define DSI_IRQ_VIDEO_DONE				0x00010000
#define DSI_IRQ_MASK_VIDEO_DONE				0x00020000
#define DSI_IRQ_ERROR					0x01000000
#define DSI_IRQ_MASK_ERROR				0x02000000
#define REG_DSI_CTRL					0x00000000
#define DSI_CTRL_ENABLE					0x00000001
#define DSI_CTRL_VID_MODE_EN				0x00000002
#define DSI_CTRL_CMD_MODE_EN				0x00000004
#define DSI_CTRL_LANE0					0x00000010
#define DSI_CTRL_LANE1					0x00000020
#define DSI_CTRL_LANE2					0x00000040
#define DSI_CTRL_LANE3					0x00000080
#define DSI_CTRL_CLK_EN					0x00000100
#define DSI_CTRL_ECC_CHECK				0x00100000
#define DSI_CTRL_CRC_CHECK				0x01000000

#define REG_DSI_STATUS0					0x00000004
#define DSI_STATUS0_CMD_MODE_DMA_BUSY			0x00000002
#define DSI_STATUS0_VIDEO_MODE_ENGINE_BUSY		0x00000008
#define DSI_STATUS0_DSI_BUSY				0x00000010

#define REG_DSI_FIFO_STATUS				0x00000008
101
/* Video-mode configuration registers and field helpers (autogenerated): */
#define REG_DSI_VID_CFG0				0x0000000c
#define DSI_VID_CFG0_VIRT_CHANNEL__MASK			0x00000003
#define DSI_VID_CFG0_VIRT_CHANNEL__SHIFT		0
static inline uint32_t DSI_VID_CFG0_VIRT_CHANNEL(uint32_t val)
{
	return ((val) << DSI_VID_CFG0_VIRT_CHANNEL__SHIFT) & DSI_VID_CFG0_VIRT_CHANNEL__MASK;
}
#define DSI_VID_CFG0_DST_FORMAT__MASK			0x00000030
#define DSI_VID_CFG0_DST_FORMAT__SHIFT			4
static inline uint32_t DSI_VID_CFG0_DST_FORMAT(enum dsi_dst_format val)
{
	return ((val) << DSI_VID_CFG0_DST_FORMAT__SHIFT) & DSI_VID_CFG0_DST_FORMAT__MASK;
}
#define DSI_VID_CFG0_TRAFFIC_MODE__MASK			0x00000300
#define DSI_VID_CFG0_TRAFFIC_MODE__SHIFT		8
static inline uint32_t DSI_VID_CFG0_TRAFFIC_MODE(enum dsi_traffic_mode val)
{
	return ((val) << DSI_VID_CFG0_TRAFFIC_MODE__SHIFT) & DSI_VID_CFG0_TRAFFIC_MODE__MASK;
}
#define DSI_VID_CFG0_BLLP_POWER_STOP			0x00001000
#define DSI_VID_CFG0_EOF_BLLP_POWER_STOP		0x00008000
#define DSI_VID_CFG0_HSA_POWER_STOP			0x00010000
#define DSI_VID_CFG0_HBP_POWER_STOP			0x00100000
#define DSI_VID_CFG0_HFP_POWER_STOP			0x01000000
#define DSI_VID_CFG0_PULSE_MODE_HSA_HE			0x10000000

#define REG_DSI_VID_CFG1				0x0000001c
#define DSI_VID_CFG1_R_SEL				0x00000010
#define DSI_VID_CFG1_G_SEL				0x00000100
#define DSI_VID_CFG1_B_SEL				0x00001000
#define DSI_VID_CFG1_RGB_SWAP__MASK			0x00070000
#define DSI_VID_CFG1_RGB_SWAP__SHIFT			16
static inline uint32_t DSI_VID_CFG1_RGB_SWAP(enum dsi_rgb_swap val)
{
	return ((val) << DSI_VID_CFG1_RGB_SWAP__SHIFT) & DSI_VID_CFG1_RGB_SWAP__MASK;
}
#define DSI_VID_CFG1_INTERLEAVE_MAX__MASK		0x00f00000
#define DSI_VID_CFG1_INTERLEAVE_MAX__SHIFT		20
static inline uint32_t DSI_VID_CFG1_INTERLEAVE_MAX(uint32_t val)
{
	return ((val) << DSI_VID_CFG1_INTERLEAVE_MAX__SHIFT) & DSI_VID_CFG1_INTERLEAVE_MAX__MASK;
}
144
/* Active area and total timing registers; each packs a 12-bit start/end
 * (or h/v total) pair into one dword (autogenerated): */
#define REG_DSI_ACTIVE_H				0x00000020
#define DSI_ACTIVE_H_START__MASK			0x00000fff
#define DSI_ACTIVE_H_START__SHIFT			0
static inline uint32_t DSI_ACTIVE_H_START(uint32_t val)
{
	return ((val) << DSI_ACTIVE_H_START__SHIFT) & DSI_ACTIVE_H_START__MASK;
}
#define DSI_ACTIVE_H_END__MASK				0x0fff0000
#define DSI_ACTIVE_H_END__SHIFT				16
static inline uint32_t DSI_ACTIVE_H_END(uint32_t val)
{
	return ((val) << DSI_ACTIVE_H_END__SHIFT) & DSI_ACTIVE_H_END__MASK;
}

#define REG_DSI_ACTIVE_V				0x00000024
#define DSI_ACTIVE_V_START__MASK			0x00000fff
#define DSI_ACTIVE_V_START__SHIFT			0
static inline uint32_t DSI_ACTIVE_V_START(uint32_t val)
{
	return ((val) << DSI_ACTIVE_V_START__SHIFT) & DSI_ACTIVE_V_START__MASK;
}
#define DSI_ACTIVE_V_END__MASK				0x0fff0000
#define DSI_ACTIVE_V_END__SHIFT				16
static inline uint32_t DSI_ACTIVE_V_END(uint32_t val)
{
	return ((val) << DSI_ACTIVE_V_END__SHIFT) & DSI_ACTIVE_V_END__MASK;
}

#define REG_DSI_TOTAL					0x00000028
#define DSI_TOTAL_H_TOTAL__MASK				0x00000fff
#define DSI_TOTAL_H_TOTAL__SHIFT			0
static inline uint32_t DSI_TOTAL_H_TOTAL(uint32_t val)
{
	return ((val) << DSI_TOTAL_H_TOTAL__SHIFT) & DSI_TOTAL_H_TOTAL__MASK;
}
#define DSI_TOTAL_V_TOTAL__MASK				0x0fff0000
#define DSI_TOTAL_V_TOTAL__SHIFT			16
static inline uint32_t DSI_TOTAL_V_TOTAL(uint32_t val)
{
	return ((val) << DSI_TOTAL_V_TOTAL__SHIFT) & DSI_TOTAL_V_TOTAL__MASK;
}
186
/* Sync pulse position registers, same 12-bit start/end packing
 * (autogenerated): */
#define REG_DSI_ACTIVE_HSYNC				0x0000002c
#define DSI_ACTIVE_HSYNC_START__MASK			0x00000fff
#define DSI_ACTIVE_HSYNC_START__SHIFT			0
static inline uint32_t DSI_ACTIVE_HSYNC_START(uint32_t val)
{
	return ((val) << DSI_ACTIVE_HSYNC_START__SHIFT) & DSI_ACTIVE_HSYNC_START__MASK;
}
#define DSI_ACTIVE_HSYNC_END__MASK			0x0fff0000
#define DSI_ACTIVE_HSYNC_END__SHIFT			16
static inline uint32_t DSI_ACTIVE_HSYNC_END(uint32_t val)
{
	return ((val) << DSI_ACTIVE_HSYNC_END__SHIFT) & DSI_ACTIVE_HSYNC_END__MASK;
}

#define REG_DSI_ACTIVE_VSYNC				0x00000034
#define DSI_ACTIVE_VSYNC_START__MASK			0x00000fff
#define DSI_ACTIVE_VSYNC_START__SHIFT			0
static inline uint32_t DSI_ACTIVE_VSYNC_START(uint32_t val)
{
	return ((val) << DSI_ACTIVE_VSYNC_START__SHIFT) & DSI_ACTIVE_VSYNC_START__MASK;
}
#define DSI_ACTIVE_VSYNC_END__MASK			0x0fff0000
#define DSI_ACTIVE_VSYNC_END__SHIFT			16
static inline uint32_t DSI_ACTIVE_VSYNC_END(uint32_t val)
{
	return ((val) << DSI_ACTIVE_VSYNC_END__SHIFT) & DSI_ACTIVE_VSYNC_END__MASK;
}
214
215#define REG_DSI_CMD_DMA_CTRL 0x00000038
216#define DSI_CMD_DMA_CTRL_FROM_FRAME_BUFFER 0x10000000
217#define DSI_CMD_DMA_CTRL_LOW_POWER 0x04000000
218
219#define REG_DSI_CMD_CFG0 0x0000003c
220
221#define REG_DSI_CMD_CFG1 0x00000040
222
223#define REG_DSI_DMA_BASE 0x00000044
224
225#define REG_DSI_DMA_LEN 0x00000048
226
227#define REG_DSI_ACK_ERR_STATUS 0x00000064
228
229static inline uint32_t REG_DSI_RDBK(uint32_t i0) { return 0x00000068 + 0x4*i0; }
230
231static inline uint32_t REG_DSI_RDBK_DATA(uint32_t i0) { return 0x00000068 + 0x4*i0; }
232
233#define REG_DSI_TRIG_CTRL 0x00000080
#define DSI_TRIG_CTRL_DMA_TRIGGER__MASK 0x0000000f
#define DSI_TRIG_CTRL_DMA_TRIGGER__SHIFT 0
/* Encode a DMA trigger source (enum dsi_cmd_trigger) into bits 0..3
 * of REG_DSI_TRIG_CTRL. */
static inline uint32_t DSI_TRIG_CTRL_DMA_TRIGGER(enum dsi_cmd_trigger val)
{
	return ((val) << DSI_TRIG_CTRL_DMA_TRIGGER__SHIFT) & DSI_TRIG_CTRL_DMA_TRIGGER__MASK;
}
#define DSI_TRIG_CTRL_MDP_TRIGGER__MASK 0x000000f0
#define DSI_TRIG_CTRL_MDP_TRIGGER__SHIFT 4
/* Encode an MDP trigger source (enum dsi_cmd_trigger) into bits 4..7
 * of REG_DSI_TRIG_CTRL. */
static inline uint32_t DSI_TRIG_CTRL_MDP_TRIGGER(enum dsi_cmd_trigger val)
{
	return ((val) << DSI_TRIG_CTRL_MDP_TRIGGER__SHIFT) & DSI_TRIG_CTRL_MDP_TRIGGER__MASK;
}
246#define DSI_TRIG_CTRL_STREAM 0x00000100
247#define DSI_TRIG_CTRL_TE 0x80000000
248
249#define REG_DSI_TRIG_DMA 0x0000008c
250
251#define REG_DSI_DLN0_PHY_ERR 0x000000b0
252
253#define REG_DSI_TIMEOUT_STATUS 0x000000bc
254
255#define REG_DSI_CLKOUT_TIMING_CTRL 0x000000c0
#define DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE__MASK 0x0000003f
#define DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE__SHIFT 0
/* Pack 'val' into the T_CLK_PRE field (bits 0..5). */
static inline uint32_t DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE(uint32_t val)
{
	return DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE__MASK &
		(val << DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE__SHIFT);
}
#define DSI_CLKOUT_TIMING_CTRL_T_CLK_POST__MASK 0x00003f00
#define DSI_CLKOUT_TIMING_CTRL_T_CLK_POST__SHIFT 8
/* Pack 'val' into the T_CLK_POST field (bits 8..13). */
static inline uint32_t DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(uint32_t val)
{
	uint32_t field = val << DSI_CLKOUT_TIMING_CTRL_T_CLK_POST__SHIFT;

	return field & DSI_CLKOUT_TIMING_CTRL_T_CLK_POST__MASK;
}
268
269#define REG_DSI_EOT_PACKET_CTRL 0x000000c8
270#define DSI_EOT_PACKET_CTRL_TX_EOT_APPEND 0x00000001
271#define DSI_EOT_PACKET_CTRL_RX_EOT_IGNORE 0x00000010
272
273#define REG_DSI_LANE_SWAP_CTRL 0x000000ac
274
275#define REG_DSI_ERR_INT_MASK0 0x00000108
276
277#define REG_DSI_INTR_CTRL 0x0000010c
278
279#define REG_DSI_RESET 0x00000114
280
281#define REG_DSI_CLK_CTRL 0x00000118
282
283#define REG_DSI_PHY_RESET 0x00000128
284
285#define REG_DSI_PHY_PLL_CTRL_0 0x00000200
286#define DSI_PHY_PLL_CTRL_0_ENABLE 0x00000001
287
288#define REG_DSI_PHY_PLL_CTRL_1 0x00000204
289
290#define REG_DSI_PHY_PLL_CTRL_2 0x00000208
291
292#define REG_DSI_PHY_PLL_CTRL_3 0x0000020c
293
294#define REG_DSI_PHY_PLL_CTRL_4 0x00000210
295
296#define REG_DSI_PHY_PLL_CTRL_5 0x00000214
297
298#define REG_DSI_PHY_PLL_CTRL_6 0x00000218
299
300#define REG_DSI_PHY_PLL_CTRL_7 0x0000021c
301
302#define REG_DSI_PHY_PLL_CTRL_8 0x00000220
303
304#define REG_DSI_PHY_PLL_CTRL_9 0x00000224
305
306#define REG_DSI_PHY_PLL_CTRL_10 0x00000228
307
308#define REG_DSI_PHY_PLL_CTRL_11 0x0000022c
309
310#define REG_DSI_PHY_PLL_CTRL_12 0x00000230
311
312#define REG_DSI_PHY_PLL_CTRL_13 0x00000234
313
314#define REG_DSI_PHY_PLL_CTRL_14 0x00000238
315
316#define REG_DSI_PHY_PLL_CTRL_15 0x0000023c
317
318#define REG_DSI_PHY_PLL_CTRL_16 0x00000240
319
320#define REG_DSI_PHY_PLL_CTRL_17 0x00000244
321
322#define REG_DSI_PHY_PLL_CTRL_18 0x00000248
323
324#define REG_DSI_PHY_PLL_CTRL_19 0x0000024c
325
326#define REG_DSI_PHY_PLL_CTRL_20 0x00000250
327
328#define REG_DSI_PHY_PLL_STATUS 0x00000280
329#define DSI_PHY_PLL_STATUS_PLL_BUSY 0x00000001
330
331#define REG_DSI_8x60_PHY_TPA_CTRL_1 0x00000258
332
333#define REG_DSI_8x60_PHY_TPA_CTRL_2 0x0000025c
334
335#define REG_DSI_8x60_PHY_TIMING_CTRL_0 0x00000260
336
337#define REG_DSI_8x60_PHY_TIMING_CTRL_1 0x00000264
338
339#define REG_DSI_8x60_PHY_TIMING_CTRL_2 0x00000268
340
341#define REG_DSI_8x60_PHY_TIMING_CTRL_3 0x0000026c
342
343#define REG_DSI_8x60_PHY_TIMING_CTRL_4 0x00000270
344
345#define REG_DSI_8x60_PHY_TIMING_CTRL_5 0x00000274
346
347#define REG_DSI_8x60_PHY_TIMING_CTRL_6 0x00000278
348
349#define REG_DSI_8x60_PHY_TIMING_CTRL_7 0x0000027c
350
351#define REG_DSI_8x60_PHY_TIMING_CTRL_8 0x00000280
352
353#define REG_DSI_8x60_PHY_TIMING_CTRL_9 0x00000284
354
355#define REG_DSI_8x60_PHY_TIMING_CTRL_10 0x00000288
356
357#define REG_DSI_8x60_PHY_TIMING_CTRL_11 0x0000028c
358
359#define REG_DSI_8x60_PHY_CTRL_0 0x00000290
360
361#define REG_DSI_8x60_PHY_CTRL_1 0x00000294
362
363#define REG_DSI_8x60_PHY_CTRL_2 0x00000298
364
365#define REG_DSI_8x60_PHY_CTRL_3 0x0000029c
366
367#define REG_DSI_8x60_PHY_STRENGTH_0 0x000002a0
368
369#define REG_DSI_8x60_PHY_STRENGTH_1 0x000002a4
370
371#define REG_DSI_8x60_PHY_STRENGTH_2 0x000002a8
372
373#define REG_DSI_8x60_PHY_STRENGTH_3 0x000002ac
374
375#define REG_DSI_8x60_PHY_REGULATOR_CTRL_0 0x000002cc
376
377#define REG_DSI_8x60_PHY_REGULATOR_CTRL_1 0x000002d0
378
379#define REG_DSI_8x60_PHY_REGULATOR_CTRL_2 0x000002d4
380
381#define REG_DSI_8x60_PHY_REGULATOR_CTRL_3 0x000002d8
382
383#define REG_DSI_8x60_PHY_REGULATOR_CTRL_4 0x000002dc
384
385#define REG_DSI_8x60_PHY_CAL_HW_TRIGGER 0x000000f0
386
387#define REG_DSI_8x60_PHY_CAL_CTRL 0x000000f4
388
389#define REG_DSI_8x60_PHY_CAL_STATUS 0x000000fc
390#define DSI_8x60_PHY_CAL_STATUS_CAL_BUSY 0x10000000
391
392static inline uint32_t REG_DSI_8960_LN(uint32_t i0) { return 0x00000300 + 0x40*i0; }
393
394static inline uint32_t REG_DSI_8960_LN_CFG_0(uint32_t i0) { return 0x00000300 + 0x40*i0; }
395
396static inline uint32_t REG_DSI_8960_LN_CFG_1(uint32_t i0) { return 0x00000304 + 0x40*i0; }
397
398static inline uint32_t REG_DSI_8960_LN_CFG_2(uint32_t i0) { return 0x00000308 + 0x40*i0; }
399
400static inline uint32_t REG_DSI_8960_LN_TEST_DATAPATH(uint32_t i0) { return 0x0000030c + 0x40*i0; }
401
402static inline uint32_t REG_DSI_8960_LN_TEST_STR_0(uint32_t i0) { return 0x00000314 + 0x40*i0; }
403
404static inline uint32_t REG_DSI_8960_LN_TEST_STR_1(uint32_t i0) { return 0x00000318 + 0x40*i0; }
405
406#define REG_DSI_8960_PHY_LNCK_CFG_0 0x00000400
407
408#define REG_DSI_8960_PHY_LNCK_CFG_1 0x00000404
409
410#define REG_DSI_8960_PHY_LNCK_CFG_2 0x00000408
411
412#define REG_DSI_8960_PHY_LNCK_TEST_DATAPATH 0x0000040c
413
414#define REG_DSI_8960_PHY_LNCK_TEST_STR0 0x00000414
415
416#define REG_DSI_8960_PHY_LNCK_TEST_STR1 0x00000418
417
418#define REG_DSI_8960_PHY_TIMING_CTRL_0 0x00000440
419
420#define REG_DSI_8960_PHY_TIMING_CTRL_1 0x00000444
421
422#define REG_DSI_8960_PHY_TIMING_CTRL_2 0x00000448
423
424#define REG_DSI_8960_PHY_TIMING_CTRL_3 0x0000044c
425
426#define REG_DSI_8960_PHY_TIMING_CTRL_4 0x00000450
427
428#define REG_DSI_8960_PHY_TIMING_CTRL_5 0x00000454
429
430#define REG_DSI_8960_PHY_TIMING_CTRL_6 0x00000458
431
432#define REG_DSI_8960_PHY_TIMING_CTRL_7 0x0000045c
433
434#define REG_DSI_8960_PHY_TIMING_CTRL_8 0x00000460
435
436#define REG_DSI_8960_PHY_TIMING_CTRL_9 0x00000464
437
438#define REG_DSI_8960_PHY_TIMING_CTRL_10 0x00000468
439
440#define REG_DSI_8960_PHY_TIMING_CTRL_11 0x0000046c
441
442#define REG_DSI_8960_PHY_CTRL_0 0x00000470
443
444#define REG_DSI_8960_PHY_CTRL_1 0x00000474
445
446#define REG_DSI_8960_PHY_CTRL_2 0x00000478
447
448#define REG_DSI_8960_PHY_CTRL_3 0x0000047c
449
450#define REG_DSI_8960_PHY_STRENGTH_0 0x00000480
451
452#define REG_DSI_8960_PHY_STRENGTH_1 0x00000484
453
454#define REG_DSI_8960_PHY_STRENGTH_2 0x00000488
455
456#define REG_DSI_8960_PHY_BIST_CTRL_0 0x0000048c
457
458#define REG_DSI_8960_PHY_BIST_CTRL_1 0x00000490
459
460#define REG_DSI_8960_PHY_BIST_CTRL_2 0x00000494
461
462#define REG_DSI_8960_PHY_BIST_CTRL_3 0x00000498
463
464#define REG_DSI_8960_PHY_BIST_CTRL_4 0x0000049c
465
466#define REG_DSI_8960_PHY_LDO_CTRL 0x000004b0
467
468#define REG_DSI_8960_PHY_REGULATOR_CTRL_0 0x00000500
469
470#define REG_DSI_8960_PHY_REGULATOR_CTRL_1 0x00000504
471
472#define REG_DSI_8960_PHY_REGULATOR_CTRL_2 0x00000508
473
474#define REG_DSI_8960_PHY_REGULATOR_CTRL_3 0x0000050c
475
476#define REG_DSI_8960_PHY_REGULATOR_CTRL_4 0x00000510
477
478#define REG_DSI_8960_PHY_REGULATOR_CAL_PWR_CFG 0x00000518
479
480#define REG_DSI_8960_PHY_CAL_HW_TRIGGER 0x00000528
481
482#define REG_DSI_8960_PHY_CAL_SW_CFG_0 0x0000052c
483
484#define REG_DSI_8960_PHY_CAL_SW_CFG_1 0x00000530
485
486#define REG_DSI_8960_PHY_CAL_SW_CFG_2 0x00000534
487
488#define REG_DSI_8960_PHY_CAL_HW_CFG_0 0x00000538
489
490#define REG_DSI_8960_PHY_CAL_HW_CFG_1 0x0000053c
491
492#define REG_DSI_8960_PHY_CAL_HW_CFG_2 0x00000540
493
494#define REG_DSI_8960_PHY_CAL_HW_CFG_3 0x00000544
495
496#define REG_DSI_8960_PHY_CAL_HW_CFG_4 0x00000548
497
498#define REG_DSI_8960_PHY_CAL_STATUS 0x00000550
499#define DSI_8960_PHY_CAL_STATUS_CAL_BUSY 0x00000010
500
501
502#endif /* DSI_XML */
diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
new file mode 100644
index 000000000000..aefc1b8feae9
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
@@ -0,0 +1,114 @@
1#ifndef MMSS_CC_XML
2#define MMSS_CC_XML
3
4/* Autogenerated file, DO NOT EDIT manually!
5
6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng
8git clone git://0x04.net/rules-ng-ng
9
10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36)
14- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
15- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
17- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
18- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15)
19
20Copyright (C) 2013 by the following authors:
21- Rob Clark <robdclark@gmail.com> (robclark)
22
23Permission is hereby granted, free of charge, to any person obtaining
24a copy of this software and associated documentation files (the
25"Software"), to deal in the Software without restriction, including
26without limitation the rights to use, copy, modify, merge, publish,
27distribute, sublicense, and/or sell copies of the Software, and to
28permit persons to whom the Software is furnished to do so, subject to
29the following conditions:
30
31The above copyright notice and this permission notice (including the
32next paragraph) shall be included in all copies or substantial
33portions of the Software.
34
35THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
36EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
37MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
38IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
39LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
40OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
41WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
42*/
43
44
45enum mmss_cc_clk {
46 CLK = 0,
47 PCLK = 1,
48};
49
50#define REG_MMSS_CC_AHB 0x00000008
51
52static inline uint32_t __offset_CLK(enum mmss_cc_clk idx)
53{
54 switch (idx) {
55 case CLK: return 0x0000004c;
56 case PCLK: return 0x00000130;
57 default: return INVALID_IDX(idx);
58 }
59}
60static inline uint32_t REG_MMSS_CC_CLK(enum mmss_cc_clk i0) { return 0x00000000 + __offset_CLK(i0); }
61
62static inline uint32_t REG_MMSS_CC_CLK_CC(enum mmss_cc_clk i0) { return 0x00000000 + __offset_CLK(i0); }
63#define MMSS_CC_CLK_CC_CLK_EN 0x00000001
64#define MMSS_CC_CLK_CC_ROOT_EN 0x00000004
65#define MMSS_CC_CLK_CC_MND_EN 0x00000020
#define MMSS_CC_CLK_CC_MND_MODE__MASK 0x000000c0
#define MMSS_CC_CLK_CC_MND_MODE__SHIFT 6
/* Pack 'val' into the MND_MODE field (bits 6..7) of the CC register. */
static inline uint32_t MMSS_CC_CLK_CC_MND_MODE(uint32_t val)
{
	return MMSS_CC_CLK_CC_MND_MODE__MASK &
		(val << MMSS_CC_CLK_CC_MND_MODE__SHIFT);
}
#define MMSS_CC_CLK_CC_PMXO_SEL__MASK 0x00000300
#define MMSS_CC_CLK_CC_PMXO_SEL__SHIFT 8
/* Pack 'val' into the PMXO_SEL field (bits 8..9) of the CC register. */
static inline uint32_t MMSS_CC_CLK_CC_PMXO_SEL(uint32_t val)
{
	uint32_t field = val << MMSS_CC_CLK_CC_PMXO_SEL__SHIFT;

	return field & MMSS_CC_CLK_CC_PMXO_SEL__MASK;
}
78
79static inline uint32_t REG_MMSS_CC_CLK_MD(enum mmss_cc_clk i0) { return 0x00000004 + __offset_CLK(i0); }
#define MMSS_CC_CLK_MD_D__MASK 0x000000ff
#define MMSS_CC_CLK_MD_D__SHIFT 0
/* Pack 'val' into the D field (bits 0..7) of the MD register. */
static inline uint32_t MMSS_CC_CLK_MD_D(uint32_t val)
{
	return MMSS_CC_CLK_MD_D__MASK & (val << MMSS_CC_CLK_MD_D__SHIFT);
}
#define MMSS_CC_CLK_MD_M__MASK 0x0000ff00
#define MMSS_CC_CLK_MD_M__SHIFT 8
/* Pack 'val' into the M field (bits 8..15) of the MD register. */
static inline uint32_t MMSS_CC_CLK_MD_M(uint32_t val)
{
	uint32_t field = val << MMSS_CC_CLK_MD_M__SHIFT;

	return field & MMSS_CC_CLK_MD_M__MASK;
}
92
93static inline uint32_t REG_MMSS_CC_CLK_NS(enum mmss_cc_clk i0) { return 0x00000008 + __offset_CLK(i0); }
#define MMSS_CC_CLK_NS_SRC__MASK 0x0000000f
#define MMSS_CC_CLK_NS_SRC__SHIFT 0
/* Pack 'val' into the SRC field (bits 0..3) of the NS register. */
static inline uint32_t MMSS_CC_CLK_NS_SRC(uint32_t val)
{
	return MMSS_CC_CLK_NS_SRC__MASK & (val << MMSS_CC_CLK_NS_SRC__SHIFT);
}
#define MMSS_CC_CLK_NS_PRE_DIV_FUNC__MASK 0x00fff000
#define MMSS_CC_CLK_NS_PRE_DIV_FUNC__SHIFT 12
/* Pack 'val' into the PRE_DIV_FUNC field (bits 12..23) of NS. */
static inline uint32_t MMSS_CC_CLK_NS_PRE_DIV_FUNC(uint32_t val)
{
	uint32_t field = val << MMSS_CC_CLK_NS_PRE_DIV_FUNC__SHIFT;

	return field & MMSS_CC_CLK_NS_PRE_DIV_FUNC__MASK;
}
#define MMSS_CC_CLK_NS_VAL__MASK 0xff000000
#define MMSS_CC_CLK_NS_VAL__SHIFT 24
/* Pack 'val' into the VAL field (bits 24..31) of the NS register. */
static inline uint32_t MMSS_CC_CLK_NS_VAL(uint32_t val)
{
	return MMSS_CC_CLK_NS_VAL__MASK & (val << MMSS_CC_CLK_NS_VAL__SHIFT);
}
112
113
114#endif /* MMSS_CC_XML */
diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
new file mode 100644
index 000000000000..a225e8170b2a
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
@@ -0,0 +1,48 @@
1#ifndef SFPB_XML
2#define SFPB_XML
3
4/* Autogenerated file, DO NOT EDIT manually!
5
6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng
8git clone git://0x04.net/rules-ng-ng
9
10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36)
14- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
15- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
17- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
18- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15)
19
20Copyright (C) 2013 by the following authors:
21- Rob Clark <robdclark@gmail.com> (robclark)
22
23Permission is hereby granted, free of charge, to any person obtaining
24a copy of this software and associated documentation files (the
25"Software"), to deal in the Software without restriction, including
26without limitation the rights to use, copy, modify, merge, publish,
27distribute, sublicense, and/or sell copies of the Software, and to
28permit persons to whom the Software is furnished to do so, subject to
29the following conditions:
30
31The above copyright notice and this permission notice (including the
32next paragraph) shall be included in all copies or substantial
33portions of the Software.
34
35THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
36EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
37MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
38IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
39LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
40OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
41WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
42*/
43
44
45#define REG_SFPB_CFG 0x00000058
46
47
48#endif /* SFPB_XML */
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
new file mode 100644
index 000000000000..12ecfb928f75
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -0,0 +1,235 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "hdmi.h"
19
20static struct platform_device *hdmi_pdev;
21
/*
 * Power the HDMI core on or off and select HDMI vs DVI signalling.
 *
 * NOTE(review): when enabling in DVI mode (!hdmi->hdmi_mode) the code
 * deliberately writes CTRL twice -- first with the HDMI bit set, then
 * with it cleared; presumably the hardware requires this toggle when
 * entering DVI mode -- confirm against the MSM HDMI programming guide.
 * When powering off, the final write leaves only the HDMI bit set
 * (ENABLE cleared).
 */
void hdmi_set_mode(struct hdmi *hdmi, bool power_on)
{
	uint32_t ctrl = 0;

	if (power_on) {
		ctrl |= HDMI_CTRL_ENABLE;
		if (!hdmi->hdmi_mode) {
			/* DVI: pulse the HDMI bit; final write has it clear */
			ctrl |= HDMI_CTRL_HDMI;
			hdmi_write(hdmi, REG_HDMI_CTRL, ctrl);
			ctrl &= ~HDMI_CTRL_HDMI;
		} else {
			ctrl |= HDMI_CTRL_HDMI;
		}
	} else {
		ctrl = HDMI_CTRL_HDMI;
	}

	hdmi_write(hdmi, REG_HDMI_CTRL, ctrl);
	DBG("HDMI Core: %s, HDMI_CTRL=0x%08x",
			power_on ? "Enable" : "Disable", ctrl);
}
43
/*
 * Threaded interrupt handler (registered with
 * devm_request_threaded_irq() in hdmi_init()); dispatches the shared
 * HDMI interrupt to the hot-plug-detect and DDC sub-handlers.
 */
static irqreturn_t hdmi_irq(int irq, void *dev_id)
{
	struct hdmi *hdmi = dev_id;

	/* Process HPD: */
	hdmi_connector_irq(hdmi->connector);

	/* Process DDC: */
	hdmi_i2c_irq(hdmi->i2c);

	/* TODO audio.. */

	return IRQ_HANDLED;
}
58
/*
 * Tear down resources acquired by hdmi_init().  phy and i2c may be
 * NULL if hdmi_init() failed part-way (it clears them before jumping
 * to its error path), so both are guarded here.
 */
void hdmi_destroy(struct hdmi *hdmi)
{
	struct hdmi_phy *phy = hdmi->phy;

	if (phy)
		phy->funcs->destroy(phy);

	if (hdmi->i2c)
		hdmi_i2c_destroy(hdmi->i2c);

	/* drop the reference taken by get_device() in hdmi_init() */
	put_device(&hdmi->pdev->dev);
}
71
72/* initialize connector */
73int hdmi_init(struct hdmi *hdmi, struct drm_device *dev,
74 struct drm_connector *connector)
75{
76 struct platform_device *pdev = hdmi_pdev;
77 struct hdmi_platform_config *config;
78 int ret;
79
80 if (!pdev) {
81 dev_err(dev->dev, "no hdmi device\n");
82 ret = -ENXIO;
83 goto fail;
84 }
85
86 config = pdev->dev.platform_data;
87
88 get_device(&pdev->dev);
89
90 hdmi->dev = dev;
91 hdmi->pdev = pdev;
92 hdmi->connector = connector;
93
94 /* not sure about which phy maps to which msm.. probably I miss some */
95 if (config->phy_init)
96 hdmi->phy = config->phy_init(hdmi);
97 else
98 hdmi->phy = ERR_PTR(-ENXIO);
99
100 if (IS_ERR(hdmi->phy)) {
101 ret = PTR_ERR(hdmi->phy);
102 dev_err(dev->dev, "failed to load phy: %d\n", ret);
103 hdmi->phy = NULL;
104 goto fail;
105 }
106
107 hdmi->mmio = msm_ioremap(pdev, "hdmi_msm_hdmi_addr", "HDMI");
108 if (IS_ERR(hdmi->mmio)) {
109 ret = PTR_ERR(hdmi->mmio);
110 goto fail;
111 }
112
113 hdmi->mvs = devm_regulator_get(&pdev->dev, "8901_hdmi_mvs");
114 if (IS_ERR(hdmi->mvs))
115 hdmi->mvs = devm_regulator_get(&pdev->dev, "hdmi_mvs");
116 if (IS_ERR(hdmi->mvs)) {
117 ret = PTR_ERR(hdmi->mvs);
118 dev_err(dev->dev, "failed to get mvs regulator: %d\n", ret);
119 goto fail;
120 }
121
122 hdmi->mpp0 = devm_regulator_get(&pdev->dev, "8901_mpp0");
123 if (IS_ERR(hdmi->mpp0))
124 hdmi->mpp0 = NULL;
125
126 hdmi->clk = devm_clk_get(&pdev->dev, "core_clk");
127 if (IS_ERR(hdmi->clk)) {
128 ret = PTR_ERR(hdmi->clk);
129 dev_err(dev->dev, "failed to get 'clk': %d\n", ret);
130 goto fail;
131 }
132
133 hdmi->m_pclk = devm_clk_get(&pdev->dev, "master_iface_clk");
134 if (IS_ERR(hdmi->m_pclk)) {
135 ret = PTR_ERR(hdmi->m_pclk);
136 dev_err(dev->dev, "failed to get 'm_pclk': %d\n", ret);
137 goto fail;
138 }
139
140 hdmi->s_pclk = devm_clk_get(&pdev->dev, "slave_iface_clk");
141 if (IS_ERR(hdmi->s_pclk)) {
142 ret = PTR_ERR(hdmi->s_pclk);
143 dev_err(dev->dev, "failed to get 's_pclk': %d\n", ret);
144 goto fail;
145 }
146
147 hdmi->i2c = hdmi_i2c_init(hdmi);
148 if (IS_ERR(hdmi->i2c)) {
149 ret = PTR_ERR(hdmi->i2c);
150 dev_err(dev->dev, "failed to get i2c: %d\n", ret);
151 hdmi->i2c = NULL;
152 goto fail;
153 }
154
155 hdmi->irq = platform_get_irq(pdev, 0);
156 if (hdmi->irq < 0) {
157 ret = hdmi->irq;
158 dev_err(dev->dev, "failed to get irq: %d\n", ret);
159 goto fail;
160 }
161
162 ret = devm_request_threaded_irq(&pdev->dev, hdmi->irq,
163 NULL, hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
164 "hdmi_isr", hdmi);
165 if (ret < 0) {
166 dev_err(dev->dev, "failed to request IRQ%u: %d\n",
167 hdmi->irq, ret);
168 goto fail;
169 }
170
171 return 0;
172
173fail:
174 if (hdmi)
175 hdmi_destroy(hdmi);
176
177 return ret;
178}
179
180/*
181 * The hdmi device:
182 */
183
/*
 * Probe for the "hdmi_msm" platform device: fill in the per-SoC
 * platform config (phy variant plus DDC/HPD gpio numbers) and stash
 * the pdev in hdmi_pdev for hdmi_init() to pick up later.
 */
static int hdmi_dev_probe(struct platform_device *pdev)
{
	/* static: must outlive probe, since pdev->dev.platform_data
	 * keeps pointing at it */
	static struct hdmi_platform_config config = {};
#ifdef CONFIG_OF
	/* TODO */
#else
	if (cpu_is_apq8064()) {
		config.phy_init = hdmi_phy_8960_init;
		config.ddc_clk_gpio = 70;
		config.ddc_data_gpio = 71;
		config.hpd_gpio = 72;
		config.pmic_gpio = 13 + NR_GPIO_IRQS;
	} else if (cpu_is_msm8960()) {
		config.phy_init = hdmi_phy_8960_init;
		config.ddc_clk_gpio = 100;
		config.ddc_data_gpio = 101;
		config.hpd_gpio = 102;
		config.pmic_gpio = -1;		/* -1 == not used */
	} else if (cpu_is_msm8x60()) {
		config.phy_init = hdmi_phy_8x60_init;
		config.ddc_clk_gpio = 170;
		config.ddc_data_gpio = 171;
		config.hpd_gpio = 172;
		config.pmic_gpio = -1;
	}
#endif
	pdev->dev.platform_data = &config;
	hdmi_pdev = pdev;	/* consumed by hdmi_init() */
	return 0;
}
214
/* Forget the device so a later hdmi_init() fails with -ENXIO. */
static int hdmi_dev_remove(struct platform_device *pdev)
{
	hdmi_pdev = NULL;
	return 0;
}
220
/* Driver binding for the "hdmi_msm" platform device. */
static struct platform_driver hdmi_driver = {
	.probe = hdmi_dev_probe,
	.remove = hdmi_dev_remove,
	.driver.name = "hdmi_msm",
};
226
/* Register the hdmi platform driver (called from msm driver init).
 * NOTE(review): the platform_driver_register() return value is
 * ignored; the prototype is void so it cannot be propagated here. */
void __init hdmi_register(void)
{
	platform_driver_register(&hdmi_driver);
}
231
/* Unregister the hdmi platform driver (called from msm driver exit). */
void __exit hdmi_unregister(void)
{
	platform_driver_unregister(&hdmi_driver);
}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
new file mode 100644
index 000000000000..34703fea22ca
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -0,0 +1,112 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __HDMI_CONNECTOR_H__
19#define __HDMI_CONNECTOR_H__
20
21#include <linux/i2c.h>
22#include <linux/clk.h>
23#include <linux/platform_device.h>
24#include <linux/regulator/consumer.h>
25
26#include "msm_drv.h"
27#include "hdmi.xml.h"
28
29
30struct hdmi_phy;
31
/* Per-connector HDMI state, filled in by hdmi_init(). */
struct hdmi {
	struct drm_device *dev;
	struct platform_device *pdev;	/* the "hdmi_msm" device (ref held) */

	void __iomem *mmio;		/* HDMI core register block */

	struct regulator *mvs; /* HDMI_5V */
	struct regulator *mpp0; /* External 5V */

	struct clk *clk;		/* core clock */
	struct clk *m_pclk;		/* master iface clock */
	struct clk *s_pclk;		/* slave iface clock */

	struct hdmi_phy *phy;		/* generation-specific phy */
	struct i2c_adapter *i2c;	/* DDC adapter */
	struct drm_connector *connector;

	bool hdmi_mode; /* are we in hdmi mode? (vs DVI) */

	int irq;
};
53
/* platform config data (ie. from DT, or pdata) */
struct hdmi_platform_config {
	/* generation-specific phy constructor, see hdmi_phy_*_init() */
	struct hdmi_phy *(*phy_init)(struct hdmi *hdmi);
	/* gpio numbers; -1 in pmic_gpio means "not used" */
	int ddc_clk_gpio, ddc_data_gpio, hpd_gpio, pmic_gpio;
};
59
60void hdmi_set_mode(struct hdmi *hdmi, bool power_on);
61void hdmi_destroy(struct hdmi *hdmi);
62int hdmi_init(struct hdmi *hdmi, struct drm_device *dev,
63 struct drm_connector *connector);
64
/* Write a 32-bit value to an HDMI core register at byte offset 'reg'. */
static inline void hdmi_write(struct hdmi *hdmi, u32 reg, u32 data)
{
	msm_writel(data, hdmi->mmio + reg);
}
69
/* Read a 32-bit value from an HDMI core register at byte offset 'reg'. */
static inline u32 hdmi_read(struct hdmi *hdmi, u32 reg)
{
	return msm_readl(hdmi->mmio + reg);
}
74
75/*
76 * The phy appears to be different, for example between 8960 and 8x60,
77 * so split the phy related functions out and load the correct one at
78 * runtime:
79 */
80
/* Ops vtable for a generation-specific phy implementation. */
struct hdmi_phy_funcs {
	void (*destroy)(struct hdmi_phy *phy);
	void (*reset)(struct hdmi_phy *phy);
	/* 'pixclock' is the pixel clock rate for the mode being set */
	void (*powerup)(struct hdmi_phy *phy, unsigned long int pixclock);
	void (*powerdown)(struct hdmi_phy *phy);
};
87
88struct hdmi_phy {
89 const struct hdmi_phy_funcs *funcs;
90};
91
92/*
93 * phy can be different on different generations:
94 */
95struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi);
96struct hdmi_phy *hdmi_phy_8x60_init(struct hdmi *hdmi);
97
98/*
99 * hdmi connector:
100 */
101
102void hdmi_connector_irq(struct drm_connector *connector);
103
104/*
105 * i2c adapter for ddc:
106 */
107
108void hdmi_i2c_irq(struct i2c_adapter *i2c);
109void hdmi_i2c_destroy(struct i2c_adapter *i2c);
110struct i2c_adapter *hdmi_i2c_init(struct hdmi *hdmi);
111
112#endif /* __HDMI_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
new file mode 100644
index 000000000000..f5fa4865e059
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
@@ -0,0 +1,508 @@
1#ifndef HDMI_XML
2#define HDMI_XML
3
4/* Autogenerated file, DO NOT EDIT manually!
5
6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng
8git clone git://0x04.net/rules-ng-ng
9
10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36)
14- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
15- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
17- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
18- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15)
19
20Copyright (C) 2013 by the following authors:
21- Rob Clark <robdclark@gmail.com> (robclark)
22
23Permission is hereby granted, free of charge, to any person obtaining
24a copy of this software and associated documentation files (the
25"Software"), to deal in the Software without restriction, including
26without limitation the rights to use, copy, modify, merge, publish,
27distribute, sublicense, and/or sell copies of the Software, and to
28permit persons to whom the Software is furnished to do so, subject to
29the following conditions:
30
31The above copyright notice and this permission notice (including the
32next paragraph) shall be included in all copies or substantial
33portions of the Software.
34
35THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
36EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
37MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
38IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
39LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
40OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
41WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
42*/
43
44
45enum hdmi_hdcp_key_state {
46 NO_KEYS = 0,
47 NOT_CHECKED = 1,
48 CHECKING = 2,
49 KEYS_VALID = 3,
50 AKSV_INVALID = 4,
51 CHECKSUM_MISMATCH = 5,
52};
53
54enum hdmi_ddc_read_write {
55 DDC_WRITE = 0,
56 DDC_READ = 1,
57};
58
59enum hdmi_acr_cts {
60 ACR_NONE = 0,
61 ACR_32 = 1,
62 ACR_44 = 2,
63 ACR_48 = 3,
64};
65
66#define REG_HDMI_CTRL 0x00000000
67#define HDMI_CTRL_ENABLE 0x00000001
68#define HDMI_CTRL_HDMI 0x00000002
69#define HDMI_CTRL_ENCRYPTED 0x00000004
70
71#define REG_HDMI_AUDIO_PKT_CTRL1 0x00000020
72#define HDMI_AUDIO_PKT_CTRL1_AUDIO_SAMPLE_SEND 0x00000001
73
74#define REG_HDMI_ACR_PKT_CTRL 0x00000024
75#define HDMI_ACR_PKT_CTRL_CONT 0x00000001
76#define HDMI_ACR_PKT_CTRL_SEND 0x00000002
#define HDMI_ACR_PKT_CTRL_SELECT__MASK 0x00000030
#define HDMI_ACR_PKT_CTRL_SELECT__SHIFT 4
/* Encode the ACR CTS select (enum hdmi_acr_cts) into bits 4..5 of
 * REG_HDMI_ACR_PKT_CTRL. */
static inline uint32_t HDMI_ACR_PKT_CTRL_SELECT(enum hdmi_acr_cts val)
{
	return ((val) << HDMI_ACR_PKT_CTRL_SELECT__SHIFT) & HDMI_ACR_PKT_CTRL_SELECT__MASK;
}
83#define HDMI_ACR_PKT_CTRL_SOURCE 0x00000100
#define HDMI_ACR_PKT_CTRL_N_MULTIPLIER__MASK 0x00070000
#define HDMI_ACR_PKT_CTRL_N_MULTIPLIER__SHIFT 16
/* Pack 'val' into the N_MULTIPLIER field (bits 16..18). */
static inline uint32_t HDMI_ACR_PKT_CTRL_N_MULTIPLIER(uint32_t val)
{
	uint32_t field = val << HDMI_ACR_PKT_CTRL_N_MULTIPLIER__SHIFT;

	return field & HDMI_ACR_PKT_CTRL_N_MULTIPLIER__MASK;
}
90#define HDMI_ACR_PKT_CTRL_AUDIO_PRIORITY 0x80000000
91
92#define REG_HDMI_VBI_PKT_CTRL 0x00000028
93#define HDMI_VBI_PKT_CTRL_GC_ENABLE 0x00000010
94#define HDMI_VBI_PKT_CTRL_GC_EVERY_FRAME 0x00000020
95#define HDMI_VBI_PKT_CTRL_ISRC_SEND 0x00000100
96#define HDMI_VBI_PKT_CTRL_ISRC_CONTINUOUS 0x00000200
97#define HDMI_VBI_PKT_CTRL_ACP_SEND 0x00001000
98#define HDMI_VBI_PKT_CTRL_ACP_SRC_SW 0x00002000
99
100#define REG_HDMI_INFOFRAME_CTRL0 0x0000002c
101#define HDMI_INFOFRAME_CTRL0_AVI_SEND 0x00000001
102#define HDMI_INFOFRAME_CTRL0_AVI_CONT 0x00000002
103#define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SEND 0x00000010
104#define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_CONT 0x00000020
105#define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SOURCE 0x00000040
106#define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_UPDATE 0x00000080
107
108#define REG_HDMI_GEN_PKT_CTRL 0x00000034
109#define HDMI_GEN_PKT_CTRL_GENERIC0_SEND 0x00000001
110#define HDMI_GEN_PKT_CTRL_GENERIC0_CONT 0x00000002
#define HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE__MASK 0x0000000c
#define HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE__SHIFT 2
/* Pack 'val' into the GENERIC0_UPDATE field (bits 2..3). */
static inline uint32_t HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE(uint32_t val)
{
	return HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE__MASK &
		(val << HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE__SHIFT);
}
117#define HDMI_GEN_PKT_CTRL_GENERIC1_SEND 0x00000010
118#define HDMI_GEN_PKT_CTRL_GENERIC1_CONT 0x00000020
119#define HDMI_GEN_PKT_CTRL_GENERIC0_LINE__MASK 0x003f0000
120#define HDMI_GEN_PKT_CTRL_GENERIC0_LINE__SHIFT 16
121static inline uint32_t HDMI_GEN_PKT_CTRL_GENERIC0_LINE(uint32_t val)
122{
123 return ((val) << HDMI_GEN_PKT_CTRL_GENERIC0_LINE__SHIFT) & HDMI_GEN_PKT_CTRL_GENERIC0_LINE__MASK;
124}
125#define HDMI_GEN_PKT_CTRL_GENERIC1_LINE__MASK 0x3f000000
126#define HDMI_GEN_PKT_CTRL_GENERIC1_LINE__SHIFT 24
127static inline uint32_t HDMI_GEN_PKT_CTRL_GENERIC1_LINE(uint32_t val)
128{
129 return ((val) << HDMI_GEN_PKT_CTRL_GENERIC1_LINE__SHIFT) & HDMI_GEN_PKT_CTRL_GENERIC1_LINE__MASK;
130}
131
132#define REG_HDMI_GC 0x00000040
133#define HDMI_GC_MUTE 0x00000001
134
135#define REG_HDMI_AUDIO_PKT_CTRL2 0x00000044
136#define HDMI_AUDIO_PKT_CTRL2_OVERRIDE 0x00000001
137#define HDMI_AUDIO_PKT_CTRL2_LAYOUT 0x00000002
138
139static inline uint32_t REG_HDMI_AVI_INFO(uint32_t i0) { return 0x0000006c + 0x4*i0; }
140
141#define REG_HDMI_GENERIC0_HDR 0x00000084
142
143static inline uint32_t REG_HDMI_GENERIC0(uint32_t i0) { return 0x00000088 + 0x4*i0; }
144
145#define REG_HDMI_GENERIC1_HDR 0x000000a4
146
147static inline uint32_t REG_HDMI_GENERIC1(uint32_t i0) { return 0x000000a8 + 0x4*i0; }
148
149static inline uint32_t REG_HDMI_ACR(uint32_t i0) { return 0x000000c4 + 0x8*i0; }
150
151static inline uint32_t REG_HDMI_ACR_0(uint32_t i0) { return 0x000000c4 + 0x8*i0; }
#define HDMI_ACR_0_CTS__MASK 0xfffff000
#define HDMI_ACR_0_CTS__SHIFT 12
/* Pack 'val' into the CTS field (bits 12..31) of the ACR_0 register. */
static inline uint32_t HDMI_ACR_0_CTS(uint32_t val)
{
	uint32_t field = val << HDMI_ACR_0_CTS__SHIFT;

	return field & HDMI_ACR_0_CTS__MASK;
}
158
159static inline uint32_t REG_HDMI_ACR_1(uint32_t i0) { return 0x000000c8 + 0x8*i0; }
160#define HDMI_ACR_1_N__MASK 0xffffffff
161#define HDMI_ACR_1_N__SHIFT 0
162static inline uint32_t HDMI_ACR_1_N(uint32_t val)
163{
164 return ((val) << HDMI_ACR_1_N__SHIFT) & HDMI_ACR_1_N__MASK;
165}
166
167#define REG_HDMI_AUDIO_INFO0 0x000000e4
168#define HDMI_AUDIO_INFO0_CHECKSUM__MASK 0x000000ff
169#define HDMI_AUDIO_INFO0_CHECKSUM__SHIFT 0
170static inline uint32_t HDMI_AUDIO_INFO0_CHECKSUM(uint32_t val)
171{
172 return ((val) << HDMI_AUDIO_INFO0_CHECKSUM__SHIFT) & HDMI_AUDIO_INFO0_CHECKSUM__MASK;
173}
174#define HDMI_AUDIO_INFO0_CC__MASK 0x00000700
175#define HDMI_AUDIO_INFO0_CC__SHIFT 8
176static inline uint32_t HDMI_AUDIO_INFO0_CC(uint32_t val)
177{
178 return ((val) << HDMI_AUDIO_INFO0_CC__SHIFT) & HDMI_AUDIO_INFO0_CC__MASK;
179}
180
181#define REG_HDMI_AUDIO_INFO1 0x000000e8
182#define HDMI_AUDIO_INFO1_CA__MASK 0x000000ff
183#define HDMI_AUDIO_INFO1_CA__SHIFT 0
184static inline uint32_t HDMI_AUDIO_INFO1_CA(uint32_t val)
185{
186 return ((val) << HDMI_AUDIO_INFO1_CA__SHIFT) & HDMI_AUDIO_INFO1_CA__MASK;
187}
188#define HDMI_AUDIO_INFO1_LSV__MASK 0x00007800
189#define HDMI_AUDIO_INFO1_LSV__SHIFT 11
190static inline uint32_t HDMI_AUDIO_INFO1_LSV(uint32_t val)
191{
192 return ((val) << HDMI_AUDIO_INFO1_LSV__SHIFT) & HDMI_AUDIO_INFO1_LSV__MASK;
193}
194#define HDMI_AUDIO_INFO1_DM_INH 0x00008000
195
196#define REG_HDMI_HDCP_CTRL 0x00000110
197#define HDMI_HDCP_CTRL_ENABLE 0x00000001
198#define HDMI_HDCP_CTRL_ENCRYPTION_ENABLE 0x00000100
199
200#define REG_HDMI_HDCP_INT_CTRL 0x00000118
201
202#define REG_HDMI_HDCP_LINK0_STATUS 0x0000011c
203#define HDMI_HDCP_LINK0_STATUS_AN_0_READY 0x00000100
204#define HDMI_HDCP_LINK0_STATUS_AN_1_READY 0x00000200
205#define HDMI_HDCP_LINK0_STATUS_KEY_STATE__MASK 0x70000000
206#define HDMI_HDCP_LINK0_STATUS_KEY_STATE__SHIFT 28
207static inline uint32_t HDMI_HDCP_LINK0_STATUS_KEY_STATE(enum hdmi_hdcp_key_state val)
208{
209 return ((val) << HDMI_HDCP_LINK0_STATUS_KEY_STATE__SHIFT) & HDMI_HDCP_LINK0_STATUS_KEY_STATE__MASK;
210}
211
212#define REG_HDMI_HDCP_RESET 0x00000130
213#define HDMI_HDCP_RESET_LINK0_DEAUTHENTICATE 0x00000001
214
215#define REG_HDMI_AUDIO_CFG 0x000001d0
216#define HDMI_AUDIO_CFG_ENGINE_ENABLE 0x00000001
217#define HDMI_AUDIO_CFG_FIFO_WATERMARK__MASK 0x000000f0
218#define HDMI_AUDIO_CFG_FIFO_WATERMARK__SHIFT 4
219static inline uint32_t HDMI_AUDIO_CFG_FIFO_WATERMARK(uint32_t val)
220{
221 return ((val) << HDMI_AUDIO_CFG_FIFO_WATERMARK__SHIFT) & HDMI_AUDIO_CFG_FIFO_WATERMARK__MASK;
222}
223
224#define REG_HDMI_USEC_REFTIMER 0x00000208
225
226#define REG_HDMI_DDC_CTRL 0x0000020c
227#define HDMI_DDC_CTRL_GO 0x00000001
228#define HDMI_DDC_CTRL_SOFT_RESET 0x00000002
229#define HDMI_DDC_CTRL_SEND_RESET 0x00000004
230#define HDMI_DDC_CTRL_SW_STATUS_RESET 0x00000008
231#define HDMI_DDC_CTRL_TRANSACTION_CNT__MASK 0x00300000
232#define HDMI_DDC_CTRL_TRANSACTION_CNT__SHIFT 20
233static inline uint32_t HDMI_DDC_CTRL_TRANSACTION_CNT(uint32_t val)
234{
235 return ((val) << HDMI_DDC_CTRL_TRANSACTION_CNT__SHIFT) & HDMI_DDC_CTRL_TRANSACTION_CNT__MASK;
236}
237
238#define REG_HDMI_DDC_INT_CTRL 0x00000214
239#define HDMI_DDC_INT_CTRL_SW_DONE_INT 0x00000001
240#define HDMI_DDC_INT_CTRL_SW_DONE_ACK 0x00000002
241#define HDMI_DDC_INT_CTRL_SW_DONE_MASK 0x00000004
242
243#define REG_HDMI_DDC_SW_STATUS 0x00000218
244#define HDMI_DDC_SW_STATUS_NACK0 0x00001000
245#define HDMI_DDC_SW_STATUS_NACK1 0x00002000
246#define HDMI_DDC_SW_STATUS_NACK2 0x00004000
247#define HDMI_DDC_SW_STATUS_NACK3 0x00008000
248
249#define REG_HDMI_DDC_HW_STATUS 0x0000021c
250
251#define REG_HDMI_DDC_SPEED 0x00000220
252#define HDMI_DDC_SPEED_THRESHOLD__MASK 0x00000003
253#define HDMI_DDC_SPEED_THRESHOLD__SHIFT 0
254static inline uint32_t HDMI_DDC_SPEED_THRESHOLD(uint32_t val)
255{
256 return ((val) << HDMI_DDC_SPEED_THRESHOLD__SHIFT) & HDMI_DDC_SPEED_THRESHOLD__MASK;
257}
258#define HDMI_DDC_SPEED_PRESCALE__MASK 0xffff0000
259#define HDMI_DDC_SPEED_PRESCALE__SHIFT 16
260static inline uint32_t HDMI_DDC_SPEED_PRESCALE(uint32_t val)
261{
262 return ((val) << HDMI_DDC_SPEED_PRESCALE__SHIFT) & HDMI_DDC_SPEED_PRESCALE__MASK;
263}
264
265#define REG_HDMI_DDC_SETUP 0x00000224
266#define HDMI_DDC_SETUP_TIMEOUT__MASK 0xff000000
267#define HDMI_DDC_SETUP_TIMEOUT__SHIFT 24
268static inline uint32_t HDMI_DDC_SETUP_TIMEOUT(uint32_t val)
269{
270 return ((val) << HDMI_DDC_SETUP_TIMEOUT__SHIFT) & HDMI_DDC_SETUP_TIMEOUT__MASK;
271}
272
273static inline uint32_t REG_HDMI_I2C_TRANSACTION(uint32_t i0) { return 0x00000228 + 0x4*i0; }
274
275static inline uint32_t REG_HDMI_I2C_TRANSACTION_REG(uint32_t i0) { return 0x00000228 + 0x4*i0; }
276#define HDMI_I2C_TRANSACTION_REG_RW__MASK 0x00000001
277#define HDMI_I2C_TRANSACTION_REG_RW__SHIFT 0
278static inline uint32_t HDMI_I2C_TRANSACTION_REG_RW(enum hdmi_ddc_read_write val)
279{
280 return ((val) << HDMI_I2C_TRANSACTION_REG_RW__SHIFT) & HDMI_I2C_TRANSACTION_REG_RW__MASK;
281}
282#define HDMI_I2C_TRANSACTION_REG_STOP_ON_NACK 0x00000100
283#define HDMI_I2C_TRANSACTION_REG_START 0x00001000
284#define HDMI_I2C_TRANSACTION_REG_STOP 0x00002000
285#define HDMI_I2C_TRANSACTION_REG_CNT__MASK 0x00ff0000
286#define HDMI_I2C_TRANSACTION_REG_CNT__SHIFT 16
287static inline uint32_t HDMI_I2C_TRANSACTION_REG_CNT(uint32_t val)
288{
289 return ((val) << HDMI_I2C_TRANSACTION_REG_CNT__SHIFT) & HDMI_I2C_TRANSACTION_REG_CNT__MASK;
290}
291
292#define REG_HDMI_DDC_DATA 0x00000238
293#define HDMI_DDC_DATA_DATA_RW__MASK 0x00000001
294#define HDMI_DDC_DATA_DATA_RW__SHIFT 0
295static inline uint32_t HDMI_DDC_DATA_DATA_RW(enum hdmi_ddc_read_write val)
296{
297 return ((val) << HDMI_DDC_DATA_DATA_RW__SHIFT) & HDMI_DDC_DATA_DATA_RW__MASK;
298}
299#define HDMI_DDC_DATA_DATA__MASK 0x0000ff00
300#define HDMI_DDC_DATA_DATA__SHIFT 8
301static inline uint32_t HDMI_DDC_DATA_DATA(uint32_t val)
302{
303 return ((val) << HDMI_DDC_DATA_DATA__SHIFT) & HDMI_DDC_DATA_DATA__MASK;
304}
305#define HDMI_DDC_DATA_INDEX__MASK 0x00ff0000
306#define HDMI_DDC_DATA_INDEX__SHIFT 16
307static inline uint32_t HDMI_DDC_DATA_INDEX(uint32_t val)
308{
309 return ((val) << HDMI_DDC_DATA_INDEX__SHIFT) & HDMI_DDC_DATA_INDEX__MASK;
310}
311#define HDMI_DDC_DATA_INDEX_WRITE 0x80000000
312
313#define REG_HDMI_HPD_INT_STATUS 0x00000250
314#define HDMI_HPD_INT_STATUS_INT 0x00000001
315#define HDMI_HPD_INT_STATUS_CABLE_DETECTED 0x00000002
316
317#define REG_HDMI_HPD_INT_CTRL 0x00000254
318#define HDMI_HPD_INT_CTRL_INT_ACK 0x00000001
319#define HDMI_HPD_INT_CTRL_INT_CONNECT 0x00000002
320#define HDMI_HPD_INT_CTRL_INT_EN 0x00000004
321#define HDMI_HPD_INT_CTRL_RX_INT_ACK 0x00000010
322#define HDMI_HPD_INT_CTRL_RX_INT_EN 0x00000020
323#define HDMI_HPD_INT_CTRL_RCV_PLUGIN_DET_MASK 0x00000200
324
325#define REG_HDMI_HPD_CTRL 0x00000258
326#define HDMI_HPD_CTRL_TIMEOUT__MASK 0x00001fff
327#define HDMI_HPD_CTRL_TIMEOUT__SHIFT 0
328static inline uint32_t HDMI_HPD_CTRL_TIMEOUT(uint32_t val)
329{
330 return ((val) << HDMI_HPD_CTRL_TIMEOUT__SHIFT) & HDMI_HPD_CTRL_TIMEOUT__MASK;
331}
332#define HDMI_HPD_CTRL_ENABLE 0x10000000
333
334#define REG_HDMI_DDC_REF 0x0000027c
335#define HDMI_DDC_REF_REFTIMER_ENABLE 0x00010000
336#define HDMI_DDC_REF_REFTIMER__MASK 0x0000ffff
337#define HDMI_DDC_REF_REFTIMER__SHIFT 0
338static inline uint32_t HDMI_DDC_REF_REFTIMER(uint32_t val)
339{
340 return ((val) << HDMI_DDC_REF_REFTIMER__SHIFT) & HDMI_DDC_REF_REFTIMER__MASK;
341}
342
343#define REG_HDMI_ACTIVE_HSYNC 0x000002b4
344#define HDMI_ACTIVE_HSYNC_START__MASK 0x00000fff
345#define HDMI_ACTIVE_HSYNC_START__SHIFT 0
346static inline uint32_t HDMI_ACTIVE_HSYNC_START(uint32_t val)
347{
348 return ((val) << HDMI_ACTIVE_HSYNC_START__SHIFT) & HDMI_ACTIVE_HSYNC_START__MASK;
349}
350#define HDMI_ACTIVE_HSYNC_END__MASK 0x0fff0000
351#define HDMI_ACTIVE_HSYNC_END__SHIFT 16
352static inline uint32_t HDMI_ACTIVE_HSYNC_END(uint32_t val)
353{
354 return ((val) << HDMI_ACTIVE_HSYNC_END__SHIFT) & HDMI_ACTIVE_HSYNC_END__MASK;
355}
356
357#define REG_HDMI_ACTIVE_VSYNC 0x000002b8
358#define HDMI_ACTIVE_VSYNC_START__MASK 0x00000fff
359#define HDMI_ACTIVE_VSYNC_START__SHIFT 0
360static inline uint32_t HDMI_ACTIVE_VSYNC_START(uint32_t val)
361{
362 return ((val) << HDMI_ACTIVE_VSYNC_START__SHIFT) & HDMI_ACTIVE_VSYNC_START__MASK;
363}
364#define HDMI_ACTIVE_VSYNC_END__MASK 0x0fff0000
365#define HDMI_ACTIVE_VSYNC_END__SHIFT 16
366static inline uint32_t HDMI_ACTIVE_VSYNC_END(uint32_t val)
367{
368 return ((val) << HDMI_ACTIVE_VSYNC_END__SHIFT) & HDMI_ACTIVE_VSYNC_END__MASK;
369}
370
371#define REG_HDMI_VSYNC_ACTIVE_F2 0x000002bc
372#define HDMI_VSYNC_ACTIVE_F2_START__MASK 0x00000fff
373#define HDMI_VSYNC_ACTIVE_F2_START__SHIFT 0
374static inline uint32_t HDMI_VSYNC_ACTIVE_F2_START(uint32_t val)
375{
376 return ((val) << HDMI_VSYNC_ACTIVE_F2_START__SHIFT) & HDMI_VSYNC_ACTIVE_F2_START__MASK;
377}
378#define HDMI_VSYNC_ACTIVE_F2_END__MASK 0x0fff0000
379#define HDMI_VSYNC_ACTIVE_F2_END__SHIFT 16
380static inline uint32_t HDMI_VSYNC_ACTIVE_F2_END(uint32_t val)
381{
382 return ((val) << HDMI_VSYNC_ACTIVE_F2_END__SHIFT) & HDMI_VSYNC_ACTIVE_F2_END__MASK;
383}
384
385#define REG_HDMI_TOTAL 0x000002c0
386#define HDMI_TOTAL_H_TOTAL__MASK 0x00000fff
387#define HDMI_TOTAL_H_TOTAL__SHIFT 0
388static inline uint32_t HDMI_TOTAL_H_TOTAL(uint32_t val)
389{
390 return ((val) << HDMI_TOTAL_H_TOTAL__SHIFT) & HDMI_TOTAL_H_TOTAL__MASK;
391}
392#define HDMI_TOTAL_V_TOTAL__MASK 0x0fff0000
393#define HDMI_TOTAL_V_TOTAL__SHIFT 16
394static inline uint32_t HDMI_TOTAL_V_TOTAL(uint32_t val)
395{
396 return ((val) << HDMI_TOTAL_V_TOTAL__SHIFT) & HDMI_TOTAL_V_TOTAL__MASK;
397}
398
399#define REG_HDMI_VSYNC_TOTAL_F2 0x000002c4
400#define HDMI_VSYNC_TOTAL_F2_V_TOTAL__MASK 0x00000fff
401#define HDMI_VSYNC_TOTAL_F2_V_TOTAL__SHIFT 0
402static inline uint32_t HDMI_VSYNC_TOTAL_F2_V_TOTAL(uint32_t val)
403{
404 return ((val) << HDMI_VSYNC_TOTAL_F2_V_TOTAL__SHIFT) & HDMI_VSYNC_TOTAL_F2_V_TOTAL__MASK;
405}
406
407#define REG_HDMI_FRAME_CTRL 0x000002c8
408#define HDMI_FRAME_CTRL_RGB_MUX_SEL_BGR 0x00001000
409#define HDMI_FRAME_CTRL_VSYNC_LOW 0x10000000
410#define HDMI_FRAME_CTRL_HSYNC_LOW 0x20000000
411#define HDMI_FRAME_CTRL_INTERLACED_EN 0x80000000
412
413#define REG_HDMI_PHY_CTRL 0x000002d4
414#define HDMI_PHY_CTRL_SW_RESET_PLL 0x00000001
415#define HDMI_PHY_CTRL_SW_RESET_PLL_LOW 0x00000002
416#define HDMI_PHY_CTRL_SW_RESET 0x00000004
417#define HDMI_PHY_CTRL_SW_RESET_LOW 0x00000008
418
419#define REG_HDMI_AUD_INT 0x000002cc
420#define HDMI_AUD_INT_AUD_FIFO_URUN_INT 0x00000001
421#define HDMI_AUD_INT_AUD_FIFO_URAN_MASK 0x00000002
422#define HDMI_AUD_INT_AUD_SAM_DROP_INT 0x00000004
423#define HDMI_AUD_INT_AUD_SAM_DROP_MASK 0x00000008
424
425#define REG_HDMI_8x60_PHY_REG0 0x00000300
426#define HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__MASK 0x0000001c
427#define HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__SHIFT 2
428static inline uint32_t HDMI_8x60_PHY_REG0_DESER_DEL_CTRL(uint32_t val)
429{
430 return ((val) << HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__SHIFT) & HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__MASK;
431}
432
433#define REG_HDMI_8x60_PHY_REG1 0x00000304
434#define HDMI_8x60_PHY_REG1_DTEST_MUX_SEL__MASK 0x000000f0
435#define HDMI_8x60_PHY_REG1_DTEST_MUX_SEL__SHIFT 4
436static inline uint32_t HDMI_8x60_PHY_REG1_DTEST_MUX_SEL(uint32_t val)
437{
438 return ((val) << HDMI_8x60_PHY_REG1_DTEST_MUX_SEL__SHIFT) & HDMI_8x60_PHY_REG1_DTEST_MUX_SEL__MASK;
439}
440#define HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL__MASK 0x0000000f
441#define HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL__SHIFT 0
442static inline uint32_t HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(uint32_t val)
443{
444 return ((val) << HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL__SHIFT) & HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL__MASK;
445}
446
447#define REG_HDMI_8x60_PHY_REG2 0x00000308
448#define HDMI_8x60_PHY_REG2_PD_DESER 0x00000001
449#define HDMI_8x60_PHY_REG2_PD_DRIVE_1 0x00000002
450#define HDMI_8x60_PHY_REG2_PD_DRIVE_2 0x00000004
451#define HDMI_8x60_PHY_REG2_PD_DRIVE_3 0x00000008
452#define HDMI_8x60_PHY_REG2_PD_DRIVE_4 0x00000010
453#define HDMI_8x60_PHY_REG2_PD_PLL 0x00000020
454#define HDMI_8x60_PHY_REG2_PD_PWRGEN 0x00000040
455#define HDMI_8x60_PHY_REG2_RCV_SENSE_EN 0x00000080
456
457#define REG_HDMI_8x60_PHY_REG3 0x0000030c
458#define HDMI_8x60_PHY_REG3_PLL_ENABLE 0x00000001
459
460#define REG_HDMI_8x60_PHY_REG4 0x00000310
461
462#define REG_HDMI_8x60_PHY_REG5 0x00000314
463
464#define REG_HDMI_8x60_PHY_REG6 0x00000318
465
466#define REG_HDMI_8x60_PHY_REG7 0x0000031c
467
468#define REG_HDMI_8x60_PHY_REG8 0x00000320
469
470#define REG_HDMI_8x60_PHY_REG9 0x00000324
471
472#define REG_HDMI_8x60_PHY_REG10 0x00000328
473
474#define REG_HDMI_8x60_PHY_REG11 0x0000032c
475
476#define REG_HDMI_8x60_PHY_REG12 0x00000330
477#define HDMI_8x60_PHY_REG12_RETIMING_EN 0x00000001
478#define HDMI_8x60_PHY_REG12_PLL_LOCK_DETECT_EN 0x00000002
479#define HDMI_8x60_PHY_REG12_FORCE_LOCK 0x00000010
480
481#define REG_HDMI_8960_PHY_REG0 0x00000400
482
483#define REG_HDMI_8960_PHY_REG1 0x00000404
484
485#define REG_HDMI_8960_PHY_REG2 0x00000408
486
487#define REG_HDMI_8960_PHY_REG3 0x0000040c
488
489#define REG_HDMI_8960_PHY_REG4 0x00000410
490
491#define REG_HDMI_8960_PHY_REG5 0x00000414
492
493#define REG_HDMI_8960_PHY_REG6 0x00000418
494
495#define REG_HDMI_8960_PHY_REG7 0x0000041c
496
497#define REG_HDMI_8960_PHY_REG8 0x00000420
498
499#define REG_HDMI_8960_PHY_REG9 0x00000424
500
501#define REG_HDMI_8960_PHY_REG10 0x00000428
502
503#define REG_HDMI_8960_PHY_REG11 0x0000042c
504
505#define REG_HDMI_8960_PHY_REG12 0x00000430
506
507
508#endif /* HDMI_XML */
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
new file mode 100644
index 000000000000..7d63f5ffa7ba
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -0,0 +1,461 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include <linux/gpio.h>
19
20#include "msm_connector.h"
21#include "hdmi.h"
22
/* Per-connector state for the MSM HDMI output. */
struct hdmi_connector {
	struct msm_connector base;	/* must be first (container_of below) */
	struct hdmi hdmi;		/* hw state, embedded (see hdmi_init/hdmi_destroy) */
	unsigned long int pixclock;	/* pixel clock of current mode, in Hz (set in mode_set) */
	bool enabled;			/* cached DPMS state, avoids redundant phy power transitions */
};
#define to_hdmi_connector(x) container_of(x, struct hdmi_connector, base)
30
/*
 * Acquire (on=true) or release (on=false) the GPIOs used by the HDMI
 * block: DDC clock/data, hot-plug detect, and an optional PMIC mux
 * select (pmic_gpio == -1 means "not present" on this board).
 *
 * On acquisition failure, everything requested so far is freed again
 * via the goto-unwind ladder and the gpio_request() error is returned.
 * The release path never fails and always returns 0.
 */
static int gpio_config(struct hdmi *hdmi, bool on)
{
	struct drm_device *dev = hdmi->dev;
	struct hdmi_platform_config *config =
			hdmi->pdev->dev.platform_data;
	int ret;

	if (on) {
		ret = gpio_request(config->ddc_clk_gpio, "HDMI_DDC_CLK");
		if (ret) {
			dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n",
				"HDMI_DDC_CLK", config->ddc_clk_gpio, ret);
			goto error1;
		}
		ret = gpio_request(config->ddc_data_gpio, "HDMI_DDC_DATA");
		if (ret) {
			dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n",
				"HDMI_DDC_DATA", config->ddc_data_gpio, ret);
			goto error2;
		}
		ret = gpio_request(config->hpd_gpio, "HDMI_HPD");
		if (ret) {
			dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n",
				"HDMI_HPD", config->hpd_gpio, ret);
			goto error3;
		}
		if (config->pmic_gpio != -1) {
			ret = gpio_request(config->pmic_gpio, "PMIC_HDMI_MUX_SEL");
			if (ret) {
				dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n",
					"PMIC_HDMI_MUX_SEL", config->pmic_gpio, ret);
				goto error4;
			}
			/* 0 selects HDMI on the PMIC mux */
			gpio_set_value_cansleep(config->pmic_gpio, 0);
		}
		DBG("gpio on");
	} else {
		gpio_free(config->ddc_clk_gpio);
		gpio_free(config->ddc_data_gpio);
		gpio_free(config->hpd_gpio);

		if (config->pmic_gpio != -1) {
			/* 1 deselects HDMI on the PMIC mux */
			gpio_set_value_cansleep(config->pmic_gpio, 1);
			gpio_free(config->pmic_gpio);
		}
		DBG("gpio off");
	}

	return 0;

error4:
	gpio_free(config->hpd_gpio);
error3:
	gpio_free(config->ddc_data_gpio);
error2:
	gpio_free(config->ddc_clk_gpio);
error1:
	return ret;
}
90
91static int hpd_enable(struct hdmi_connector *hdmi_connector)
92{
93 struct hdmi *hdmi = &hdmi_connector->hdmi;
94 struct drm_device *dev = hdmi_connector->base.base.dev;
95 struct hdmi_phy *phy = hdmi->phy;
96 uint32_t hpd_ctrl;
97 int ret;
98
99 ret = gpio_config(hdmi, true);
100 if (ret) {
101 dev_err(dev->dev, "failed to configure GPIOs: %d\n", ret);
102 goto fail;
103 }
104
105 ret = clk_prepare_enable(hdmi->clk);
106 if (ret) {
107 dev_err(dev->dev, "failed to enable 'clk': %d\n", ret);
108 goto fail;
109 }
110
111 ret = clk_prepare_enable(hdmi->m_pclk);
112 if (ret) {
113 dev_err(dev->dev, "failed to enable 'm_pclk': %d\n", ret);
114 goto fail;
115 }
116
117 ret = clk_prepare_enable(hdmi->s_pclk);
118 if (ret) {
119 dev_err(dev->dev, "failed to enable 's_pclk': %d\n", ret);
120 goto fail;
121 }
122
123 if (hdmi->mpp0)
124 ret = regulator_enable(hdmi->mpp0);
125 if (!ret)
126 ret = regulator_enable(hdmi->mvs);
127 if (ret) {
128 dev_err(dev->dev, "failed to enable regulators: %d\n", ret);
129 goto fail;
130 }
131
132 hdmi_set_mode(hdmi, false);
133 phy->funcs->reset(phy);
134 hdmi_set_mode(hdmi, true);
135
136 hdmi_write(hdmi, REG_HDMI_USEC_REFTIMER, 0x0001001b);
137
138 /* enable HPD events: */
139 hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL,
140 HDMI_HPD_INT_CTRL_INT_CONNECT |
141 HDMI_HPD_INT_CTRL_INT_EN);
142
143 /* set timeout to 4.1ms (max) for hardware debounce */
144 hpd_ctrl = hdmi_read(hdmi, REG_HDMI_HPD_CTRL);
145 hpd_ctrl |= HDMI_HPD_CTRL_TIMEOUT(0x1fff);
146
147 /* Toggle HPD circuit to trigger HPD sense */
148 hdmi_write(hdmi, REG_HDMI_HPD_CTRL,
149 ~HDMI_HPD_CTRL_ENABLE & hpd_ctrl);
150 hdmi_write(hdmi, REG_HDMI_HPD_CTRL,
151 HDMI_HPD_CTRL_ENABLE | hpd_ctrl);
152
153 return 0;
154
155fail:
156 return ret;
157}
158
159static int hdp_disable(struct hdmi_connector *hdmi_connector)
160{
161 struct hdmi *hdmi = &hdmi_connector->hdmi;
162 struct drm_device *dev = hdmi_connector->base.base.dev;
163 int ret = 0;
164
165 /* Disable HPD interrupt */
166 hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, 0);
167
168 hdmi_set_mode(hdmi, false);
169
170 if (hdmi->mpp0)
171 ret = regulator_disable(hdmi->mpp0);
172 if (!ret)
173 ret = regulator_disable(hdmi->mvs);
174 if (ret) {
175 dev_err(dev->dev, "failed to enable regulators: %d\n", ret);
176 goto fail;
177 }
178
179 clk_disable_unprepare(hdmi->clk);
180 clk_disable_unprepare(hdmi->m_pclk);
181 clk_disable_unprepare(hdmi->s_pclk);
182
183 ret = gpio_config(hdmi, false);
184 if (ret) {
185 dev_err(dev->dev, "failed to unconfigure GPIOs: %d\n", ret);
186 goto fail;
187 }
188
189 return 0;
190
191fail:
192 return ret;
193}
194
/*
 * Handle an HPD interrupt: ack it, tell the DRM helpers that hot-plug
 * state may have changed, then re-arm the interrupt for the opposite
 * transition (connect vs. disconnect).
 */
void hdmi_connector_irq(struct drm_connector *connector)
{
	struct msm_connector *msm_connector = to_msm_connector(connector);
	struct hdmi_connector *hdmi_connector = to_hdmi_connector(msm_connector);
	struct hdmi *hdmi = &hdmi_connector->hdmi;
	uint32_t hpd_int_status, hpd_int_ctrl;

	/* Process HPD: */
	hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS);
	hpd_int_ctrl = hdmi_read(hdmi, REG_HDMI_HPD_INT_CTRL);

	/* only act if the interrupt is both enabled and pending: */
	if ((hpd_int_ctrl & HDMI_HPD_INT_CTRL_INT_EN) &&
			(hpd_int_status & HDMI_HPD_INT_STATUS_INT)) {
		bool detected = !!(hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED);

		DBG("status=%04x, ctrl=%04x", hpd_int_status, hpd_int_ctrl);

		/* ack the irq: */
		hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL,
				hpd_int_ctrl | HDMI_HPD_INT_CTRL_INT_ACK);

		drm_helper_hpd_irq_event(connector->dev);

		/* detect disconnect if we are connected or visa versa: */
		hpd_int_ctrl = HDMI_HPD_INT_CTRL_INT_EN;
		if (!detected)
			hpd_int_ctrl |= HDMI_HPD_INT_CTRL_INT_CONNECT;
		hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, hpd_int_ctrl);
	}
}
225
/*
 * DRM .detect callback: report whether a cable is present by sampling
 * the HPD status register.  The sense bit can momentarily de-assert,
 * so re-sample up to 20 times with 10ms delays (up to ~200ms busy-wait,
 * mdelay not msleep) before concluding "disconnected".
 */
static enum drm_connector_status hdmi_connector_detect(
		struct drm_connector *connector, bool force)
{
	struct msm_connector *msm_connector = to_msm_connector(connector);
	struct hdmi_connector *hdmi_connector = to_hdmi_connector(msm_connector);
	struct hdmi *hdmi = &hdmi_connector->hdmi;
	uint32_t hpd_int_status;
	int retry = 20;

	hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS);

	/* sense seems to in some cases be momentarily de-asserted, don't
	 * let that trick us into thinking the monitor is gone:
	 */
	while (retry-- && !(hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED)) {
		mdelay(10);
		hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS);
		DBG("status=%08x", hpd_int_status);
	}

	return (hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED) ?
			connector_status_connected : connector_status_disconnected;
}
249
/*
 * DRM .destroy callback: power down HPD, unregister and clean up the
 * DRM connector, then release the embedded hdmi state and free the
 * containing allocation.  Also used by hdmi_connector_init()'s error
 * path.
 */
static void hdmi_connector_destroy(struct drm_connector *connector)
{
	struct msm_connector *msm_connector = to_msm_connector(connector);
	struct hdmi_connector *hdmi_connector = to_hdmi_connector(msm_connector);

	hdp_disable(hdmi_connector);

	drm_sysfs_connector_remove(connector);
	drm_connector_cleanup(connector);

	hdmi_destroy(&hdmi_connector->hdmi);

	kfree(hdmi_connector);
}
264
/*
 * DRM .get_modes callback: read the EDID over the hdmi i2c adapter and
 * populate the connector's mode list.  The core must have HDMI_CTRL_ENABLE
 * set for DDC to work, so it is temporarily enabled around the read and
 * the previous value restored afterwards.  Returns the number of modes
 * added (0 if no EDID could be read).
 */
static int hdmi_connector_get_modes(struct drm_connector *connector)
{
	struct msm_connector *msm_connector = to_msm_connector(connector);
	struct hdmi_connector *hdmi_connector = to_hdmi_connector(msm_connector);
	struct hdmi *hdmi = &hdmi_connector->hdmi;
	struct edid *edid;
	uint32_t hdmi_ctrl;
	int ret = 0;

	hdmi_ctrl = hdmi_read(hdmi, REG_HDMI_CTRL);
	hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl | HDMI_CTRL_ENABLE);

	edid = drm_get_edid(connector, hdmi->i2c);

	/* restore original core-enable state: */
	hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl);

	/* update property even on failure (clears stale EDID): */
	drm_mode_connector_update_edid_property(connector, edid);

	if (edid) {
		ret = drm_add_edid_modes(connector, edid);
		kfree(edid);
	}

	return ret;
}
290
291static int hdmi_connector_mode_valid(struct drm_connector *connector,
292 struct drm_display_mode *mode)
293{
294 struct msm_connector *msm_connector = to_msm_connector(connector);
295 struct msm_drm_private *priv = connector->dev->dev_private;
296 struct msm_kms *kms = priv->kms;
297 long actual, requested;
298
299 requested = 1000 * mode->clock;
300 actual = kms->funcs->round_pixclk(kms,
301 requested, msm_connector->encoder);
302
303 DBG("requested=%ld, actual=%ld", requested, actual);
304
305 if (actual != requested)
306 return MODE_CLOCK_RANGE;
307
308 return 0;
309}
310
/* DRM connector vtable; DPMS is delegated to the generic helper, which
 * ends up calling hdmi_connector_dpms() via the msm_connector funcs. */
static const struct drm_connector_funcs hdmi_connector_funcs = {
	.dpms = drm_helper_connector_dpms,
	.detect = hdmi_connector_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = hdmi_connector_destroy,
};

/* helper vtable: mode probing and encoder lookup */
static const struct drm_connector_helper_funcs hdmi_connector_helper_funcs = {
	.get_modes = hdmi_connector_get_modes,
	.mode_valid = hdmi_connector_mode_valid,
	.best_encoder = msm_connector_attached_encoder,
};
323
324static void hdmi_connector_dpms(struct msm_connector *msm_connector, int mode)
325{
326 struct hdmi_connector *hdmi_connector = to_hdmi_connector(msm_connector);
327 struct hdmi *hdmi = &hdmi_connector->hdmi;
328 struct hdmi_phy *phy = hdmi->phy;
329 bool enabled = (mode == DRM_MODE_DPMS_ON);
330
331 DBG("mode=%d", mode);
332
333 if (enabled == hdmi_connector->enabled)
334 return;
335
336 if (enabled) {
337 phy->funcs->powerup(phy, hdmi_connector->pixclock);
338 hdmi_set_mode(hdmi, true);
339 } else {
340 hdmi_set_mode(hdmi, false);
341 phy->funcs->powerdown(phy);
342 }
343
344 hdmi_connector->enabled = enabled;
345}
346
/*
 * msm_connector .mode_set callback: program the HDMI timing registers
 * from the DRM display mode and record the pixel clock for the next
 * PHY powerup.  Register values measure sync-relative positions, hence
 * the htotal/vtotal arithmetic below.
 */
static void hdmi_connector_mode_set(struct msm_connector *msm_connector,
		struct drm_display_mode *mode)
{
	struct hdmi_connector *hdmi_connector = to_hdmi_connector(msm_connector);
	struct hdmi *hdmi = &hdmi_connector->hdmi;
	int hstart, hend, vstart, vend;
	uint32_t frame_ctrl;

	/* mode->clock is kHz; pixclock is consumed by dpms()/phy powerup */
	hdmi_connector->pixclock = mode->clock * 1000;

	/* NOTE(review): "> 1" skips CEA mode 1 (640x480 DMT-alike) as well as
	 * non-CEA modes when deciding DVI vs HDMI signalling -- confirm intent */
	hdmi->hdmi_mode = drm_match_cea_mode(mode) > 1;

	/* active region expressed relative to start of hsync/vsync: */
	hstart = mode->htotal - mode->hsync_start;
	hend   = mode->htotal - mode->hsync_start + mode->hdisplay;

	vstart = mode->vtotal - mode->vsync_start - 1;
	vend   = mode->vtotal - mode->vsync_start + mode->vdisplay - 1;

	DBG("htotal=%d, vtotal=%d, hstart=%d, hend=%d, vstart=%d, vend=%d",
			mode->htotal, mode->vtotal, hstart, hend, vstart, vend);

	hdmi_write(hdmi, REG_HDMI_TOTAL,
			HDMI_TOTAL_H_TOTAL(mode->htotal - 1) |
			HDMI_TOTAL_V_TOTAL(mode->vtotal - 1));

	hdmi_write(hdmi, REG_HDMI_ACTIVE_HSYNC,
			HDMI_ACTIVE_HSYNC_START(hstart) |
			HDMI_ACTIVE_HSYNC_END(hend));
	hdmi_write(hdmi, REG_HDMI_ACTIVE_VSYNC,
			HDMI_ACTIVE_VSYNC_START(vstart) |
			HDMI_ACTIVE_VSYNC_END(vend));

	/* field-2 registers only matter for interlaced modes; zero otherwise: */
	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		hdmi_write(hdmi, REG_HDMI_VSYNC_TOTAL_F2,
				HDMI_VSYNC_TOTAL_F2_V_TOTAL(mode->vtotal));
		hdmi_write(hdmi, REG_HDMI_VSYNC_ACTIVE_F2,
				HDMI_VSYNC_ACTIVE_F2_START(vstart + 1) |
				HDMI_VSYNC_ACTIVE_F2_END(vend + 1));
	} else {
		hdmi_write(hdmi, REG_HDMI_VSYNC_TOTAL_F2,
				HDMI_VSYNC_TOTAL_F2_V_TOTAL(0));
		hdmi_write(hdmi, REG_HDMI_VSYNC_ACTIVE_F2,
				HDMI_VSYNC_ACTIVE_F2_START(0) |
				HDMI_VSYNC_ACTIVE_F2_END(0));
	}

	/* sync polarity / interlace flags: */
	frame_ctrl = 0;
	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
		frame_ctrl |= HDMI_FRAME_CTRL_HSYNC_LOW;
	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
		frame_ctrl |= HDMI_FRAME_CTRL_VSYNC_LOW;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		frame_ctrl |= HDMI_FRAME_CTRL_INTERLACED_EN;
	DBG("frame_ctrl=%08x", frame_ctrl);
	hdmi_write(hdmi, REG_HDMI_FRAME_CTRL, frame_ctrl);

	// TODO until we have audio, this might be safest:
	if (hdmi->hdmi_mode)
		hdmi_write(hdmi, REG_HDMI_GC, HDMI_GC_MUTE);
}
407
/* msm_connector vtable wiring up the hdmi-specific dpms/mode_set above */
static const struct msm_connector_funcs msm_connector_funcs = {
	.dpms = hdmi_connector_dpms,
	.mode_set = hdmi_connector_mode_set,
};
412
/* initialize connector */
/*
 * Allocate and register the HDMI connector, initialize the underlying
 * hdmi hw state, and enable hot-plug detection.  Returns the new
 * drm_connector or an ERR_PTR on failure.
 *
 * NOTE(review): the fail path calls hdmi_connector_destroy(), which runs
 * hdp_disable() and hdmi_destroy() even when hdmi_init() itself failed
 * (i.e. on partially-initialized state) -- verify those tolerate it.
 */
struct drm_connector *hdmi_connector_init(struct drm_device *dev,
		struct drm_encoder *encoder)
{
	struct drm_connector *connector = NULL;
	struct hdmi_connector *hdmi_connector;
	int ret;

	hdmi_connector = kzalloc(sizeof(*hdmi_connector), GFP_KERNEL);
	if (!hdmi_connector) {
		ret = -ENOMEM;
		goto fail;
	}

	connector = &hdmi_connector->base.base;

	msm_connector_init(&hdmi_connector->base,
			&msm_connector_funcs, encoder);
	drm_connector_init(dev, connector, &hdmi_connector_funcs,
			DRM_MODE_CONNECTOR_HDMIA);
	drm_connector_helper_add(connector, &hdmi_connector_helper_funcs);

	/* hot-plug is interrupt driven (see hdmi_connector_irq): */
	connector->polled = DRM_CONNECTOR_POLL_HPD;

	connector->interlace_allowed = 1;
	connector->doublescan_allowed = 0;

	drm_sysfs_connector_add(connector);

	ret = hdmi_init(&hdmi_connector->hdmi, dev, connector);
	if (ret)
		goto fail;

	ret = hpd_enable(hdmi_connector);
	if (ret) {
		dev_err(dev->dev, "failed to enable HPD: %d\n", ret);
		goto fail;
	}

	drm_mode_connector_attach_encoder(connector, encoder);

	return connector;

fail:
	/* connector is still NULL if the allocation itself failed */
	if (connector)
		hdmi_connector_destroy(connector);

	return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
new file mode 100644
index 000000000000..f4ab7f70fed1
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
@@ -0,0 +1,281 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "hdmi.h"
19
/* i2c adapter wrapping the HDMI DDC hardware engine. */
struct hdmi_i2c_adapter {
	struct i2c_adapter base;	/* must be first (container_of below) */
	struct hdmi *hdmi;
	bool sw_done;			/* latched "transfer complete" flag, set in sw_done() */
	wait_queue_head_t ddc_event;	/* woken from hdmi_i2c_irq() when sw_done latches */
};
#define to_hdmi_i2c_adapter(x) container_of(x, struct hdmi_i2c_adapter, base)
27
/*
 * Reset the DDC engine and program speed/timeout/reference-timer
 * parameters.  Called at the start of every transfer.
 */
static void init_ddc(struct hdmi_i2c_adapter *hdmi_i2c)
{
	struct hdmi *hdmi = hdmi_i2c->hdmi;

	hdmi_write(hdmi, REG_HDMI_DDC_CTRL,
			HDMI_DDC_CTRL_SW_STATUS_RESET);
	hdmi_write(hdmi, REG_HDMI_DDC_CTRL,
			HDMI_DDC_CTRL_SOFT_RESET);

	hdmi_write(hdmi, REG_HDMI_DDC_SPEED,
			HDMI_DDC_SPEED_THRESHOLD(2) |
			HDMI_DDC_SPEED_PRESCALE(10));

	hdmi_write(hdmi, REG_HDMI_DDC_SETUP,
			HDMI_DDC_SETUP_TIMEOUT(0xff));

	/* enable reference timer for 27us */
	hdmi_write(hdmi, REG_HDMI_DDC_REF,
			HDMI_DDC_REF_REFTIMER_ENABLE |
			HDMI_DDC_REF_REFTIMER(27));
}
49
50static int ddc_clear_irq(struct hdmi_i2c_adapter *hdmi_i2c)
51{
52 struct hdmi *hdmi = hdmi_i2c->hdmi;
53 struct drm_device *dev = hdmi->dev;
54 uint32_t retry = 0xffff;
55 uint32_t ddc_int_ctrl;
56
57 do {
58 --retry;
59
60 hdmi_write(hdmi, REG_HDMI_DDC_INT_CTRL,
61 HDMI_DDC_INT_CTRL_SW_DONE_ACK |
62 HDMI_DDC_INT_CTRL_SW_DONE_MASK);
63
64 ddc_int_ctrl = hdmi_read(hdmi, REG_HDMI_DDC_INT_CTRL);
65
66 } while ((ddc_int_ctrl & HDMI_DDC_INT_CTRL_SW_DONE_INT) && retry);
67
68 if (!retry) {
69 dev_err(dev->dev, "timeout waiting for DDC\n");
70 return -ETIMEDOUT;
71 }
72
73 hdmi_i2c->sw_done = false;
74
75 return 0;
76}
77
/* hardware supports 4 DDC transaction descriptor slots (NACK0..NACK3) */
#define MAX_TRANSACTIONS 4

/*
 * Poll/latch the "software transfer done" interrupt.  Once the hw
 * reports done (with the interrupt enabled), ack it and remember the
 * result in sw_done so repeated calls stay true until ddc_clear_irq()
 * resets it.  Used both as wait_event() condition and from the irq path.
 */
static bool sw_done(struct hdmi_i2c_adapter *hdmi_i2c)
{
	struct hdmi *hdmi = hdmi_i2c->hdmi;

	if (!hdmi_i2c->sw_done) {
		uint32_t ddc_int_ctrl;

		ddc_int_ctrl = hdmi_read(hdmi, REG_HDMI_DDC_INT_CTRL);

		if ((ddc_int_ctrl & HDMI_DDC_INT_CTRL_SW_DONE_MASK) &&
				(ddc_int_ctrl & HDMI_DDC_INT_CTRL_SW_DONE_INT)) {
			hdmi_i2c->sw_done = true;
			hdmi_write(hdmi, REG_HDMI_DDC_INT_CTRL,
					HDMI_DDC_INT_CTRL_SW_DONE_ACK);
		}
	}

	return hdmi_i2c->sw_done;
}
99
/*
 * i2c .master_xfer implementation on top of the DDC engine.
 *
 * The hardware works from up to MAX_TRANSACTIONS descriptors: for each
 * message the (shifted) slave address and any write payload are queued
 * into the DDC_DATA fifo, a transaction descriptor is programmed, then
 * one GO kicks the whole batch.  After completion, read payloads are
 * pulled back out of the fifo by index.
 *
 * Returns the number of messages processed, or a negative error on
 * timeout.  NOTE(review): a NACKed read breaks out of the readback loop
 * and returns the partial count rather than an error -- confirm callers
 * (drm_get_edid) handle short returns.
 */
static int hdmi_i2c_xfer(struct i2c_adapter *i2c,
		struct i2c_msg *msgs, int num)
{
	struct hdmi_i2c_adapter *hdmi_i2c = to_hdmi_i2c_adapter(i2c);
	struct hdmi *hdmi = hdmi_i2c->hdmi;
	struct drm_device *dev = hdmi->dev;
	/* per-slot NACK status bits, indexed by message number: */
	static const uint32_t nack[] = {
			HDMI_DDC_SW_STATUS_NACK0, HDMI_DDC_SW_STATUS_NACK1,
			HDMI_DDC_SW_STATUS_NACK2, HDMI_DDC_SW_STATUS_NACK3,
	};
	int indices[MAX_TRANSACTIONS];	/* fifo index where each msg's data starts */
	int ret, i, j, index = 0;
	uint32_t ddc_status, ddc_data, i2c_trans;

	/* hw has only MAX_TRANSACTIONS descriptor slots; excess msgs dropped */
	num = min(num, MAX_TRANSACTIONS);

	/* DDC only works while the core is enabled (see get_modes) */
	WARN_ON(!(hdmi_read(hdmi, REG_HDMI_CTRL) & HDMI_CTRL_ENABLE));

	if (num == 0)
		return num;

	init_ddc(hdmi_i2c);

	ret = ddc_clear_irq(hdmi_i2c);
	if (ret)
		return ret;

	for (i = 0; i < num; i++) {
		struct i2c_msg *p = &msgs[i];
		/* 8-bit bus address: 7-bit addr shifted, lsb = read flag */
		uint32_t raw_addr = p->addr << 1;

		if (p->flags & I2C_M_RD)
			raw_addr |= 1;

		ddc_data = HDMI_DDC_DATA_DATA(raw_addr) |
				HDMI_DDC_DATA_DATA_RW(DDC_WRITE);

		if (i == 0) {
			/* first write resets the fifo write pointer to 0: */
			ddc_data |= HDMI_DDC_DATA_INDEX(0) |
					HDMI_DDC_DATA_INDEX_WRITE;
		}

		hdmi_write(hdmi, REG_HDMI_DDC_DATA, ddc_data);
		index++;

		indices[i] = index;

		if (p->flags & I2C_M_RD) {
			/* hw will deposit read bytes here; just reserve space */
			index += p->len;
		} else {
			/* queue the write payload into the fifo: */
			for (j = 0; j < p->len; j++) {
				ddc_data = HDMI_DDC_DATA_DATA(p->buf[j]) |
						HDMI_DDC_DATA_DATA_RW(DDC_WRITE);
				hdmi_write(hdmi, REG_HDMI_DDC_DATA, ddc_data);
				index++;
			}
		}

		i2c_trans = HDMI_I2C_TRANSACTION_REG_CNT(p->len) |
				HDMI_I2C_TRANSACTION_REG_RW(
						(p->flags & I2C_M_RD) ? DDC_READ : DDC_WRITE) |
				HDMI_I2C_TRANSACTION_REG_START;

		/* STOP only after the final message of the batch: */
		if (i == (num - 1))
			i2c_trans |= HDMI_I2C_TRANSACTION_REG_STOP;

		hdmi_write(hdmi, REG_HDMI_I2C_TRANSACTION(i), i2c_trans);
	}

	/* trigger the transfer: */
	hdmi_write(hdmi, REG_HDMI_DDC_CTRL,
			HDMI_DDC_CTRL_TRANSACTION_CNT(num - 1) |
			HDMI_DDC_CTRL_GO);

	/* wait up to 250ms for the engine to signal completion: */
	ret = wait_event_timeout(hdmi_i2c->ddc_event, sw_done(hdmi_i2c), HZ/4);
	if (ret <= 0) {
		if (ret == 0)
			ret = -ETIMEDOUT;
		dev_warn(dev->dev, "DDC timeout: %d\n", ret);
		DBG("sw_status=%08x, hw_status=%08x, int_ctrl=%08x",
				hdmi_read(hdmi, REG_HDMI_DDC_SW_STATUS),
				hdmi_read(hdmi, REG_HDMI_DDC_HW_STATUS),
				hdmi_read(hdmi, REG_HDMI_DDC_INT_CTRL));
		return ret;
	}

	ddc_status = hdmi_read(hdmi, REG_HDMI_DDC_SW_STATUS);

	/* read back results of any read transactions: */
	for (i = 0; i < num; i++) {
		struct i2c_msg *p = &msgs[i];

		if (!(p->flags & I2C_M_RD))
			continue;

		/* check for NACK: */
		if (ddc_status & nack[i]) {
			DBG("ddc_status=%08x", ddc_status);
			break;
		}

		/* reposition the fifo read pointer to this msg's data: */
		ddc_data = HDMI_DDC_DATA_DATA_RW(DDC_READ) |
				HDMI_DDC_DATA_INDEX(indices[i]) |
				HDMI_DDC_DATA_INDEX_WRITE;

		hdmi_write(hdmi, REG_HDMI_DDC_DATA, ddc_data);

		/* discard first byte: */
		hdmi_read(hdmi, REG_HDMI_DDC_DATA);

		for (j = 0; j < p->len; j++) {
			ddc_data = hdmi_read(hdmi, REG_HDMI_DDC_DATA);
			p->buf[j] = FIELD(ddc_data, HDMI_DDC_DATA_DATA);
		}
	}

	return i;
}
218
219static u32 hdmi_i2c_func(struct i2c_adapter *adapter)
220{
221 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
222}
223
/* i2c algorithm vtable backed by the DDC engine above */
static const struct i2c_algorithm hdmi_i2c_algorithm = {
	.master_xfer = hdmi_i2c_xfer,
	.functionality = hdmi_i2c_func,
};
228
229void hdmi_i2c_irq(struct i2c_adapter *i2c)
230{
231 struct hdmi_i2c_adapter *hdmi_i2c = to_hdmi_i2c_adapter(i2c);
232
233 if (sw_done(hdmi_i2c))
234 wake_up_all(&hdmi_i2c->ddc_event);
235}
236
/* Unregister the adapter and free the wrapper allocation. */
void hdmi_i2c_destroy(struct i2c_adapter *i2c)
{
	struct hdmi_i2c_adapter *hdmi_i2c = to_hdmi_i2c_adapter(i2c);
	i2c_del_adapter(i2c);
	kfree(hdmi_i2c);
}
243
/*
 * Allocate and register the DDC i2c adapter for this hdmi instance.
 * Returns the adapter or an ERR_PTR on failure; pair with
 * hdmi_i2c_destroy().
 */
struct i2c_adapter *hdmi_i2c_init(struct hdmi *hdmi)
{
	struct drm_device *dev = hdmi->dev;
	struct hdmi_i2c_adapter *hdmi_i2c;
	struct i2c_adapter *i2c = NULL;
	int ret;

	hdmi_i2c = kzalloc(sizeof(*hdmi_i2c), GFP_KERNEL);
	if (!hdmi_i2c) {
		ret = -ENOMEM;
		goto fail;
	}

	i2c = &hdmi_i2c->base;

	hdmi_i2c->hdmi = hdmi;
	init_waitqueue_head(&hdmi_i2c->ddc_event);


	i2c->owner = THIS_MODULE;
	i2c->class = I2C_CLASS_DDC;	/* mark as a DDC/EDID bus */
	snprintf(i2c->name, sizeof(i2c->name), "msm hdmi i2c");
	i2c->dev.parent = &hdmi->pdev->dev;
	i2c->algo = &hdmi_i2c_algorithm;

	ret = i2c_add_adapter(i2c);
	if (ret) {
		dev_err(dev->dev, "failed to register hdmi i2c: %d\n", ret);
		goto fail;
	}

	return i2c;

fail:
	/* i2c is still NULL if the allocation itself failed */
	if (i2c)
		hdmi_i2c_destroy(i2c);
	return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
new file mode 100644
index 000000000000..e5b7ed5b8f01
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
@@ -0,0 +1,141 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "hdmi.h"
19
20struct hdmi_phy_8960 {
21 struct hdmi_phy base;
22 struct hdmi *hdmi;
23};
24#define to_hdmi_phy_8960(x) container_of(x, struct hdmi_phy_8960, base)
25
26static void hdmi_phy_8960_destroy(struct hdmi_phy *phy)
27{
28 struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy);
29 kfree(phy_8960);
30}
31
32static void hdmi_phy_8960_reset(struct hdmi_phy *phy)
33{
34 struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy);
35 struct hdmi *hdmi = phy_8960->hdmi;
36 unsigned int val;
37
38 val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL);
39
40 if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
41 /* pull low */
42 hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
43 val & ~HDMI_PHY_CTRL_SW_RESET);
44 } else {
45 /* pull high */
46 hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
47 val | HDMI_PHY_CTRL_SW_RESET);
48 }
49
50 if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
51 /* pull low */
52 hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
53 val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
54 } else {
55 /* pull high */
56 hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
57 val | HDMI_PHY_CTRL_SW_RESET_PLL);
58 }
59
60 msleep(100);
61
62 if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
63 /* pull high */
64 hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
65 val | HDMI_PHY_CTRL_SW_RESET);
66 } else {
67 /* pull low */
68 hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
69 val & ~HDMI_PHY_CTRL_SW_RESET);
70 }
71
72 if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
73 /* pull high */
74 hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
75 val | HDMI_PHY_CTRL_SW_RESET_PLL);
76 } else {
77 /* pull low */
78 hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
79 val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
80 }
81}
82
83static void hdmi_phy_8960_powerup(struct hdmi_phy *phy,
84 unsigned long int pixclock)
85{
86 struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy);
87 struct hdmi *hdmi = phy_8960->hdmi;
88
89 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG0, 0x1b);
90 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG1, 0xf2);
91 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG4, 0x00);
92 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG5, 0x00);
93 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG6, 0x00);
94 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG7, 0x00);
95 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG8, 0x00);
96 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG9, 0x00);
97 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG10, 0x00);
98 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG11, 0x00);
99 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG3, 0x20);
100}
101
102static void hdmi_phy_8960_powerdown(struct hdmi_phy *phy)
103{
104 struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy);
105 struct hdmi *hdmi = phy_8960->hdmi;
106
107 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG2, 0x7f);
108}
109
110static const struct hdmi_phy_funcs hdmi_phy_8960_funcs = {
111 .destroy = hdmi_phy_8960_destroy,
112 .reset = hdmi_phy_8960_reset,
113 .powerup = hdmi_phy_8960_powerup,
114 .powerdown = hdmi_phy_8960_powerdown,
115};
116
117struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi)
118{
119 struct hdmi_phy_8960 *phy_8960;
120 struct hdmi_phy *phy = NULL;
121 int ret;
122
123 phy_8960 = kzalloc(sizeof(*phy_8960), GFP_KERNEL);
124 if (!phy_8960) {
125 ret = -ENOMEM;
126 goto fail;
127 }
128
129 phy = &phy_8960->base;
130
131 phy->funcs = &hdmi_phy_8960_funcs;
132
133 phy_8960->hdmi = hdmi;
134
135 return phy;
136
137fail:
138 if (phy)
139 hdmi_phy_8960_destroy(phy);
140 return ERR_PTR(ret);
141}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c
new file mode 100644
index 000000000000..391433c1af7c
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c
@@ -0,0 +1,214 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "hdmi.h"
19
20struct hdmi_phy_8x60 {
21 struct hdmi_phy base;
22 struct hdmi *hdmi;
23};
24#define to_hdmi_phy_8x60(x) container_of(x, struct hdmi_phy_8x60, base)
25
26static void hdmi_phy_8x60_destroy(struct hdmi_phy *phy)
27{
28 struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy);
29 kfree(phy_8x60);
30}
31
32static void hdmi_phy_8x60_reset(struct hdmi_phy *phy)
33{
34 struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy);
35 struct hdmi *hdmi = phy_8x60->hdmi;
36 unsigned int val;
37
38 val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL);
39
40 if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
41 /* pull low */
42 hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
43 val & ~HDMI_PHY_CTRL_SW_RESET);
44 } else {
45 /* pull high */
46 hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
47 val | HDMI_PHY_CTRL_SW_RESET);
48 }
49
50 msleep(100);
51
52 if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
53 /* pull high */
54 hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
55 val | HDMI_PHY_CTRL_SW_RESET);
56 } else {
57 /* pull low */
58 hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
59 val & ~HDMI_PHY_CTRL_SW_RESET);
60 }
61}
62
63static void hdmi_phy_8x60_powerup(struct hdmi_phy *phy,
64 unsigned long int pixclock)
65{
66 struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy);
67 struct hdmi *hdmi = phy_8x60->hdmi;
68
69 /* De-serializer delay D/C for non-lbk mode: */
70 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG0,
71 HDMI_8x60_PHY_REG0_DESER_DEL_CTRL(3));
72
73 if (pixclock == 27000000) {
74 /* video_format == HDMI_VFRMT_720x480p60_16_9 */
75 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG1,
76 HDMI_8x60_PHY_REG1_DTEST_MUX_SEL(5) |
77 HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(3));
78 } else {
79 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG1,
80 HDMI_8x60_PHY_REG1_DTEST_MUX_SEL(5) |
81 HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(4));
82 }
83
84 /* No matter what, start from the power down mode: */
85 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
86 HDMI_8x60_PHY_REG2_PD_PWRGEN |
87 HDMI_8x60_PHY_REG2_PD_PLL |
88 HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
89 HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
90 HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
91 HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
92 HDMI_8x60_PHY_REG2_PD_DESER);
93
94 /* Turn PowerGen on: */
95 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
96 HDMI_8x60_PHY_REG2_PD_PLL |
97 HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
98 HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
99 HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
100 HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
101 HDMI_8x60_PHY_REG2_PD_DESER);
102
103 /* Turn PLL power on: */
104 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
105 HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
106 HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
107 HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
108 HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
109 HDMI_8x60_PHY_REG2_PD_DESER);
110
111 /* Write to HIGH after PLL power down de-assert: */
112 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG3,
113 HDMI_8x60_PHY_REG3_PLL_ENABLE);
114
115 /* ASIC power on; PHY REG9 = 0 */
116 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG9, 0);
117
118 /* Enable PLL lock detect, PLL lock det will go high after lock
119 * Enable the re-time logic
120 */
121 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG12,
122 HDMI_8x60_PHY_REG12_RETIMING_EN |
123 HDMI_8x60_PHY_REG12_PLL_LOCK_DETECT_EN);
124
125 /* Drivers are on: */
126 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
127 HDMI_8x60_PHY_REG2_PD_DESER);
128
129 /* If the RX detector is needed: */
130 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
131 HDMI_8x60_PHY_REG2_RCV_SENSE_EN |
132 HDMI_8x60_PHY_REG2_PD_DESER);
133
134 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG4, 0);
135 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG5, 0);
136 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG6, 0);
137 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG7, 0);
138 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG8, 0);
139 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG9, 0);
140 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG10, 0);
141 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG11, 0);
142
143 /* If we want to use lock enable based on counting: */
144 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG12,
145 HDMI_8x60_PHY_REG12_RETIMING_EN |
146 HDMI_8x60_PHY_REG12_PLL_LOCK_DETECT_EN |
147 HDMI_8x60_PHY_REG12_FORCE_LOCK);
148}
149
150static void hdmi_phy_8x60_powerdown(struct hdmi_phy *phy)
151{
152 struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy);
153 struct hdmi *hdmi = phy_8x60->hdmi;
154
155 /* Assert RESET PHY from controller */
156 hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
157 HDMI_PHY_CTRL_SW_RESET);
158 udelay(10);
159 /* De-assert RESET PHY from controller */
160 hdmi_write(hdmi, REG_HDMI_PHY_CTRL, 0);
161 /* Turn off Driver */
162 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
163 HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
164 HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
165 HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
166 HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
167 HDMI_8x60_PHY_REG2_PD_DESER);
168 udelay(10);
169 /* Disable PLL */
170 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG3, 0);
171 /* Power down PHY, but keep RX-sense: */
172 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
173 HDMI_8x60_PHY_REG2_RCV_SENSE_EN |
174 HDMI_8x60_PHY_REG2_PD_PWRGEN |
175 HDMI_8x60_PHY_REG2_PD_PLL |
176 HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
177 HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
178 HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
179 HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
180 HDMI_8x60_PHY_REG2_PD_DESER);
181}
182
183static const struct hdmi_phy_funcs hdmi_phy_8x60_funcs = {
184 .destroy = hdmi_phy_8x60_destroy,
185 .reset = hdmi_phy_8x60_reset,
186 .powerup = hdmi_phy_8x60_powerup,
187 .powerdown = hdmi_phy_8x60_powerdown,
188};
189
190struct hdmi_phy *hdmi_phy_8x60_init(struct hdmi *hdmi)
191{
192 struct hdmi_phy_8x60 *phy_8x60;
193 struct hdmi_phy *phy = NULL;
194 int ret;
195
196 phy_8x60 = kzalloc(sizeof(*phy_8x60), GFP_KERNEL);
197 if (!phy_8x60) {
198 ret = -ENOMEM;
199 goto fail;
200 }
201
202 phy = &phy_8x60->base;
203
204 phy->funcs = &hdmi_phy_8x60_funcs;
205
206 phy_8x60->hdmi = hdmi;
207
208 return phy;
209
210fail:
211 if (phy)
212 hdmi_phy_8x60_destroy(phy);
213 return ERR_PTR(ret);
214}
diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
new file mode 100644
index 000000000000..bee36363bcd0
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
@@ -0,0 +1,50 @@
1#ifndef QFPROM_XML
2#define QFPROM_XML
3
4/* Autogenerated file, DO NOT EDIT manually!
5
6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng
8git clone git://0x04.net/rules-ng-ng
9
10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36)
14- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
15- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
17- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
18- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15)
19
20Copyright (C) 2013 by the following authors:
21- Rob Clark <robdclark@gmail.com> (robclark)
22
23Permission is hereby granted, free of charge, to any person obtaining
24a copy of this software and associated documentation files (the
25"Software"), to deal in the Software without restriction, including
26without limitation the rights to use, copy, modify, merge, publish,
27distribute, sublicense, and/or sell copies of the Software, and to
28permit persons to whom the Software is furnished to do so, subject to
29the following conditions:
30
31The above copyright notice and this permission notice (including the
32next paragraph) shall be included in all copies or substantial
33portions of the Software.
34
35THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
36EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
37MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
38IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
39LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
40OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
41WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
42*/
43
44
45#define REG_QFPROM_CONFIG_ROW0_LSB 0x00000238
46#define QFPROM_CONFIG_ROW0_LSB_HDMI_DISABLE 0x00200000
47#define QFPROM_CONFIG_ROW0_LSB_HDCP_DISABLE 0x00400000
48
49
50#endif /* QFPROM_XML */
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/mdp4/mdp4.xml.h
new file mode 100644
index 000000000000..bbeeebe2db55
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp4/mdp4.xml.h
@@ -0,0 +1,1061 @@
1#ifndef MDP4_XML
2#define MDP4_XML
3
4/* Autogenerated file, DO NOT EDIT manually!
5
6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng
8git clone git://0x04.net/rules-ng-ng
9
10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36)
14- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
15- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
17- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
18- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15)
19
20Copyright (C) 2013 by the following authors:
21- Rob Clark <robdclark@gmail.com> (robclark)
22
23Permission is hereby granted, free of charge, to any person obtaining
24a copy of this software and associated documentation files (the
25"Software"), to deal in the Software without restriction, including
26without limitation the rights to use, copy, modify, merge, publish,
27distribute, sublicense, and/or sell copies of the Software, and to
28permit persons to whom the Software is furnished to do so, subject to
29the following conditions:
30
31The above copyright notice and this permission notice (including the
32next paragraph) shall be included in all copies or substantial
33portions of the Software.
34
35THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
36EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
37MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
38IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
39LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
40OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
41WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
42*/
43
44
45enum mpd4_bpc {
46 BPC1 = 0,
47 BPC5 = 1,
48 BPC6 = 2,
49 BPC8 = 3,
50};
51
52enum mpd4_bpc_alpha {
53 BPC1A = 0,
54 BPC4A = 1,
55 BPC6A = 2,
56 BPC8A = 3,
57};
58
59enum mpd4_alpha_type {
60 FG_CONST = 0,
61 BG_CONST = 1,
62 FG_PIXEL = 2,
63 BG_PIXEL = 3,
64};
65
66enum mpd4_pipe {
67 VG1 = 0,
68 VG2 = 1,
69 RGB1 = 2,
70 RGB2 = 3,
71 RGB3 = 4,
72 VG3 = 5,
73 VG4 = 6,
74};
75
76enum mpd4_mixer {
77 MIXER0 = 0,
78 MIXER1 = 1,
79 MIXER2 = 2,
80};
81
82enum mpd4_mixer_stage_id {
83 STAGE_UNUSED = 0,
84 STAGE_BASE = 1,
85 STAGE0 = 2,
86 STAGE1 = 3,
87 STAGE2 = 4,
88 STAGE3 = 5,
89};
90
91enum mdp4_intf {
92 INTF_LCDC_DTV = 0,
93 INTF_DSI_VIDEO = 1,
94 INTF_DSI_CMD = 2,
95 INTF_EBI2_TV = 3,
96};
97
98enum mdp4_cursor_format {
99 CURSOR_ARGB = 1,
100 CURSOR_XRGB = 2,
101};
102
103enum mdp4_dma {
104 DMA_P = 0,
105 DMA_S = 1,
106 DMA_E = 2,
107};
108
109#define MDP4_IRQ_OVERLAY0_DONE 0x00000001
110#define MDP4_IRQ_OVERLAY1_DONE 0x00000002
111#define MDP4_IRQ_DMA_S_DONE 0x00000004
112#define MDP4_IRQ_DMA_E_DONE 0x00000008
113#define MDP4_IRQ_DMA_P_DONE 0x00000010
114#define MDP4_IRQ_VG1_HISTOGRAM 0x00000020
115#define MDP4_IRQ_VG2_HISTOGRAM 0x00000040
116#define MDP4_IRQ_PRIMARY_VSYNC 0x00000080
117#define MDP4_IRQ_PRIMARY_INTF_UDERRUN 0x00000100
118#define MDP4_IRQ_EXTERNAL_VSYNC 0x00000200
119#define MDP4_IRQ_EXTERNAL_INTF_UDERRUN 0x00000400
120#define MDP4_IRQ_PRIMARY_RDPTR 0x00000800
121#define MDP4_IRQ_DMA_P_HISTOGRAM 0x00020000
122#define MDP4_IRQ_DMA_S_HISTOGRAM 0x04000000
123#define MDP4_IRQ_OVERLAY2_DONE 0x40000000
124#define REG_MDP4_VERSION 0x00000000
125#define MDP4_VERSION_MINOR__MASK 0x00ff0000
126#define MDP4_VERSION_MINOR__SHIFT 16
127static inline uint32_t MDP4_VERSION_MINOR(uint32_t val)
128{
129 return ((val) << MDP4_VERSION_MINOR__SHIFT) & MDP4_VERSION_MINOR__MASK;
130}
131#define MDP4_VERSION_MAJOR__MASK 0xff000000
132#define MDP4_VERSION_MAJOR__SHIFT 24
133static inline uint32_t MDP4_VERSION_MAJOR(uint32_t val)
134{
135 return ((val) << MDP4_VERSION_MAJOR__SHIFT) & MDP4_VERSION_MAJOR__MASK;
136}
137
138#define REG_MDP4_OVLP0_KICK 0x00000004
139
140#define REG_MDP4_OVLP1_KICK 0x00000008
141
142#define REG_MDP4_OVLP2_KICK 0x000000d0
143
144#define REG_MDP4_DMA_P_KICK 0x0000000c
145
146#define REG_MDP4_DMA_S_KICK 0x00000010
147
148#define REG_MDP4_DMA_E_KICK 0x00000014
149
150#define REG_MDP4_DISP_STATUS 0x00000018
151
152#define REG_MDP4_DISP_INTF_SEL 0x00000038
153#define MDP4_DISP_INTF_SEL_PRIM__MASK 0x00000003
154#define MDP4_DISP_INTF_SEL_PRIM__SHIFT 0
155static inline uint32_t MDP4_DISP_INTF_SEL_PRIM(enum mdp4_intf val)
156{
157 return ((val) << MDP4_DISP_INTF_SEL_PRIM__SHIFT) & MDP4_DISP_INTF_SEL_PRIM__MASK;
158}
159#define MDP4_DISP_INTF_SEL_SEC__MASK 0x0000000c
160#define MDP4_DISP_INTF_SEL_SEC__SHIFT 2
161static inline uint32_t MDP4_DISP_INTF_SEL_SEC(enum mdp4_intf val)
162{
163 return ((val) << MDP4_DISP_INTF_SEL_SEC__SHIFT) & MDP4_DISP_INTF_SEL_SEC__MASK;
164}
165#define MDP4_DISP_INTF_SEL_EXT__MASK 0x00000030
166#define MDP4_DISP_INTF_SEL_EXT__SHIFT 4
167static inline uint32_t MDP4_DISP_INTF_SEL_EXT(enum mdp4_intf val)
168{
169 return ((val) << MDP4_DISP_INTF_SEL_EXT__SHIFT) & MDP4_DISP_INTF_SEL_EXT__MASK;
170}
171#define MDP4_DISP_INTF_SEL_DSI_VIDEO 0x00000040
172#define MDP4_DISP_INTF_SEL_DSI_CMD 0x00000080
173
174#define REG_MDP4_RESET_STATUS 0x0000003c
175
176#define REG_MDP4_READ_CNFG 0x0000004c
177
178#define REG_MDP4_INTR_ENABLE 0x00000050
179
180#define REG_MDP4_INTR_STATUS 0x00000054
181
182#define REG_MDP4_INTR_CLEAR 0x00000058
183
184#define REG_MDP4_EBI2_LCD0 0x00000060
185
186#define REG_MDP4_EBI2_LCD1 0x00000064
187
188#define REG_MDP4_PORTMAP_MODE 0x00000070
189
190#define REG_MDP4_CS_CONTROLLER0 0x000000c0
191
192#define REG_MDP4_CS_CONTROLLER1 0x000000c4
193
194#define REG_MDP4_LAYERMIXER2_IN_CFG 0x000100f0
195#define MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK 0x00000007
196#define MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT 0
197static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE0(enum mpd4_mixer_stage_id val)
198{
199 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK;
200}
201#define MDP4_LAYERMIXER2_IN_CFG_PIPE0_MIXER1 0x00000008
202#define MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK 0x00000070
203#define MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT 4
204static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE1(enum mpd4_mixer_stage_id val)
205{
206 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK;
207}
208#define MDP4_LAYERMIXER2_IN_CFG_PIPE1_MIXER1 0x00000080
209#define MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK 0x00000700
210#define MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT 8
211static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE2(enum mpd4_mixer_stage_id val)
212{
213 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK;
214}
215#define MDP4_LAYERMIXER2_IN_CFG_PIPE2_MIXER1 0x00000800
216#define MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK 0x00007000
217#define MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT 12
218static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE3(enum mpd4_mixer_stage_id val)
219{
220 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK;
221}
222#define MDP4_LAYERMIXER2_IN_CFG_PIPE3_MIXER1 0x00008000
223#define MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK 0x00070000
224#define MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT 16
225static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE4(enum mpd4_mixer_stage_id val)
226{
227 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK;
228}
229#define MDP4_LAYERMIXER2_IN_CFG_PIPE4_MIXER1 0x00080000
230#define MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK 0x00700000
231#define MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT 20
232static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE5(enum mpd4_mixer_stage_id val)
233{
234 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK;
235}
236#define MDP4_LAYERMIXER2_IN_CFG_PIPE5_MIXER1 0x00800000
237#define MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK 0x07000000
238#define MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT 24
239static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE6(enum mpd4_mixer_stage_id val)
240{
241 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK;
242}
243#define MDP4_LAYERMIXER2_IN_CFG_PIPE6_MIXER1 0x08000000
244#define MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK 0x70000000
245#define MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT 28
246static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE7(enum mpd4_mixer_stage_id val)
247{
248 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK;
249}
250#define MDP4_LAYERMIXER2_IN_CFG_PIPE7_MIXER1 0x80000000
251
252#define REG_MDP4_LAYERMIXER_IN_CFG_UPDATE_METHOD 0x000100fc
253
254#define REG_MDP4_LAYERMIXER_IN_CFG 0x00010100
255#define MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK 0x00000007
256#define MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT 0
257static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE0(enum mpd4_mixer_stage_id val)
258{
259 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK;
260}
261#define MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1 0x00000008
262#define MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK 0x00000070
263#define MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT 4
264static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE1(enum mpd4_mixer_stage_id val)
265{
266 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK;
267}
268#define MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1 0x00000080
269#define MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK 0x00000700
270#define MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT 8
271static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE2(enum mpd4_mixer_stage_id val)
272{
273 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK;
274}
275#define MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1 0x00000800
276#define MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK 0x00007000
277#define MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT 12
278static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE3(enum mpd4_mixer_stage_id val)
279{
280 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK;
281}
282#define MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1 0x00008000
283#define MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK 0x00070000
284#define MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT 16
285static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE4(enum mpd4_mixer_stage_id val)
286{
287 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK;
288}
289#define MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1 0x00080000
290#define MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK 0x00700000
291#define MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT 20
292static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE5(enum mpd4_mixer_stage_id val)
293{
294 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK;
295}
296#define MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1 0x00800000
297#define MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK 0x07000000
298#define MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT 24
299static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE6(enum mpd4_mixer_stage_id val)
300{
301 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK;
302}
303#define MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1 0x08000000
304#define MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK 0x70000000
305#define MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT 28
306static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE7(enum mpd4_mixer_stage_id val)
307{
308 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK;
309}
310#define MDP4_LAYERMIXER_IN_CFG_PIPE7_MIXER1 0x80000000
311
312#define REG_MDP4_VG2_SRC_FORMAT 0x00030050
313
314#define REG_MDP4_VG2_CONST_COLOR 0x00031008
315
316#define REG_MDP4_OVERLAY_FLUSH 0x00018000
317#define MDP4_OVERLAY_FLUSH_OVLP0 0x00000001
318#define MDP4_OVERLAY_FLUSH_OVLP1 0x00000002
319#define MDP4_OVERLAY_FLUSH_VG1 0x00000004
320#define MDP4_OVERLAY_FLUSH_VG2 0x00000008
321#define MDP4_OVERLAY_FLUSH_RGB1 0x00000010
322#define MDP4_OVERLAY_FLUSH_RGB2 0x00000020
323
324static inline uint32_t __offset_OVLP(uint32_t idx)
325{
326 switch (idx) {
327 case 0: return 0x00010000;
328 case 1: return 0x00018000;
329 case 2: return 0x00088000;
330 default: return INVALID_IDX(idx);
331 }
332}
333static inline uint32_t REG_MDP4_OVLP(uint32_t i0) { return 0x00000000 + __offset_OVLP(i0); }
334
335static inline uint32_t REG_MDP4_OVLP_CFG(uint32_t i0) { return 0x00000004 + __offset_OVLP(i0); }
336
337static inline uint32_t REG_MDP4_OVLP_SIZE(uint32_t i0) { return 0x00000008 + __offset_OVLP(i0); }
338#define MDP4_OVLP_SIZE_HEIGHT__MASK 0xffff0000
339#define MDP4_OVLP_SIZE_HEIGHT__SHIFT 16
340static inline uint32_t MDP4_OVLP_SIZE_HEIGHT(uint32_t val)
341{
342 return ((val) << MDP4_OVLP_SIZE_HEIGHT__SHIFT) & MDP4_OVLP_SIZE_HEIGHT__MASK;
343}
344#define MDP4_OVLP_SIZE_WIDTH__MASK 0x0000ffff
345#define MDP4_OVLP_SIZE_WIDTH__SHIFT 0
346static inline uint32_t MDP4_OVLP_SIZE_WIDTH(uint32_t val)
347{
348 return ((val) << MDP4_OVLP_SIZE_WIDTH__SHIFT) & MDP4_OVLP_SIZE_WIDTH__MASK;
349}
350
351static inline uint32_t REG_MDP4_OVLP_BASE(uint32_t i0) { return 0x0000000c + __offset_OVLP(i0); }
352
353static inline uint32_t REG_MDP4_OVLP_STRIDE(uint32_t i0) { return 0x00000010 + __offset_OVLP(i0); }
354
355static inline uint32_t REG_MDP4_OVLP_OPMODE(uint32_t i0) { return 0x00000014 + __offset_OVLP(i0); }
356
357static inline uint32_t __offset_STAGE(uint32_t idx)
358{
359 switch (idx) {
360 case 0: return 0x00000104;
361 case 1: return 0x00000124;
362 case 2: return 0x00000144;
363 case 3: return 0x00000160;
364 default: return INVALID_IDX(idx);
365 }
366}
367static inline uint32_t REG_MDP4_OVLP_STAGE(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE(i1); }
368
369static inline uint32_t REG_MDP4_OVLP_STAGE_OP(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE(i1); }
370#define MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK 0x00000003
371#define MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT 0
372static inline uint32_t MDP4_OVLP_STAGE_OP_FG_ALPHA(enum mpd4_alpha_type val)
373{
374 return ((val) << MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK;
375}
376#define MDP4_OVLP_STAGE_OP_FG_INV_ALPHA 0x00000004
377#define MDP4_OVLP_STAGE_OP_FG_MOD_ALPHA 0x00000008
378#define MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK 0x00000030
379#define MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT 4
380static inline uint32_t MDP4_OVLP_STAGE_OP_BG_ALPHA(enum mpd4_alpha_type val)
381{
382 return ((val) << MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK;
383}
384#define MDP4_OVLP_STAGE_OP_BG_INV_ALPHA 0x00000040
385#define MDP4_OVLP_STAGE_OP_BG_MOD_ALPHA 0x00000080
386#define MDP4_OVLP_STAGE_OP_FG_TRANSP 0x00000100
387#define MDP4_OVLP_STAGE_OP_BG_TRANSP 0x00000200
388
389static inline uint32_t REG_MDP4_OVLP_STAGE_FG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000004 + __offset_OVLP(i0) + __offset_STAGE(i1); }
390
391static inline uint32_t REG_MDP4_OVLP_STAGE_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000008 + __offset_OVLP(i0) + __offset_STAGE(i1); }
392
393static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000000c + __offset_OVLP(i0) + __offset_STAGE(i1); }
394
395static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000010 + __offset_OVLP(i0) + __offset_STAGE(i1); }
396
397static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000014 + __offset_OVLP(i0) + __offset_STAGE(i1); }
398
399static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000018 + __offset_OVLP(i0) + __offset_STAGE(i1); }
400
401static inline uint32_t __offset_STAGE_CO3(uint32_t idx)
402{
403 switch (idx) {
404 case 0: return 0x00001004;
405 case 1: return 0x00001404;
406 case 2: return 0x00001804;
407 case 3: return 0x00001b84;
408 default: return INVALID_IDX(idx);
409 }
410}
/*
 * Overlay (OVLP) stage-CO3, transparency and CSC registers, plus the DMA
 * op-mode and LUT registers.  Offsets are relative to __offset_OVLP(i0)
 * (per-overlay register block); i1 selects a stage or table entry.
 *
 * NOTE(review): this header appears to be machine-generated from an XML
 * register database (see the MDP4_XML guard at the end of the file); the
 * hex offsets are the hardware contract — do not hand-edit values.
 */

/* NOTE(review): STAGE_CO3 and STAGE_CO3_SEL expand to the identical address;
 * presumably the generator emits a group register and its first member at the
 * same offset — verify against the register database before relying on it. */
static inline uint32_t REG_MDP4_OVLP_STAGE_CO3(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE_CO3(i1); }

static inline uint32_t REG_MDP4_OVLP_STAGE_CO3_SEL(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE_CO3(i1); }
#define MDP4_OVLP_STAGE_CO3_SEL_FG_ALPHA 0x00000001

/* Per-overlay transparency key range (low/high, two components each). */
static inline uint32_t REG_MDP4_OVLP_TRANSP_LOW0(uint32_t i0) { return 0x00000180 + __offset_OVLP(i0); }

static inline uint32_t REG_MDP4_OVLP_TRANSP_LOW1(uint32_t i0) { return 0x00000184 + __offset_OVLP(i0); }

static inline uint32_t REG_MDP4_OVLP_TRANSP_HIGH0(uint32_t i0) { return 0x00000188 + __offset_OVLP(i0); }

static inline uint32_t REG_MDP4_OVLP_TRANSP_HIGH1(uint32_t i0) { return 0x0000018c + __offset_OVLP(i0); }

static inline uint32_t REG_MDP4_OVLP_CSC_CONFIG(uint32_t i0) { return 0x00000200 + __offset_OVLP(i0); }

static inline uint32_t REG_MDP4_OVLP_CSC(uint32_t i0) { return 0x00002000 + __offset_OVLP(i0); }


/* Color-space-conversion tables: matrix (MV), pre/post bias (BV) and
 * pre/post limit (LV) vectors; i1 indexes 32-bit entries.
 * The *_VAL variants alias the same address as the array register —
 * presumably the generator's array-vs-element views of one table. */
static inline uint32_t REG_MDP4_OVLP_CSC_MV(uint32_t i0, uint32_t i1) { return 0x00002400 + __offset_OVLP(i0) + 0x4*i1; }

static inline uint32_t REG_MDP4_OVLP_CSC_MV_VAL(uint32_t i0, uint32_t i1) { return 0x00002400 + __offset_OVLP(i0) + 0x4*i1; }

static inline uint32_t REG_MDP4_OVLP_CSC_PRE_BV(uint32_t i0, uint32_t i1) { return 0x00002500 + __offset_OVLP(i0) + 0x4*i1; }

static inline uint32_t REG_MDP4_OVLP_CSC_PRE_BV_VAL(uint32_t i0, uint32_t i1) { return 0x00002500 + __offset_OVLP(i0) + 0x4*i1; }

static inline uint32_t REG_MDP4_OVLP_CSC_POST_BV(uint32_t i0, uint32_t i1) { return 0x00002580 + __offset_OVLP(i0) + 0x4*i1; }

static inline uint32_t REG_MDP4_OVLP_CSC_POST_BV_VAL(uint32_t i0, uint32_t i1) { return 0x00002580 + __offset_OVLP(i0) + 0x4*i1; }

static inline uint32_t REG_MDP4_OVLP_CSC_PRE_LV(uint32_t i0, uint32_t i1) { return 0x00002600 + __offset_OVLP(i0) + 0x4*i1; }

static inline uint32_t REG_MDP4_OVLP_CSC_PRE_LV_VAL(uint32_t i0, uint32_t i1) { return 0x00002600 + __offset_OVLP(i0) + 0x4*i1; }

static inline uint32_t REG_MDP4_OVLP_CSC_POST_LV(uint32_t i0, uint32_t i1) { return 0x00002680 + __offset_OVLP(i0) + 0x4*i1; }

static inline uint32_t REG_MDP4_OVLP_CSC_POST_LV_VAL(uint32_t i0, uint32_t i1) { return 0x00002680 + __offset_OVLP(i0) + 0x4*i1; }

/* Absolute (not block-relative) DMA op-mode registers. */
#define REG_MDP4_DMA_P_OP_MODE 0x00090070

/* Gamma/IGC LUT banks: i0 selects a 0x400-byte LUT, i1 a 32-bit entry. */
static inline uint32_t REG_MDP4_LUTN(uint32_t i0) { return 0x00094800 + 0x400*i0; }

static inline uint32_t REG_MDP4_LUTN_LUT(uint32_t i0, uint32_t i1) { return 0x00094800 + 0x400*i0 + 0x4*i1; }

static inline uint32_t REG_MDP4_LUTN_LUT_VAL(uint32_t i0, uint32_t i1) { return 0x00094800 + 0x400*i0 + 0x4*i1; }

#define REG_MDP4_DMA_S_OP_MODE 0x000a0028

static inline uint32_t REG_MDP4_DMA_E_QUANT(uint32_t i0) { return 0x000b0070 + 0x4*i0; }

461static inline uint32_t __offset_DMA(enum mdp4_dma idx)
462{
463 switch (idx) {
464 case DMA_P: return 0x00090000;
465 case DMA_S: return 0x000a0000;
466 case DMA_E: return 0x000b0000;
467 default: return INVALID_IDX(idx);
468 }
469}
/*
 * DMA engine (DMA_P/DMA_S/DMA_E) register block.  All offsets below are
 * relative to __offset_DMA(i0).  The MDP4_*() inline helpers build a
 * shifted-and-masked bitfield value for the corresponding register.
 *
 * NOTE(review): the bpc enum tag is spelled 'mpd4_bpc' (not 'mdp4_bpc')
 * consistently in this generated header — matches the declaration elsewhere
 * in the file, so it must not be "fixed" locally.
 */
static inline uint32_t REG_MDP4_DMA(enum mdp4_dma i0) { return 0x00000000 + __offset_DMA(i0); }

static inline uint32_t REG_MDP4_DMA_CONFIG(enum mdp4_dma i0) { return 0x00000000 + __offset_DMA(i0); }
#define MDP4_DMA_CONFIG_G_BPC__MASK 0x00000003
#define MDP4_DMA_CONFIG_G_BPC__SHIFT 0
static inline uint32_t MDP4_DMA_CONFIG_G_BPC(enum mpd4_bpc val)
{
	return ((val) << MDP4_DMA_CONFIG_G_BPC__SHIFT) & MDP4_DMA_CONFIG_G_BPC__MASK;
}
#define MDP4_DMA_CONFIG_B_BPC__MASK 0x0000000c
#define MDP4_DMA_CONFIG_B_BPC__SHIFT 2
static inline uint32_t MDP4_DMA_CONFIG_B_BPC(enum mpd4_bpc val)
{
	return ((val) << MDP4_DMA_CONFIG_B_BPC__SHIFT) & MDP4_DMA_CONFIG_B_BPC__MASK;
}
#define MDP4_DMA_CONFIG_R_BPC__MASK 0x00000030
#define MDP4_DMA_CONFIG_R_BPC__SHIFT 4
static inline uint32_t MDP4_DMA_CONFIG_R_BPC(enum mpd4_bpc val)
{
	return ((val) << MDP4_DMA_CONFIG_R_BPC__SHIFT) & MDP4_DMA_CONFIG_R_BPC__MASK;
}
#define MDP4_DMA_CONFIG_PACK_ALIGN_MSB 0x00000080
#define MDP4_DMA_CONFIG_PACK__MASK 0x0000ff00
#define MDP4_DMA_CONFIG_PACK__SHIFT 8
static inline uint32_t MDP4_DMA_CONFIG_PACK(uint32_t val)
{
	return ((val) << MDP4_DMA_CONFIG_PACK__SHIFT) & MDP4_DMA_CONFIG_PACK__MASK;
}
#define MDP4_DMA_CONFIG_DEFLKR_EN 0x01000000
/* NOTE(review): DITHER_EN shares bit 24 with DEFLKR_EN above — looks like a
 * register-database quirk; confirm against the MDP4 hardware docs before
 * relying on either name independently. */
#define MDP4_DMA_CONFIG_DITHER_EN 0x01000000

/* Source dimensions: height in the upper 16 bits, width in the lower 16. */
static inline uint32_t REG_MDP4_DMA_SRC_SIZE(enum mdp4_dma i0) { return 0x00000004 + __offset_DMA(i0); }
#define MDP4_DMA_SRC_SIZE_HEIGHT__MASK 0xffff0000
#define MDP4_DMA_SRC_SIZE_HEIGHT__SHIFT 16
static inline uint32_t MDP4_DMA_SRC_SIZE_HEIGHT(uint32_t val)
{
	return ((val) << MDP4_DMA_SRC_SIZE_HEIGHT__SHIFT) & MDP4_DMA_SRC_SIZE_HEIGHT__MASK;
}
#define MDP4_DMA_SRC_SIZE_WIDTH__MASK 0x0000ffff
#define MDP4_DMA_SRC_SIZE_WIDTH__SHIFT 0
static inline uint32_t MDP4_DMA_SRC_SIZE_WIDTH(uint32_t val)
{
	return ((val) << MDP4_DMA_SRC_SIZE_WIDTH__SHIFT) & MDP4_DMA_SRC_SIZE_WIDTH__MASK;
}

static inline uint32_t REG_MDP4_DMA_SRC_BASE(enum mdp4_dma i0) { return 0x00000008 + __offset_DMA(i0); }

static inline uint32_t REG_MDP4_DMA_SRC_STRIDE(enum mdp4_dma i0) { return 0x0000000c + __offset_DMA(i0); }

static inline uint32_t REG_MDP4_DMA_DST_SIZE(enum mdp4_dma i0) { return 0x00000010 + __offset_DMA(i0); }
#define MDP4_DMA_DST_SIZE_HEIGHT__MASK 0xffff0000
#define MDP4_DMA_DST_SIZE_HEIGHT__SHIFT 16
static inline uint32_t MDP4_DMA_DST_SIZE_HEIGHT(uint32_t val)
{
	return ((val) << MDP4_DMA_DST_SIZE_HEIGHT__SHIFT) & MDP4_DMA_DST_SIZE_HEIGHT__MASK;
}
#define MDP4_DMA_DST_SIZE_WIDTH__MASK 0x0000ffff
#define MDP4_DMA_DST_SIZE_WIDTH__SHIFT 0
static inline uint32_t MDP4_DMA_DST_SIZE_WIDTH(uint32_t val)
{
	return ((val) << MDP4_DMA_DST_SIZE_WIDTH__SHIFT) & MDP4_DMA_DST_SIZE_WIDTH__MASK;
}

/* Hardware cursor: 7-bit width/height fields, i.e. max 127 per the masks. */
static inline uint32_t REG_MDP4_DMA_CURSOR_SIZE(enum mdp4_dma i0) { return 0x00000044 + __offset_DMA(i0); }
#define MDP4_DMA_CURSOR_SIZE_WIDTH__MASK 0x0000007f
#define MDP4_DMA_CURSOR_SIZE_WIDTH__SHIFT 0
static inline uint32_t MDP4_DMA_CURSOR_SIZE_WIDTH(uint32_t val)
{
	return ((val) << MDP4_DMA_CURSOR_SIZE_WIDTH__SHIFT) & MDP4_DMA_CURSOR_SIZE_WIDTH__MASK;
}
#define MDP4_DMA_CURSOR_SIZE_HEIGHT__MASK 0x007f0000
#define MDP4_DMA_CURSOR_SIZE_HEIGHT__SHIFT 16
static inline uint32_t MDP4_DMA_CURSOR_SIZE_HEIGHT(uint32_t val)
{
	return ((val) << MDP4_DMA_CURSOR_SIZE_HEIGHT__SHIFT) & MDP4_DMA_CURSOR_SIZE_HEIGHT__MASK;
}

static inline uint32_t REG_MDP4_DMA_CURSOR_BASE(enum mdp4_dma i0) { return 0x00000048 + __offset_DMA(i0); }

static inline uint32_t REG_MDP4_DMA_CURSOR_POS(enum mdp4_dma i0) { return 0x0000004c + __offset_DMA(i0); }
#define MDP4_DMA_CURSOR_POS_X__MASK 0x0000ffff
#define MDP4_DMA_CURSOR_POS_X__SHIFT 0
static inline uint32_t MDP4_DMA_CURSOR_POS_X(uint32_t val)
{
	return ((val) << MDP4_DMA_CURSOR_POS_X__SHIFT) & MDP4_DMA_CURSOR_POS_X__MASK;
}
#define MDP4_DMA_CURSOR_POS_Y__MASK 0xffff0000
#define MDP4_DMA_CURSOR_POS_Y__SHIFT 16
static inline uint32_t MDP4_DMA_CURSOR_POS_Y(uint32_t val)
{
	return ((val) << MDP4_DMA_CURSOR_POS_Y__SHIFT) & MDP4_DMA_CURSOR_POS_Y__MASK;
}

static inline uint32_t REG_MDP4_DMA_CURSOR_BLEND_CONFIG(enum mdp4_dma i0) { return 0x00000060 + __offset_DMA(i0); }
#define MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN 0x00000001
#define MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__MASK 0x00000006
#define MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__SHIFT 1
static inline uint32_t MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(enum mdp4_cursor_format val)
{
	return ((val) << MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__SHIFT) & MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__MASK;
}
#define MDP4_DMA_CURSOR_BLEND_CONFIG_TRANSP_EN 0x00000008

static inline uint32_t REG_MDP4_DMA_CURSOR_BLEND_PARAM(enum mdp4_dma i0) { return 0x00000064 + __offset_DMA(i0); }

static inline uint32_t REG_MDP4_DMA_BLEND_TRANS_LOW(enum mdp4_dma i0) { return 0x00000068 + __offset_DMA(i0); }

static inline uint32_t REG_MDP4_DMA_BLEND_TRANS_HIGH(enum mdp4_dma i0) { return 0x0000006c + __offset_DMA(i0); }

static inline uint32_t REG_MDP4_DMA_FETCH_CONFIG(enum mdp4_dma i0) { return 0x00001004 + __offset_DMA(i0); }

static inline uint32_t REG_MDP4_DMA_CSC(enum mdp4_dma i0) { return 0x00003000 + __offset_DMA(i0); }


/* Per-DMA CSC tables (matrix, bias, limit vectors); *_VAL aliases the array
 * register at the same address — same generator convention as the OVLP CSC. */
static inline uint32_t REG_MDP4_DMA_CSC_MV(enum mdp4_dma i0, uint32_t i1) { return 0x00003400 + __offset_DMA(i0) + 0x4*i1; }

static inline uint32_t REG_MDP4_DMA_CSC_MV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003400 + __offset_DMA(i0) + 0x4*i1; }

static inline uint32_t REG_MDP4_DMA_CSC_PRE_BV(enum mdp4_dma i0, uint32_t i1) { return 0x00003500 + __offset_DMA(i0) + 0x4*i1; }

static inline uint32_t REG_MDP4_DMA_CSC_PRE_BV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003500 + __offset_DMA(i0) + 0x4*i1; }

static inline uint32_t REG_MDP4_DMA_CSC_POST_BV(enum mdp4_dma i0, uint32_t i1) { return 0x00003580 + __offset_DMA(i0) + 0x4*i1; }

static inline uint32_t REG_MDP4_DMA_CSC_POST_BV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003580 + __offset_DMA(i0) + 0x4*i1; }

static inline uint32_t REG_MDP4_DMA_CSC_PRE_LV(enum mdp4_dma i0, uint32_t i1) { return 0x00003600 + __offset_DMA(i0) + 0x4*i1; }

static inline uint32_t REG_MDP4_DMA_CSC_PRE_LV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003600 + __offset_DMA(i0) + 0x4*i1; }

static inline uint32_t REG_MDP4_DMA_CSC_POST_LV(enum mdp4_dma i0, uint32_t i1) { return 0x00003680 + __offset_DMA(i0) + 0x4*i1; }

static inline uint32_t REG_MDP4_DMA_CSC_POST_LV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003680 + __offset_DMA(i0) + 0x4*i1; }

/*
 * Source pipe (VG/RGB overlay pipe) register block: each pipe occupies a
 * 0x10000-byte window starting at 0x00020000, selected by i0.
 * NOTE(review): the pipe/bpc enum tags are spelled 'mpd4_*' throughout this
 * generated header — consistent with their declarations, leave as-is.
 */
static inline uint32_t REG_MDP4_PIPE(enum mpd4_pipe i0) { return 0x00020000 + 0x10000*i0; }

static inline uint32_t REG_MDP4_PIPE_SRC_SIZE(enum mpd4_pipe i0) { return 0x00020000 + 0x10000*i0; }
#define MDP4_PIPE_SRC_SIZE_HEIGHT__MASK 0xffff0000
#define MDP4_PIPE_SRC_SIZE_HEIGHT__SHIFT 16
static inline uint32_t MDP4_PIPE_SRC_SIZE_HEIGHT(uint32_t val)
{
	return ((val) << MDP4_PIPE_SRC_SIZE_HEIGHT__SHIFT) & MDP4_PIPE_SRC_SIZE_HEIGHT__MASK;
}
#define MDP4_PIPE_SRC_SIZE_WIDTH__MASK 0x0000ffff
#define MDP4_PIPE_SRC_SIZE_WIDTH__SHIFT 0
static inline uint32_t MDP4_PIPE_SRC_SIZE_WIDTH(uint32_t val)
{
	return ((val) << MDP4_PIPE_SRC_SIZE_WIDTH__SHIFT) & MDP4_PIPE_SRC_SIZE_WIDTH__MASK;
}

/* Source crop origin within the surface (y:x packed 16/16). */
static inline uint32_t REG_MDP4_PIPE_SRC_XY(enum mpd4_pipe i0) { return 0x00020004 + 0x10000*i0; }
#define MDP4_PIPE_SRC_XY_Y__MASK 0xffff0000
#define MDP4_PIPE_SRC_XY_Y__SHIFT 16
static inline uint32_t MDP4_PIPE_SRC_XY_Y(uint32_t val)
{
	return ((val) << MDP4_PIPE_SRC_XY_Y__SHIFT) & MDP4_PIPE_SRC_XY_Y__MASK;
}
#define MDP4_PIPE_SRC_XY_X__MASK 0x0000ffff
#define MDP4_PIPE_SRC_XY_X__SHIFT 0
static inline uint32_t MDP4_PIPE_SRC_XY_X(uint32_t val)
{
	return ((val) << MDP4_PIPE_SRC_XY_X__SHIFT) & MDP4_PIPE_SRC_XY_X__MASK;
}

static inline uint32_t REG_MDP4_PIPE_DST_SIZE(enum mpd4_pipe i0) { return 0x00020008 + 0x10000*i0; }
#define MDP4_PIPE_DST_SIZE_HEIGHT__MASK 0xffff0000
#define MDP4_PIPE_DST_SIZE_HEIGHT__SHIFT 16
static inline uint32_t MDP4_PIPE_DST_SIZE_HEIGHT(uint32_t val)
{
	return ((val) << MDP4_PIPE_DST_SIZE_HEIGHT__SHIFT) & MDP4_PIPE_DST_SIZE_HEIGHT__MASK;
}
#define MDP4_PIPE_DST_SIZE_WIDTH__MASK 0x0000ffff
#define MDP4_PIPE_DST_SIZE_WIDTH__SHIFT 0
static inline uint32_t MDP4_PIPE_DST_SIZE_WIDTH(uint32_t val)
{
	return ((val) << MDP4_PIPE_DST_SIZE_WIDTH__SHIFT) & MDP4_PIPE_DST_SIZE_WIDTH__MASK;
}

static inline uint32_t REG_MDP4_PIPE_DST_XY(enum mpd4_pipe i0) { return 0x0002000c + 0x10000*i0; }
#define MDP4_PIPE_DST_XY_Y__MASK 0xffff0000
#define MDP4_PIPE_DST_XY_Y__SHIFT 16
static inline uint32_t MDP4_PIPE_DST_XY_Y(uint32_t val)
{
	return ((val) << MDP4_PIPE_DST_XY_Y__SHIFT) & MDP4_PIPE_DST_XY_Y__MASK;
}
#define MDP4_PIPE_DST_XY_X__MASK 0x0000ffff
#define MDP4_PIPE_DST_XY_X__SHIFT 0
static inline uint32_t MDP4_PIPE_DST_XY_X(uint32_t val)
{
	return ((val) << MDP4_PIPE_DST_XY_X__SHIFT) & MDP4_PIPE_DST_XY_X__MASK;
}

/* Plane base addresses (up to three planes for planar YUV formats). */
static inline uint32_t REG_MDP4_PIPE_SRCP0_BASE(enum mpd4_pipe i0) { return 0x00020010 + 0x10000*i0; }

static inline uint32_t REG_MDP4_PIPE_SRCP1_BASE(enum mpd4_pipe i0) { return 0x00020014 + 0x10000*i0; }

static inline uint32_t REG_MDP4_PIPE_SRCP2_BASE(enum mpd4_pipe i0) { return 0x00020018 + 0x10000*i0; }

/* Per-plane strides, two 16-bit fields per register (P0/P1 and P2/P3). */
static inline uint32_t REG_MDP4_PIPE_SRC_STRIDE_A(enum mpd4_pipe i0) { return 0x00020040 + 0x10000*i0; }
#define MDP4_PIPE_SRC_STRIDE_A_P0__MASK 0x0000ffff
#define MDP4_PIPE_SRC_STRIDE_A_P0__SHIFT 0
static inline uint32_t MDP4_PIPE_SRC_STRIDE_A_P0(uint32_t val)
{
	return ((val) << MDP4_PIPE_SRC_STRIDE_A_P0__SHIFT) & MDP4_PIPE_SRC_STRIDE_A_P0__MASK;
}
#define MDP4_PIPE_SRC_STRIDE_A_P1__MASK 0xffff0000
#define MDP4_PIPE_SRC_STRIDE_A_P1__SHIFT 16
static inline uint32_t MDP4_PIPE_SRC_STRIDE_A_P1(uint32_t val)
{
	return ((val) << MDP4_PIPE_SRC_STRIDE_A_P1__SHIFT) & MDP4_PIPE_SRC_STRIDE_A_P1__MASK;
}

static inline uint32_t REG_MDP4_PIPE_SRC_STRIDE_B(enum mpd4_pipe i0) { return 0x00020044 + 0x10000*i0; }
#define MDP4_PIPE_SRC_STRIDE_B_P2__MASK 0x0000ffff
#define MDP4_PIPE_SRC_STRIDE_B_P2__SHIFT 0
static inline uint32_t MDP4_PIPE_SRC_STRIDE_B_P2(uint32_t val)
{
	return ((val) << MDP4_PIPE_SRC_STRIDE_B_P2__SHIFT) & MDP4_PIPE_SRC_STRIDE_B_P2__MASK;
}
#define MDP4_PIPE_SRC_STRIDE_B_P3__MASK 0xffff0000
#define MDP4_PIPE_SRC_STRIDE_B_P3__SHIFT 16
static inline uint32_t MDP4_PIPE_SRC_STRIDE_B_P3(uint32_t val)
{
	return ((val) << MDP4_PIPE_SRC_STRIDE_B_P3__SHIFT) & MDP4_PIPE_SRC_STRIDE_B_P3__MASK;
}

static inline uint32_t REG_MDP4_PIPE_FRAME_SIZE(enum mpd4_pipe i0) { return 0x00020048 + 0x10000*i0; }
#define MDP4_PIPE_FRAME_SIZE_HEIGHT__MASK 0xffff0000
#define MDP4_PIPE_FRAME_SIZE_HEIGHT__SHIFT 16
static inline uint32_t MDP4_PIPE_FRAME_SIZE_HEIGHT(uint32_t val)
{
	return ((val) << MDP4_PIPE_FRAME_SIZE_HEIGHT__SHIFT) & MDP4_PIPE_FRAME_SIZE_HEIGHT__MASK;
}
#define MDP4_PIPE_FRAME_SIZE_WIDTH__MASK 0x0000ffff
#define MDP4_PIPE_FRAME_SIZE_WIDTH__SHIFT 0
static inline uint32_t MDP4_PIPE_FRAME_SIZE_WIDTH(uint32_t val)
{
	return ((val) << MDP4_PIPE_FRAME_SIZE_WIDTH__SHIFT) & MDP4_PIPE_FRAME_SIZE_WIDTH__MASK;
}

/* Pixel format description: per-component bpc, bytes-per-pixel, unpack
 * ordering, and misc flags. */
static inline uint32_t REG_MDP4_PIPE_SRC_FORMAT(enum mpd4_pipe i0) { return 0x00020050 + 0x10000*i0; }
#define MDP4_PIPE_SRC_FORMAT_G_BPC__MASK 0x00000003
#define MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT 0
static inline uint32_t MDP4_PIPE_SRC_FORMAT_G_BPC(enum mpd4_bpc val)
{
	return ((val) << MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_G_BPC__MASK;
}
#define MDP4_PIPE_SRC_FORMAT_B_BPC__MASK 0x0000000c
#define MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT 2
static inline uint32_t MDP4_PIPE_SRC_FORMAT_B_BPC(enum mpd4_bpc val)
{
	return ((val) << MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_B_BPC__MASK;
}
#define MDP4_PIPE_SRC_FORMAT_R_BPC__MASK 0x00000030
#define MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT 4
static inline uint32_t MDP4_PIPE_SRC_FORMAT_R_BPC(enum mpd4_bpc val)
{
	return ((val) << MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_R_BPC__MASK;
}
#define MDP4_PIPE_SRC_FORMAT_A_BPC__MASK 0x000000c0
#define MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT 6
static inline uint32_t MDP4_PIPE_SRC_FORMAT_A_BPC(enum mpd4_bpc_alpha val)
{
	return ((val) << MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_A_BPC__MASK;
}
#define MDP4_PIPE_SRC_FORMAT_ALPHA_ENABLE 0x00000100
#define MDP4_PIPE_SRC_FORMAT_CPP__MASK 0x00000600
#define MDP4_PIPE_SRC_FORMAT_CPP__SHIFT 9
static inline uint32_t MDP4_PIPE_SRC_FORMAT_CPP(uint32_t val)
{
	return ((val) << MDP4_PIPE_SRC_FORMAT_CPP__SHIFT) & MDP4_PIPE_SRC_FORMAT_CPP__MASK;
}
#define MDP4_PIPE_SRC_FORMAT_ROTATED_90 0x00001000
#define MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK 0x00006000
#define MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT 13
static inline uint32_t MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT(uint32_t val)
{
	return ((val) << MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT) & MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK;
}
#define MDP4_PIPE_SRC_FORMAT_UNPACK_TIGHT 0x00020000
#define MDP4_PIPE_SRC_FORMAT_UNPACK_ALIGN_MSB 0x00040000
#define MDP4_PIPE_SRC_FORMAT_SOLID_FILL 0x00400000

/* Component unpack order: four 8-bit element selectors packed in one word. */
static inline uint32_t REG_MDP4_PIPE_SRC_UNPACK(enum mpd4_pipe i0) { return 0x00020054 + 0x10000*i0; }
#define MDP4_PIPE_SRC_UNPACK_ELEM0__MASK 0x000000ff
#define MDP4_PIPE_SRC_UNPACK_ELEM0__SHIFT 0
static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM0(uint32_t val)
{
	return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM0__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM0__MASK;
}
#define MDP4_PIPE_SRC_UNPACK_ELEM1__MASK 0x0000ff00
#define MDP4_PIPE_SRC_UNPACK_ELEM1__SHIFT 8
static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM1(uint32_t val)
{
	return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM1__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM1__MASK;
}
#define MDP4_PIPE_SRC_UNPACK_ELEM2__MASK 0x00ff0000
#define MDP4_PIPE_SRC_UNPACK_ELEM2__SHIFT 16
static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM2(uint32_t val)
{
	return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM2__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM2__MASK;
}
#define MDP4_PIPE_SRC_UNPACK_ELEM3__MASK 0xff000000
#define MDP4_PIPE_SRC_UNPACK_ELEM3__SHIFT 24
static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM3(uint32_t val)
{
	return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM3__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM3__MASK;
}

/* Pipe operation flags: scaling, flips, CSC/dither/IGC, deinterlace. */
static inline uint32_t REG_MDP4_PIPE_OP_MODE(enum mpd4_pipe i0) { return 0x00020058 + 0x10000*i0; }
#define MDP4_PIPE_OP_MODE_SCALEX_EN 0x00000001
#define MDP4_PIPE_OP_MODE_SCALEY_EN 0x00000002
#define MDP4_PIPE_OP_MODE_SRC_YCBCR 0x00000200
#define MDP4_PIPE_OP_MODE_DST_YCBCR 0x00000400
#define MDP4_PIPE_OP_MODE_CSC_EN 0x00000800
#define MDP4_PIPE_OP_MODE_FLIP_LR 0x00002000
#define MDP4_PIPE_OP_MODE_FLIP_UD 0x00004000
#define MDP4_PIPE_OP_MODE_DITHER_EN 0x00008000
#define MDP4_PIPE_OP_MODE_IGC_LUT_EN 0x00010000
#define MDP4_PIPE_OP_MODE_DEINT_EN 0x00040000
#define MDP4_PIPE_OP_MODE_DEINT_ODD_REF 0x00080000

static inline uint32_t REG_MDP4_PIPE_PHASEX_STEP(enum mpd4_pipe i0) { return 0x0002005c + 0x10000*i0; }

static inline uint32_t REG_MDP4_PIPE_PHASEY_STEP(enum mpd4_pipe i0) { return 0x00020060 + 0x10000*i0; }

static inline uint32_t REG_MDP4_PIPE_FETCH_CONFIG(enum mpd4_pipe i0) { return 0x00021004 + 0x10000*i0; }

static inline uint32_t REG_MDP4_PIPE_SOLID_COLOR(enum mpd4_pipe i0) { return 0x00021008 + 0x10000*i0; }

static inline uint32_t REG_MDP4_PIPE_CSC(enum mpd4_pipe i0) { return 0x00024000 + 0x10000*i0; }


/* Per-pipe CSC tables; *_VAL aliases the array register (same address),
 * matching the OVLP/DMA CSC convention above. */
static inline uint32_t REG_MDP4_PIPE_CSC_MV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024400 + 0x10000*i0 + 0x4*i1; }

static inline uint32_t REG_MDP4_PIPE_CSC_MV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024400 + 0x10000*i0 + 0x4*i1; }

static inline uint32_t REG_MDP4_PIPE_CSC_PRE_BV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024500 + 0x10000*i0 + 0x4*i1; }

static inline uint32_t REG_MDP4_PIPE_CSC_PRE_BV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024500 + 0x10000*i0 + 0x4*i1; }

static inline uint32_t REG_MDP4_PIPE_CSC_POST_BV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024580 + 0x10000*i0 + 0x4*i1; }

static inline uint32_t REG_MDP4_PIPE_CSC_POST_BV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024580 + 0x10000*i0 + 0x4*i1; }

static inline uint32_t REG_MDP4_PIPE_CSC_PRE_LV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024600 + 0x10000*i0 + 0x4*i1; }

static inline uint32_t REG_MDP4_PIPE_CSC_PRE_LV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024600 + 0x10000*i0 + 0x4*i1; }

static inline uint32_t REG_MDP4_PIPE_CSC_POST_LV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024680 + 0x10000*i0 + 0x4*i1; }

static inline uint32_t REG_MDP4_PIPE_CSC_POST_LV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024680 + 0x10000*i0 + 0x4*i1; }

/*
 * LCDC (parallel LCD controller) timing-generator registers, absolute
 * addresses in the 0x000c0000 block: sync pulse widths/periods, display
 * and active windows, border/underflow colors, polarity control.
 */
#define REG_MDP4_LCDC 0x000c0000

#define REG_MDP4_LCDC_ENABLE 0x000c0000

/* Hsync: pulse width (low 16 bits) and total period (high 16 bits). */
#define REG_MDP4_LCDC_HSYNC_CTRL 0x000c0004
#define MDP4_LCDC_HSYNC_CTRL_PULSEW__MASK 0x0000ffff
#define MDP4_LCDC_HSYNC_CTRL_PULSEW__SHIFT 0
static inline uint32_t MDP4_LCDC_HSYNC_CTRL_PULSEW(uint32_t val)
{
	return ((val) << MDP4_LCDC_HSYNC_CTRL_PULSEW__SHIFT) & MDP4_LCDC_HSYNC_CTRL_PULSEW__MASK;
}
#define MDP4_LCDC_HSYNC_CTRL_PERIOD__MASK 0xffff0000
#define MDP4_LCDC_HSYNC_CTRL_PERIOD__SHIFT 16
static inline uint32_t MDP4_LCDC_HSYNC_CTRL_PERIOD(uint32_t val)
{
	return ((val) << MDP4_LCDC_HSYNC_CTRL_PERIOD__SHIFT) & MDP4_LCDC_HSYNC_CTRL_PERIOD__MASK;
}

#define REG_MDP4_LCDC_VSYNC_PERIOD 0x000c0008

#define REG_MDP4_LCDC_VSYNC_LEN 0x000c000c

/* Horizontal display window: start/end columns packed 16/16. */
#define REG_MDP4_LCDC_DISPLAY_HCTRL 0x000c0010
#define MDP4_LCDC_DISPLAY_HCTRL_START__MASK 0x0000ffff
#define MDP4_LCDC_DISPLAY_HCTRL_START__SHIFT 0
static inline uint32_t MDP4_LCDC_DISPLAY_HCTRL_START(uint32_t val)
{
	return ((val) << MDP4_LCDC_DISPLAY_HCTRL_START__SHIFT) & MDP4_LCDC_DISPLAY_HCTRL_START__MASK;
}
#define MDP4_LCDC_DISPLAY_HCTRL_END__MASK 0xffff0000
#define MDP4_LCDC_DISPLAY_HCTRL_END__SHIFT 16
static inline uint32_t MDP4_LCDC_DISPLAY_HCTRL_END(uint32_t val)
{
	return ((val) << MDP4_LCDC_DISPLAY_HCTRL_END__SHIFT) & MDP4_LCDC_DISPLAY_HCTRL_END__MASK;
}

#define REG_MDP4_LCDC_DISPLAY_VSTART 0x000c0014

#define REG_MDP4_LCDC_DISPLAY_VEND 0x000c0018

/* Active region: 15-bit start/end fields, top bit enables ACTIVE_START_X. */
#define REG_MDP4_LCDC_ACTIVE_HCTL 0x000c001c
#define MDP4_LCDC_ACTIVE_HCTL_START__MASK 0x00007fff
#define MDP4_LCDC_ACTIVE_HCTL_START__SHIFT 0
static inline uint32_t MDP4_LCDC_ACTIVE_HCTL_START(uint32_t val)
{
	return ((val) << MDP4_LCDC_ACTIVE_HCTL_START__SHIFT) & MDP4_LCDC_ACTIVE_HCTL_START__MASK;
}
#define MDP4_LCDC_ACTIVE_HCTL_END__MASK 0x7fff0000
#define MDP4_LCDC_ACTIVE_HCTL_END__SHIFT 16
static inline uint32_t MDP4_LCDC_ACTIVE_HCTL_END(uint32_t val)
{
	return ((val) << MDP4_LCDC_ACTIVE_HCTL_END__SHIFT) & MDP4_LCDC_ACTIVE_HCTL_END__MASK;
}
#define MDP4_LCDC_ACTIVE_HCTL_ACTIVE_START_X 0x80000000

#define REG_MDP4_LCDC_ACTIVE_VSTART 0x000c0020

#define REG_MDP4_LCDC_ACTIVE_VEND 0x000c0024

#define REG_MDP4_LCDC_BORDER_CLR 0x000c0028

/* 24-bit underflow color; top bit enables underflow recovery. */
#define REG_MDP4_LCDC_UNDERFLOW_CLR 0x000c002c
#define MDP4_LCDC_UNDERFLOW_CLR_COLOR__MASK 0x00ffffff
#define MDP4_LCDC_UNDERFLOW_CLR_COLOR__SHIFT 0
static inline uint32_t MDP4_LCDC_UNDERFLOW_CLR_COLOR(uint32_t val)
{
	return ((val) << MDP4_LCDC_UNDERFLOW_CLR_COLOR__SHIFT) & MDP4_LCDC_UNDERFLOW_CLR_COLOR__MASK;
}
#define MDP4_LCDC_UNDERFLOW_CLR_ENABLE_RECOVERY 0x80000000

#define REG_MDP4_LCDC_HSYNC_SKEW 0x000c0030

#define REG_MDP4_LCDC_TEST_CNTL 0x000c0034

/* Active-low polarity selects for hsync/vsync/data-enable. */
#define REG_MDP4_LCDC_CTRL_POLARITY 0x000c0038
#define MDP4_LCDC_CTRL_POLARITY_HSYNC_LOW 0x00000001
#define MDP4_LCDC_CTRL_POLARITY_VSYNC_LOW 0x00000002
#define MDP4_LCDC_CTRL_POLARITY_DATA_EN_LOW 0x00000004

/*
 * DTV (digital TV / HDMI path) timing-generator registers in the
 * 0x000d0000 block.  Same field layout pattern as the LCDC block, but
 * note the register offsets within the block differ slightly.
 */
#define REG_MDP4_DTV 0x000d0000

#define REG_MDP4_DTV_ENABLE 0x000d0000

/* Hsync: pulse width (low 16 bits) and total period (high 16 bits). */
#define REG_MDP4_DTV_HSYNC_CTRL 0x000d0004
#define MDP4_DTV_HSYNC_CTRL_PULSEW__MASK 0x0000ffff
#define MDP4_DTV_HSYNC_CTRL_PULSEW__SHIFT 0
static inline uint32_t MDP4_DTV_HSYNC_CTRL_PULSEW(uint32_t val)
{
	return ((val) << MDP4_DTV_HSYNC_CTRL_PULSEW__SHIFT) & MDP4_DTV_HSYNC_CTRL_PULSEW__MASK;
}
#define MDP4_DTV_HSYNC_CTRL_PERIOD__MASK 0xffff0000
#define MDP4_DTV_HSYNC_CTRL_PERIOD__SHIFT 16
static inline uint32_t MDP4_DTV_HSYNC_CTRL_PERIOD(uint32_t val)
{
	return ((val) << MDP4_DTV_HSYNC_CTRL_PERIOD__SHIFT) & MDP4_DTV_HSYNC_CTRL_PERIOD__MASK;
}

#define REG_MDP4_DTV_VSYNC_PERIOD 0x000d0008

#define REG_MDP4_DTV_VSYNC_LEN 0x000d000c

/* Horizontal display window: start/end columns packed 16/16. */
#define REG_MDP4_DTV_DISPLAY_HCTRL 0x000d0018
#define MDP4_DTV_DISPLAY_HCTRL_START__MASK 0x0000ffff
#define MDP4_DTV_DISPLAY_HCTRL_START__SHIFT 0
static inline uint32_t MDP4_DTV_DISPLAY_HCTRL_START(uint32_t val)
{
	return ((val) << MDP4_DTV_DISPLAY_HCTRL_START__SHIFT) & MDP4_DTV_DISPLAY_HCTRL_START__MASK;
}
#define MDP4_DTV_DISPLAY_HCTRL_END__MASK 0xffff0000
#define MDP4_DTV_DISPLAY_HCTRL_END__SHIFT 16
static inline uint32_t MDP4_DTV_DISPLAY_HCTRL_END(uint32_t val)
{
	return ((val) << MDP4_DTV_DISPLAY_HCTRL_END__SHIFT) & MDP4_DTV_DISPLAY_HCTRL_END__MASK;
}

#define REG_MDP4_DTV_DISPLAY_VSTART 0x000d001c

#define REG_MDP4_DTV_DISPLAY_VEND 0x000d0020

/* Active region: 15-bit start/end fields, top bit enables ACTIVE_START_X. */
#define REG_MDP4_DTV_ACTIVE_HCTL 0x000d002c
#define MDP4_DTV_ACTIVE_HCTL_START__MASK 0x00007fff
#define MDP4_DTV_ACTIVE_HCTL_START__SHIFT 0
static inline uint32_t MDP4_DTV_ACTIVE_HCTL_START(uint32_t val)
{
	return ((val) << MDP4_DTV_ACTIVE_HCTL_START__SHIFT) & MDP4_DTV_ACTIVE_HCTL_START__MASK;
}
#define MDP4_DTV_ACTIVE_HCTL_END__MASK 0x7fff0000
#define MDP4_DTV_ACTIVE_HCTL_END__SHIFT 16
static inline uint32_t MDP4_DTV_ACTIVE_HCTL_END(uint32_t val)
{
	return ((val) << MDP4_DTV_ACTIVE_HCTL_END__SHIFT) & MDP4_DTV_ACTIVE_HCTL_END__MASK;
}
#define MDP4_DTV_ACTIVE_HCTL_ACTIVE_START_X 0x80000000

#define REG_MDP4_DTV_ACTIVE_VSTART 0x000d0030

#define REG_MDP4_DTV_ACTIVE_VEND 0x000d0038

#define REG_MDP4_DTV_BORDER_CLR 0x000d0040

/* 24-bit underflow color; top bit enables underflow recovery. */
#define REG_MDP4_DTV_UNDERFLOW_CLR 0x000d0044
#define MDP4_DTV_UNDERFLOW_CLR_COLOR__MASK 0x00ffffff
#define MDP4_DTV_UNDERFLOW_CLR_COLOR__SHIFT 0
static inline uint32_t MDP4_DTV_UNDERFLOW_CLR_COLOR(uint32_t val)
{
	return ((val) << MDP4_DTV_UNDERFLOW_CLR_COLOR__SHIFT) & MDP4_DTV_UNDERFLOW_CLR_COLOR__MASK;
}
#define MDP4_DTV_UNDERFLOW_CLR_ENABLE_RECOVERY 0x80000000

#define REG_MDP4_DTV_HSYNC_SKEW 0x000d0048

#define REG_MDP4_DTV_TEST_CNTL 0x000d004c

/* Active-low polarity selects for hsync/vsync/data-enable. */
#define REG_MDP4_DTV_CTRL_POLARITY 0x000d0050
#define MDP4_DTV_CTRL_POLARITY_HSYNC_LOW 0x00000001
#define MDP4_DTV_CTRL_POLARITY_VSYNC_LOW 0x00000002
#define MDP4_DTV_CTRL_POLARITY_DATA_EN_LOW 0x00000004

/*
 * DSI (video-mode DSI panel path) timing-generator registers in the
 * 0x000e0000 block.  Same field layout pattern as the LCDC block.
 */
#define REG_MDP4_DSI 0x000e0000

#define REG_MDP4_DSI_ENABLE 0x000e0000

/* Hsync: pulse width (low 16 bits) and total period (high 16 bits). */
#define REG_MDP4_DSI_HSYNC_CTRL 0x000e0004
#define MDP4_DSI_HSYNC_CTRL_PULSEW__MASK 0x0000ffff
#define MDP4_DSI_HSYNC_CTRL_PULSEW__SHIFT 0
static inline uint32_t MDP4_DSI_HSYNC_CTRL_PULSEW(uint32_t val)
{
	return ((val) << MDP4_DSI_HSYNC_CTRL_PULSEW__SHIFT) & MDP4_DSI_HSYNC_CTRL_PULSEW__MASK;
}
#define MDP4_DSI_HSYNC_CTRL_PERIOD__MASK 0xffff0000
#define MDP4_DSI_HSYNC_CTRL_PERIOD__SHIFT 16
static inline uint32_t MDP4_DSI_HSYNC_CTRL_PERIOD(uint32_t val)
{
	return ((val) << MDP4_DSI_HSYNC_CTRL_PERIOD__SHIFT) & MDP4_DSI_HSYNC_CTRL_PERIOD__MASK;
}

#define REG_MDP4_DSI_VSYNC_PERIOD 0x000e0008

#define REG_MDP4_DSI_VSYNC_LEN 0x000e000c

/* Horizontal display window: start/end columns packed 16/16. */
#define REG_MDP4_DSI_DISPLAY_HCTRL 0x000e0010
#define MDP4_DSI_DISPLAY_HCTRL_START__MASK 0x0000ffff
#define MDP4_DSI_DISPLAY_HCTRL_START__SHIFT 0
static inline uint32_t MDP4_DSI_DISPLAY_HCTRL_START(uint32_t val)
{
	return ((val) << MDP4_DSI_DISPLAY_HCTRL_START__SHIFT) & MDP4_DSI_DISPLAY_HCTRL_START__MASK;
}
#define MDP4_DSI_DISPLAY_HCTRL_END__MASK 0xffff0000
#define MDP4_DSI_DISPLAY_HCTRL_END__SHIFT 16
static inline uint32_t MDP4_DSI_DISPLAY_HCTRL_END(uint32_t val)
{
	return ((val) << MDP4_DSI_DISPLAY_HCTRL_END__SHIFT) & MDP4_DSI_DISPLAY_HCTRL_END__MASK;
}

#define REG_MDP4_DSI_DISPLAY_VSTART 0x000e0014

#define REG_MDP4_DSI_DISPLAY_VEND 0x000e0018

/* Active region: 15-bit start/end fields, top bit enables ACTIVE_START_X. */
#define REG_MDP4_DSI_ACTIVE_HCTL 0x000e001c
#define MDP4_DSI_ACTIVE_HCTL_START__MASK 0x00007fff
#define MDP4_DSI_ACTIVE_HCTL_START__SHIFT 0
static inline uint32_t MDP4_DSI_ACTIVE_HCTL_START(uint32_t val)
{
	return ((val) << MDP4_DSI_ACTIVE_HCTL_START__SHIFT) & MDP4_DSI_ACTIVE_HCTL_START__MASK;
}
#define MDP4_DSI_ACTIVE_HCTL_END__MASK 0x7fff0000
#define MDP4_DSI_ACTIVE_HCTL_END__SHIFT 16
static inline uint32_t MDP4_DSI_ACTIVE_HCTL_END(uint32_t val)
{
	return ((val) << MDP4_DSI_ACTIVE_HCTL_END__SHIFT) & MDP4_DSI_ACTIVE_HCTL_END__MASK;
}
#define MDP4_DSI_ACTIVE_HCTL_ACTIVE_START_X 0x80000000

#define REG_MDP4_DSI_ACTIVE_VSTART 0x000e0020

#define REG_MDP4_DSI_ACTIVE_VEND 0x000e0024

#define REG_MDP4_DSI_BORDER_CLR 0x000e0028

/* 24-bit underflow color; top bit enables underflow recovery. */
#define REG_MDP4_DSI_UNDERFLOW_CLR 0x000e002c
#define MDP4_DSI_UNDERFLOW_CLR_COLOR__MASK 0x00ffffff
#define MDP4_DSI_UNDERFLOW_CLR_COLOR__SHIFT 0
static inline uint32_t MDP4_DSI_UNDERFLOW_CLR_COLOR(uint32_t val)
{
	return ((val) << MDP4_DSI_UNDERFLOW_CLR_COLOR__SHIFT) & MDP4_DSI_UNDERFLOW_CLR_COLOR__MASK;
}
#define MDP4_DSI_UNDERFLOW_CLR_ENABLE_RECOVERY 0x80000000

#define REG_MDP4_DSI_HSYNC_SKEW 0x000e0030

#define REG_MDP4_DSI_TEST_CNTL 0x000e0034

/* Active-low polarity selects for hsync/vsync/data-enable. */
#define REG_MDP4_DSI_CTRL_POLARITY 0x000e0038
#define MDP4_DSI_CTRL_POLARITY_HSYNC_LOW 0x00000001
#define MDP4_DSI_CTRL_POLARITY_VSYNC_LOW 0x00000002
#define MDP4_DSI_CTRL_POLARITY_DATA_EN_LOW 0x00000004


#endif /* MDP4_XML */
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp4/mdp4_crtc.c
new file mode 100644
index 000000000000..de6bea297cda
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_crtc.c
@@ -0,0 +1,685 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "mdp4_kms.h"
19
20#include <drm/drm_mode.h>
21#include "drm_crtc.h"
22#include "drm_crtc_helper.h"
23#include "drm_flip_work.h"
24
/* Per-crtc state: one MDP4 overlay engine + DMA channel driving one
 * display path.  Each crtc owns a single private plane.
 */
struct mdp4_crtc {
	struct drm_crtc base;
	char name[8];              /* eg. "DMA_P:0", for debug output */
	struct drm_plane *plane;   /* the private base-layer plane */
	int id;                    /* crtc index, used for vblank events */
	int ovlp;                  /* which OVLP (overlay processor) block */
	enum mdp4_dma dma;         /* which DMA channel feeds the display */
	bool enabled;              /* tracked so dpms toggles are idempotent */

	/* which mixer/encoder we route output to: */
	int mixer;

	struct {
		spinlock_t lock;   /* protects all fields below (irq vs ioctl) */
		bool stale;        /* true if next_* differ from scanout state */
		uint32_t width, height;

		/* next cursor to scan-out: */
		uint32_t next_iova;
		struct drm_gem_object *next_bo;

		/* current cursor being scanned out: */
		struct drm_gem_object *scanout_bo;
	} cursor;


	/* if there is a pending flip, these will be non-null: */
	struct drm_pending_vblank_event *event;
	struct work_struct pageflip_work;

	/* the fb that we currently hold a scanout ref to: */
	struct drm_framebuffer *fb;

	/* for unref'ing framebuffers after scanout completes: */
	struct drm_flip_work unref_fb_work;

	/* for unref'ing cursor bo's after scanout completes: */
	struct drm_flip_work unref_cursor_work;

	/* irq descriptors registered on demand with the mdp4 irq core: */
	struct mdp4_irq vblank;
	struct mdp4_irq err;
};
#define to_mdp4_crtc(x) container_of(x, struct mdp4_crtc, base)
68
69static struct mdp4_kms *get_kms(struct drm_crtc *crtc)
70{
71 struct msm_drm_private *priv = crtc->dev->dev_private;
72 return to_mdp4_kms(priv->kms);
73}
74
75static void update_fb(struct drm_crtc *crtc, bool async,
76 struct drm_framebuffer *new_fb)
77{
78 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
79 struct drm_framebuffer *old_fb = mdp4_crtc->fb;
80
81 if (old_fb)
82 drm_flip_work_queue(&mdp4_crtc->unref_fb_work, old_fb);
83
84 /* grab reference to incoming scanout fb: */
85 drm_framebuffer_reference(new_fb);
86 mdp4_crtc->base.fb = new_fb;
87 mdp4_crtc->fb = new_fb;
88
89 if (!async) {
90 /* enable vblank to pick up the old_fb */
91 mdp4_irq_register(get_kms(crtc), &mdp4_crtc->vblank);
92 }
93}
94
95static void complete_flip(struct drm_crtc *crtc, bool canceled)
96{
97 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
98 struct drm_device *dev = crtc->dev;
99 struct drm_pending_vblank_event *event;
100 unsigned long flags;
101
102 spin_lock_irqsave(&dev->event_lock, flags);
103 event = mdp4_crtc->event;
104 if (event) {
105 mdp4_crtc->event = NULL;
106 if (canceled)
107 event->base.destroy(&event->base);
108 else
109 drm_send_vblank_event(dev, mdp4_crtc->id, event);
110 }
111 spin_unlock_irqrestore(&dev->event_lock, flags);
112}
113
114static void crtc_flush(struct drm_crtc *crtc)
115{
116 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
117 struct mdp4_kms *mdp4_kms = get_kms(crtc);
118 uint32_t flush = 0;
119
120 flush |= pipe2flush(mdp4_plane_pipe(mdp4_crtc->plane));
121 flush |= ovlp2flush(mdp4_crtc->ovlp);
122
123 DBG("%s: flush=%08x", mdp4_crtc->name, flush);
124
125 mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
126}
127
128static void pageflip_worker(struct work_struct *work)
129{
130 struct mdp4_crtc *mdp4_crtc =
131 container_of(work, struct mdp4_crtc, pageflip_work);
132 struct drm_crtc *crtc = &mdp4_crtc->base;
133
134 mdp4_plane_set_scanout(mdp4_crtc->plane, crtc->fb);
135 crtc_flush(crtc);
136
137 /* enable vblank to complete flip: */
138 mdp4_irq_register(get_kms(crtc), &mdp4_crtc->vblank);
139}
140
141static void unref_fb_worker(struct drm_flip_work *work, void *val)
142{
143 struct mdp4_crtc *mdp4_crtc =
144 container_of(work, struct mdp4_crtc, unref_fb_work);
145 struct drm_device *dev = mdp4_crtc->base.dev;
146
147 mutex_lock(&dev->mode_config.mutex);
148 drm_framebuffer_unreference(val);
149 mutex_unlock(&dev->mode_config.mutex);
150}
151
152static void unref_cursor_worker(struct drm_flip_work *work, void *val)
153{
154 struct mdp4_crtc *mdp4_crtc =
155 container_of(work, struct mdp4_crtc, unref_cursor_work);
156 struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base);
157
158 msm_gem_put_iova(val, mdp4_kms->id);
159 drm_gem_object_unreference_unlocked(val);
160}
161
162static void mdp4_crtc_destroy(struct drm_crtc *crtc)
163{
164 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
165
166 mdp4_crtc->plane->funcs->destroy(mdp4_crtc->plane);
167
168 drm_crtc_cleanup(crtc);
169 drm_flip_work_cleanup(&mdp4_crtc->unref_fb_work);
170 drm_flip_work_cleanup(&mdp4_crtc->unref_cursor_work);
171
172 kfree(mdp4_crtc);
173}
174
175static void mdp4_crtc_dpms(struct drm_crtc *crtc, int mode)
176{
177 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
178 struct mdp4_kms *mdp4_kms = get_kms(crtc);
179 bool enabled = (mode == DRM_MODE_DPMS_ON);
180
181 DBG("%s: mode=%d", mdp4_crtc->name, mode);
182
183 if (enabled != mdp4_crtc->enabled) {
184 if (enabled) {
185 mdp4_enable(mdp4_kms);
186 mdp4_irq_register(mdp4_kms, &mdp4_crtc->err);
187 } else {
188 mdp4_irq_unregister(mdp4_kms, &mdp4_crtc->err);
189 mdp4_disable(mdp4_kms);
190 }
191 mdp4_crtc->enabled = enabled;
192 }
193}
194
/* No crtc-level mode constraints: accept any mode unmodified. */
static bool mdp4_crtc_mode_fixup(struct drm_crtc *crtc,
		const struct drm_display_mode *mode,
		struct drm_display_mode *adjusted_mode)
{
	return true;
}
201
/*
 * Program the OVLP blend stages and route this crtc's pipe into the
 * layer-mixer.  With only the single private plane, every stage is
 * configured for constant (opaque) alpha and transparency disabled.
 */
static void blend_setup(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	int i, ovlp = mdp4_crtc->ovlp;
	uint32_t mixer_cfg = 0;

	/*
	 * This probably would also need to be triggered by any attached
	 * plane when it changes.. for now since we are only using a single
	 * private plane, the configuration is hard-coded:
	 */

	/* disable the global transparency keys: */
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW0(ovlp), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW1(ovlp), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH0(ovlp), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH1(ovlp), 0);

	/* constant-alpha, no per-stage transparency, on all four stages: */
	for (i = 0; i < 4; i++) {
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_FG_ALPHA(ovlp, i), 0);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_BG_ALPHA(ovlp, i), 0);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_OP(ovlp, i),
				MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_CONST) |
				MDP4_OVLP_STAGE_OP_BG_ALPHA(BG_CONST));
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_CO3(ovlp, i), 0);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW0(ovlp, i), 0);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW1(ovlp, i), 0);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH0(ovlp, i), 0);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(ovlp, i), 0);
	}

	/* TODO single register for all CRTCs, so this won't work properly
	 * when multiple CRTCs are active..
	 */
	/* place our pipe at the base stage of mixer 0 or 1: */
	switch (mdp4_plane_pipe(mdp4_crtc->plane)) {
	case VG1:
		mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE0(STAGE_BASE) |
			COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1);
		break;
	case VG2:
		mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE1(STAGE_BASE) |
			COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1);
		break;
	case RGB1:
		mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE2(STAGE_BASE) |
			COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1);
		break;
	case RGB2:
		mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE3(STAGE_BASE) |
			COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1);
		break;
	case RGB3:
		mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE4(STAGE_BASE) |
			COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1);
		break;
	case VG3:
		mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE5(STAGE_BASE) |
			COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1);
		break;
	case VG4:
		mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE6(STAGE_BASE) |
			COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1);
		break;
	default:
		WARN_ON("invalid pipe");
		break;
	}
	mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg);
}
271
/*
 * Program DMA + OVLP geometry for the new display mode and push the
 * full-screen config down to the private plane.  All sizes come from
 * the adjusted_mode.
 */
static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
		struct drm_display_mode *mode,
		struct drm_display_mode *adjusted_mode,
		int x, int y,
		struct drm_framebuffer *old_fb)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	enum mdp4_dma dma = mdp4_crtc->dma;
	int ret, ovlp = mdp4_crtc->ovlp;

	/* the encoder-adjusted mode is the one we actually program: */
	mode = adjusted_mode;

	DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
			mdp4_crtc->name, mode->base.id, mode->name,
			mode->vrefresh, mode->clock,
			mode->hdisplay, mode->hsync_start,
			mode->hsync_end, mode->htotal,
			mode->vdisplay, mode->vsync_start,
			mode->vsync_end, mode->vtotal,
			mode->type, mode->flags);

	mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma),
			MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) |
			MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay));

	/* take data from pipe: */
	mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_BASE(dma), 0);
	mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_STRIDE(dma),
			crtc->fb->pitches[0]);
	mdp4_write(mdp4_kms, REG_MDP4_DMA_DST_SIZE(dma),
			MDP4_DMA_DST_SIZE_WIDTH(0) |
			MDP4_DMA_DST_SIZE_HEIGHT(0));

	mdp4_write(mdp4_kms, REG_MDP4_OVLP_BASE(ovlp), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_SIZE(ovlp),
			MDP4_OVLP_SIZE_WIDTH(mode->hdisplay) |
			MDP4_OVLP_SIZE_HEIGHT(mode->vdisplay));
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_STRIDE(ovlp),
			crtc->fb->pitches[0]);

	mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1);

	/* synchronously take a scanout ref on crtc->fb: */
	update_fb(crtc, false, crtc->fb);

	ret = mdp4_plane_mode_set(mdp4_crtc->plane, crtc, crtc->fb,
			0, 0, mode->hdisplay, mode->vdisplay,
			x << 16, y << 16,
			mode->hdisplay << 16, mode->vdisplay << 16);
	if (ret) {
		dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
				mdp4_crtc->name, ret);
		return ret;
	}

	/* NOTE(review): extra quantization setup appears specific to the
	 * external (DMA_E) path -- confirm against HW documentation.
	 */
	if (dma == DMA_E) {
		mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(0), 0x00ff0000);
		mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000);
		mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000);
	}

	return 0;
}
335
336static void mdp4_crtc_prepare(struct drm_crtc *crtc)
337{
338 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
339 DBG("%s", mdp4_crtc->name);
340 /* make sure we hold a ref to mdp clks while setting up mode: */
341 mdp4_enable(get_kms(crtc));
342 mdp4_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
343}
344
345static void mdp4_crtc_commit(struct drm_crtc *crtc)
346{
347 mdp4_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
348 crtc_flush(crtc);
349 /* drop the ref to mdp clk's that we got in prepare: */
350 mdp4_disable(get_kms(crtc));
351}
352
353static int mdp4_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
354 struct drm_framebuffer *old_fb)
355{
356 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
357 struct drm_plane *plane = mdp4_crtc->plane;
358 struct drm_display_mode *mode = &crtc->mode;
359
360 update_fb(crtc, false, crtc->fb);
361
362 return mdp4_plane_mode_set(plane, crtc, crtc->fb,
363 0, 0, mode->hdisplay, mode->vdisplay,
364 x << 16, y << 16,
365 mode->hdisplay << 16, mode->vdisplay << 16);
366}
367
/* Gamma LUT loading not implemented (required helper hook, no-op). */
static void mdp4_crtc_load_lut(struct drm_crtc *crtc)
{
}
371
372static int mdp4_crtc_page_flip(struct drm_crtc *crtc,
373 struct drm_framebuffer *new_fb,
374 struct drm_pending_vblank_event *event,
375 uint32_t page_flip_flags)
376{
377 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
378 struct drm_device *dev = crtc->dev;
379 struct drm_gem_object *obj;
380
381 if (mdp4_crtc->event) {
382 dev_err(dev->dev, "already pending flip!\n");
383 return -EBUSY;
384 }
385
386 obj = msm_framebuffer_bo(new_fb, 0);
387
388 mdp4_crtc->event = event;
389 update_fb(crtc, true, new_fb);
390
391 return msm_gem_queue_inactive_work(obj,
392 &mdp4_crtc->pageflip_work);
393}
394
/* No crtc properties are supported yet; reject all of them. */
static int mdp4_crtc_set_property(struct drm_crtc *crtc,
		struct drm_property *property, uint64_t val)
{
	// XXX
	return -EINVAL;
}
401
#define CURSOR_WIDTH 64
#define CURSOR_HEIGHT 64

/* called from IRQ to update cursor related registers (if needed). The
 * cursor registers, other than x/y position, appear not to be double
 * buffered, and changing them other than from vblank seems to trigger
 * underflow.
 */
static void update_cursor(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	enum mdp4_dma dma = mdp4_crtc->dma;
	unsigned long flags;

	/* cursor.lock serializes against mdp4_crtc_cursor_set() which
	 * stages next_bo/next_iova from ioctl context:
	 */
	spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
	if (mdp4_crtc->cursor.stale) {
		struct mdp4_kms *mdp4_kms = get_kms(crtc);
		struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo;
		struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo;
		uint32_t iova = mdp4_crtc->cursor.next_iova;

		if (next_bo) {
			/* take a obj ref + iova ref when we start scanning out: */
			drm_gem_object_reference(next_bo);
			msm_gem_get_iova_locked(next_bo, mdp4_kms->id, &iova);

			/* enable cursor: */
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma),
					MDP4_DMA_CURSOR_SIZE_WIDTH(mdp4_crtc->cursor.width) |
					MDP4_DMA_CURSOR_SIZE_HEIGHT(mdp4_crtc->cursor.height));
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), iova);
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma),
					MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB) |
					MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN);
		} else {
			/* disable cursor: */
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), 0);
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma),
					MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB));
		}

		/* and drop the iova ref + obj rev when done scanning out: */
		if (prev_bo)
			drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, prev_bo);

		mdp4_crtc->cursor.scanout_bo = next_bo;
		mdp4_crtc->cursor.stale = false;
	}
	spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
}
452
/*
 * Set (or clear, handle==0) the cursor image.  The bo lookup and iova
 * pin happen here in ioctl context; the actual register programming is
 * deferred to update_cursor() at the next vblank.  We hold an obj+iova
 * ref on the staged bo; update_cursor() takes its own refs for the
 * scanout copy.
 */
static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
		struct drm_file *file_priv, uint32_t handle,
		uint32_t width, uint32_t height)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_gem_object *cursor_bo, *old_bo;
	unsigned long flags;
	uint32_t iova;
	int ret;

	/* HW cursor is fixed at max 64x64 (CURSOR_WIDTH/HEIGHT): */
	if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
		dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height);
		return -EINVAL;
	}

	if (handle) {
		cursor_bo = drm_gem_object_lookup(dev, file_priv, handle);
		if (!cursor_bo)
			return -ENOENT;
	} else {
		cursor_bo = NULL;
	}

	if (cursor_bo) {
		ret = msm_gem_get_iova(cursor_bo, mdp4_kms->id, &iova);
		if (ret)
			goto fail;
	} else {
		iova = 0;
	}

	/* stage the new cursor for update_cursor() to pick up at vblank: */
	spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
	old_bo = mdp4_crtc->cursor.next_bo;
	mdp4_crtc->cursor.next_bo   = cursor_bo;
	mdp4_crtc->cursor.next_iova = iova;
	mdp4_crtc->cursor.width     = width;
	mdp4_crtc->cursor.height    = height;
	mdp4_crtc->cursor.stale     = true;
	spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);

	if (old_bo) {
		/* drop our previous reference: */
		msm_gem_put_iova(old_bo, mdp4_kms->id);
		drm_gem_object_unreference_unlocked(old_bo);
	}

	return 0;

fail:
	drm_gem_object_unreference_unlocked(cursor_bo);
	return ret;
}
507
508static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
509{
510 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
511 struct mdp4_kms *mdp4_kms = get_kms(crtc);
512 enum mdp4_dma dma = mdp4_crtc->dma;
513
514 mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma),
515 MDP4_DMA_CURSOR_POS_X(x) |
516 MDP4_DMA_CURSOR_POS_Y(y));
517
518 return 0;
519}
520
/* drm core crtc vfuncs: */
static const struct drm_crtc_funcs mdp4_crtc_funcs = {
	.set_config = drm_crtc_helper_set_config,
	.destroy = mdp4_crtc_destroy,
	.page_flip = mdp4_crtc_page_flip,
	.set_property = mdp4_crtc_set_property,
	.cursor_set = mdp4_crtc_cursor_set,
	.cursor_move = mdp4_crtc_cursor_move,
};
529
/* crtc helper vfuncs (modeset sequencing): */
static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = {
	.dpms = mdp4_crtc_dpms,
	.mode_fixup = mdp4_crtc_mode_fixup,
	.mode_set = mdp4_crtc_mode_set,
	.prepare = mdp4_crtc_prepare,
	.commit = mdp4_crtc_commit,
	.mode_set_base = mdp4_crtc_mode_set_base,
	.load_lut = mdp4_crtc_load_lut,
};
539
/*
 * Vblank irq handler: applies any staged cursor update (cursor regs
 * must be written during vblank -- see update_cursor()), completes a
 * pending page-flip, then disarms itself; the irq is re-registered on
 * demand by update_fb() / pageflip_worker().  Deferred fb/cursor unrefs
 * are committed to the driver workqueue.
 */
static void mdp4_crtc_vblank_irq(struct mdp4_irq *irq, uint32_t irqstatus)
{
	struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, vblank);
	struct drm_crtc *crtc = &mdp4_crtc->base;
	struct msm_drm_private *priv = crtc->dev->dev_private;

	update_cursor(crtc);
	complete_flip(crtc, false);
	mdp4_irq_unregister(get_kms(crtc), &mdp4_crtc->vblank);

	drm_flip_work_commit(&mdp4_crtc->unref_fb_work, priv->wq);
	drm_flip_work_commit(&mdp4_crtc->unref_cursor_work, priv->wq);
}
553
554static void mdp4_crtc_err_irq(struct mdp4_irq *irq, uint32_t irqstatus)
555{
556 struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, err);
557 struct drm_crtc *crtc = &mdp4_crtc->base;
558 DBG("%s: error: %08x", mdp4_crtc->name, irqstatus);
559 crtc_flush(crtc);
560}
561
562uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc)
563{
564 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
565 return mdp4_crtc->vblank.irqmask;
566}
567
/* Cancel (destroy rather than deliver) any pending page-flip event. */
void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc)
{
	complete_flip(crtc, true);
}
572
573/* set dma config, ie. the format the encoder wants. */
574void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config)
575{
576 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
577 struct mdp4_kms *mdp4_kms = get_kms(crtc);
578
579 mdp4_write(mdp4_kms, REG_MDP4_DMA_CONFIG(mdp4_crtc->dma), config);
580}
581
/* set interface for routing crtc->encoder: */
void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	uint32_t intf_sel;

	/* read-modify-write: INTF_SEL is shared by all display paths */
	intf_sel = mdp4_read(mdp4_kms, REG_MDP4_DISP_INTF_SEL);

	/* select which interface our DMA channel feeds: */
	switch (mdp4_crtc->dma) {
	case DMA_P:
		intf_sel &= ~MDP4_DISP_INTF_SEL_PRIM__MASK;
		intf_sel |= MDP4_DISP_INTF_SEL_PRIM(intf);
		break;
	case DMA_S:
		intf_sel &= ~MDP4_DISP_INTF_SEL_SEC__MASK;
		intf_sel |= MDP4_DISP_INTF_SEL_SEC(intf);
		break;
	case DMA_E:
		intf_sel &= ~MDP4_DISP_INTF_SEL_EXT__MASK;
		intf_sel |= MDP4_DISP_INTF_SEL_EXT(intf);
		break;
	}

	/* DSI video vs command mode are mutually exclusive bits; the
	 * chosen interface also determines which layer-mixer we use:
	 */
	if (intf == INTF_DSI_VIDEO) {
		intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_CMD;
		intf_sel |= MDP4_DISP_INTF_SEL_DSI_VIDEO;
		mdp4_crtc->mixer = 0;
	} else if (intf == INTF_DSI_CMD) {
		intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_VIDEO;
		intf_sel |= MDP4_DISP_INTF_SEL_DSI_CMD;
		mdp4_crtc->mixer = 0;
	} else if (intf == INTF_LCDC_DTV){
		mdp4_crtc->mixer = 1;
	}

	/* re-route our pipe into the (possibly new) mixer: */
	blend_setup(crtc);

	DBG("%s: intf_sel=%08x", mdp4_crtc->name, intf_sel);

	mdp4_write(mdp4_kms, REG_MDP4_DISP_INTF_SEL, intf_sel);
}
624
/* indexed by enum mdp4_dma; used to build the crtc debug name: */
static const char *dma_names[] = {
		"DMA_P", "DMA_S", "DMA_E",
};
628
629/* initialize crtc */
630struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
631 struct drm_plane *plane, int id, int ovlp_id,
632 enum mdp4_dma dma_id)
633{
634 struct drm_crtc *crtc = NULL;
635 struct mdp4_crtc *mdp4_crtc;
636 int ret;
637
638 mdp4_crtc = kzalloc(sizeof(*mdp4_crtc), GFP_KERNEL);
639 if (!mdp4_crtc) {
640 ret = -ENOMEM;
641 goto fail;
642 }
643
644 crtc = &mdp4_crtc->base;
645
646 mdp4_crtc->plane = plane;
647 mdp4_crtc->plane->crtc = crtc;
648
649 mdp4_crtc->ovlp = ovlp_id;
650 mdp4_crtc->dma = dma_id;
651
652 mdp4_crtc->vblank.irqmask = dma2irq(mdp4_crtc->dma);
653 mdp4_crtc->vblank.irq = mdp4_crtc_vblank_irq;
654
655 mdp4_crtc->err.irqmask = dma2err(mdp4_crtc->dma);
656 mdp4_crtc->err.irq = mdp4_crtc_err_irq;
657
658 snprintf(mdp4_crtc->name, sizeof(mdp4_crtc->name), "%s:%d",
659 dma_names[dma_id], ovlp_id);
660
661 spin_lock_init(&mdp4_crtc->cursor.lock);
662
663 ret = drm_flip_work_init(&mdp4_crtc->unref_fb_work, 16,
664 "unref fb", unref_fb_worker);
665 if (ret)
666 goto fail;
667
668 ret = drm_flip_work_init(&mdp4_crtc->unref_cursor_work, 64,
669 "unref cursor", unref_cursor_worker);
670
671 INIT_WORK(&mdp4_crtc->pageflip_work, pageflip_worker);
672
673 drm_crtc_init(dev, crtc, &mdp4_crtc_funcs);
674 drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);
675
676 mdp4_plane_install_properties(mdp4_crtc->plane, &crtc->base);
677
678 return crtc;
679
680fail:
681 if (crtc)
682 mdp4_crtc_destroy(crtc);
683
684 return ERR_PTR(ret);
685}
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_dtv_encoder.c b/drivers/gpu/drm/msm/mdp4/mdp4_dtv_encoder.c
new file mode 100644
index 000000000000..06d49e309d34
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_dtv_encoder.c
@@ -0,0 +1,317 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include <mach/clk.h>
19
20#include "mdp4_kms.h"
21#include "msm_connector.h"
22
23#include "drm_crtc.h"
24#include "drm_crtc_helper.h"
25
26
/* Encoder for the DTV (external/HDMI) path on DMA_E. */
struct mdp4_dtv_encoder {
	struct drm_encoder base;
	struct clk *src_clk;       /* pixel clock source, rate set from mode */
	struct clk *hdmi_clk;
	struct clk *mdp_clk;
	unsigned long int pixclock; /* in Hz, captured at mode_set time */
	bool enabled;              /* tracked so dpms toggles are idempotent */
	uint32_t bsc;              /* bus-scale client handle (0 if unused) */
};
#define to_mdp4_dtv_encoder(x) container_of(x, struct mdp4_dtv_encoder, base)
37
38static struct mdp4_kms *get_kms(struct drm_encoder *encoder)
39{
40 struct msm_drm_private *priv = encoder->dev->dev_private;
41 return to_mdp4_kms(priv->kms);
42}
43
/* Optional MSM bus-scaling support: register a bus-scale client from
 * the "dtv.0" platform data so bandwidth can be voted up/down around
 * enable/disable.  All of it compiles away when the config is off.
 */
#ifdef CONFIG_MSM_BUS_SCALING
#include <mach/board.h>
/* not ironically named at all.. no, really.. */
static void bs_init(struct mdp4_dtv_encoder *mdp4_dtv_encoder)
{
	struct drm_device *dev = mdp4_dtv_encoder->base.dev;
	struct lcdc_platform_data *dtv_pdata = mdp4_find_pdata("dtv.0");

	if (!dtv_pdata) {
		dev_err(dev->dev, "could not find dtv pdata\n");
		return;
	}

	if (dtv_pdata->bus_scale_table) {
		mdp4_dtv_encoder->bsc = msm_bus_scale_register_client(
				dtv_pdata->bus_scale_table);
		DBG("bus scale client: %08x", mdp4_dtv_encoder->bsc);
		DBG("lcdc_power_save: %p", dtv_pdata->lcdc_power_save);
		if (dtv_pdata->lcdc_power_save)
			dtv_pdata->lcdc_power_save(1);
	}
}

static void bs_fini(struct mdp4_dtv_encoder *mdp4_dtv_encoder)
{
	if (mdp4_dtv_encoder->bsc) {
		msm_bus_scale_unregister_client(mdp4_dtv_encoder->bsc);
		mdp4_dtv_encoder->bsc = 0;
	}
}

/* vote bus bandwidth: idx 1 = active, idx 0 = idle */
static void bs_set(struct mdp4_dtv_encoder *mdp4_dtv_encoder, int idx)
{
	if (mdp4_dtv_encoder->bsc) {
		DBG("set bus scaling: %d", idx);
		msm_bus_scale_client_update_request(mdp4_dtv_encoder->bsc, idx);
	}
}
#else
static void bs_init(struct mdp4_dtv_encoder *mdp4_dtv_encoder) {}
static void bs_fini(struct mdp4_dtv_encoder *mdp4_dtv_encoder) {}
static void bs_set(struct mdp4_dtv_encoder *mdp4_dtv_encoder, int idx) {}
#endif
87
/* Tear down the encoder (clks are devm-managed, no explicit put). */
static void mdp4_dtv_encoder_destroy(struct drm_encoder *encoder)
{
	struct mdp4_dtv_encoder *dtv_enc = to_mdp4_dtv_encoder(encoder);

	bs_fini(dtv_enc);
	drm_encoder_cleanup(encoder);
	kfree(dtv_enc);
}
95
/* drm core encoder vfuncs: */
static const struct drm_encoder_funcs mdp4_dtv_encoder_funcs = {
	.destroy = mdp4_dtv_encoder_destroy,
};
99
100static void mdp4_dtv_encoder_dpms(struct drm_encoder *encoder, int mode)
101{
102 struct drm_device *dev = encoder->dev;
103 struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
104 struct msm_connector *msm_connector = get_connector(encoder);
105 struct mdp4_kms *mdp4_kms = get_kms(encoder);
106 bool enabled = (mode == DRM_MODE_DPMS_ON);
107
108 DBG("mode=%d", mode);
109
110 if (enabled == mdp4_dtv_encoder->enabled)
111 return;
112
113 if (enabled) {
114 unsigned long pc = mdp4_dtv_encoder->pixclock;
115 int ret;
116
117 bs_set(mdp4_dtv_encoder, 1);
118
119 if (msm_connector)
120 msm_connector->funcs->dpms(msm_connector, mode);
121
122 DBG("setting src_clk=%lu", pc);
123
124 ret = clk_set_rate(mdp4_dtv_encoder->src_clk, pc);
125 if (ret)
126 dev_err(dev->dev, "failed to set src_clk to %lu: %d\n", pc, ret);
127 clk_prepare_enable(mdp4_dtv_encoder->src_clk);
128 ret = clk_prepare_enable(mdp4_dtv_encoder->hdmi_clk);
129 if (ret)
130 dev_err(dev->dev, "failed to enable hdmi_clk: %d\n", ret);
131 ret = clk_prepare_enable(mdp4_dtv_encoder->mdp_clk);
132 if (ret)
133 dev_err(dev->dev, "failed to enabled mdp_clk: %d\n", ret);
134
135 mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 1);
136 } else {
137 mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0);
138
139 /*
140 * Wait for a vsync so we know the ENABLE=0 latched before
141 * the (connector) source of the vsync's gets disabled,
142 * otherwise we end up in a funny state if we re-enable
143 * before the disable latches, which results that some of
144 * the settings changes for the new modeset (like new
145 * scanout buffer) don't latch properly..
146 */
147 mdp4_irq_wait(mdp4_kms, MDP4_IRQ_EXTERNAL_VSYNC);
148
149 clk_disable_unprepare(mdp4_dtv_encoder->src_clk);
150 clk_disable_unprepare(mdp4_dtv_encoder->hdmi_clk);
151 clk_disable_unprepare(mdp4_dtv_encoder->mdp_clk);
152
153 if (msm_connector)
154 msm_connector->funcs->dpms(msm_connector, mode);
155
156 bs_set(mdp4_dtv_encoder, 0);
157 }
158
159 mdp4_dtv_encoder->enabled = enabled;
160}
161
/* No encoder-level mode constraints: accept any mode unmodified. */
static bool mdp4_dtv_encoder_mode_fixup(struct drm_encoder *encoder,
		const struct drm_display_mode *mode,
		struct drm_display_mode *adjusted_mode)
{
	return true;
}
168
/*
 * Program the DTV timing-generator registers from the adjusted mode.
 * Horizontal values are in pixels; vertical start/end/period values
 * are expressed in pixel-clock ticks (lines * htotal), which is what
 * the DTV block expects.
 */
static void mdp4_dtv_encoder_mode_set(struct drm_encoder *encoder,
		struct drm_display_mode *mode,
		struct drm_display_mode *adjusted_mode)
{
	struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
	struct msm_connector *msm_connector = get_connector(encoder);
	struct mdp4_kms *mdp4_kms = get_kms(encoder);
	uint32_t dtv_hsync_skew, vsync_period, vsync_len, ctrl_pol;
	uint32_t display_v_start, display_v_end;
	uint32_t hsync_start_x, hsync_end_x;

	mode = adjusted_mode;

	DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
			mode->base.id, mode->name,
			mode->vrefresh, mode->clock,
			mode->hdisplay, mode->hsync_start,
			mode->hsync_end, mode->htotal,
			mode->vdisplay, mode->vsync_start,
			mode->vsync_end, mode->vtotal,
			mode->type, mode->flags);

	/* remembered for the clk_set_rate() done at dpms-on: */
	mdp4_dtv_encoder->pixclock = mode->clock * 1000;

	DBG("pixclock=%lu", mdp4_dtv_encoder->pixclock);

	ctrl_pol = 0;
	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
		ctrl_pol |= MDP4_DTV_CTRL_POLARITY_HSYNC_LOW;
	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
		ctrl_pol |= MDP4_DTV_CTRL_POLARITY_VSYNC_LOW;
	/* probably need to get DATA_EN polarity from panel.. */

	dtv_hsync_skew = 0;  /* get this from panel? */

	/* active region, measured from the start of hsync: */
	hsync_start_x = (mode->htotal - mode->hsync_start);
	hsync_end_x = mode->htotal - (mode->hsync_start - mode->hdisplay) - 1;

	vsync_period = mode->vtotal * mode->htotal;
	vsync_len = (mode->vsync_end - mode->vsync_start) * mode->htotal;
	display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dtv_hsync_skew;
	display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dtv_hsync_skew - 1;

	mdp4_write(mdp4_kms, REG_MDP4_DTV_HSYNC_CTRL,
			MDP4_DTV_HSYNC_CTRL_PULSEW(mode->hsync_end - mode->hsync_start) |
			MDP4_DTV_HSYNC_CTRL_PERIOD(mode->htotal));
	mdp4_write(mdp4_kms, REG_MDP4_DTV_VSYNC_PERIOD, vsync_period);
	mdp4_write(mdp4_kms, REG_MDP4_DTV_VSYNC_LEN, vsync_len);
	mdp4_write(mdp4_kms, REG_MDP4_DTV_DISPLAY_HCTRL,
			MDP4_DTV_DISPLAY_HCTRL_START(hsync_start_x) |
			MDP4_DTV_DISPLAY_HCTRL_END(hsync_end_x));
	mdp4_write(mdp4_kms, REG_MDP4_DTV_DISPLAY_VSTART, display_v_start);
	mdp4_write(mdp4_kms, REG_MDP4_DTV_DISPLAY_VEND, display_v_end);
	mdp4_write(mdp4_kms, REG_MDP4_DTV_BORDER_CLR, 0);
	mdp4_write(mdp4_kms, REG_MDP4_DTV_UNDERFLOW_CLR,
			MDP4_DTV_UNDERFLOW_CLR_ENABLE_RECOVERY |
			MDP4_DTV_UNDERFLOW_CLR_COLOR(0xff));
	mdp4_write(mdp4_kms, REG_MDP4_DTV_HSYNC_SKEW, dtv_hsync_skew);
	mdp4_write(mdp4_kms, REG_MDP4_DTV_CTRL_POLARITY, ctrl_pol);
	/* ACTIVE_* left zero: active region equals full display area */
	mdp4_write(mdp4_kms, REG_MDP4_DTV_ACTIVE_HCTL,
			MDP4_DTV_ACTIVE_HCTL_START(0) |
			MDP4_DTV_ACTIVE_HCTL_END(0));
	mdp4_write(mdp4_kms, REG_MDP4_DTV_ACTIVE_VSTART, 0);
	mdp4_write(mdp4_kms, REG_MDP4_DTV_ACTIVE_VEND, 0);

	if (msm_connector)
		msm_connector->funcs->mode_set(msm_connector, mode);
}
237
/* Helper prepare hook: power down before the modeset. */
static void mdp4_dtv_encoder_prepare(struct drm_encoder *encoder)
{
	mdp4_dtv_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
}
242
/* Helper commit hook: configure the crtc's DMA for 8bpc RGB output,
 * route it to the LCDC/DTV interface, then power back up.
 */
static void mdp4_dtv_encoder_commit(struct drm_encoder *encoder)
{
	mdp4_crtc_set_config(encoder->crtc,
			MDP4_DMA_CONFIG_R_BPC(BPC8) |
			MDP4_DMA_CONFIG_G_BPC(BPC8) |
			MDP4_DMA_CONFIG_B_BPC(BPC8) |
			MDP4_DMA_CONFIG_PACK(0x21));
	mdp4_crtc_set_intf(encoder->crtc, INTF_LCDC_DTV);
	mdp4_dtv_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
}
253
/* encoder helper vfuncs (modeset sequencing): */
static const struct drm_encoder_helper_funcs mdp4_dtv_encoder_helper_funcs = {
	.dpms = mdp4_dtv_encoder_dpms,
	.mode_fixup = mdp4_dtv_encoder_mode_fixup,
	.mode_set = mdp4_dtv_encoder_mode_set,
	.prepare = mdp4_dtv_encoder_prepare,
	.commit = mdp4_dtv_encoder_commit,
};
261
262long mdp4_dtv_round_pixclk(struct drm_encoder *encoder, unsigned long rate)
263{
264 struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
265 return clk_round_rate(mdp4_dtv_encoder->src_clk, rate);
266}
267
268/* initialize encoder */
269struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev)
270{
271 struct drm_encoder *encoder = NULL;
272 struct mdp4_dtv_encoder *mdp4_dtv_encoder;
273 int ret;
274
275 mdp4_dtv_encoder = kzalloc(sizeof(*mdp4_dtv_encoder), GFP_KERNEL);
276 if (!mdp4_dtv_encoder) {
277 ret = -ENOMEM;
278 goto fail;
279 }
280
281 encoder = &mdp4_dtv_encoder->base;
282
283 drm_encoder_init(dev, encoder, &mdp4_dtv_encoder_funcs,
284 DRM_MODE_ENCODER_TMDS);
285 drm_encoder_helper_add(encoder, &mdp4_dtv_encoder_helper_funcs);
286
287 mdp4_dtv_encoder->src_clk = devm_clk_get(dev->dev, "src_clk");
288 if (IS_ERR(mdp4_dtv_encoder->src_clk)) {
289 dev_err(dev->dev, "failed to get src_clk\n");
290 ret = PTR_ERR(mdp4_dtv_encoder->src_clk);
291 goto fail;
292 }
293
294 mdp4_dtv_encoder->hdmi_clk = devm_clk_get(dev->dev, "hdmi_clk");
295 if (IS_ERR(mdp4_dtv_encoder->hdmi_clk)) {
296 dev_err(dev->dev, "failed to get hdmi_clk\n");
297 ret = PTR_ERR(mdp4_dtv_encoder->hdmi_clk);
298 goto fail;
299 }
300
301 mdp4_dtv_encoder->mdp_clk = devm_clk_get(dev->dev, "mdp_clk");
302 if (IS_ERR(mdp4_dtv_encoder->mdp_clk)) {
303 dev_err(dev->dev, "failed to get mdp_clk\n");
304 ret = PTR_ERR(mdp4_dtv_encoder->mdp_clk);
305 goto fail;
306 }
307
308 bs_init(mdp4_dtv_encoder);
309
310 return encoder;
311
312fail:
313 if (encoder)
314 mdp4_dtv_encoder_destroy(encoder);
315
316 return ERR_PTR(ret);
317}
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_format.c b/drivers/gpu/drm/msm/mdp4/mdp4_format.c
new file mode 100644
index 000000000000..7b645f2e837a
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_format.c
@@ -0,0 +1,56 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18
19#include "msm_drv.h"
20#include "mdp4_kms.h"
21
/* Shorthand for one entry of the format table below: maps a DRM fourcc
 * to the hw unpack parameters (bits-per-component, element order e0..e3,
 * alpha enable, tight packing, bytes-per-pixel, unpack element count).
 */
#define FMT(name, a, r, g, b, e0, e1, e2, e3, alpha, tight, c, cnt) { \
		.base = { .pixel_format = DRM_FORMAT_ ## name }, \
		.bpc_a = BPC ## a ## A, \
		.bpc_r = BPC ## r, \
		.bpc_g = BPC ## g, \
		.bpc_b = BPC ## b, \
		.unpack = { e0, e1, e2, e3 }, \
		.alpha_enable = alpha, \
		.unpack_tight = tight, \
		.cpp = c, \
		.unpack_count = cnt, \
	}

/* "zero alpha bits" — presumably no BPC0A value in the generated enum,
 * so define it here; TODO confirm against mdp4.xml.h
 */
#define BPC0A 0

/* Formats supported for scanout, searched linearly by mdp4_get_format() */
static const struct mdp4_format formats[] = {
	/*  name      a  r  g  b   e0 e1 e2 e3  alpha   tight  cpp cnt */
	FMT(ARGB8888, 8, 8, 8, 8,  1, 0, 2, 3,  true,   true,  4,  4),
	FMT(XRGB8888, 8, 8, 8, 8,  1, 0, 2, 3,  false,  true,  4,  4),
	FMT(RGB888,   0, 8, 8, 8,  1, 0, 2, 0,  false,  true,  3,  3),
	FMT(BGR888,   0, 8, 8, 8,  2, 0, 1, 0,  false,  true,  3,  3),
	FMT(RGB565,   0, 5, 6, 5,  1, 0, 2, 0,  false,  true,  2,  3),
	FMT(BGR565,   0, 5, 6, 5,  2, 0, 1, 0,  false,  true,  2,  3),
};
46
47const struct msm_format *mdp4_get_format(struct msm_kms *kms, uint32_t format)
48{
49 int i;
50 for (i = 0; i < ARRAY_SIZE(formats); i++) {
51 const struct mdp4_format *f = &formats[i];
52 if (f->base.pixel_format == format)
53 return &f->base;
54 }
55 return NULL;
56}
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/mdp4/mdp4_irq.c
new file mode 100644
index 000000000000..5c6b7fca4edd
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_irq.c
@@ -0,0 +1,203 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18
19#include "msm_drv.h"
20#include "mdp4_kms.h"
21
22
/* Bookkeeping for a synchronous irq wait, see mdp4_irq_wait(). */
struct mdp4_irq_wait {
	struct mdp4_irq irq;
	/* decremented from irq context when the requested irq fires */
	int count;
};

/* waiters in mdp4_irq_wait() sleep here until wait_irq() wakes them */
static DECLARE_WAIT_QUEUE_HEAD(wait_event);

/* protects mdp4_kms->irq_list, ->vblank_mask and ->in_irq */
static DEFINE_SPINLOCK(list_lock);
31
32static void update_irq(struct mdp4_kms *mdp4_kms)
33{
34 struct mdp4_irq *irq;
35 uint32_t irqmask = mdp4_kms->vblank_mask;
36
37 BUG_ON(!spin_is_locked(&list_lock));
38
39 list_for_each_entry(irq, &mdp4_kms->irq_list, node)
40 irqmask |= irq->irqmask;
41
42 mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, irqmask);
43}
44
45static void update_irq_unlocked(struct mdp4_kms *mdp4_kms)
46{
47 unsigned long flags;
48 spin_lock_irqsave(&list_lock, flags);
49 update_irq(mdp4_kms);
50 spin_unlock_irqrestore(&list_lock, flags);
51}
52
/* Permanently-registered handler for underrun error irqs (see
 * mdp4_irq_postinstall()); just logs the raw status bits.
 */
static void mdp4_irq_error_handler(struct mdp4_irq *irq, uint32_t irqstatus)
{
	DRM_ERROR("errors: %08x\n", irqstatus);
}
57
58void mdp4_irq_preinstall(struct msm_kms *kms)
59{
60 struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
61 mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, 0xffffffff);
62}
63
64int mdp4_irq_postinstall(struct msm_kms *kms)
65{
66 struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
67 struct mdp4_irq *error_handler = &mdp4_kms->error_handler;
68
69 INIT_LIST_HEAD(&mdp4_kms->irq_list);
70
71 error_handler->irq = mdp4_irq_error_handler;
72 error_handler->irqmask = MDP4_IRQ_PRIMARY_INTF_UDERRUN |
73 MDP4_IRQ_EXTERNAL_INTF_UDERRUN;
74
75 mdp4_irq_register(mdp4_kms, error_handler);
76
77 return 0;
78}
79
80void mdp4_irq_uninstall(struct msm_kms *kms)
81{
82 struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
83 mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000);
84}
85
/* Top-level MDP4 interrupt handler: read-and-ack status, dispatch
 * vblanks to drm, then call any registered handlers whose mask matched.
 */
irqreturn_t mdp4_irq(struct msm_kms *kms)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
	struct drm_device *dev = mdp4_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct mdp4_irq *handler, *n;
	unsigned long flags;
	unsigned int id;
	uint32_t status;

	/* read then ack the status bits we are about to service: */
	status = mdp4_read(mdp4_kms, REG_MDP4_INTR_STATUS);
	mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, status);

	VERB("status=%08x", status);

	/* forward per-crtc vblank bits to the drm core: */
	for (id = 0; id < priv->num_crtcs; id++)
		if (status & mdp4_crtc_vblank(priv->crtcs[id]))
			drm_handle_vblank(dev, id);

	/* Handlers are called with list_lock dropped so they may
	 * register/unregister themselves; in_irq defers the hw enable-mask
	 * update until the single update_irq() below.
	 * NOTE(review): with the lock dropped, an unregister from another
	 * context could invalidate the saved 'n' cursor — confirm whether
	 * unregister can race with the irq handler here.
	 */
	spin_lock_irqsave(&list_lock, flags);
	mdp4_kms->in_irq = true;
	list_for_each_entry_safe(handler, n, &mdp4_kms->irq_list, node) {
		if (handler->irqmask & status) {
			spin_unlock_irqrestore(&list_lock, flags);
			handler->irq(handler, handler->irqmask & status);
			spin_lock_irqsave(&list_lock, flags);
		}
	}
	mdp4_kms->in_irq = false;
	update_irq(mdp4_kms);
	spin_unlock_irqrestore(&list_lock, flags);

	return IRQ_HANDLED;
}
120
121int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
122{
123 struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
124 unsigned long flags;
125
126 spin_lock_irqsave(&list_lock, flags);
127 mdp4_kms->vblank_mask |= mdp4_crtc_vblank(crtc);
128 update_irq(mdp4_kms);
129 spin_unlock_irqrestore(&list_lock, flags);
130
131 return 0;
132}
133
134void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
135{
136 struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
137 unsigned long flags;
138
139 spin_lock_irqsave(&list_lock, flags);
140 mdp4_kms->vblank_mask &= ~mdp4_crtc_vblank(crtc);
141 update_irq(mdp4_kms);
142 spin_unlock_irqrestore(&list_lock, flags);
143}
144
/* irq callback used by mdp4_irq_wait(): mark the wait satisfied and
 * wake any sleepers on the shared wait queue.
 */
static void wait_irq(struct mdp4_irq *irq, uint32_t irqstatus)
{
	struct mdp4_irq_wait *wait =
			container_of(irq, struct mdp4_irq_wait, irq);
	wait->count--;
	wake_up_all(&wait_event);
}
152
/* Block until any irq in 'irqmask' fires once.  Registers a transient
 * on-stack handler, sleeps until wait_irq() decrements the count, then
 * unregisters.  Must be called from sleepable context.
 */
void mdp4_irq_wait(struct mdp4_kms *mdp4_kms, uint32_t irqmask)
{
	struct mdp4_irq_wait wait = {
		.irq = {
			.irq = wait_irq,
			.irqmask = irqmask,
		},
		.count = 1,
	};
	mdp4_irq_register(mdp4_kms, &wait.irq);
	wait_event(wait_event, (wait.count <= 0));
	mdp4_irq_unregister(mdp4_kms, &wait.irq);
}
166
167void mdp4_irq_register(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq)
168{
169 unsigned long flags;
170 bool needs_update = false;
171
172 spin_lock_irqsave(&list_lock, flags);
173
174 if (!irq->registered) {
175 irq->registered = true;
176 list_add(&irq->node, &mdp4_kms->irq_list);
177 needs_update = !mdp4_kms->in_irq;
178 }
179
180 spin_unlock_irqrestore(&list_lock, flags);
181
182 if (needs_update)
183 update_irq_unlocked(mdp4_kms);
184}
185
186void mdp4_irq_unregister(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq)
187{
188 unsigned long flags;
189 bool needs_update = false;
190
191 spin_lock_irqsave(&list_lock, flags);
192
193 if (irq->registered) {
194 irq->registered = false;
195 list_del(&irq->node);
196 needs_update = !mdp4_kms->in_irq;
197 }
198
199 spin_unlock_irqrestore(&list_lock, flags);
200
201 if (needs_update)
202 update_irq_unlocked(mdp4_kms);
203}
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp4/mdp4_kms.c
new file mode 100644
index 000000000000..960cd894da78
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_kms.c
@@ -0,0 +1,368 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18
19#include "msm_drv.h"
20#include "mdp4_kms.h"
21
22#include <mach/iommu.h>
23
24static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev);
25
/* One-time hardware initialization: verify the MDP major version,
 * configure regulator voltages for the revisions that need them, and
 * program fetch/burst, portmap and CSC defaults.  Runs with a runtime-pm
 * reference held for the duration.
 */
static int mdp4_hw_init(struct msm_kms *kms)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
	struct drm_device *dev = mdp4_kms->dev;
	uint32_t version, major, minor, dmap_cfg, vg_cfg;
	unsigned long clk;
	int ret = 0;

	pm_runtime_get_sync(dev->dev);

	version = mdp4_read(mdp4_kms, REG_MDP4_VERSION);

	major = FIELD(version, MDP4_VERSION_MAJOR);
	minor = FIELD(version, MDP4_VERSION_MINOR);

	DBG("found MDP version v%d.%d", major, minor);

	/* this driver only handles MDP4: */
	if (major != 4) {
		dev_err(dev->dev, "unexpected MDP version: v%d.%d\n",
				major, minor);
		ret = -ENXIO;
		goto out;
	}

	mdp4_kms->rev = minor;

	/* rev 2/4 need dsi_pll_vdda at 1.2V (regulator is optional and
	 * may be NULL, see mdp4_kms_init()):
	 */
	if (mdp4_kms->dsi_pll_vdda) {
		if ((mdp4_kms->rev == 2) || (mdp4_kms->rev == 4)) {
			ret = regulator_set_voltage(mdp4_kms->dsi_pll_vdda,
					1200000, 1200000);
			if (ret) {
				dev_err(dev->dev,
					"failed to set dsi_pll_vdda voltage: %d\n", ret);
				goto out;
			}
		}
	}

	/* rev 2 additionally needs dsi_pll_vddio at 1.8V: */
	if (mdp4_kms->dsi_pll_vddio) {
		if (mdp4_kms->rev == 2) {
			ret = regulator_set_voltage(mdp4_kms->dsi_pll_vddio,
					1800000, 1800000);
			if (ret) {
				dev_err(dev->dev,
					"failed to set dsi_pll_vddio voltage: %d\n", ret);
				goto out;
			}
		}
	}

	if (mdp4_kms->rev > 1) {
		mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER0, 0x0707ffff);
		mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER1, 0x03073f3f);
	}

	mdp4_write(mdp4_kms, REG_MDP4_PORTMAP_MODE, 0x3);

	/* max read pending cmd config, 3 pending requests: */
	mdp4_write(mdp4_kms, REG_MDP4_READ_CNFG, 0x02222);

	clk = clk_get_rate(mdp4_kms->clk);

	/* pick burst/request fetch config based on revision and core clock: */
	if ((mdp4_kms->rev >= 1) || (clk >= 90000000)) {
		dmap_cfg = 0x47;     /* 16 bytes-burst x 8 req */
		vg_cfg = 0x47;       /* 16 bytes-burs x 8 req */
	} else {
		dmap_cfg = 0x27;     /* 8 bytes-burst x 8 req */
		vg_cfg = 0x43;       /* 16 bytes-burst x 4 req */
	}

	DBG("fetch config: dmap=%02x, vg=%02x", dmap_cfg, vg_cfg);

	mdp4_write(mdp4_kms, REG_MDP4_DMA_FETCH_CONFIG(DMA_P), dmap_cfg);
	mdp4_write(mdp4_kms, REG_MDP4_DMA_FETCH_CONFIG(DMA_E), dmap_cfg);

	mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(VG1), vg_cfg);
	mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(VG2), vg_cfg);
	mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(RGB1), vg_cfg);
	mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(RGB2), vg_cfg);

	if (mdp4_kms->rev >= 2)
		mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG_UPDATE_METHOD, 1);

	/* disable CSC matrix / YUV by default: */
	mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG1), 0);
	mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG2), 0);
	mdp4_write(mdp4_kms, REG_MDP4_DMA_P_OP_MODE, 0);
	mdp4_write(mdp4_kms, REG_MDP4_DMA_S_OP_MODE, 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_CSC_CONFIG(1), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_CSC_CONFIG(2), 0);

	if (mdp4_kms->rev > 1)
		mdp4_write(mdp4_kms, REG_MDP4_RESET_STATUS, 1);

out:
	pm_runtime_put_sync(dev->dev);

	return ret;
}
125
static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate,
		struct drm_encoder *encoder)
{
	/* only the DTV encoder exists today; if we had >1 encoder, we'd
	 * need something more clever:
	 */
	return mdp4_dtv_round_pixclk(encoder, rate);
}
132
133static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
134{
135 struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
136 struct msm_drm_private *priv = mdp4_kms->dev->dev_private;
137 unsigned i;
138
139 for (i = 0; i < priv->num_crtcs; i++)
140 mdp4_crtc_cancel_pending_flip(priv->crtcs[i]);
141}
142
/* Free the kms object itself; mmio, clocks and regulators were obtained
 * with devm_* helpers in mdp4_kms_init() and are released by the driver
 * core.
 */
static void mdp4_destroy(struct msm_kms *kms)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
	kfree(mdp4_kms);
}
148
/* msm_kms vtable wiring the generic kms hooks to the mdp4 implementations */
static const struct msm_kms_funcs kms_funcs = {
		.hw_init         = mdp4_hw_init,
		.irq_preinstall  = mdp4_irq_preinstall,
		.irq_postinstall = mdp4_irq_postinstall,
		.irq_uninstall   = mdp4_irq_uninstall,
		.irq             = mdp4_irq,
		.enable_vblank   = mdp4_enable_vblank,
		.disable_vblank  = mdp4_disable_vblank,
		.get_format      = mdp4_get_format,
		.round_pixclk    = mdp4_round_pixclk,
		.preclose        = mdp4_preclose,
		.destroy         = mdp4_destroy,
};
162
163int mdp4_disable(struct mdp4_kms *mdp4_kms)
164{
165 DBG("");
166
167 clk_disable_unprepare(mdp4_kms->clk);
168 if (mdp4_kms->pclk)
169 clk_disable_unprepare(mdp4_kms->pclk);
170 clk_disable_unprepare(mdp4_kms->lut_clk);
171
172 return 0;
173}
174
175int mdp4_enable(struct mdp4_kms *mdp4_kms)
176{
177 DBG("");
178
179 clk_prepare_enable(mdp4_kms->clk);
180 if (mdp4_kms->pclk)
181 clk_prepare_enable(mdp4_kms->pclk);
182 clk_prepare_enable(mdp4_kms->lut_clk);
183
184 return 0;
185}
186
187static int modeset_init(struct mdp4_kms *mdp4_kms)
188{
189 struct drm_device *dev = mdp4_kms->dev;
190 struct msm_drm_private *priv = dev->dev_private;
191 struct drm_plane *plane;
192 struct drm_crtc *crtc;
193 struct drm_encoder *encoder;
194 struct drm_connector *connector;
195 int ret;
196
197 /*
198 * NOTE: this is a bit simplistic until we add support
199 * for more than just RGB1->DMA_E->DTV->HDMI
200 */
201
202 /* the CRTCs get constructed with a private plane: */
203 plane = mdp4_plane_init(dev, RGB1, true);
204 if (IS_ERR(plane)) {
205 dev_err(dev->dev, "failed to construct plane for RGB1\n");
206 ret = PTR_ERR(plane);
207 goto fail;
208 }
209
210 crtc = mdp4_crtc_init(dev, plane, priv->num_crtcs, 1, DMA_E);
211 if (IS_ERR(crtc)) {
212 dev_err(dev->dev, "failed to construct crtc for DMA_E\n");
213 ret = PTR_ERR(crtc);
214 goto fail;
215 }
216 priv->crtcs[priv->num_crtcs++] = crtc;
217
218 encoder = mdp4_dtv_encoder_init(dev);
219 if (IS_ERR(encoder)) {
220 dev_err(dev->dev, "failed to construct DTV encoder\n");
221 ret = PTR_ERR(encoder);
222 goto fail;
223 }
224 encoder->possible_crtcs = 0x1; /* DTV can be hooked to DMA_E */
225 priv->encoders[priv->num_encoders++] = encoder;
226
227 connector = hdmi_connector_init(dev, encoder);
228 if (IS_ERR(connector)) {
229 dev_err(dev->dev, "failed to construct HDMI connector\n");
230 ret = PTR_ERR(connector);
231 goto fail;
232 }
233 priv->connectors[priv->num_connectors++] = connector;
234
235 return 0;
236
237fail:
238 return ret;
239}
240
/* IOMMU context-bank names attached for scanout buffer mappings */
static const char *iommu_ports[] = {
		"mdp_port0_cb0", "mdp_port1_cb0",
};
244
/* Probe-time construction of the mdp4 kms object: map registers, look up
 * regulators/clocks, quiesce the hardware left running by the bootloader,
 * attach the IOMMU and build the modeset pipeline.  Returns ERR_PTR() on
 * failure.
 */
struct msm_kms *mdp4_kms_init(struct drm_device *dev)
{
	struct platform_device *pdev = dev->platformdev;
	struct mdp4_platform_config *config = mdp4_get_config(pdev);
	struct mdp4_kms *mdp4_kms;
	struct msm_kms *kms = NULL;
	int ret;

	mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
	if (!mdp4_kms) {
		dev_err(dev->dev, "failed to allocate kms\n");
		ret = -ENOMEM;
		goto fail;
	}

	kms = &mdp4_kms->base;
	kms->funcs = &kms_funcs;

	mdp4_kms->dev = dev;

	mdp4_kms->mmio = msm_ioremap(pdev, NULL, "MDP4");
	if (IS_ERR(mdp4_kms->mmio)) {
		ret = PTR_ERR(mdp4_kms->mmio);
		goto fail;
	}

	/* the dsi_pll regulators and vdd are optional: any lookup error is
	 * treated as "not present" by NULLing the pointer:
	 */
	mdp4_kms->dsi_pll_vdda = devm_regulator_get(&pdev->dev, "dsi_pll_vdda");
	if (IS_ERR(mdp4_kms->dsi_pll_vdda))
		mdp4_kms->dsi_pll_vdda = NULL;

	mdp4_kms->dsi_pll_vddio = devm_regulator_get(&pdev->dev, "dsi_pll_vddio");
	if (IS_ERR(mdp4_kms->dsi_pll_vddio))
		mdp4_kms->dsi_pll_vddio = NULL;

	mdp4_kms->vdd = devm_regulator_get(&pdev->dev, "vdd");
	if (IS_ERR(mdp4_kms->vdd))
		mdp4_kms->vdd = NULL;

	/* NOTE(review): vdd is never disabled on the later failure paths or
	 * in mdp4_destroy() — verify whether the enable should be balanced.
	 */
	if (mdp4_kms->vdd) {
		ret = regulator_enable(mdp4_kms->vdd);
		if (ret) {
			dev_err(dev->dev, "failed to enable regulator vdd: %d\n", ret);
			goto fail;
		}
	}

	/* core clock is required: */
	mdp4_kms->clk = devm_clk_get(&pdev->dev, "core_clk");
	if (IS_ERR(mdp4_kms->clk)) {
		dev_err(dev->dev, "failed to get core_clk\n");
		ret = PTR_ERR(mdp4_kms->clk);
		goto fail;
	}

	/* interface clock is optional: */
	mdp4_kms->pclk = devm_clk_get(&pdev->dev, "iface_clk");
	if (IS_ERR(mdp4_kms->pclk))
		mdp4_kms->pclk = NULL;

	// XXX if (rev >= MDP_REV_42) { ???
	mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk");
	if (IS_ERR(mdp4_kms->lut_clk)) {
		dev_err(dev->dev, "failed to get lut_clk\n");
		ret = PTR_ERR(mdp4_kms->lut_clk);
		goto fail;
	}

	clk_set_rate(mdp4_kms->clk, config->max_clk);
	clk_set_rate(mdp4_kms->lut_clk, config->max_clk);

	if (!config->iommu) {
		dev_err(dev->dev, "no iommu\n");
		ret = -ENXIO;
		goto fail;
	}

	/* make sure things are off before attaching iommu (bootloader could
	 * have left things on, in which case we'll start getting faults if
	 * we don't disable):
	 */
	mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0);
	mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0);
	mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 0);
	mdelay(16);

	ret = msm_iommu_attach(dev, config->iommu,
			iommu_ports, ARRAY_SIZE(iommu_ports));
	if (ret)
		goto fail;

	/* id is handed to msm_gem_get_iova() when mapping buffers for scanout: */
	mdp4_kms->id = msm_register_iommu(dev, config->iommu);
	if (mdp4_kms->id < 0) {
		ret = mdp4_kms->id;
		dev_err(dev->dev, "failed to register mdp4 iommu: %d\n", ret);
		goto fail;
	}

	ret = modeset_init(mdp4_kms);
	if (ret) {
		dev_err(dev->dev, "modeset_init failed: %d\n", ret);
		goto fail;
	}

	return kms;

fail:
	if (kms)
		mdp4_destroy(kms);
	return ERR_PTR(ret);
}
353
/* Build the platform configuration (max core clock and iommu domain).
 * Returns a pointer to static storage — only valid for a single device.
 */
static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev)
{
	static struct mdp4_platform_config config = {};
#ifdef CONFIG_OF
	/* TODO */
#else
	/* legacy board-file path: hardcoded per-SoC max clock */
	if (cpu_is_apq8064())
		config.max_clk = 266667000;
	else
		config.max_clk = 200000000;

	config.iommu = msm_get_iommu_domain(DISPLAY_READ_DOMAIN);
#endif
	return &config;
}
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp4/mdp4_kms.h
new file mode 100644
index 000000000000..1e83554955f3
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_kms.h
@@ -0,0 +1,194 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __MDP4_KMS_H__
19#define __MDP4_KMS_H__
20
21#include <linux/clk.h>
22#include <linux/platform_device.h>
23#include <linux/regulator/consumer.h>
24
25#include "msm_drv.h"
26#include "mdp4.xml.h"
27
28
29/* For transiently registering for different MDP4 irqs that various parts
 * of the KMS code need during setup/configuration.  These are not
31 * necessarily the same as what drm_vblank_get/put() are requesting, and
32 * the hysteresis in drm_vblank_put() is not necessarily desirable for
33 * internal housekeeping related irq usage.
34 */
struct mdp4_irq {
	struct list_head node;       /* entry in mdp4_kms->irq_list */
	uint32_t irqmask;            /* INTR bits this handler wants */
	bool registered;             /* currently on the list? */
	/* called from irq context with the subset of status bits matched */
	void (*irq)(struct mdp4_irq *irq, uint32_t irqstatus);
};
41
/* Per-device MDP4 KMS state, embedding the generic msm_kms base. */
struct mdp4_kms {
	struct msm_kms base;

	struct drm_device *dev;

	/* MDP minor revision (major is always 4, checked in hw_init) */
	int rev;

	/* mapper-id used to request GEM buffer mapped for scanout: */
	int id;

	void __iomem *mmio;

	/* optional regulators — NULL when not present (see mdp4_kms_init) */
	struct regulator *dsi_pll_vdda;
	struct regulator *dsi_pll_vddio;
	struct regulator *vdd;

	struct clk *clk;             /* core clock (required) */
	struct clk *pclk;            /* interface clock (optional, may be NULL) */
	struct clk *lut_clk;         /* LUT clock (required) */

	/* irq handling: */
	bool in_irq;
	struct list_head irq_list;    /* list of mdp4_irq */
	uint32_t vblank_mask;         /* irq bits set for userspace vblank */
	struct mdp4_irq error_handler;
};
#define to_mdp4_kms(x) container_of(x, struct mdp4_kms, base)
69
/* platform config data (ie. from DT, or pdata) */
struct mdp4_platform_config {
	struct iommu_domain *iommu;   /* display iommu domain to attach */
	uint32_t max_clk;             /* rate programmed for core/lut clocks */
};
75
/* hw description of a scanout format; see the FMT() table in
 * mdp4_format.c for how these are populated from DRM fourccs.
 */
struct mdp4_format {
	struct msm_format base;
	enum mpd4_bpc bpc_r, bpc_g, bpc_b;  /* bits per color component */
	enum mpd4_bpc_alpha bpc_a;          /* bits for alpha component */
	uint8_t unpack[4];                  /* element order for unpack */
	bool alpha_enable, unpack_tight;
	uint8_t cpp, unpack_count;          /* bytes/pixel, # unpack elements */
};
#define to_mdp4_format(x) container_of(x, struct mdp4_format, base)
85
/* write an MDP register (offset relative to the mapped register block) */
static inline void mdp4_write(struct mdp4_kms *mdp4_kms, u32 reg, u32 data)
{
	msm_writel(data, mdp4_kms->mmio + reg);
}
90
/* read an MDP register (offset relative to the mapped register block) */
static inline u32 mdp4_read(struct mdp4_kms *mdp4_kms, u32 reg)
{
	return msm_readl(mdp4_kms->mmio + reg);
}
95
96static inline uint32_t pipe2flush(enum mpd4_pipe pipe)
97{
98 switch (pipe) {
99 case VG1: return MDP4_OVERLAY_FLUSH_VG1;
100 case VG2: return MDP4_OVERLAY_FLUSH_VG2;
101 case RGB1: return MDP4_OVERLAY_FLUSH_RGB1;
102 case RGB2: return MDP4_OVERLAY_FLUSH_RGB1;
103 default: return 0;
104 }
105}
106
/* Map an overlay-mixer index to its OVERLAY_FLUSH bit (0 if unknown). */
static inline uint32_t ovlp2flush(int ovlp)
{
	switch (ovlp) {
	case 0:  return MDP4_OVERLAY_FLUSH_OVLP0;
	case 1:  return MDP4_OVERLAY_FLUSH_OVLP1;
	default: return 0;
	}
}
115
/* Map a DMA engine to its "done" irq bit (0 if unknown). */
static inline uint32_t dma2irq(enum mdp4_dma dma)
{
	switch (dma) {
	case DMA_P: return MDP4_IRQ_DMA_P_DONE;
	case DMA_S: return MDP4_IRQ_DMA_S_DONE;
	case DMA_E: return MDP4_IRQ_DMA_E_DONE;
	default:    return 0;
	}
}
125
/* Map a DMA engine to its underrun-error irq bit (0 if it has none). */
static inline uint32_t dma2err(enum mdp4_dma dma)
{
	switch (dma) {
	case DMA_P: return MDP4_IRQ_PRIMARY_INTF_UDERRUN;
	case DMA_S: return 0; // ???
	case DMA_E: return MDP4_IRQ_EXTERNAL_INTF_UDERRUN;
	default:    return 0;
	}
}
135
136int mdp4_disable(struct mdp4_kms *mdp4_kms);
137int mdp4_enable(struct mdp4_kms *mdp4_kms);
138
139void mdp4_irq_preinstall(struct msm_kms *kms);
140int mdp4_irq_postinstall(struct msm_kms *kms);
141void mdp4_irq_uninstall(struct msm_kms *kms);
142irqreturn_t mdp4_irq(struct msm_kms *kms);
143void mdp4_irq_wait(struct mdp4_kms *mdp4_kms, uint32_t irqmask);
144void mdp4_irq_register(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq);
145void mdp4_irq_unregister(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq);
146int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
147void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
148
149const struct msm_format *mdp4_get_format(struct msm_kms *kms, uint32_t format);
150
151void mdp4_plane_install_properties(struct drm_plane *plane,
152 struct drm_mode_object *obj);
153void mdp4_plane_set_scanout(struct drm_plane *plane,
154 struct drm_framebuffer *fb);
155int mdp4_plane_mode_set(struct drm_plane *plane,
156 struct drm_crtc *crtc, struct drm_framebuffer *fb,
157 int crtc_x, int crtc_y,
158 unsigned int crtc_w, unsigned int crtc_h,
159 uint32_t src_x, uint32_t src_y,
160 uint32_t src_w, uint32_t src_h);
161enum mpd4_pipe mdp4_plane_pipe(struct drm_plane *plane);
162struct drm_plane *mdp4_plane_init(struct drm_device *dev,
163 enum mpd4_pipe pipe_id, bool private_plane);
164
165uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc);
166void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc);
167void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config);
168void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf);
169struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
170 struct drm_plane *plane, int id, int ovlp_id,
171 enum mdp4_dma dma_id);
172
173long mdp4_dtv_round_pixclk(struct drm_encoder *encoder, unsigned long rate);
174struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev);
175
#ifdef CONFIG_MSM_BUS_SCALING
/* bus_find_device() match callback: match a device by its exact name */
static inline int match_dev_name(struct device *dev, void *data)
{
	return !strcmp(dev_name(dev), data);
}
/* bus scaling data is associated with extra pointless platform devices,
 * "dtv", etc.. this is a bit of a hack, but we need a way for encoders
 * to find their pdata to make the bus-scaling stuff work.
 */
static inline void *mdp4_find_pdata(const char *devname)
{
	struct device *dev;
	dev = bus_find_device(&platform_bus_type, NULL,
			(void *)devname, match_dev_name);
	return dev ? dev->platform_data : NULL;
}
#endif
193
194#endif /* __MDP4_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp4/mdp4_plane.c
new file mode 100644
index 000000000000..3468229d58b3
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_plane.c
@@ -0,0 +1,243 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "mdp4_kms.h"
19
20
/* Per-plane state wrapping the drm_plane base object. */
struct mdp4_plane {
	struct drm_plane base;
	const char *name;             /* human-readable pipe name (debug) */

	enum mpd4_pipe pipe;          /* hw pipe driven by this plane */

	/* supported fourccs advertised to drm_plane_init() */
	uint32_t nformats;
	uint32_t formats[32];

	bool enabled;
};
#define to_mdp4_plane(x) container_of(x, struct mdp4_plane, base)
33
34static struct mdp4_kms *get_kms(struct drm_plane *plane)
35{
36 struct msm_drm_private *priv = plane->dev->dev_private;
37 return to_mdp4_kms(priv->kms);
38}
39
/* drm update_plane hook: swap the fb reference and program the pipe. */
static int mdp4_plane_update(struct drm_plane *plane,
		struct drm_crtc *crtc, struct drm_framebuffer *fb,
		int crtc_x, int crtc_y,
		unsigned int crtc_w, unsigned int crtc_h,
		uint32_t src_x, uint32_t src_y,
		uint32_t src_w, uint32_t src_h)
{
	struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);

	mdp4_plane->enabled = true;

	/* drop the ref on the outgoing fb and take one on the new fb;
	 * plane->fb itself is updated inside mdp4_plane_set_scanout().
	 * NOTE(review): if mdp4_plane_mode_set() could fail the new ref
	 * would be leaked — today it always returns 0; confirm if that
	 * ever changes.
	 */
	if (plane->fb)
		drm_framebuffer_unreference(plane->fb);

	drm_framebuffer_reference(fb);

	return mdp4_plane_mode_set(plane, crtc, fb,
			crtc_x, crtc_y, crtc_w, crtc_h,
			src_x, src_y, src_w, src_h);
}
60
/* drm disable_plane hook — not implemented yet, always returns 0. */
static int mdp4_plane_disable(struct drm_plane *plane)
{
	struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
	DBG("%s: TODO", mdp4_plane->name); // XXX
	return 0;
}
67
/* drm destroy hook: shut the pipe down, then tear down and free. */
static void mdp4_plane_destroy(struct drm_plane *plane)
{
	struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);

	mdp4_plane_disable(plane);
	drm_plane_cleanup(plane);
	kfree(mdp4_plane);
}
77
/* helper to install properties which are common to planes and crtcs
 * (not implemented yet)
 */
void mdp4_plane_install_properties(struct drm_plane *plane,
		struct drm_mode_object *obj)
{
	// XXX
}
84
/* drm set_property hook — no properties supported yet.
 * NOTE(review): only referenced via mdp4_plane_funcs below and not
 * declared in mdp4_kms.h — looks like this should be static; confirm.
 */
int mdp4_plane_set_property(struct drm_plane *plane,
		struct drm_property *property, uint64_t val)
{
	// XXX
	return -EINVAL;
}
91
/* drm_plane vtable for mdp4 planes */
static const struct drm_plane_funcs mdp4_plane_funcs = {
		.update_plane = mdp4_plane_update,
		.disable_plane = mdp4_plane_disable,
		.destroy = mdp4_plane_destroy,
		.set_property = mdp4_plane_set_property,
};
98
/* Program the pipe's scanout source: per-plane strides and the iova of
 * the fb's backing GEM object (mapped via the kms mapper id).  Also
 * records fb as the plane's current framebuffer.
 */
void mdp4_plane_set_scanout(struct drm_plane *plane,
		struct drm_framebuffer *fb)
{
	struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
	struct mdp4_kms *mdp4_kms = get_kms(plane);
	enum mpd4_pipe pipe = mdp4_plane->pipe;
	uint32_t iova;

	mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_A(pipe),
			MDP4_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
			MDP4_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));

	mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_B(pipe),
			MDP4_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
			MDP4_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));

	/* only plane 0's base address is programmed here */
	msm_gem_get_iova(msm_framebuffer_bo(fb, 0), mdp4_kms->id, &iova);
	mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP0_BASE(pipe), iova);

	plane->fb = fb;
}
120
/* default (1:1) scaler phase step, used until real scaling is computed */
#define MDP4_VG_PHASE_STEP_DEFAULT 0x20000000

/* Program the pipe for the given source rect / destination rect and the
 * fb's pixel format.  src_* arrive in Q16 fixed point.  Always returns 0.
 */
int mdp4_plane_mode_set(struct drm_plane *plane,
		struct drm_crtc *crtc, struct drm_framebuffer *fb,
		int crtc_x, int crtc_y,
		unsigned int crtc_w, unsigned int crtc_h,
		uint32_t src_x, uint32_t src_y,
		uint32_t src_w, uint32_t src_h)
{
	struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
	struct mdp4_kms *mdp4_kms = get_kms(plane);
	enum mpd4_pipe pipe = mdp4_plane->pipe;
	const struct mdp4_format *format;
	uint32_t op_mode = 0;
	uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT;
	uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT;

	/* src values are in Q16 fixed point, convert to integer: */
	src_x = src_x >> 16;
	src_y = src_y >> 16;
	src_w = src_w >> 16;
	src_h = src_h >> 16;

	/* enable the scaler when src and dst sizes differ (phase steps
	 * still left at the 1:1 default):
	 */
	if (src_w != crtc_w) {
		op_mode |= MDP4_PIPE_OP_MODE_SCALEX_EN;
		/* TODO calc phasex_step */
	}

	if (src_h != crtc_h) {
		op_mode |= MDP4_PIPE_OP_MODE_SCALEY_EN;
		/* TODO calc phasey_step */
	}

	mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_SIZE(pipe),
			MDP4_PIPE_SRC_SIZE_WIDTH(src_w) |
			MDP4_PIPE_SRC_SIZE_HEIGHT(src_h));

	mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_XY(pipe),
			MDP4_PIPE_SRC_XY_X(src_x) |
			MDP4_PIPE_SRC_XY_Y(src_y));

	mdp4_write(mdp4_kms, REG_MDP4_PIPE_DST_SIZE(pipe),
			MDP4_PIPE_DST_SIZE_WIDTH(crtc_w) |
			MDP4_PIPE_DST_SIZE_HEIGHT(crtc_h));

	/* NOTE(review): DST_XY is packed with the SRC_XY field macros —
	 * presumably the bit layout is identical; confirm against
	 * mdp4.xml.h and switch to MDP4_PIPE_DST_XY_* if they exist.
	 */
	mdp4_write(mdp4_kms, REG_MDP4_PIPE_DST_XY(pipe),
			MDP4_PIPE_SRC_XY_X(crtc_x) |
			MDP4_PIPE_SRC_XY_Y(crtc_y));

	mdp4_plane_set_scanout(plane, fb);

	format = to_mdp4_format(msm_framebuffer_format(fb));

	mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_FORMAT(pipe),
			MDP4_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) |
			MDP4_PIPE_SRC_FORMAT_R_BPC(format->bpc_r) |
			MDP4_PIPE_SRC_FORMAT_G_BPC(format->bpc_g) |
			MDP4_PIPE_SRC_FORMAT_B_BPC(format->bpc_b) |
			COND(format->alpha_enable, MDP4_PIPE_SRC_FORMAT_ALPHA_ENABLE) |
			MDP4_PIPE_SRC_FORMAT_CPP(format->cpp - 1) |
			MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) |
			COND(format->unpack_tight, MDP4_PIPE_SRC_FORMAT_UNPACK_TIGHT));

	mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_UNPACK(pipe),
			MDP4_PIPE_SRC_UNPACK_ELEM0(format->unpack[0]) |
			MDP4_PIPE_SRC_UNPACK_ELEM1(format->unpack[1]) |
			MDP4_PIPE_SRC_UNPACK_ELEM2(format->unpack[2]) |
			MDP4_PIPE_SRC_UNPACK_ELEM3(format->unpack[3]));

	mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(pipe), op_mode);
	mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEX_STEP(pipe), phasex_step);
	mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEY_STEP(pipe), phasey_step);

	plane->crtc = crtc;

	return 0;
}
198
/* debug names, indexed by pipe id — presumably matches the order of
 * enum mpd4_pipe; TODO confirm against mdp4.xml.h
 */
static const char *pipe_names[] = {
		"VG1", "VG2",
		"RGB1", "RGB2", "RGB3",
		"VG3", "VG4",
};
204
205enum mpd4_pipe mdp4_plane_pipe(struct drm_plane *plane)
206{
207 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
208 return mdp4_plane->pipe;
209}
210
/* initialize plane
 *
 * Allocates the plane for the given hw pipe and registers it with drm;
 * possible_crtcs covers all crtcs created so far.  Returns ERR_PTR() on
 * failure.
 */
struct drm_plane *mdp4_plane_init(struct drm_device *dev,
		enum mpd4_pipe pipe_id, bool private_plane)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_plane *plane = NULL;
	struct mdp4_plane *mdp4_plane;
	int ret;

	mdp4_plane = kzalloc(sizeof(*mdp4_plane), GFP_KERNEL);
	if (!mdp4_plane) {
		ret = -ENOMEM;
		goto fail;
	}

	plane = &mdp4_plane->base;

	mdp4_plane->pipe = pipe_id;
	mdp4_plane->name = pipe_names[pipe_id];

	/* NOTE(review): formats[]/nformats are never populated, so the
	 * plane is registered with zero formats here — verify intended.
	 */
	drm_plane_init(dev, plane, (1 << priv->num_crtcs) - 1, &mdp4_plane_funcs,
			mdp4_plane->formats, mdp4_plane->nformats, private_plane);

	mdp4_plane_install_properties(plane, &plane->base);

	return plane;

fail:
	if (plane)
		mdp4_plane_destroy(plane);

	return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/msm_connector.c b/drivers/gpu/drm/msm/msm_connector.c
new file mode 100644
index 000000000000..aeea8879e36f
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_connector.c
@@ -0,0 +1,34 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "msm_drv.h"
19#include "msm_connector.h"
20
21void msm_connector_init(struct msm_connector *connector,
22 const struct msm_connector_funcs *funcs,
23 struct drm_encoder *encoder)
24{
25 connector->funcs = funcs;
26 connector->encoder = encoder;
27}
28
29struct drm_encoder *msm_connector_attached_encoder(
30 struct drm_connector *connector)
31{
32 struct msm_connector *msm_connector = to_msm_connector(connector);
33 return msm_connector->encoder;
34}
diff --git a/drivers/gpu/drm/msm/msm_connector.h b/drivers/gpu/drm/msm/msm_connector.h
new file mode 100644
index 000000000000..0b41866adc08
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_connector.h
@@ -0,0 +1,68 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __MSM_CONNECTOR_H__
19#define __MSM_CONNECTOR_H__
20
21#include "msm_drv.h"
22
23/*
24 * Base class for MSM connectors. Typically a connector is a bit more
25 * passive. But with the split between (for example) DTV within MDP4,
26 * and HDMI encoder, we really need two parts to an encoder. Instead
27 * what we do is have the part external to the display controller block
28 * in the connector, which is called from the encoder to delegate the
29 * appropriate parts of modeset.
30 */
31
32struct msm_connector;
33
34struct msm_connector_funcs {
35 void (*dpms)(struct msm_connector *connector, int mode);
36 void (*mode_set)(struct msm_connector *connector,
37 struct drm_display_mode *mode);
38};
39
40struct msm_connector {
41 struct drm_connector base;
42 struct drm_encoder *encoder;
43 const struct msm_connector_funcs *funcs;
44};
45#define to_msm_connector(x) container_of(x, struct msm_connector, base)
46
47void msm_connector_init(struct msm_connector *connector,
48 const struct msm_connector_funcs *funcs,
49 struct drm_encoder *encoder);
50
51struct drm_encoder *msm_connector_attached_encoder(
52 struct drm_connector *connector);
53
54static inline struct msm_connector *get_connector(struct drm_encoder *encoder)
55{
56 struct msm_drm_private *priv = encoder->dev->dev_private;
57 int i;
58
59 for (i = 0; i < priv->num_connectors; i++) {
60 struct drm_connector *connector = priv->connectors[i];
61 if (msm_connector_attached_encoder(connector) == encoder)
62 return to_msm_connector(connector);
63 }
64
65 return NULL;
66}
67
68#endif /* __MSM_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
new file mode 100644
index 000000000000..864c9773636b
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -0,0 +1,776 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "msm_drv.h"
19#include "msm_gpu.h"
20
21#include <mach/iommu.h>
22
23static void msm_fb_output_poll_changed(struct drm_device *dev)
24{
25 struct msm_drm_private *priv = dev->dev_private;
26 if (priv->fbdev)
27 drm_fb_helper_hotplug_event(priv->fbdev);
28}
29
/* mode-config hooks: user fb creation + fbdev hotplug notification */
static const struct drm_mode_config_funcs mode_config_funcs = {
	.fb_create = msm_framebuffer_create,
	.output_poll_changed = msm_fb_output_poll_changed,
};
34
/* IOMMU fault callback: just log the faulting iova for debugging.
 * NOTE(review): returns 0 unconditionally (fault not escalated here) --
 * confirm that is the desired policy once real page-fault handling lands.
 */
static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
		unsigned long iova, int flags, void *arg)
{
	DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
	return 0;
}
41
42int msm_register_iommu(struct drm_device *dev, struct iommu_domain *iommu)
43{
44 struct msm_drm_private *priv = dev->dev_private;
45 int idx = priv->num_iommus++;
46
47 if (WARN_ON(idx >= ARRAY_SIZE(priv->iommus)))
48 return -EINVAL;
49
50 priv->iommus[idx] = iommu;
51
52 iommu_set_fault_handler(iommu, msm_fault_handler, dev);
53
54 /* need to iommu_attach_device() somewhere?? on resume?? */
55
56 return idx;
57}
58
59int msm_iommu_attach(struct drm_device *dev, struct iommu_domain *iommu,
60 const char **names, int cnt)
61{
62 int i, ret;
63
64 for (i = 0; i < cnt; i++) {
65 struct device *ctx = msm_iommu_get_ctx(names[i]);
66 if (!ctx)
67 continue;
68 ret = iommu_attach_device(iommu, ctx);
69 if (ret) {
70 dev_warn(dev->dev, "could not attach iommu to %s", names[i]);
71 return ret;
72 }
73 }
74 return 0;
75}
76
#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
/* modparam: when set, trace every register read/write to the kernel log */
static bool reglog = false;
MODULE_PARM_DESC(reglog, "Enable register read/write logging");
module_param(reglog, bool, 0600);
#else
/* logging support compiled out: constant 0 lets the compiler drop the
 * trace branches entirely */
#define reglog 0
#endif
84
/* Map a platform MEM resource (looked up by name, or index 0 when name
 * is NULL), with optional register-access logging.
 *
 * Uses devm_* so the mapping is released automatically on driver detach.
 * Returns an ERR_PTR() on failure, never NULL.
 */
void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
		const char *dbgname)
{
	struct resource *res;
	unsigned long size;
	void __iomem *ptr;

	if (name)
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	else
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!res) {
		dev_err(&pdev->dev, "failed to get memory resource: %s\n", name);
		return ERR_PTR(-EINVAL);
	}

	size = resource_size(res);

	ptr = devm_ioremap_nocache(&pdev->dev, res->start, size);
	if (!ptr) {
		dev_err(&pdev->dev, "failed to ioremap: %s\n", name);
		return ERR_PTR(-ENOMEM);
	}

	if (reglog)
		/* NOTE(review): the (u32) cast of a pointer assumes a 32-bit
		 * kernel; would truncate on 64-bit -- confirm target arch */
		printk(KERN_DEBUG "IO:region %s %08x %08lx\n", dbgname, (u32)ptr, size);

	return ptr;
}
115
/* Register write wrapper: optionally logs the access (reglog modparam)
 * before issuing the writel().
 */
void msm_writel(u32 data, void __iomem *addr)
{
	if (reglog)
		printk(KERN_DEBUG "IO:W %08x %08x\n", (u32)addr, data);
	writel(data, addr);
}
122
/* Register read wrapper: optionally logs the value read (reglog modparam).
 * NOTE(review): logs at KERN_ERR where msm_writel uses KERN_DEBUG --
 * confirm whether the elevated level is intentional.
 */
u32 msm_readl(const void __iomem *addr)
{
	u32 val = readl(addr);
	if (reglog)
		printk(KERN_ERR "IO:R %08x %08x\n", (u32)addr, val);
	return val;
}
130
131/*
132 * DRM operations:
133 */
134
/* Tear down the device in (roughly) the reverse order of msm_load().
 * Also called from the msm_load() error path, so it must tolerate kms
 * and/or gpu not having been created yet.
 */
static int msm_unload(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	struct msm_gpu *gpu = priv->gpu;

	drm_kms_helper_poll_fini(dev);
	drm_mode_config_cleanup(dev);
	drm_vblank_cleanup(dev);

	/* hold a runtime-pm reference so the hw stays powered while the
	 * irq is being uninstalled: */
	pm_runtime_get_sync(dev->dev);
	drm_irq_uninstall(dev);
	pm_runtime_put_sync(dev->dev);

	/* drain pending work before freeing anything it might touch: */
	flush_workqueue(priv->wq);
	destroy_workqueue(priv->wq);

	if (kms) {
		pm_runtime_disable(dev->dev);
		kms->funcs->destroy(kms);
	}

	if (gpu) {
		/* gpu teardown expects struct_mutex (matches load_gpu) */
		mutex_lock(&dev->struct_mutex);
		gpu->funcs->pm_suspend(gpu);
		gpu->funcs->destroy(gpu);
		mutex_unlock(&dev->struct_mutex);
	}

	dev->dev_private = NULL;

	kfree(priv);

	return 0;
}
170
171static int msm_load(struct drm_device *dev, unsigned long flags)
172{
173 struct platform_device *pdev = dev->platformdev;
174 struct msm_drm_private *priv;
175 struct msm_kms *kms;
176 int ret;
177
178 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
179 if (!priv) {
180 dev_err(dev->dev, "failed to allocate private data\n");
181 return -ENOMEM;
182 }
183
184 dev->dev_private = priv;
185
186 priv->wq = alloc_ordered_workqueue("msm", 0);
187 init_waitqueue_head(&priv->fence_event);
188
189 INIT_LIST_HEAD(&priv->inactive_list);
190
191 drm_mode_config_init(dev);
192
193 kms = mdp4_kms_init(dev);
194 if (IS_ERR(kms)) {
195 /*
196 * NOTE: once we have GPU support, having no kms should not
197 * be considered fatal.. ideally we would still support gpu
198 * and (for example) use dmabuf/prime to share buffers with
199 * imx drm driver on iMX5
200 */
201 dev_err(dev->dev, "failed to load kms\n");
202 ret = PTR_ERR(priv->kms);
203 goto fail;
204 }
205
206 priv->kms = kms;
207
208 if (kms) {
209 pm_runtime_enable(dev->dev);
210 ret = kms->funcs->hw_init(kms);
211 if (ret) {
212 dev_err(dev->dev, "kms hw init failed: %d\n", ret);
213 goto fail;
214 }
215 }
216
217 dev->mode_config.min_width = 0;
218 dev->mode_config.min_height = 0;
219 dev->mode_config.max_width = 2048;
220 dev->mode_config.max_height = 2048;
221 dev->mode_config.funcs = &mode_config_funcs;
222
223 ret = drm_vblank_init(dev, 1);
224 if (ret < 0) {
225 dev_err(dev->dev, "failed to initialize vblank\n");
226 goto fail;
227 }
228
229 pm_runtime_get_sync(dev->dev);
230 ret = drm_irq_install(dev);
231 pm_runtime_put_sync(dev->dev);
232 if (ret < 0) {
233 dev_err(dev->dev, "failed to install IRQ handler\n");
234 goto fail;
235 }
236
237 platform_set_drvdata(pdev, dev);
238
239#ifdef CONFIG_DRM_MSM_FBDEV
240 priv->fbdev = msm_fbdev_init(dev);
241#endif
242
243 drm_kms_helper_poll_init(dev);
244
245 return 0;
246
247fail:
248 msm_unload(dev);
249 return ret;
250}
251
/* Lazily create and power up the a3xx gpu on first use.  GPU failure is
 * non-fatal: display keeps working with priv->gpu left NULL.
 */
static void load_gpu(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gpu *gpu;

	/* already loaded: nothing to do */
	if (priv->gpu)
		return;

	mutex_lock(&dev->struct_mutex);
	gpu = a3xx_gpu_init(dev);
	if (IS_ERR(gpu)) {
		dev_warn(dev->dev, "failed to load a3xx gpu\n");
		gpu = NULL;
		/* not fatal */
	}
	mutex_unlock(&dev->struct_mutex);

	if (gpu) {
		int ret;
		/* power up before touching hw; on init failure drop the
		 * gpu again and fall back to display-only operation */
		gpu->funcs->pm_resume(gpu);
		ret = gpu->funcs->hw_init(gpu);
		if (ret) {
			dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
			gpu->funcs->destroy(gpu);
			gpu = NULL;
		}
	}

	priv->gpu = gpu;
}
282
283static int msm_open(struct drm_device *dev, struct drm_file *file)
284{
285 struct msm_file_private *ctx;
286
287 /* For now, load gpu on open.. to avoid the requirement of having
288 * firmware in the initrd.
289 */
290 load_gpu(dev);
291
292 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
293 if (!ctx)
294 return -ENOMEM;
295
296 file->driver_priv = ctx;
297
298 return 0;
299}
300
/* Per-fd teardown: let kms drop anything tied to this file, and clear
 * the last-context pointer if it was ours.  struct_mutex guards lastctx
 * (presumably also touched by the submit path -- verify against caller).
 */
static void msm_preclose(struct drm_device *dev, struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_file_private *ctx = file->driver_priv;
	struct msm_kms *kms = priv->kms;

	if (kms)
		kms->funcs->preclose(kms, file);

	mutex_lock(&dev->struct_mutex);
	if (ctx == priv->lastctx)
		priv->lastctx = NULL;
	mutex_unlock(&dev->struct_mutex);

	kfree(ctx);
}
317
318static void msm_lastclose(struct drm_device *dev)
319{
320 struct msm_drm_private *priv = dev->dev_private;
321 if (priv->fbdev) {
322 drm_modeset_lock_all(dev);
323 drm_fb_helper_restore_fbdev_mode(priv->fbdev);
324 drm_modeset_unlock_all(dev);
325 }
326}
327
/* Top-level irq handler: fully delegated to the kms backend.  The irq
 * is only installed after kms is created, hence the BUG_ON.
 */
static irqreturn_t msm_irq(DRM_IRQ_ARGS)
{
	struct drm_device *dev = arg;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	BUG_ON(!kms);
	return kms->funcs->irq(kms);
}
336
337static void msm_irq_preinstall(struct drm_device *dev)
338{
339 struct msm_drm_private *priv = dev->dev_private;
340 struct msm_kms *kms = priv->kms;
341 BUG_ON(!kms);
342 kms->funcs->irq_preinstall(kms);
343}
344
345static int msm_irq_postinstall(struct drm_device *dev)
346{
347 struct msm_drm_private *priv = dev->dev_private;
348 struct msm_kms *kms = priv->kms;
349 BUG_ON(!kms);
350 return kms->funcs->irq_postinstall(kms);
351}
352
353static void msm_irq_uninstall(struct drm_device *dev)
354{
355 struct msm_drm_private *priv = dev->dev_private;
356 struct msm_kms *kms = priv->kms;
357 BUG_ON(!kms);
358 kms->funcs->irq_uninstall(kms);
359}
360
361static int msm_enable_vblank(struct drm_device *dev, int crtc_id)
362{
363 struct msm_drm_private *priv = dev->dev_private;
364 struct msm_kms *kms = priv->kms;
365 if (!kms)
366 return -ENXIO;
367 DBG("dev=%p, crtc=%d", dev, crtc_id);
368 return kms->funcs->enable_vblank(kms, priv->crtcs[crtc_id]);
369}
370
371static void msm_disable_vblank(struct drm_device *dev, int crtc_id)
372{
373 struct msm_drm_private *priv = dev->dev_private;
374 struct msm_kms *kms = priv->kms;
375 if (!kms)
376 return;
377 DBG("dev=%p, crtc=%d", dev, crtc_id);
378 kms->funcs->disable_vblank(kms, priv->crtcs[crtc_id]);
379}
380
381/*
382 * DRM debugfs:
383 */
384
385#ifdef CONFIG_DEBUG_FS
386static int msm_gpu_show(struct drm_device *dev, struct seq_file *m)
387{
388 struct msm_drm_private *priv = dev->dev_private;
389 struct msm_gpu *gpu = priv->gpu;
390
391 if (gpu) {
392 seq_printf(m, "%s Status:\n", gpu->name);
393 gpu->funcs->show(gpu, m);
394 }
395
396 return 0;
397}
398
399static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
400{
401 struct msm_drm_private *priv = dev->dev_private;
402 struct msm_gpu *gpu = priv->gpu;
403
404 if (gpu) {
405 seq_printf(m, "Active Objects (%s):\n", gpu->name);
406 msm_gem_describe_objects(&gpu->active_list, m);
407 }
408
409 seq_printf(m, "Inactive Objects:\n");
410 msm_gem_describe_objects(&priv->inactive_list, m);
411
412 return 0;
413}
414
415static int msm_mm_show(struct drm_device *dev, struct seq_file *m)
416{
417 return drm_mm_dump_table(m, dev->mm_private);
418}
419
/* debugfs: describe the fbdev framebuffer (if any) and then every
 * user-created fb.  fb_lock protects the mode_config fb_list walk.
 */
static int msm_fb_show(struct drm_device *dev, struct seq_file *m)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_framebuffer *fb, *fbdev_fb = NULL;

	if (priv->fbdev) {
		seq_printf(m, "fbcon ");
		fbdev_fb = priv->fbdev->fb;
		msm_framebuffer_describe(fbdev_fb, m);
	}

	mutex_lock(&dev->mode_config.fb_lock);
	list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
		/* the fbdev fb was already described above */
		if (fb == fbdev_fb)
			continue;

		seq_printf(m, "user ");
		msm_framebuffer_describe(fb, m);
	}
	mutex_unlock(&dev->mode_config.fb_lock);

	return 0;
}
443
/* debugfs dispatcher: run the entry's show() callback (stored in
 * info_ent->data) while holding struct_mutex.
 */
static int show_locked(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	int (*show)(struct drm_device *dev, struct seq_file *m) =
			node->info_ent->data;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ret = show(dev, m);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}
462
463static struct drm_info_list msm_debugfs_list[] = {
464 {"gpu", show_locked, 0, msm_gpu_show},
465 {"gem", show_locked, 0, msm_gem_show},
466 { "mm", show_locked, 0, msm_mm_show },
467 { "fb", show_locked, 0, msm_fb_show },
468};
469
470static int msm_debugfs_init(struct drm_minor *minor)
471{
472 struct drm_device *dev = minor->dev;
473 int ret;
474
475 ret = drm_debugfs_create_files(msm_debugfs_list,
476 ARRAY_SIZE(msm_debugfs_list),
477 minor->debugfs_root, minor);
478
479 if (ret) {
480 dev_err(dev->dev, "could not install msm_debugfs_list\n");
481 return ret;
482 }
483
484 return ret;
485}
486
/* Remove the debugfs files registered by msm_debugfs_init(). */
static void msm_debugfs_cleanup(struct drm_minor *minor)
{
	drm_debugfs_remove_files(msm_debugfs_list,
			ARRAY_SIZE(msm_debugfs_list), minor);
}
492#endif
493
494/*
495 * Fences:
496 */
497
/* Sleep until 'fence' completes, the timeout expires, or a signal
 * arrives.  Returns 0 on completion, -ETIMEDOUT, or -ERESTARTSYS.
 *
 * NOTE(review): 'timeout' is converted with timespec_to_jiffies() and
 * compared against current jiffies, i.e. treated as an absolute
 * deadline rather than a relative duration -- confirm userspace fills
 * it in that way.
 */
int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
		struct timespec *timeout)
{
	struct msm_drm_private *priv = dev->dev_private;
	unsigned long timeout_jiffies = timespec_to_jiffies(timeout);
	unsigned long start_jiffies = jiffies;
	unsigned long remaining_jiffies;
	int ret;

	/* clamp to zero if the deadline has already passed: */
	if (time_after(start_jiffies, timeout_jiffies))
		remaining_jiffies = 0;
	else
		remaining_jiffies = timeout_jiffies - start_jiffies;

	ret = wait_event_interruptible_timeout(priv->fence_event,
			priv->completed_fence >= fence,
			remaining_jiffies);
	if (ret == 0) {
		DBG("timeout waiting for fence: %u (completed: %u)",
				fence, priv->completed_fence);
		ret = -ETIMEDOUT;
	} else if (ret != -ERESTARTSYS) {
		/* positive return means the condition became true */
		ret = 0;
	}

	return ret;
}
525
526/* call under struct_mutex */
527void msm_update_fence(struct drm_device *dev, uint32_t fence)
528{
529 struct msm_drm_private *priv = dev->dev_private;
530
531 if (fence > priv->completed_fence) {
532 priv->completed_fence = fence;
533 wake_up_all(&priv->fence_event);
534 }
535}
536
537/*
538 * DRM ioctls:
539 */
540
541static int msm_ioctl_get_param(struct drm_device *dev, void *data,
542 struct drm_file *file)
543{
544 struct msm_drm_private *priv = dev->dev_private;
545 struct drm_msm_param *args = data;
546 struct msm_gpu *gpu;
547
548 /* for now, we just have 3d pipe.. eventually this would need to
549 * be more clever to dispatch to appropriate gpu module:
550 */
551 if (args->pipe != MSM_PIPE_3D0)
552 return -EINVAL;
553
554 gpu = priv->gpu;
555
556 if (!gpu)
557 return -ENXIO;
558
559 return gpu->funcs->get_param(gpu, args->param, &args->value);
560}
561
/* ioctl: allocate a new GEM buffer and return a handle for it. */
static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_new *args = data;
	return msm_gem_new_handle(dev, file, args->size,
			args->flags, &args->handle);
}
569
570#define TS(t) ((struct timespec){ .tv_sec = (t).tv_sec, .tv_nsec = (t).tv_nsec })
571
572static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
573 struct drm_file *file)
574{
575 struct drm_msm_gem_cpu_prep *args = data;
576 struct drm_gem_object *obj;
577 int ret;
578
579 obj = drm_gem_object_lookup(dev, file, args->handle);
580 if (!obj)
581 return -ENOENT;
582
583 ret = msm_gem_cpu_prep(obj, args->op, &TS(args->timeout));
584
585 drm_gem_object_unreference_unlocked(obj);
586
587 return ret;
588}
589
590static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
591 struct drm_file *file)
592{
593 struct drm_msm_gem_cpu_fini *args = data;
594 struct drm_gem_object *obj;
595 int ret;
596
597 obj = drm_gem_object_lookup(dev, file, args->handle);
598 if (!obj)
599 return -ENOENT;
600
601 ret = msm_gem_cpu_fini(obj);
602
603 drm_gem_object_unreference_unlocked(obj);
604
605 return ret;
606}
607
608static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
609 struct drm_file *file)
610{
611 struct drm_msm_gem_info *args = data;
612 struct drm_gem_object *obj;
613 int ret = 0;
614
615 if (args->pad)
616 return -EINVAL;
617
618 obj = drm_gem_object_lookup(dev, file, args->handle);
619 if (!obj)
620 return -ENOENT;
621
622 args->offset = msm_gem_mmap_offset(obj);
623
624 drm_gem_object_unreference_unlocked(obj);
625
626 return ret;
627}
628
/* ioctl: block until the given fence completes (or timeout/signal). */
static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_wait_fence *args = data;
	return msm_wait_fence_interruptable(dev, args->fence, &TS(args->timeout));
}
635
/* msm-specific ioctls; all require auth and run without the global lock */
static const struct drm_ioctl_desc msm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(MSM_GEM_INFO, msm_ioctl_gem_info, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_UNLOCKED|DRM_AUTH),
};
645
/* GEM mmap vma ops: fault-driven mapping, refcounted open/close. */
static const struct vm_operations_struct vm_ops = {
	.fault = msm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
651
/* file_operations for the drm device node: standard drm entry points,
 * with msm's GEM-aware mmap.
 */
static const struct file_operations fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = drm_compat_ioctl,
#endif
	.poll = drm_poll,
	.read = drm_read,
	.llseek = no_llseek,
	.mmap = msm_gem_mmap,
};
665
/* DRM driver description: modesetting + GEM, with core-managed irq. */
static struct drm_driver msm_driver = {
	.driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET,
	.load = msm_load,
	.unload = msm_unload,
	.open = msm_open,
	.preclose = msm_preclose,
	.lastclose = msm_lastclose,
	.irq_handler = msm_irq,
	.irq_preinstall = msm_irq_preinstall,
	.irq_postinstall = msm_irq_postinstall,
	.irq_uninstall = msm_irq_uninstall,
	.get_vblank_counter = drm_vblank_count,
	.enable_vblank = msm_enable_vblank,
	.disable_vblank = msm_disable_vblank,
	.gem_free_object = msm_gem_free_object,
	.gem_vm_ops = &vm_ops,
	.dumb_create = msm_gem_dumb_create,
	.dumb_map_offset = msm_gem_dumb_map_offset,
	.dumb_destroy = msm_gem_dumb_destroy,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init = msm_debugfs_init,
	.debugfs_cleanup = msm_debugfs_cleanup,
#endif
	.ioctls = msm_ioctls,
	.num_ioctls = DRM_MSM_NUM_IOCTLS,
	.fops = &fops,
	.name = "msm",
	.desc = "MSM Snapdragon DRM",
	.date = "20130625",
	.major = 1,
	.minor = 0,
};
698
699#ifdef CONFIG_PM_SLEEP
/* System suspend: stop connector polling while the device sleeps. */
static int msm_pm_suspend(struct device *dev)
{
	drm_kms_helper_poll_disable(dev_get_drvdata(dev));

	return 0;
}
708
/* System resume: restart connector polling. */
static int msm_pm_resume(struct device *dev)
{
	drm_kms_helper_poll_enable(dev_get_drvdata(dev));

	return 0;
}
717#endif
718
/* Hook system sleep into the standard dev_pm_ops table. */
static const struct dev_pm_ops msm_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(msm_pm_suspend, msm_pm_resume)
};
722
723/*
724 * Platform driver:
725 */
726
/* Platform probe: hand the device off to the DRM core. */
static int msm_pdev_probe(struct platform_device *pdev)
{
	return drm_platform_init(&msm_driver, pdev);
}
731
/* Platform remove: detach the DRM device. */
static int msm_pdev_remove(struct platform_device *pdev)
{
	drm_platform_exit(&msm_driver, pdev);

	return 0;
}
738
/* platform device ids this driver binds against */
static const struct platform_device_id msm_id[] = {
	{ "mdp", 0 },
	{ }
};
743
/* Platform driver glue: binds "msm" devices and wires in PM ops. */
static struct platform_driver msm_platform_driver = {
	.probe = msm_pdev_probe,
	.remove = msm_pdev_remove,
	.driver = {
		.owner = THIS_MODULE,
		.name = "msm",
		.pm = &msm_pm_ops,
	},
	.id_table = msm_id,
};
754
/* Module init: register the sub-drivers first so they are available by
 * the time the master platform driver probes.
 */
static int __init msm_drm_register(void)
{
	DBG("init");
	hdmi_register();
	a3xx_register();
	return platform_driver_register(&msm_platform_driver);
}
762
/* Module exit: unregister the platform driver before the sub-drivers. */
static void __exit msm_drm_unregister(void)
{
	DBG("fini");
	platform_driver_unregister(&msm_platform_driver);
	hdmi_unregister();
	a3xx_unregister();
}
770
771module_init(msm_drm_register);
772module_exit(msm_drm_unregister);
773
774MODULE_AUTHOR("Rob Clark <robdclark@gmail.com");
775MODULE_DESCRIPTION("MSM DRM Driver");
776MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
new file mode 100644
index 000000000000..34c36b2911d9
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -0,0 +1,211 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __MSM_DRV_H__
19#define __MSM_DRV_H__
20
21#include <linux/kernel.h>
22#include <linux/clk.h>
23#include <linux/cpufreq.h>
24#include <linux/module.h>
25#include <linux/platform_device.h>
26#include <linux/pm.h>
27#include <linux/pm_runtime.h>
28#include <linux/slab.h>
29#include <linux/list.h>
30#include <linux/iommu.h>
31#include <linux/types.h>
32#include <asm/sizes.h>
33
34#ifndef CONFIG_OF
35#include <mach/board.h>
36#include <mach/socinfo.h>
37#include <mach/iommu_domains.h>
38#endif
39
40#include <drm/drmP.h>
41#include <drm/drm_crtc_helper.h>
42#include <drm/drm_fb_helper.h>
43#include <drm/msm_drm.h>
44
45struct msm_kms;
46struct msm_gpu;
47
48#define NUM_DOMAINS 2 /* one for KMS, then one per gpu core (?) */
49
50struct msm_file_private {
51 /* currently we don't do anything useful with this.. but when
52 * per-context address spaces are supported we'd keep track of
53 * the context's page-tables here.
54 */
55 int dummy;
56};
57
58struct msm_drm_private {
59
60 struct msm_kms *kms;
61
62 /* when we have more than one 'msm_gpu' these need to be an array: */
63 struct msm_gpu *gpu;
64 struct msm_file_private *lastctx;
65
66 struct drm_fb_helper *fbdev;
67
68 uint32_t next_fence, completed_fence;
69 wait_queue_head_t fence_event;
70
71 /* list of GEM objects: */
72 struct list_head inactive_list;
73
74 struct workqueue_struct *wq;
75
76 /* registered IOMMU domains: */
77 unsigned int num_iommus;
78 struct iommu_domain *iommus[NUM_DOMAINS];
79
80 unsigned int num_crtcs;
81 struct drm_crtc *crtcs[8];
82
83 unsigned int num_encoders;
84 struct drm_encoder *encoders[8];
85
86 unsigned int num_connectors;
87 struct drm_connector *connectors[8];
88};
89
90struct msm_format {
91 uint32_t pixel_format;
92};
93
94/* As there are different display controller blocks depending on the
95 * snapdragon version, the kms support is split out and the appropriate
96 * implementation is loaded at runtime. The kms module is responsible
97 * for constructing the appropriate planes/crtcs/encoders/connectors.
98 */
99struct msm_kms_funcs {
100 /* hw initialization: */
101 int (*hw_init)(struct msm_kms *kms);
102 /* irq handling: */
103 void (*irq_preinstall)(struct msm_kms *kms);
104 int (*irq_postinstall)(struct msm_kms *kms);
105 void (*irq_uninstall)(struct msm_kms *kms);
106 irqreturn_t (*irq)(struct msm_kms *kms);
107 int (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
108 void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
109 /* misc: */
110 const struct msm_format *(*get_format)(struct msm_kms *kms, uint32_t format);
111 long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
112 struct drm_encoder *encoder);
113 /* cleanup: */
114 void (*preclose)(struct msm_kms *kms, struct drm_file *file);
115 void (*destroy)(struct msm_kms *kms);
116};
117
118struct msm_kms {
119 const struct msm_kms_funcs *funcs;
120};
121
122struct msm_kms *mdp4_kms_init(struct drm_device *dev);
123
124int msm_register_iommu(struct drm_device *dev, struct iommu_domain *iommu);
125int msm_iommu_attach(struct drm_device *dev, struct iommu_domain *iommu,
126 const char **names, int cnt);
127
128int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
129 struct timespec *timeout);
130void msm_update_fence(struct drm_device *dev, uint32_t fence);
131
132int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
133 struct drm_file *file);
134
135int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
136int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
137uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
138int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
139 uint32_t *iova);
140int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova);
141void msm_gem_put_iova(struct drm_gem_object *obj, int id);
142int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
143 struct drm_mode_create_dumb *args);
144int msm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
145 uint32_t handle);
146int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
147 uint32_t handle, uint64_t *offset);
148void *msm_gem_vaddr_locked(struct drm_gem_object *obj);
149void *msm_gem_vaddr(struct drm_gem_object *obj);
150int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
151 struct work_struct *work);
152void msm_gem_move_to_active(struct drm_gem_object *obj,
153 struct msm_gpu *gpu, uint32_t fence);
154void msm_gem_move_to_inactive(struct drm_gem_object *obj);
155int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
156 struct timespec *timeout);
157int msm_gem_cpu_fini(struct drm_gem_object *obj);
158void msm_gem_free_object(struct drm_gem_object *obj);
159int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
160 uint32_t size, uint32_t flags, uint32_t *handle);
161struct drm_gem_object *msm_gem_new(struct drm_device *dev,
162 uint32_t size, uint32_t flags);
163
164struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
165const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb);
166struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
167 struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
168struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
169 struct drm_file *file, struct drm_mode_fb_cmd2 *mode_cmd);
170
171struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev);
172
173struct drm_connector *hdmi_connector_init(struct drm_device *dev,
174 struct drm_encoder *encoder);
175void __init hdmi_register(void);
176void __exit hdmi_unregister(void);
177
178#ifdef CONFIG_DEBUG_FS
179void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
180void msm_gem_describe_objects(struct list_head *list, struct seq_file *m);
181void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);
182#endif
183
184void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
185 const char *dbgname);
186void msm_writel(u32 data, void __iomem *addr);
187u32 msm_readl(const void __iomem *addr);
188
/* Debug logging helpers.  VERB() is compiled-out verbose logging; it is
 * wrapped in do/while so it is safe as a single statement -- the bare
 * 'if (0)' form would silently capture a following 'else' (dangling-else
 * hazard) when used unbraced.
 */
#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
#define VERB(fmt, ...) do { if (0) DRM_DEBUG(fmt"\n", ##__VA_ARGS__); } while (0)
191
/* Bytes per scanline for the given width/bpp, with the width padded out
 * to the 32-pixel alignment that adreno requires.
 */
static inline int align_pitch(int width, int bpp)
{
	/* round bpp up to whole bytes, width up to 32 pixels: */
	return ((bpp + 7) / 8) * ALIGN(width, 32);
}
198
199/* for the generated headers: */
200#define INVALID_IDX(idx) ({BUG(); 0;})
201#define fui(x) ({BUG(); 0;})
202#define util_float_to_half(x) ({BUG(); 0;})
203
204
205#define FIELD(val, name) (((val) & name ## __MASK) >> name ## __SHIFT)
206
207/* for conditionally setting boolean flag(s): */
208#define COND(bool, val) ((bool) ? (val) : 0)
209
210
211#endif /* __MSM_DRV_H__ */
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
new file mode 100644
index 000000000000..0286c0eeb10c
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -0,0 +1,202 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "msm_drv.h"
19
20#include "drm_crtc.h"
21#include "drm_crtc_helper.h"
22
/* driver-private fb: the drm framebuffer plus the resolved msm format
 * and the backing GEM bo for each plane (two slots cover the formats
 * handled here):
 */
struct msm_framebuffer {
	struct drm_framebuffer base;
	const struct msm_format *format;
	struct drm_gem_object *planes[2];
};
#define to_msm_framebuffer(x) container_of(x, struct msm_framebuffer, base)
29
30
31static int msm_framebuffer_create_handle(struct drm_framebuffer *fb,
32 struct drm_file *file_priv,
33 unsigned int *handle)
34{
35 struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
36 return drm_gem_handle_create(file_priv,
37 msm_fb->planes[0], handle);
38}
39
/* .destroy: drop the fb's reference on each plane bo and free the
 * wrapper.  Only valid on an fb that passed drm_framebuffer_init().
 */
static void msm_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
	int i, n = drm_format_num_planes(fb->pixel_format);

	DBG("destroy: FB ID: %d (%p)", fb->base.id, fb);

	drm_framebuffer_cleanup(fb);

	for (i = 0; i < n; i++) {
		struct drm_gem_object *bo = msm_fb->planes[i];
		/* planes[] may have NULL tails if init bailed early */
		if (bo)
			drm_gem_object_unreference_unlocked(bo);
	}

	kfree(msm_fb);
}
57
/* .dirty: nothing to flush for these scanout paths (yet), so just
 * report success:
 */
static int msm_framebuffer_dirty(struct drm_framebuffer *fb,
		struct drm_file *file_priv, unsigned flags, unsigned color,
		struct drm_clip_rect *clips, unsigned num_clips)
{
	return 0;
}
64
/* vtable handed to drm_framebuffer_init() for all msm fbs */
static const struct drm_framebuffer_funcs msm_framebuffer_funcs = {
	.create_handle = msm_framebuffer_create_handle,
	.destroy = msm_framebuffer_destroy,
	.dirty = msm_framebuffer_dirty,
};
70
#ifdef CONFIG_DEBUG_FS
/* debugfs: dump fb geometry, refcount, and per-plane offset/pitch/bo */
void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
{
	struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
	int i, n = drm_format_num_planes(fb->pixel_format);

	/* %4.4s on &pixel_format prints the fourcc as ascii */
	seq_printf(m, "fb: %dx%d@%4.4s (%2d, ID:%d)\n",
			fb->width, fb->height, (char *)&fb->pixel_format,
			fb->refcount.refcount.counter, fb->base.id);

	for (i = 0; i < n; i++) {
		seq_printf(m, "   %d: offset=%d pitch=%d, obj: ",
				i, fb->offsets[i], fb->pitches[i]);
		msm_gem_describe(msm_fb->planes[i], m);
	}
}
#endif
88
89struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane)
90{
91 struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
92 return msm_fb->planes[plane];
93}
94
95const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb)
96{
97 struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
98 return msm_fb->format;
99}
100
/* .fb_create entry point: resolve each plane handle to its bo and
 * build the fb.  On success the fb owns the bo references (dropped in
 * msm_framebuffer_destroy()); on failure they are dropped here --
 * unreferencing a NULL slot in bos[] is a safe no-op.
 */
struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
		struct drm_file *file, struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_gem_object *bos[4] = {0};
	struct drm_framebuffer *fb;
	int ret, i, n = drm_format_num_planes(mode_cmd->pixel_format);

	for (i = 0; i < n; i++) {
		bos[i] = drm_gem_object_lookup(dev, file,
				mode_cmd->handles[i]);
		if (!bos[i]) {
			/* NOTE(review): most drivers return -ENOENT for a bad
			 * handle; confirm -ENXIO is intentional here */
			ret = -ENXIO;
			goto out_unref;
		}
	}

	fb = msm_framebuffer_init(dev, mode_cmd, bos);
	if (IS_ERR(fb)) {
		ret = PTR_ERR(fb);
		goto out_unref;
	}

	return fb;

out_unref:
	for (i = 0; i < n; i++)
		drm_gem_object_unreference_unlocked(bos[i]);
	return ERR_PTR(ret);
}
130
131struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
132 struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos)
133{
134 struct msm_drm_private *priv = dev->dev_private;
135 struct msm_kms *kms = priv->kms;
136 struct msm_framebuffer *msm_fb;
137 struct drm_framebuffer *fb = NULL;
138 const struct msm_format *format;
139 int ret, i, n;
140 unsigned int hsub, vsub;
141
142 DBG("create framebuffer: dev=%p, mode_cmd=%p (%dx%d@%4.4s)",
143 dev, mode_cmd, mode_cmd->width, mode_cmd->height,
144 (char *)&mode_cmd->pixel_format);
145
146 n = drm_format_num_planes(mode_cmd->pixel_format);
147 hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format);
148 vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format);
149
150 format = kms->funcs->get_format(kms, mode_cmd->pixel_format);
151 if (!format) {
152 dev_err(dev->dev, "unsupported pixel format: %4.4s\n",
153 (char *)&mode_cmd->pixel_format);
154 ret = -EINVAL;
155 goto fail;
156 }
157
158 msm_fb = kzalloc(sizeof(*msm_fb), GFP_KERNEL);
159 if (!msm_fb) {
160 ret = -ENOMEM;
161 goto fail;
162 }
163
164 fb = &msm_fb->base;
165
166 msm_fb->format = format;
167
168 for (i = 0; i < n; i++) {
169 unsigned int width = mode_cmd->width / (i ? hsub : 1);
170 unsigned int height = mode_cmd->height / (i ? vsub : 1);
171 unsigned int min_size;
172
173 min_size = (height - 1) * mode_cmd->pitches[i]
174 + width * drm_format_plane_cpp(mode_cmd->pixel_format, i)
175 + mode_cmd->offsets[i];
176
177 if (bos[i]->size < min_size) {
178 ret = -EINVAL;
179 goto fail;
180 }
181
182 msm_fb->planes[i] = bos[i];
183 }
184
185 drm_helper_mode_fill_fb_struct(fb, mode_cmd);
186
187 ret = drm_framebuffer_init(dev, fb, &msm_framebuffer_funcs);
188 if (ret) {
189 dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
190 goto fail;
191 }
192
193 DBG("create: FB ID: %d (%p)", fb->base.id, fb);
194
195 return fb;
196
197fail:
198 if (fb)
199 msm_framebuffer_destroy(fb);
200
201 return ERR_PTR(ret);
202}
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
new file mode 100644
index 000000000000..6c6d7d4c9b4e
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -0,0 +1,258 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "msm_drv.h"
19
20#include "drm_crtc.h"
21#include "drm_fb_helper.h"
22
23/*
24 * fbdev funcs, to implement legacy fbdev interface on top of drm driver
25 */
26
#define to_msm_fbdev(x) container_of(x, struct msm_fbdev, base)

/* fbdev state: the drm_fb_helper plus the single fb and backing bo
 * allocated for the fbdev console:
 */
struct msm_fbdev {
	struct drm_fb_helper base;
	struct drm_framebuffer *fb;
	struct drm_gem_object *bo;
};
34
static struct fb_ops msm_fb_ops = {
	.owner = THIS_MODULE,

	/* Note: to properly handle manual update displays, we wrap the
	 * basic fbdev ops which write to the framebuffer
	 */
	.fb_read = fb_sys_read,
	.fb_write = fb_sys_write,
	.fb_fillrect = sys_fillrect,
	.fb_copyarea = sys_copyarea,
	.fb_imageblit = sys_imageblit,

	/* mode-setting paths are delegated to the drm fb helper: */
	.fb_check_var = drm_fb_helper_check_var,
	.fb_set_par = drm_fb_helper_set_par,
	.fb_pan_display = drm_fb_helper_pan_display,
	.fb_blank = drm_fb_helper_blank,
	.fb_setcmap = drm_fb_helper_setcmap,
};
53
/* .fb_probe: allocate the scanout bo, wrap it in an fb, and populate
 * the fbdev fb_info.  Returns 0 on success or a negative errno; on
 * failure any partially-created fbi/fb is torn down here.
 */
static int msm_fbdev_create(struct drm_fb_helper *helper,
		struct drm_fb_helper_surface_size *sizes)
{
	struct msm_fbdev *fbdev = to_msm_fbdev(helper);
	struct drm_device *dev = helper->dev;
	struct drm_framebuffer *fb = NULL;
	struct fb_info *fbi = NULL;
	struct drm_mode_fb_cmd2 mode_cmd = {0};
	dma_addr_t paddr;
	int ret, size;

	/* only doing ARGB32 since this is what is needed to alpha-blend
	 * with video overlays:
	 */
	sizes->surface_bpp = 32;
	sizes->surface_depth = 32;

	DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
			sizes->surface_height, sizes->surface_bpp,
			sizes->fb_width, sizes->fb_height);

	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
			sizes->surface_depth);

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;

	mode_cmd.pitches[0] = align_pitch(
			mode_cmd.width, sizes->surface_bpp);

	/* allocate backing bo */
	size = mode_cmd.pitches[0] * mode_cmd.height;
	DBG("allocating %d bytes for fb %d", size, dev->primary->index);
	/* msm_gem_new() requires struct_mutex held: */
	mutex_lock(&dev->struct_mutex);
	fbdev->bo = msm_gem_new(dev, size, MSM_BO_SCANOUT | MSM_BO_WC);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR(fbdev->bo)) {
		ret = PTR_ERR(fbdev->bo);
		fbdev->bo = NULL;
		dev_err(dev->dev, "failed to allocate buffer object: %d\n", ret);
		goto fail;
	}

	fb = msm_framebuffer_init(dev, &mode_cmd, &fbdev->bo);
	if (IS_ERR(fb)) {
		dev_err(dev->dev, "failed to allocate fb\n");
		/* note: if fb creation failed, we can't rely on fb destroy
		 * to unref the bo:
		 */
		drm_gem_object_unreference(fbdev->bo);
		ret = PTR_ERR(fb);
		goto fail;
	}

	mutex_lock(&dev->struct_mutex);

	/* TODO implement our own fb_mmap so we don't need this: */
	/* NOTE(review): return value is ignored; paddr may be stale if the
	 * iommu mapping failed -- consider checking it */
	msm_gem_get_iova_locked(fbdev->bo, 0, &paddr);

	fbi = framebuffer_alloc(0, dev->dev);
	if (!fbi) {
		dev_err(dev->dev, "failed to allocate fb info\n");
		ret = -ENOMEM;
		goto fail_unlock;
	}

	DBG("fbi=%p, dev=%p", fbi, dev);

	fbdev->fb = fb;
	helper->fb = fb;
	helper->fbdev = fbi;

	fbi->par = helper;
	fbi->flags = FBINFO_DEFAULT;
	fbi->fbops = &msm_fb_ops;

	strcpy(fbi->fix.id, "msm");

	ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
	if (ret) {
		ret = -ENOMEM;
		goto fail_unlock;
	}

	drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
	drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);

	dev->mode_config.fb_base = paddr;

	/* map the bo into the kernel so the console can draw into it: */
	fbi->screen_base = msm_gem_vaddr_locked(fbdev->bo);
	fbi->screen_size = fbdev->bo->size;
	fbi->fix.smem_start = paddr;
	fbi->fix.smem_len = fbdev->bo->size;

	DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
	DBG("allocated %dx%d fb", fbdev->fb->width, fbdev->fb->height);

	mutex_unlock(&dev->struct_mutex);

	return 0;

fail_unlock:
	mutex_unlock(&dev->struct_mutex);
fail:

	/* ret is always non-zero when we get here via goto */
	if (ret) {
		if (fbi)
			framebuffer_release(fbi);
		if (fb) {
			drm_framebuffer_unregister_private(fb);
			/* fb remove also drops its bo reference */
			drm_framebuffer_remove(fb);
		}
	}

	return ret;
}
170
/* fb-helper gamma hook: stub (no gamma hw support wired up yet) */
static void msm_crtc_fb_gamma_set(struct drm_crtc *crtc,
		u16 red, u16 green, u16 blue, int regno)
{
	DBG("fbdev: set gamma");
}
176
/* fb-helper gamma hook: stub -- NOTE(review): leaves *red/*green/*blue
 * unwritten; callers see whatever was there before
 */
static void msm_crtc_fb_gamma_get(struct drm_crtc *crtc,
		u16 *red, u16 *green, u16 *blue, int regno)
{
	DBG("fbdev: get gamma");
}
182
/* hooks handed to drm_fb_helper_init() */
static struct drm_fb_helper_funcs msm_fb_helper_funcs = {
	.gamma_set = msm_crtc_fb_gamma_set,
	.gamma_get = msm_crtc_fb_gamma_get,
	.fb_probe = msm_fbdev_create,
};
188
189/* initialize fbdev helper */
/* initialize fbdev helper */
/* returns the helper (also stashed in priv->fbdev) or NULL on failure */
struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_fbdev *fbdev = NULL;
	struct drm_fb_helper *helper;
	int ret = 0;

	fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
	if (!fbdev)
		goto fail;

	helper = &fbdev->base;

	helper->funcs = &msm_fb_helper_funcs;

	ret = drm_fb_helper_init(dev, helper,
			priv->num_crtcs, priv->num_connectors);
	if (ret) {
		dev_err(dev->dev, "could not init fbdev: ret=%d\n", ret);
		goto fail;
	}

	drm_fb_helper_single_add_all_connectors(helper);

	/* disable all the possible outputs/crtcs before entering KMS mode */
	drm_helper_disable_unused_functions(dev);

	/* 32bpp console; triggers msm_fbdev_create() via .fb_probe */
	drm_fb_helper_initial_config(helper, 32);

	priv->fbdev = helper;

	return helper;

fail:
	kfree(fbdev);
	return NULL;
}
227
/* tear down the fbdev emulation created by msm_fbdev_init() */
void msm_fbdev_free(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_fb_helper *helper = priv->fbdev;
	struct msm_fbdev *fbdev;
	struct fb_info *fbi;

	DBG();

	fbi = helper->fbdev;

	/* only cleanup framebuffer if it is present */
	/* NOTE(review): the cmap allocated in msm_fbdev_create() is not
	 * fb_dealloc_cmap()'d here -- confirm whether that leaks */
	if (fbi) {
		unregister_framebuffer(fbi);
		framebuffer_release(fbi);
	}

	drm_fb_helper_fini(helper);

	fbdev = to_msm_fbdev(priv->fbdev);

	/* this will free the backing object */
	if (fbdev->fb) {
		drm_framebuffer_unregister_private(fbdev->fb);
		drm_framebuffer_remove(fbdev->fb);
	}

	kfree(fbdev);

	priv->fbdev = NULL;
}
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
new file mode 100644
index 000000000000..6b5a6c8c7658
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -0,0 +1,597 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include <linux/spinlock.h>
19#include <linux/shmem_fs.h>
20
21#include "msm_drv.h"
22#include "msm_gem.h"
23#include "msm_gpu.h"
24
25
/* called with dev->struct_mutex held */
/* lazily attach shmem-backed pages + sg table to the object; returns
 * the cached page array, or ERR_PTR on failure.
 */
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p = drm_gem_get_pages(obj, 0);
		int npages = obj->size >> PAGE_SHIFT;

		if (IS_ERR(p)) {
			dev_err(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (!msm_obj->sgt) {
			dev_err(dev->dev, "failed to allocate sgt\n");
			return ERR_PTR(-ENOMEM);
		}

		msm_obj->pages = p;

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}
60
/* release the pages/sgt attached by get_pages(); no-op if never
 * attached.  Counterpart dma-unmap mirrors the map in get_pages().
 */
static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
		sg_free_table(msm_obj->sgt);
		kfree(msm_obj->sgt);

		/* dirty=true, accessed=false */
		drm_gem_put_pages(obj, msm_obj->pages, true, false);
		msm_obj->pages = NULL;
	}
}
79
/* set up vma flags/protections for faulting in an object's pages;
 * shared by the mmap path and (potentially) dmabuf mmap.  Always 0.
 */
int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	/* pages are inserted via vm_insert_mixed(), not remap_pfn_range */
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file  = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}
108
109int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
110{
111 int ret;
112
113 ret = drm_gem_mmap(filp, vma);
114 if (ret) {
115 DBG("mmap failed: %d", ret);
116 return ret;
117 }
118
119 return msm_gem_mmap_obj(vma->vm_private_data, vma);
120}
121
/* vm fault handler: fault in a single page of the object, inserting
 * its pfn into the vma.  Returns a VM_FAULT_* code.
 */
int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(msm_obj->pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);

out_unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	/* translate errno to the VM_FAULT_* codes the mm expects */
	switch (ret) {
	case -EAGAIN:
		set_need_resched();
		/* fallthrough -- retry as NOPAGE after rescheduling */
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}
173
/** get mmap offset */
/* allocate (or reuse) the fake mmap offset for the object.  Caller
 * must hold struct_mutex.  NOTE(review): returns 0 on allocation
 * failure, which callers cannot distinguish from a valid offset of 0.
 */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}
192
193uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
194{
195 uint64_t offset;
196 mutex_lock(&obj->dev->struct_mutex);
197 offset = mmap_offset(obj);
198 mutex_unlock(&obj->dev->struct_mutex);
199 return offset;
200}
201
/* helpers for dealing w/ iommu: */
/* map a scatterlist into the iommu at [iova, iova+len); on any
 * iommu_map() failure, unwind the segments mapped so far and return
 * the error.
 */
static int map_range(struct iommu_domain *domain, unsigned int iova,
		struct sg_table *sgt, unsigned int len, int prot)
{
	struct scatterlist *sg;
	unsigned int da = iova;
	unsigned int i, j;
	int ret;

	if (!domain || !sgt)
		return -EINVAL;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		/* map the whole page(s) backing the segment, so align pa
		 * down and pad the length by the in-page offset.
		 * NOTE(review): u32 phys truncates >4G addresses (LPAE) --
		 * confirm not reachable on these SoCs */
		u32 pa = sg_phys(sg) - sg->offset;
		size_t bytes = sg->length + sg->offset;

		VERB("map[%d]: %08x %08x(%x)", i, iova, pa, bytes);

		ret = iommu_map(domain, da, pa, bytes, prot);
		if (ret)
			goto fail;

		da += bytes;
	}

	return 0;

fail:
	/* unwind: unmap the i segments that succeeded */
	da = iova;

	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes = sg->length + sg->offset;
		iommu_unmap(domain, da, bytes);
		da += bytes;
	}
	return ret;
}
239
/* inverse of map_range(): tear down the iommu mapping segment by
 * segment, stopping early if the iommu unmaps less than requested.
 */
static void unmap_range(struct iommu_domain *domain, unsigned int iova,
		struct sg_table *sgt, unsigned int len)
{
	struct scatterlist *sg;
	unsigned int da = iova;
	int i;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = sg->length + sg->offset;
		size_t unmapped;

		unmapped = iommu_unmap(domain, da, bytes);
		if (unmapped < bytes)
			break;

		VERB("unmap[%d]: %08x(%x)", i, iova, bytes);

		/* iommu mappings are page granular, so this should hold */
		BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));

		da += bytes;
	}
}
262
263/* should be called under struct_mutex.. although it can be called
264 * from atomic context without struct_mutex to acquire an extra
265 * iova ref if you know one is already held.
266 *
267 * That means when I do eventually need to add support for unpinning
268 * the refcnt counter needs to be atomic_t.
269 */
270int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
271 uint32_t *iova)
272{
273 struct msm_gem_object *msm_obj = to_msm_bo(obj);
274 int ret = 0;
275
276 if (!msm_obj->domain[id].iova) {
277 struct msm_drm_private *priv = obj->dev->dev_private;
278 uint32_t offset = (uint32_t)mmap_offset(obj);
279 struct page **pages;
280 pages = get_pages(obj);
281 if (IS_ERR(pages))
282 return PTR_ERR(pages);
283 // XXX ideally we would not map buffers writable when not needed...
284 ret = map_range(priv->iommus[id], offset, msm_obj->sgt,
285 obj->size, IOMMU_READ | IOMMU_WRITE);
286 msm_obj->domain[id].iova = offset;
287 }
288
289 if (!ret)
290 *iova = msm_obj->domain[id].iova;
291
292 return ret;
293}
294
295int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
296{
297 int ret;
298 mutex_lock(&obj->dev->struct_mutex);
299 ret = msm_gem_get_iova_locked(obj, id, iova);
300 mutex_unlock(&obj->dev->struct_mutex);
301 return ret;
302}
303
/* intentionally a no-op for now: iova mappings persist until the
 * object is freed (see msm_gem_free_object()).
 */
void msm_gem_put_iova(struct drm_gem_object *obj, int id)
{
	// XXX TODO ..
	// NOTE: probably don't need a _locked() version.. we wouldn't
	// normally unmap here, but instead just mark that it could be
	// unmapped (if the iova refcnt drops to zero), but then later
	// if another _get_iova_locked() fails we can start unmapping
	// things that are no longer needed..
}
313
314int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
315 struct drm_mode_create_dumb *args)
316{
317 args->pitch = align_pitch(args->width, args->bpp);
318 args->size = PAGE_ALIGN(args->pitch * args->height);
319 return msm_gem_new_handle(dev, file, args->size,
320 MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
321}
322
int msm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
		uint32_t handle)
{
	/* No special work needed, drop the reference and see what falls out */
	return drm_gem_handle_delete(file, handle);
}
329
330int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
331 uint32_t handle, uint64_t *offset)
332{
333 struct drm_gem_object *obj;
334 int ret = 0;
335
336 /* GEM does all our handle to object mapping */
337 obj = drm_gem_object_lookup(dev, file, handle);
338 if (obj == NULL) {
339 ret = -ENOENT;
340 goto fail;
341 }
342
343 *offset = msm_gem_mmap_offset(obj);
344
345 drm_gem_object_unreference_unlocked(obj);
346
347fail:
348 return ret;
349}
350
/* lazily create (and cache) a kernel WC vmap of the object.  Caller
 * holds struct_mutex.  NOTE(review): if vmap() fails this returns
 * NULL rather than an ERR_PTR -- confirm callers handle NULL.
 */
void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages))
			return ERR_CAST(pages);
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}
	return msm_obj->vaddr;
}
364
365void *msm_gem_vaddr(struct drm_gem_object *obj)
366{
367 void *ret;
368 mutex_lock(&obj->dev->struct_mutex);
369 ret = msm_gem_vaddr_locked(obj);
370 mutex_unlock(&obj->dev->struct_mutex);
371 return ret;
372}
373
/* queue work to run once the bo becomes inactive; runs immediately if
 * the bo is already idle.  -EINVAL if the work is already queued.
 */
int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
		struct work_struct *work)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	mutex_lock(&dev->struct_mutex);
	if (!list_empty(&work->entry)) {
		/* work already pending somewhere */
		ret = -EINVAL;
	} else if (is_active(msm_obj)) {
		/* defer until msm_gem_move_to_inactive() */
		list_add_tail(&work->entry, &msm_obj->inactive_work);
	} else {
		queue_work(priv->wq, work);
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
394
395void msm_gem_move_to_active(struct drm_gem_object *obj,
396 struct msm_gpu *gpu, uint32_t fence)
397{
398 struct msm_gem_object *msm_obj = to_msm_bo(obj);
399 msm_obj->gpu = gpu;
400 msm_obj->fence = fence;
401 list_del_init(&msm_obj->mm_list);
402 list_add_tail(&msm_obj->mm_list, &gpu->active_list);
403}
404
/* mark the bo idle: clear gpu/fence, move it back to the inactive
 * list, and flush any work deferred until the bo became idle
 */
void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	msm_obj->fence = 0;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);

	/* drain the work deferred by msm_gem_queue_inactive_work(): */
	while (!list_empty(&msm_obj->inactive_work)) {
		struct work_struct *work;

		work = list_first_entry(&msm_obj->inactive_work,
				struct work_struct, entry);

		list_del_init(&work->entry);
		queue_work(priv->wq, work);
	}
}
428
/* prepare for cpu access: unless MSM_PREP_NOSYNC is set, wait (up to
 * *timeout) for the gpu fence covering this bo to signal
 */
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
		struct timespec *timeout)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	if (is_active(msm_obj) && !(op & MSM_PREP_NOSYNC))
		ret = msm_wait_fence_interruptable(dev, msm_obj->fence, timeout);

	/* TODO cache maintenance */

	return ret;
}
443
/* finish cpu access: currently a no-op placeholder */
int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}
449
#ifdef CONFIG_DEBUG_FS
/* debugfs: one-line dump of flags, activity state ('A'ctive or
 * 'I'nactive), fence, name/refcount, mmap offset, vaddr and size.
 * Caller holds struct_mutex.
 */
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	uint64_t off = drm_vma_node_start(&obj->vma_node);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	seq_printf(m, "%08x: %c(%d) %2d (%2d) %08llx %p %d\n",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			msm_obj->fence, obj->name, obj->refcount.refcount.counter,
			off, msm_obj->vaddr, obj->size);
}
463
/* debugfs: dump every object on the given mm_list plus totals */
void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_printf(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif
481
/* final free, called when the last reference drops (struct_mutex held):
 * tear down iommu mappings, vmap, pages, reservation, and the wrapper
 */
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int id;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
		if (msm_obj->domain[id].iova) {
			struct msm_drm_private *priv = obj->dev->dev_private;
			/* the iova was derived from the mmap offset, so
			 * re-fetch it to know what range to unmap: */
			uint32_t offset = (uint32_t)mmap_offset(obj);
			unmap_range(priv->iommus[id], offset,
					msm_obj->sgt, obj->size);
		}
	}

	drm_gem_free_mmap_offset(obj);

	/* vunmap(NULL) would be a no-op anyway; check kept as-is */
	if (msm_obj->vaddr)
		vunmap(msm_obj->vaddr);

	put_pages(obj);

	/* only fini the embedded reservation, not an imported one */
	if (msm_obj->resv == &msm_obj->_resv)
		reservation_object_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	kfree(msm_obj);
}
518
519/* convenience method to construct a GEM buffer object, and userspace handle */
520int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
521 uint32_t size, uint32_t flags, uint32_t *handle)
522{
523 struct drm_gem_object *obj;
524 int ret;
525
526 ret = mutex_lock_interruptible(&dev->struct_mutex);
527 if (ret)
528 return ret;
529
530 obj = msm_gem_new(dev, size, flags);
531
532 mutex_unlock(&dev->struct_mutex);
533
534 if (IS_ERR(obj))
535 return PTR_ERR(obj);
536
537 ret = drm_gem_handle_create(file, obj, handle);
538
539 /* drop reference from allocate - handle holds it now */
540 drm_gem_object_unreference_unlocked(obj);
541
542 return ret;
543}
544
545struct drm_gem_object *msm_gem_new(struct drm_device *dev,
546 uint32_t size, uint32_t flags)
547{
548 struct msm_drm_private *priv = dev->dev_private;
549 struct msm_gem_object *msm_obj;
550 struct drm_gem_object *obj = NULL;
551 int ret;
552
553 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
554
555 size = PAGE_ALIGN(size);
556
557 switch (flags & MSM_BO_CACHE_MASK) {
558 case MSM_BO_UNCACHED:
559 case MSM_BO_CACHED:
560 case MSM_BO_WC:
561 break;
562 default:
563 dev_err(dev->dev, "invalid cache flag: %x\n",
564 (flags & MSM_BO_CACHE_MASK));
565 ret = -EINVAL;
566 goto fail;
567 }
568
569 msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
570 if (!msm_obj) {
571 ret = -ENOMEM;
572 goto fail;
573 }
574
575 obj = &msm_obj->base;
576
577 ret = drm_gem_object_init(dev, obj, size);
578 if (ret)
579 goto fail;
580
581 msm_obj->flags = flags;
582
583 msm_obj->resv = &msm_obj->_resv;
584 reservation_object_init(msm_obj->resv);
585
586 INIT_LIST_HEAD(&msm_obj->submit_entry);
587 INIT_LIST_HEAD(&msm_obj->inactive_work);
588 list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
589
590 return obj;
591
592fail:
593 if (obj)
594 drm_gem_object_unreference_unlocked(obj);
595
596 return ERR_PTR(ret);
597}
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
new file mode 100644
index 000000000000..d746f13d283c
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -0,0 +1,99 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __MSM_GEM_H__
19#define __MSM_GEM_H__
20
21#include <linux/reservation.h>
22#include "msm_drv.h"
23
struct msm_gem_object {
	struct drm_gem_object base;

	/* MSM_BO_x allocation flags (cache mode, scanout, ..) */
	uint32_t flags;

	/* An object is either:
	 *  inactive - on priv->inactive_list
	 *  active   - on one of the gpu's active_list..  well, at
	 *     least for now we don't have (I don't think) hw sync between
	 *     2d and 3d one devices which have both, meaning we need to
	 *     block on submit if a bo is already on other ring
	 *
	 */
	struct list_head mm_list;
	struct msm_gpu *gpu;     /* non-null if active */
	uint32_t fence;
	
	/* Transiently in the process of submit ioctl, objects associated
	 * with the submit are on submit->bo_list.. this only lasts for
	 * the duration of the ioctl, so one bo can never be on multiple
	 * submit lists.
	 */
	struct list_head submit_entry;

	/* work deferred until bo is inactive (see msm_gem_queue_inactive_work) */
	struct list_head inactive_work;

	/* backing pages + sg table, attached lazily by get_pages() */
	struct page **pages;
	struct sg_table *sgt;
	/* cached kernel vmap, created lazily by msm_gem_vaddr_locked() */
	void *vaddr;

	/* per-iommu-domain device address, 0 until first mapped */
	struct {
		// XXX
		uint32_t iova;
	} domain[NUM_DOMAINS];

	/* normally (resv == &_resv) except for imported bo's */
	struct reservation_object *resv;
	struct reservation_object _resv;
};
#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
65
66static inline bool is_active(struct msm_gem_object *msm_obj)
67{
68 return msm_obj->gpu != NULL;
69}
70
/* max cmdstream buffers per submit ioctl */
#define MAX_CMDS 4

/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
 * associated with the cmdstream submission for synchronization (and
 * make it easier to unwind when things go wrong, etc).  This only
 * lasts for the duration of the submit-ioctl.
 */
struct msm_gem_submit {
	struct drm_device *dev;
	struct msm_gpu *gpu;
	struct list_head bo_list;          /* bos referenced by this submit */
	struct ww_acquire_ctx ticket;      /* for locking multiple reservations */
	uint32_t fence;
	bool valid;
	unsigned int nr_cmds;
	unsigned int nr_bos;
	struct {
		uint32_t type;
		uint32_t size;  /* in dwords */
		uint32_t iova;
	} cmd[MAX_CMDS];
	/* flexible array, sized by nr_bos at allocation: */
	struct {
		uint32_t flags;
		struct msm_gem_object *obj;
		uint32_t iova;
	} bos[0];
};
98
99#endif /* __MSM_GEM_H__ */
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
new file mode 100644
index 000000000000..3e1ef3a00f60
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -0,0 +1,412 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "msm_drv.h"
19#include "msm_gpu.h"
20#include "msm_gem.h"
21
22/*
23 * Cmdstream submission:
24 */
25
26#define BO_INVALID_FLAGS ~(MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE)
27/* make sure these don't conflict w/ MSM_SUBMIT_BO_x */
28#define BO_VALID 0x8000
29#define BO_LOCKED 0x4000
30#define BO_PINNED 0x2000
31
32static inline void __user *to_user_ptr(u64 address)
33{
34 return (void __user *)(uintptr_t)address;
35}
36
37static struct msm_gem_submit *submit_create(struct drm_device *dev,
38 struct msm_gpu *gpu, int nr)
39{
40 struct msm_gem_submit *submit;
41 int sz = sizeof(*submit) + (nr * sizeof(submit->bos[0]));
42
43 submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
44 if (submit) {
45 submit->dev = dev;
46 submit->gpu = gpu;
47
48 /* initially, until copy_from_user() and bo lookup succeeds: */
49 submit->nr_bos = 0;
50 submit->nr_cmds = 0;
51
52 INIT_LIST_HEAD(&submit->bo_list);
53 ww_acquire_init(&submit->ticket, &reservation_ww_class);
54 }
55
56 return submit;
57}
58
59static int submit_lookup_objects(struct msm_gem_submit *submit,
60 struct drm_msm_gem_submit *args, struct drm_file *file)
61{
62 unsigned i;
63 int ret = 0;
64
65 spin_lock(&file->table_lock);
66
67 for (i = 0; i < args->nr_bos; i++) {
68 struct drm_msm_gem_submit_bo submit_bo;
69 struct drm_gem_object *obj;
70 struct msm_gem_object *msm_obj;
71 void __user *userptr =
72 to_user_ptr(args->bos + (i * sizeof(submit_bo)));
73
74 ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
75 if (ret) {
76 ret = -EFAULT;
77 goto out_unlock;
78 }
79
80 if (submit_bo.flags & BO_INVALID_FLAGS) {
81 DBG("invalid flags: %x", submit_bo.flags);
82 ret = -EINVAL;
83 goto out_unlock;
84 }
85
86 submit->bos[i].flags = submit_bo.flags;
87 /* in validate_objects() we figure out if this is true: */
88 submit->bos[i].iova = submit_bo.presumed;
89
90 /* normally use drm_gem_object_lookup(), but for bulk lookup
91 * all under single table_lock just hit object_idr directly:
92 */
93 obj = idr_find(&file->object_idr, submit_bo.handle);
94 if (!obj) {
95 DBG("invalid handle %u at index %u", submit_bo.handle, i);
96 ret = -EINVAL;
97 goto out_unlock;
98 }
99
100 msm_obj = to_msm_bo(obj);
101
102 if (!list_empty(&msm_obj->submit_entry)) {
103 DBG("handle %u at index %u already on submit list",
104 submit_bo.handle, i);
105 ret = -EINVAL;
106 goto out_unlock;
107 }
108
109 drm_gem_object_reference(obj);
110
111 submit->bos[i].obj = msm_obj;
112
113 list_add_tail(&msm_obj->submit_entry, &submit->bo_list);
114 }
115
116out_unlock:
117 submit->nr_bos = i;
118 spin_unlock(&file->table_lock);
119
120 return ret;
121}
122
123static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
124{
125 struct msm_gem_object *msm_obj = submit->bos[i].obj;
126
127 if (submit->bos[i].flags & BO_PINNED)
128 msm_gem_put_iova(&msm_obj->base, submit->gpu->id);
129
130 if (submit->bos[i].flags & BO_LOCKED)
131 ww_mutex_unlock(&msm_obj->resv->lock);
132
133 if (!(submit->bos[i].flags & BO_VALID))
134 submit->bos[i].iova = 0;
135
136 submit->bos[i].flags &= ~(BO_LOCKED | BO_PINNED);
137}
138
139/* This is where we make sure all the bo's are reserved and pin'd: */
140static int submit_validate_objects(struct msm_gem_submit *submit)
141{
142 int contended, slow_locked = -1, i, ret = 0;
143
144retry:
145 submit->valid = true;
146
147 for (i = 0; i < submit->nr_bos; i++) {
148 struct msm_gem_object *msm_obj = submit->bos[i].obj;
149 uint32_t iova;
150
151 if (slow_locked == i)
152 slow_locked = -1;
153
154 contended = i;
155
156 if (!(submit->bos[i].flags & BO_LOCKED)) {
157 ret = ww_mutex_lock_interruptible(&msm_obj->resv->lock,
158 &submit->ticket);
159 if (ret)
160 goto fail;
161 submit->bos[i].flags |= BO_LOCKED;
162 }
163
164
165 /* if locking succeeded, pin bo: */
166 ret = msm_gem_get_iova(&msm_obj->base,
167 submit->gpu->id, &iova);
168
169 /* this would break the logic in the fail path.. there is no
170 * reason for this to happen, but just to be on the safe side
171 * let's notice if this starts happening in the future:
172 */
173 WARN_ON(ret == -EDEADLK);
174
175 if (ret)
176 goto fail;
177
178 submit->bos[i].flags |= BO_PINNED;
179
180 if (iova == submit->bos[i].iova) {
181 submit->bos[i].flags |= BO_VALID;
182 } else {
183 submit->bos[i].iova = iova;
184 submit->bos[i].flags &= ~BO_VALID;
185 submit->valid = false;
186 }
187 }
188
189 ww_acquire_done(&submit->ticket);
190
191 return 0;
192
193fail:
194 for (; i >= 0; i--)
195 submit_unlock_unpin_bo(submit, i);
196
197 if (slow_locked > 0)
198 submit_unlock_unpin_bo(submit, slow_locked);
199
200 if (ret == -EDEADLK) {
201 struct msm_gem_object *msm_obj = submit->bos[contended].obj;
202 /* we lost out in a seqno race, lock and retry.. */
203 ret = ww_mutex_lock_slow_interruptible(&msm_obj->resv->lock,
204 &submit->ticket);
205 if (!ret) {
206 submit->bos[contended].flags |= BO_LOCKED;
207 slow_locked = contended;
208 goto retry;
209 }
210 }
211
212 return ret;
213}
214
215static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
216 struct msm_gem_object **obj, uint32_t *iova, bool *valid)
217{
218 if (idx >= submit->nr_bos) {
219 DBG("invalid buffer index: %u (out of %u)", idx, submit->nr_bos);
220 return EINVAL;
221 }
222
223 if (obj)
224 *obj = submit->bos[idx].obj;
225 if (iova)
226 *iova = submit->bos[idx].iova;
227 if (valid)
228 *valid = !!(submit->bos[idx].flags & BO_VALID);
229
230 return 0;
231}
232
233/* process the reloc's and patch up the cmdstream as needed: */
234static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *obj,
235 uint32_t offset, uint32_t nr_relocs, uint64_t relocs)
236{
237 uint32_t i, last_offset = 0;
238 uint32_t *ptr;
239 int ret;
240
241 if (offset % 4) {
242 DBG("non-aligned cmdstream buffer: %u", offset);
243 return -EINVAL;
244 }
245
246 /* For now, just map the entire thing. Eventually we probably
247 * to do it page-by-page, w/ kmap() if not vmap()d..
248 */
249 ptr = msm_gem_vaddr(&obj->base);
250
251 if (IS_ERR(ptr)) {
252 ret = PTR_ERR(ptr);
253 DBG("failed to map: %d", ret);
254 return ret;
255 }
256
257 for (i = 0; i < nr_relocs; i++) {
258 struct drm_msm_gem_submit_reloc submit_reloc;
259 void __user *userptr =
260 to_user_ptr(relocs + (i * sizeof(submit_reloc)));
261 uint32_t iova, off;
262 bool valid;
263
264 ret = copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc));
265 if (ret)
266 return -EFAULT;
267
268 if (submit_reloc.submit_offset % 4) {
269 DBG("non-aligned reloc offset: %u",
270 submit_reloc.submit_offset);
271 return -EINVAL;
272 }
273
274 /* offset in dwords: */
275 off = submit_reloc.submit_offset / 4;
276
277 if ((off >= (obj->base.size / 4)) ||
278 (off < last_offset)) {
279 DBG("invalid offset %u at reloc %u", off, i);
280 return -EINVAL;
281 }
282
283 ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid);
284 if (ret)
285 return ret;
286
287 if (valid)
288 continue;
289
290 iova += submit_reloc.reloc_offset;
291
292 if (submit_reloc.shift < 0)
293 iova >>= -submit_reloc.shift;
294 else
295 iova <<= submit_reloc.shift;
296
297 ptr[off] = iova | submit_reloc.or;
298
299 last_offset = off;
300 }
301
302 return 0;
303}
304
305static void submit_cleanup(struct msm_gem_submit *submit, bool fail)
306{
307 unsigned i;
308
309 mutex_lock(&submit->dev->struct_mutex);
310 for (i = 0; i < submit->nr_bos; i++) {
311 struct msm_gem_object *msm_obj = submit->bos[i].obj;
312 submit_unlock_unpin_bo(submit, i);
313 list_del_init(&msm_obj->submit_entry);
314 drm_gem_object_unreference(&msm_obj->base);
315 }
316 mutex_unlock(&submit->dev->struct_mutex);
317
318 ww_acquire_fini(&submit->ticket);
319 kfree(submit);
320}
321
322int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
323 struct drm_file *file)
324{
325 struct msm_drm_private *priv = dev->dev_private;
326 struct drm_msm_gem_submit *args = data;
327 struct msm_file_private *ctx = file->driver_priv;
328 struct msm_gem_submit *submit;
329 struct msm_gpu *gpu;
330 unsigned i;
331 int ret;
332
333 /* for now, we just have 3d pipe.. eventually this would need to
334 * be more clever to dispatch to appropriate gpu module:
335 */
336 if (args->pipe != MSM_PIPE_3D0)
337 return -EINVAL;
338
339 gpu = priv->gpu;
340
341 if (args->nr_cmds > MAX_CMDS)
342 return -EINVAL;
343
344 submit = submit_create(dev, gpu, args->nr_bos);
345 if (!submit) {
346 ret = -ENOMEM;
347 goto out;
348 }
349
350 ret = submit_lookup_objects(submit, args, file);
351 if (ret)
352 goto out;
353
354 ret = submit_validate_objects(submit);
355 if (ret)
356 goto out;
357
358 for (i = 0; i < args->nr_cmds; i++) {
359 struct drm_msm_gem_submit_cmd submit_cmd;
360 void __user *userptr =
361 to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));
362 struct msm_gem_object *msm_obj;
363 uint32_t iova;
364
365 ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd));
366 if (ret) {
367 ret = -EFAULT;
368 goto out;
369 }
370
371 ret = submit_bo(submit, submit_cmd.submit_idx,
372 &msm_obj, &iova, NULL);
373 if (ret)
374 goto out;
375
376 if (submit_cmd.size % 4) {
377 DBG("non-aligned cmdstream buffer size: %u",
378 submit_cmd.size);
379 ret = -EINVAL;
380 goto out;
381 }
382
383 if (submit_cmd.size >= msm_obj->base.size) {
384 DBG("invalid cmdstream size: %u", submit_cmd.size);
385 ret = -EINVAL;
386 goto out;
387 }
388
389 submit->cmd[i].type = submit_cmd.type;
390 submit->cmd[i].size = submit_cmd.size / 4;
391 submit->cmd[i].iova = iova + submit_cmd.submit_offset;
392
393 if (submit->valid)
394 continue;
395
396 ret = submit_reloc(submit, msm_obj, submit_cmd.submit_offset,
397 submit_cmd.nr_relocs, submit_cmd.relocs);
398 if (ret)
399 goto out;
400 }
401
402 submit->nr_cmds = i;
403
404 ret = msm_gpu_submit(gpu, submit, ctx);
405
406 args->fence = submit->fence;
407
408out:
409 if (submit)
410 submit_cleanup(submit, !!ret);
411 return ret;
412}
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
new file mode 100644
index 000000000000..e1e1ec9321ff
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -0,0 +1,463 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "msm_gpu.h"
19#include "msm_gem.h"
20
21
22/*
23 * Power Management:
24 */
25
26#ifdef CONFIG_MSM_BUS_SCALING
27#include <mach/board.h>
28#include <mach/kgsl.h>
29static void bs_init(struct msm_gpu *gpu, struct platform_device *pdev)
30{
31 struct drm_device *dev = gpu->dev;
32 struct kgsl_device_platform_data *pdata = pdev->dev.platform_data;
33
34 if (!pdev) {
35 dev_err(dev->dev, "could not find dtv pdata\n");
36 return;
37 }
38
39 if (pdata->bus_scale_table) {
40 gpu->bsc = msm_bus_scale_register_client(pdata->bus_scale_table);
41 DBG("bus scale client: %08x", gpu->bsc);
42 }
43}
44
45static void bs_fini(struct msm_gpu *gpu)
46{
47 if (gpu->bsc) {
48 msm_bus_scale_unregister_client(gpu->bsc);
49 gpu->bsc = 0;
50 }
51}
52
53static void bs_set(struct msm_gpu *gpu, int idx)
54{
55 if (gpu->bsc) {
56 DBG("set bus scaling: %d", idx);
57 msm_bus_scale_client_update_request(gpu->bsc, idx);
58 }
59}
60#else
61static void bs_init(struct msm_gpu *gpu, struct platform_device *pdev) {}
62static void bs_fini(struct msm_gpu *gpu) {}
63static void bs_set(struct msm_gpu *gpu, int idx) {}
64#endif
65
66static int enable_pwrrail(struct msm_gpu *gpu)
67{
68 struct drm_device *dev = gpu->dev;
69 int ret = 0;
70
71 if (gpu->gpu_reg) {
72 ret = regulator_enable(gpu->gpu_reg);
73 if (ret) {
74 dev_err(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
75 return ret;
76 }
77 }
78
79 if (gpu->gpu_cx) {
80 ret = regulator_enable(gpu->gpu_cx);
81 if (ret) {
82 dev_err(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
83 return ret;
84 }
85 }
86
87 return 0;
88}
89
90static int disable_pwrrail(struct msm_gpu *gpu)
91{
92 if (gpu->gpu_cx)
93 regulator_disable(gpu->gpu_cx);
94 if (gpu->gpu_reg)
95 regulator_disable(gpu->gpu_reg);
96 return 0;
97}
98
99static int enable_clk(struct msm_gpu *gpu)
100{
101 struct clk *rate_clk = NULL;
102 int i;
103
104 /* NOTE: kgsl_pwrctrl_clk() ignores grp_clks[0].. */
105 for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) {
106 if (gpu->grp_clks[i]) {
107 clk_prepare(gpu->grp_clks[i]);
108 rate_clk = gpu->grp_clks[i];
109 }
110 }
111
112 if (rate_clk && gpu->fast_rate)
113 clk_set_rate(rate_clk, gpu->fast_rate);
114
115 for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--)
116 if (gpu->grp_clks[i])
117 clk_enable(gpu->grp_clks[i]);
118
119 return 0;
120}
121
122static int disable_clk(struct msm_gpu *gpu)
123{
124 struct clk *rate_clk = NULL;
125 int i;
126
127 /* NOTE: kgsl_pwrctrl_clk() ignores grp_clks[0].. */
128 for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) {
129 if (gpu->grp_clks[i]) {
130 clk_disable(gpu->grp_clks[i]);
131 rate_clk = gpu->grp_clks[i];
132 }
133 }
134
135 if (rate_clk && gpu->slow_rate)
136 clk_set_rate(rate_clk, gpu->slow_rate);
137
138 for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--)
139 if (gpu->grp_clks[i])
140 clk_unprepare(gpu->grp_clks[i]);
141
142 return 0;
143}
144
145static int enable_axi(struct msm_gpu *gpu)
146{
147 if (gpu->ebi1_clk)
148 clk_prepare_enable(gpu->ebi1_clk);
149 if (gpu->bus_freq)
150 bs_set(gpu, gpu->bus_freq);
151 return 0;
152}
153
154static int disable_axi(struct msm_gpu *gpu)
155{
156 if (gpu->ebi1_clk)
157 clk_disable_unprepare(gpu->ebi1_clk);
158 if (gpu->bus_freq)
159 bs_set(gpu, 0);
160 return 0;
161}
162
163int msm_gpu_pm_resume(struct msm_gpu *gpu)
164{
165 int ret;
166
167 DBG("%s", gpu->name);
168
169 ret = enable_pwrrail(gpu);
170 if (ret)
171 return ret;
172
173 ret = enable_clk(gpu);
174 if (ret)
175 return ret;
176
177 ret = enable_axi(gpu);
178 if (ret)
179 return ret;
180
181 return 0;
182}
183
184int msm_gpu_pm_suspend(struct msm_gpu *gpu)
185{
186 int ret;
187
188 DBG("%s", gpu->name);
189
190 ret = disable_axi(gpu);
191 if (ret)
192 return ret;
193
194 ret = disable_clk(gpu);
195 if (ret)
196 return ret;
197
198 ret = disable_pwrrail(gpu);
199 if (ret)
200 return ret;
201
202 return 0;
203}
204
205/*
206 * Hangcheck detection for locked gpu:
207 */
208
209static void recover_worker(struct work_struct *work)
210{
211 struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
212 struct drm_device *dev = gpu->dev;
213
214 dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name);
215
216 mutex_lock(&dev->struct_mutex);
217 gpu->funcs->recover(gpu);
218 mutex_unlock(&dev->struct_mutex);
219
220 msm_gpu_retire(gpu);
221}
222
223static void hangcheck_timer_reset(struct msm_gpu *gpu)
224{
225 DBG("%s", gpu->name);
226 mod_timer(&gpu->hangcheck_timer,
227 round_jiffies_up(jiffies + DRM_MSM_HANGCHECK_JIFFIES));
228}
229
230static void hangcheck_handler(unsigned long data)
231{
232 struct msm_gpu *gpu = (struct msm_gpu *)data;
233 uint32_t fence = gpu->funcs->last_fence(gpu);
234
235 if (fence != gpu->hangcheck_fence) {
236 /* some progress has been made.. ya! */
237 gpu->hangcheck_fence = fence;
238 } else if (fence < gpu->submitted_fence) {
239 /* no progress and not done.. hung! */
240 struct msm_drm_private *priv = gpu->dev->dev_private;
241 gpu->hangcheck_fence = fence;
242 queue_work(priv->wq, &gpu->recover_work);
243 }
244
245 /* if still more pending work, reset the hangcheck timer: */
246 if (gpu->submitted_fence > gpu->hangcheck_fence)
247 hangcheck_timer_reset(gpu);
248}
249
250/*
251 * Cmdstream submission/retirement:
252 */
253
254static void retire_worker(struct work_struct *work)
255{
256 struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
257 struct drm_device *dev = gpu->dev;
258 uint32_t fence = gpu->funcs->last_fence(gpu);
259
260 mutex_lock(&dev->struct_mutex);
261
262 while (!list_empty(&gpu->active_list)) {
263 struct msm_gem_object *obj;
264
265 obj = list_first_entry(&gpu->active_list,
266 struct msm_gem_object, mm_list);
267
268 if (obj->fence <= fence) {
269 /* move to inactive: */
270 msm_gem_move_to_inactive(&obj->base);
271 msm_gem_put_iova(&obj->base, gpu->id);
272 drm_gem_object_unreference(&obj->base);
273 } else {
274 break;
275 }
276 }
277
278 msm_update_fence(gpu->dev, fence);
279
280 mutex_unlock(&dev->struct_mutex);
281}
282
283/* call from irq handler to schedule work to retire bo's */
284void msm_gpu_retire(struct msm_gpu *gpu)
285{
286 struct msm_drm_private *priv = gpu->dev->dev_private;
287 queue_work(priv->wq, &gpu->retire_work);
288}
289
290/* add bo's to gpu's ring, and kick gpu: */
291int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
292 struct msm_file_private *ctx)
293{
294 struct drm_device *dev = gpu->dev;
295 struct msm_drm_private *priv = dev->dev_private;
296 int i, ret;
297
298 mutex_lock(&dev->struct_mutex);
299
300 submit->fence = ++priv->next_fence;
301
302 gpu->submitted_fence = submit->fence;
303
304 ret = gpu->funcs->submit(gpu, submit, ctx);
305 priv->lastctx = ctx;
306
307 for (i = 0; i < submit->nr_bos; i++) {
308 struct msm_gem_object *msm_obj = submit->bos[i].obj;
309
310 /* can't happen yet.. but when we add 2d support we'll have
311 * to deal w/ cross-ring synchronization:
312 */
313 WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu));
314
315 if (!is_active(msm_obj)) {
316 uint32_t iova;
317
318 /* ring takes a reference to the bo and iova: */
319 drm_gem_object_reference(&msm_obj->base);
320 msm_gem_get_iova_locked(&msm_obj->base,
321 submit->gpu->id, &iova);
322 }
323
324 msm_gem_move_to_active(&msm_obj->base, gpu, submit->fence);
325 }
326 hangcheck_timer_reset(gpu);
327 mutex_unlock(&dev->struct_mutex);
328
329 return ret;
330}
331
332/*
333 * Init/Cleanup:
334 */
335
336static irqreturn_t irq_handler(int irq, void *data)
337{
338 struct msm_gpu *gpu = data;
339 return gpu->funcs->irq(gpu);
340}
341
342static const char *clk_names[] = {
343 "src_clk", "core_clk", "iface_clk", "mem_clk", "mem_iface_clk",
344};
345
346int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
347 struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
348 const char *name, const char *ioname, const char *irqname, int ringsz)
349{
350 int i, ret;
351
352 gpu->dev = drm;
353 gpu->funcs = funcs;
354 gpu->name = name;
355
356 INIT_LIST_HEAD(&gpu->active_list);
357 INIT_WORK(&gpu->retire_work, retire_worker);
358 INIT_WORK(&gpu->recover_work, recover_worker);
359
360 setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
361 (unsigned long)gpu);
362
363 BUG_ON(ARRAY_SIZE(clk_names) != ARRAY_SIZE(gpu->grp_clks));
364
365 /* Map registers: */
366 gpu->mmio = msm_ioremap(pdev, ioname, name);
367 if (IS_ERR(gpu->mmio)) {
368 ret = PTR_ERR(gpu->mmio);
369 goto fail;
370 }
371
372 /* Get Interrupt: */
373 gpu->irq = platform_get_irq_byname(pdev, irqname);
374 if (gpu->irq < 0) {
375 ret = gpu->irq;
376 dev_err(drm->dev, "failed to get irq: %d\n", ret);
377 goto fail;
378 }
379
380 ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
381 IRQF_TRIGGER_HIGH, gpu->name, gpu);
382 if (ret) {
383 dev_err(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
384 goto fail;
385 }
386
387 /* Acquire clocks: */
388 for (i = 0; i < ARRAY_SIZE(clk_names); i++) {
389 gpu->grp_clks[i] = devm_clk_get(&pdev->dev, clk_names[i]);
390 DBG("grp_clks[%s]: %p", clk_names[i], gpu->grp_clks[i]);
391 if (IS_ERR(gpu->grp_clks[i]))
392 gpu->grp_clks[i] = NULL;
393 }
394
395 gpu->ebi1_clk = devm_clk_get(&pdev->dev, "bus_clk");
396 DBG("ebi1_clk: %p", gpu->ebi1_clk);
397 if (IS_ERR(gpu->ebi1_clk))
398 gpu->ebi1_clk = NULL;
399
400 /* Acquire regulators: */
401 gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd");
402 DBG("gpu_reg: %p", gpu->gpu_reg);
403 if (IS_ERR(gpu->gpu_reg))
404 gpu->gpu_reg = NULL;
405
406 gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx");
407 DBG("gpu_cx: %p", gpu->gpu_cx);
408 if (IS_ERR(gpu->gpu_cx))
409 gpu->gpu_cx = NULL;
410
411 /* Setup IOMMU.. eventually we will (I think) do this once per context
412 * and have separate page tables per context. For now, to keep things
413 * simple and to get something working, just use a single address space:
414 */
415 gpu->iommu = iommu_domain_alloc(&platform_bus_type);
416 if (!gpu->iommu) {
417 dev_err(drm->dev, "failed to allocate IOMMU\n");
418 ret = -ENOMEM;
419 goto fail;
420 }
421 gpu->id = msm_register_iommu(drm, gpu->iommu);
422
423 /* Create ringbuffer: */
424 gpu->rb = msm_ringbuffer_new(gpu, ringsz);
425 if (IS_ERR(gpu->rb)) {
426 ret = PTR_ERR(gpu->rb);
427 gpu->rb = NULL;
428 dev_err(drm->dev, "could not create ringbuffer: %d\n", ret);
429 goto fail;
430 }
431
432 ret = msm_gem_get_iova_locked(gpu->rb->bo, gpu->id, &gpu->rb_iova);
433 if (ret) {
434 gpu->rb_iova = 0;
435 dev_err(drm->dev, "could not map ringbuffer: %d\n", ret);
436 goto fail;
437 }
438
439 bs_init(gpu, pdev);
440
441 return 0;
442
443fail:
444 return ret;
445}
446
447void msm_gpu_cleanup(struct msm_gpu *gpu)
448{
449 DBG("%s", gpu->name);
450
451 WARN_ON(!list_empty(&gpu->active_list));
452
453 bs_fini(gpu);
454
455 if (gpu->rb) {
456 if (gpu->rb_iova)
457 msm_gem_put_iova(gpu->rb->bo, gpu->id);
458 msm_ringbuffer_destroy(gpu->rb);
459 }
460
461 if (gpu->iommu)
462 iommu_domain_free(gpu->iommu);
463}
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
new file mode 100644
index 000000000000..8cd829e520bb
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -0,0 +1,124 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __MSM_GPU_H__
19#define __MSM_GPU_H__
20
21#include <linux/clk.h>
22#include <linux/regulator/consumer.h>
23
24#include "msm_drv.h"
25#include "msm_ringbuffer.h"
26
27struct msm_gem_submit;
28
29/* So far, with hardware that I've seen to date, we can have:
30 * + zero, one, or two z180 2d cores
31 * + a3xx or a2xx 3d core, which share a common CP (the firmware
32 * for the CP seems to implement some different PM4 packet types
33 * but the basics of cmdstream submission are the same)
34 *
35 * Which means that the eventual complete "class" hierarchy, once
36 * support for all past and present hw is in place, becomes:
37 * + msm_gpu
38 * + adreno_gpu
39 * + a3xx_gpu
40 * + a2xx_gpu
41 * + z180_gpu
42 */
43struct msm_gpu_funcs {
44 int (*get_param)(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
45 int (*hw_init)(struct msm_gpu *gpu);
46 int (*pm_suspend)(struct msm_gpu *gpu);
47 int (*pm_resume)(struct msm_gpu *gpu);
48 int (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit,
49 struct msm_file_private *ctx);
50 void (*flush)(struct msm_gpu *gpu);
51 void (*idle)(struct msm_gpu *gpu);
52 irqreturn_t (*irq)(struct msm_gpu *irq);
53 uint32_t (*last_fence)(struct msm_gpu *gpu);
54 void (*recover)(struct msm_gpu *gpu);
55 void (*destroy)(struct msm_gpu *gpu);
56#ifdef CONFIG_DEBUG_FS
57 /* show GPU status in debugfs: */
58 void (*show)(struct msm_gpu *gpu, struct seq_file *m);
59#endif
60};
61
62struct msm_gpu {
63 const char *name;
64 struct drm_device *dev;
65 const struct msm_gpu_funcs *funcs;
66
67 struct msm_ringbuffer *rb;
68 uint32_t rb_iova;
69
70 /* list of GEM active objects: */
71 struct list_head active_list;
72
73 uint32_t submitted_fence;
74
75 /* worker for handling active-list retiring: */
76 struct work_struct retire_work;
77
78 void __iomem *mmio;
79 int irq;
80
81 struct iommu_domain *iommu;
82 int id;
83
84 /* Power Control: */
85 struct regulator *gpu_reg, *gpu_cx;
86 struct clk *ebi1_clk, *grp_clks[5];
87 uint32_t fast_rate, slow_rate, bus_freq;
88 uint32_t bsc;
89
90 /* Hang Detction: */
91#define DRM_MSM_HANGCHECK_PERIOD 500 /* in ms */
92#define DRM_MSM_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_MSM_HANGCHECK_PERIOD)
93 struct timer_list hangcheck_timer;
94 uint32_t hangcheck_fence;
95 struct work_struct recover_work;
96};
97
98static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data)
99{
100 msm_writel(data, gpu->mmio + (reg << 2));
101}
102
103static inline u32 gpu_read(struct msm_gpu *gpu, u32 reg)
104{
105 return msm_readl(gpu->mmio + (reg << 2));
106}
107
108int msm_gpu_pm_suspend(struct msm_gpu *gpu);
109int msm_gpu_pm_resume(struct msm_gpu *gpu);
110
111void msm_gpu_retire(struct msm_gpu *gpu);
112int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
113 struct msm_file_private *ctx);
114
115int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
116 struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
117 const char *name, const char *ioname, const char *irqname, int ringsz);
118void msm_gpu_cleanup(struct msm_gpu *gpu);
119
120struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
121void __init a3xx_register(void);
122void __exit a3xx_unregister(void);
123
124#endif /* __MSM_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
new file mode 100644
index 000000000000..8171537dd7d1
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -0,0 +1,61 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "msm_ringbuffer.h"
19#include "msm_gpu.h"
20
21struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
22{
23 struct msm_ringbuffer *ring;
24 int ret;
25
26 size = ALIGN(size, 4); /* size should be dword aligned */
27
28 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
29 if (!ring) {
30 ret = -ENOMEM;
31 goto fail;
32 }
33
34 ring->gpu = gpu;
35 ring->bo = msm_gem_new(gpu->dev, size, MSM_BO_WC);
36 if (IS_ERR(ring->bo)) {
37 ret = PTR_ERR(ring->bo);
38 ring->bo = NULL;
39 goto fail;
40 }
41
42 ring->start = msm_gem_vaddr_locked(ring->bo);
43 ring->end = ring->start + (size / 4);
44 ring->cur = ring->start;
45
46 ring->size = size;
47
48 return ring;
49
50fail:
51 if (ring)
52 msm_ringbuffer_destroy(ring);
53 return ERR_PTR(ret);
54}
55
56void msm_ringbuffer_destroy(struct msm_ringbuffer *ring)
57{
58 if (ring->bo)
59 drm_gem_object_unreference(ring->bo);
60 kfree(ring);
61}
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.h b/drivers/gpu/drm/msm/msm_ringbuffer.h
new file mode 100644
index 000000000000..6e0e1049fa4f
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.h
@@ -0,0 +1,43 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __MSM_RINGBUFFER_H__
19#define __MSM_RINGBUFFER_H__
20
21#include "msm_drv.h"
22
23struct msm_ringbuffer {
24 struct msm_gpu *gpu;
25 int size;
26 struct drm_gem_object *bo;
27 uint32_t *start, *end, *cur;
28};
29
30struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size);
31void msm_ringbuffer_destroy(struct msm_ringbuffer *ring);
32
33/* ringbuffer helpers (the parts that are same for a3xx/a2xx/z180..) */
34
35static inline void
36OUT_RING(struct msm_ringbuffer *ring, uint32_t data)
37{
38 if (ring->cur == ring->end)
39 ring->cur = ring->start;
40 *(ring->cur++) = data;
41}
42
43#endif /* __MSM_RINGBUFFER_H__ */
diff --git a/drivers/gpu/drm/nouveau/core/core/printk.c b/drivers/gpu/drm/nouveau/core/core/printk.c
index 6161eaf5447c..52fb2aa129e8 100644
--- a/drivers/gpu/drm/nouveau/core/core/printk.c
+++ b/drivers/gpu/drm/nouveau/core/core/printk.c
@@ -27,6 +27,8 @@
27#include <core/subdev.h> 27#include <core/subdev.h>
28#include <core/printk.h> 28#include <core/printk.h>
29 29
30int nv_printk_suspend_level = NV_DBG_DEBUG;
31
30void 32void
31nv_printk_(struct nouveau_object *object, const char *pfx, int level, 33nv_printk_(struct nouveau_object *object, const char *pfx, int level,
32 const char *fmt, ...) 34 const char *fmt, ...)
@@ -72,3 +74,20 @@ nv_printk_(struct nouveau_object *object, const char *pfx, int level,
72 vprintk(mfmt, args); 74 vprintk(mfmt, args);
73 va_end(args); 75 va_end(args);
74} 76}
77
78#define CONV_LEVEL(x) case NV_DBG_##x: return NV_PRINTK_##x
79
80const char *nv_printk_level_to_pfx(int level)
81{
82 switch (level) {
83 CONV_LEVEL(FATAL);
84 CONV_LEVEL(ERROR);
85 CONV_LEVEL(WARN);
86 CONV_LEVEL(INFO);
87 CONV_LEVEL(DEBUG);
88 CONV_LEVEL(PARANOIA);
89 CONV_LEVEL(TRACE);
90 CONV_LEVEL(SPAM);
91 }
92 return NV_PRINTK_DEBUG;
93}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
index 31cc8fe8e7f0..054d9cff4f53 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
@@ -150,7 +150,7 @@ dp_link_train_update(struct dp_state *dp, u32 delay)
150 if (ret) 150 if (ret)
151 return ret; 151 return ret;
152 152
153 DBG("status %*ph\n", 6, dp->stat); 153 DBG("status %6ph\n", dp->stat);
154 return 0; 154 return 0;
155} 155}
156 156
diff --git a/drivers/gpu/drm/nouveau/core/include/core/printk.h b/drivers/gpu/drm/nouveau/core/include/core/printk.h
index febed2ea5c80..d87836e3a704 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/printk.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/printk.h
@@ -15,6 +15,12 @@ struct nouveau_object;
15#define NV_PRINTK_TRACE KERN_DEBUG 15#define NV_PRINTK_TRACE KERN_DEBUG
16#define NV_PRINTK_SPAM KERN_DEBUG 16#define NV_PRINTK_SPAM KERN_DEBUG
17 17
18extern int nv_printk_suspend_level;
19
20#define NV_DBG_SUSPEND (nv_printk_suspend_level)
21#define NV_PRINTK_SUSPEND (nv_printk_level_to_pfx(nv_printk_suspend_level))
22
23const char *nv_printk_level_to_pfx(int level);
18void __printf(4, 5) 24void __printf(4, 5)
19nv_printk_(struct nouveau_object *, const char *, int, const char *, ...); 25nv_printk_(struct nouveau_object *, const char *, int, const char *, ...);
20 26
@@ -31,6 +37,13 @@ nv_printk_(struct nouveau_object *, const char *, int, const char *, ...);
31#define nv_trace(o,f,a...) nv_printk((o), TRACE, f, ##a) 37#define nv_trace(o,f,a...) nv_printk((o), TRACE, f, ##a)
32#define nv_spam(o,f,a...) nv_printk((o), SPAM, f, ##a) 38#define nv_spam(o,f,a...) nv_printk((o), SPAM, f, ##a)
33 39
40#define nv_suspend(o,f,a...) nv_printk((o), SUSPEND, f, ##a)
41
42static inline void nv_suspend_set_printk_level(int level)
43{
44 nv_printk_suspend_level = level;
45}
46
34#define nv_assert(f,a...) do { \ 47#define nv_assert(f,a...) do { \
35 if (NV_DBG_FATAL <= CONFIG_NOUVEAU_DEBUG) \ 48 if (NV_DBG_FATAL <= CONFIG_NOUVEAU_DEBUG) \
36 nv_printk_(NULL, NV_PRINTK_FATAL, NV_DBG_FATAL, f "\n", ##a); \ 49 nv_printk_(NULL, NV_PRINTK_FATAL, NV_DBG_FATAL, f "\n", ##a); \
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index 0687e6481438..2e11ea02cf87 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -2165,7 +2165,7 @@ nvbios_init(struct nouveau_subdev *subdev, bool execute)
2165 u16 data; 2165 u16 data;
2166 2166
2167 if (execute) 2167 if (execute)
2168 nv_info(bios, "running init tables\n"); 2168 nv_suspend(bios, "running init tables\n");
2169 while (!ret && (data = (init_script(bios, ++i)))) { 2169 while (!ret && (data = (init_script(bios, ++i)))) {
2170 struct nvbios_init init = { 2170 struct nvbios_init init = {
2171 .subdev = subdev, 2171 .subdev = subdev,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
index 1c0330b8c9a4..2e7c5fd3de3d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
@@ -23,16 +23,20 @@
23 */ 23 */
24 24
25#include <subdev/mc.h> 25#include <subdev/mc.h>
26#include <linux/pm_runtime.h>
26 27
27static irqreturn_t 28static irqreturn_t
28nouveau_mc_intr(int irq, void *arg) 29nouveau_mc_intr(int irq, void *arg)
29{ 30{
30 struct nouveau_mc *pmc = arg; 31 struct nouveau_mc *pmc = arg;
31 const struct nouveau_mc_intr *map = pmc->intr_map; 32 const struct nouveau_mc_intr *map = pmc->intr_map;
33 struct nouveau_device *device = nv_device(pmc);
32 struct nouveau_subdev *unit; 34 struct nouveau_subdev *unit;
33 u32 stat, intr; 35 u32 stat, intr;
34 36
35 intr = stat = nv_rd32(pmc, 0x000100); 37 intr = stat = nv_rd32(pmc, 0x000100);
38 if (intr == 0xffffffff)
39 return IRQ_NONE;
36 while (stat && map->stat) { 40 while (stat && map->stat) {
37 if (stat & map->stat) { 41 if (stat & map->stat) {
38 unit = nouveau_subdev(pmc, map->unit); 42 unit = nouveau_subdev(pmc, map->unit);
@@ -47,6 +51,8 @@ nouveau_mc_intr(int irq, void *arg)
47 nv_error(pmc, "unknown intr 0x%08x\n", stat); 51 nv_error(pmc, "unknown intr 0x%08x\n", stat);
48 } 52 }
49 53
54 if (stat == IRQ_HANDLED)
55 pm_runtime_mark_last_busy(&device->pdev->dev);
50 return stat ? IRQ_HANDLED : IRQ_NONE; 56 return stat ? IRQ_HANDLED : IRQ_NONE;
51} 57}
52 58
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 0782bd2f1e04..6413552df21c 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -22,6 +22,7 @@
22 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 22 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 * DEALINGS IN THE SOFTWARE. 23 * DEALINGS IN THE SOFTWARE.
24 */ 24 */
25#include <linux/pm_runtime.h>
25 26
26#include <drm/drmP.h> 27#include <drm/drmP.h>
27#include <drm/drm_crtc_helper.h> 28#include <drm/drm_crtc_helper.h>
@@ -1007,13 +1008,59 @@ nv04_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
1007 return 0; 1008 return 0;
1008} 1009}
1009 1010
1011int
1012nouveau_crtc_set_config(struct drm_mode_set *set)
1013{
1014 struct drm_device *dev;
1015 struct nouveau_drm *drm;
1016 int ret;
1017 struct drm_crtc *crtc;
1018 bool active = false;
1019 if (!set || !set->crtc)
1020 return -EINVAL;
1021
1022 dev = set->crtc->dev;
1023
1024 /* get a pm reference here */
1025 ret = pm_runtime_get_sync(dev->dev);
1026 if (ret < 0)
1027 return ret;
1028
1029 ret = drm_crtc_helper_set_config(set);
1030
1031 drm = nouveau_drm(dev);
1032
1033 /* if we get here with no crtcs active then we can drop a reference */
1034 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1035 if (crtc->enabled)
1036 active = true;
1037 }
1038
1039 pm_runtime_mark_last_busy(dev->dev);
1040 /* if we have active crtcs and we don't have a power ref,
1041 take the current one */
1042 if (active && !drm->have_disp_power_ref) {
1043 drm->have_disp_power_ref = true;
1044 return ret;
1045 }
1046 /* if we have no active crtcs, then drop the power ref
1047 we got before */
1048 if (!active && drm->have_disp_power_ref) {
1049 pm_runtime_put_autosuspend(dev->dev);
1050 drm->have_disp_power_ref = false;
1051 }
1052 /* drop the power reference we got coming in here */
1053 pm_runtime_put_autosuspend(dev->dev);
1054 return ret;
1055}
1056
1010static const struct drm_crtc_funcs nv04_crtc_funcs = { 1057static const struct drm_crtc_funcs nv04_crtc_funcs = {
1011 .save = nv_crtc_save, 1058 .save = nv_crtc_save,
1012 .restore = nv_crtc_restore, 1059 .restore = nv_crtc_restore,
1013 .cursor_set = nv04_crtc_cursor_set, 1060 .cursor_set = nv04_crtc_cursor_set,
1014 .cursor_move = nv04_crtc_cursor_move, 1061 .cursor_move = nv04_crtc_cursor_move,
1015 .gamma_set = nv_crtc_gamma_set, 1062 .gamma_set = nv_crtc_gamma_set,
1016 .set_config = drm_crtc_helper_set_config, 1063 .set_config = nouveau_crtc_set_config,
1017 .page_flip = nouveau_crtc_page_flip, 1064 .page_flip = nouveau_crtc_page_flip,
1018 .destroy = nv_crtc_destroy, 1065 .destroy = nv_crtc_destroy,
1019}; 1066};
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index d97f20069d3e..dd7d2e182719 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -25,8 +25,27 @@
25#define NOUVEAU_DSM_POWER_SPEED 0x01 25#define NOUVEAU_DSM_POWER_SPEED 0x01
26#define NOUVEAU_DSM_POWER_STAMINA 0x02 26#define NOUVEAU_DSM_POWER_STAMINA 0x02
27 27
28#define NOUVEAU_DSM_OPTIMUS_FN 0x1A 28#define NOUVEAU_DSM_OPTIMUS_CAPS 0x1A
29#define NOUVEAU_DSM_OPTIMUS_ARGS 0x03000001 29#define NOUVEAU_DSM_OPTIMUS_FLAGS 0x1B
30
31#define NOUVEAU_DSM_OPTIMUS_POWERDOWN_PS3 (3 << 24)
32#define NOUVEAU_DSM_OPTIMUS_NO_POWERDOWN_PS3 (2 << 24)
33#define NOUVEAU_DSM_OPTIMUS_FLAGS_CHANGED (1)
34
35#define NOUVEAU_DSM_OPTIMUS_SET_POWERDOWN (NOUVEAU_DSM_OPTIMUS_POWERDOWN_PS3 | NOUVEAU_DSM_OPTIMUS_FLAGS_CHANGED)
36
37/* result of the optimus caps function */
38#define OPTIMUS_ENABLED (1 << 0)
39#define OPTIMUS_STATUS_MASK (3 << 3)
40#define OPTIMUS_STATUS_OFF (0 << 3)
41#define OPTIMUS_STATUS_ON_ENABLED (1 << 3)
42#define OPTIMUS_STATUS_PWR_STABLE (3 << 3)
43#define OPTIMUS_DISPLAY_HOTPLUG (1 << 6)
44#define OPTIMUS_CAPS_MASK (7 << 24)
45#define OPTIMUS_DYNAMIC_PWR_CAP (1 << 24)
46
47#define OPTIMUS_AUDIO_CAPS_MASK (3 << 27)
48#define OPTIMUS_HDA_CODEC_MASK (2 << 27) /* hda bios control */
30 49
31static struct nouveau_dsm_priv { 50static struct nouveau_dsm_priv {
32 bool dsm_detected; 51 bool dsm_detected;
@@ -251,9 +270,18 @@ static int nouveau_dsm_pci_probe(struct pci_dev *pdev)
251 retval |= NOUVEAU_DSM_HAS_MUX; 270 retval |= NOUVEAU_DSM_HAS_MUX;
252 271
253 if (nouveau_test_dsm(dhandle, nouveau_optimus_dsm, 272 if (nouveau_test_dsm(dhandle, nouveau_optimus_dsm,
254 NOUVEAU_DSM_OPTIMUS_FN)) 273 NOUVEAU_DSM_OPTIMUS_CAPS))
255 retval |= NOUVEAU_DSM_HAS_OPT; 274 retval |= NOUVEAU_DSM_HAS_OPT;
256 275
276 if (retval & NOUVEAU_DSM_HAS_OPT) {
277 uint32_t result;
278 nouveau_optimus_dsm(dhandle, NOUVEAU_DSM_OPTIMUS_CAPS, 0,
279 &result);
280 dev_info(&pdev->dev, "optimus capabilities: %s, status %s%s\n",
281 (result & OPTIMUS_ENABLED) ? "enabled" : "disabled",
282 (result & OPTIMUS_DYNAMIC_PWR_CAP) ? "dynamic power, " : "",
283 (result & OPTIMUS_HDA_CODEC_MASK) ? "hda bios codec supported" : "");
284 }
257 if (retval) 285 if (retval)
258 nouveau_dsm_priv.dhandle = dhandle; 286 nouveau_dsm_priv.dhandle = dhandle;
259 287
@@ -328,8 +356,12 @@ void nouveau_switcheroo_optimus_dsm(void)
328 if (!nouveau_dsm_priv.optimus_detected) 356 if (!nouveau_dsm_priv.optimus_detected)
329 return; 357 return;
330 358
331 nouveau_optimus_dsm(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_OPTIMUS_FN, 359 nouveau_optimus_dsm(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_OPTIMUS_FLAGS,
332 NOUVEAU_DSM_OPTIMUS_ARGS, &result); 360 0x3, &result);
361
362 nouveau_optimus_dsm(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_OPTIMUS_CAPS,
363 NOUVEAU_DSM_OPTIMUS_SET_POWERDOWN, &result);
364
333} 365}
334 366
335void nouveau_unregister_dsm_handler(void) 367void nouveau_unregister_dsm_handler(void)
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 4e7ee5f4155c..e4444bacd0b2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1260,7 +1260,9 @@ out:
1260static int 1260static int
1261nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp) 1261nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
1262{ 1262{
1263 return 0; 1263 struct nouveau_bo *nvbo = nouveau_bo(bo);
1264
1265 return drm_vma_node_verify_access(&nvbo->gem->vma_node, filp);
1264} 1266}
1265 1267
1266static int 1268static int
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 4da776f344d7..c5b36f9e9a10 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -26,6 +26,8 @@
26 26
27#include <acpi/button.h> 27#include <acpi/button.h>
28 28
29#include <linux/pm_runtime.h>
30
29#include <drm/drmP.h> 31#include <drm/drmP.h>
30#include <drm/drm_edid.h> 32#include <drm/drm_edid.h>
31#include <drm/drm_crtc_helper.h> 33#include <drm/drm_crtc_helper.h>
@@ -240,6 +242,8 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
240 struct nouveau_encoder *nv_partner; 242 struct nouveau_encoder *nv_partner;
241 struct nouveau_i2c_port *i2c; 243 struct nouveau_i2c_port *i2c;
242 int type; 244 int type;
245 int ret;
246 enum drm_connector_status conn_status = connector_status_disconnected;
243 247
244 /* Cleanup the previous EDID block. */ 248 /* Cleanup the previous EDID block. */
245 if (nv_connector->edid) { 249 if (nv_connector->edid) {
@@ -248,6 +252,10 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
248 nv_connector->edid = NULL; 252 nv_connector->edid = NULL;
249 } 253 }
250 254
255 ret = pm_runtime_get_sync(connector->dev->dev);
256 if (ret < 0)
257 return conn_status;
258
251 i2c = nouveau_connector_ddc_detect(connector, &nv_encoder); 259 i2c = nouveau_connector_ddc_detect(connector, &nv_encoder);
252 if (i2c) { 260 if (i2c) {
253 nv_connector->edid = drm_get_edid(connector, &i2c->adapter); 261 nv_connector->edid = drm_get_edid(connector, &i2c->adapter);
@@ -263,7 +271,8 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
263 !nouveau_dp_detect(to_drm_encoder(nv_encoder))) { 271 !nouveau_dp_detect(to_drm_encoder(nv_encoder))) {
264 NV_ERROR(drm, "Detected %s, but failed init\n", 272 NV_ERROR(drm, "Detected %s, but failed init\n",
265 drm_get_connector_name(connector)); 273 drm_get_connector_name(connector));
266 return connector_status_disconnected; 274 conn_status = connector_status_disconnected;
275 goto out;
267 } 276 }
268 277
269 /* Override encoder type for DVI-I based on whether EDID 278 /* Override encoder type for DVI-I based on whether EDID
@@ -290,13 +299,15 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
290 } 299 }
291 300
292 nouveau_connector_set_encoder(connector, nv_encoder); 301 nouveau_connector_set_encoder(connector, nv_encoder);
293 return connector_status_connected; 302 conn_status = connector_status_connected;
303 goto out;
294 } 304 }
295 305
296 nv_encoder = nouveau_connector_of_detect(connector); 306 nv_encoder = nouveau_connector_of_detect(connector);
297 if (nv_encoder) { 307 if (nv_encoder) {
298 nouveau_connector_set_encoder(connector, nv_encoder); 308 nouveau_connector_set_encoder(connector, nv_encoder);
299 return connector_status_connected; 309 conn_status = connector_status_connected;
310 goto out;
300 } 311 }
301 312
302detect_analog: 313detect_analog:
@@ -311,12 +322,18 @@ detect_analog:
311 if (helper->detect(encoder, connector) == 322 if (helper->detect(encoder, connector) ==
312 connector_status_connected) { 323 connector_status_connected) {
313 nouveau_connector_set_encoder(connector, nv_encoder); 324 nouveau_connector_set_encoder(connector, nv_encoder);
314 return connector_status_connected; 325 conn_status = connector_status_connected;
326 goto out;
315 } 327 }
316 328
317 } 329 }
318 330
319 return connector_status_disconnected; 331 out:
332
333 pm_runtime_mark_last_busy(connector->dev->dev);
334 pm_runtime_put_autosuspend(connector->dev->dev);
335
336 return conn_status;
320} 337}
321 338
322static enum drm_connector_status 339static enum drm_connector_status
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 907d20ef6d4d..44202bf7b819 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -394,7 +394,7 @@ nouveau_display_suspend(struct drm_device *dev)
394 394
395 nouveau_display_fini(dev); 395 nouveau_display_fini(dev);
396 396
397 NV_INFO(drm, "unpinning framebuffer(s)...\n"); 397 NV_SUSPEND(drm, "unpinning framebuffer(s)...\n");
398 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 398 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
399 struct nouveau_framebuffer *nouveau_fb; 399 struct nouveau_framebuffer *nouveau_fb;
400 400
@@ -416,7 +416,7 @@ nouveau_display_suspend(struct drm_device *dev)
416} 416}
417 417
418void 418void
419nouveau_display_resume(struct drm_device *dev) 419nouveau_display_repin(struct drm_device *dev)
420{ 420{
421 struct nouveau_drm *drm = nouveau_drm(dev); 421 struct nouveau_drm *drm = nouveau_drm(dev);
422 struct drm_crtc *crtc; 422 struct drm_crtc *crtc;
@@ -441,10 +441,12 @@ nouveau_display_resume(struct drm_device *dev)
441 if (ret) 441 if (ret)
442 NV_ERROR(drm, "Could not pin/map cursor.\n"); 442 NV_ERROR(drm, "Could not pin/map cursor.\n");
443 } 443 }
444}
444 445
445 nouveau_fbcon_set_suspend(dev, 0); 446void
446 nouveau_fbcon_zfill_all(dev); 447nouveau_display_resume(struct drm_device *dev)
447 448{
449 struct drm_crtc *crtc;
448 nouveau_display_init(dev); 450 nouveau_display_init(dev);
449 451
450 /* Force CLUT to get re-loaded during modeset */ 452 /* Force CLUT to get re-loaded during modeset */
@@ -519,7 +521,8 @@ fail:
519 521
520int 522int
521nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, 523nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
522 struct drm_pending_vblank_event *event) 524 struct drm_pending_vblank_event *event,
525 uint32_t page_flip_flags)
523{ 526{
524 struct drm_device *dev = crtc->dev; 527 struct drm_device *dev = crtc->dev;
525 struct nouveau_drm *drm = nouveau_drm(dev); 528 struct nouveau_drm *drm = nouveau_drm(dev);
@@ -674,13 +677,6 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
674} 677}
675 678
676int 679int
677nouveau_display_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
678 uint32_t handle)
679{
680 return drm_gem_handle_delete(file_priv, handle);
681}
682
683int
684nouveau_display_dumb_map_offset(struct drm_file *file_priv, 680nouveau_display_dumb_map_offset(struct drm_file *file_priv,
685 struct drm_device *dev, 681 struct drm_device *dev,
686 uint32_t handle, uint64_t *poffset) 682 uint32_t handle, uint64_t *poffset)
@@ -690,7 +686,7 @@ nouveau_display_dumb_map_offset(struct drm_file *file_priv,
690 gem = drm_gem_object_lookup(dev, file_priv, handle); 686 gem = drm_gem_object_lookup(dev, file_priv, handle);
691 if (gem) { 687 if (gem) {
692 struct nouveau_bo *bo = gem->driver_private; 688 struct nouveau_bo *bo = gem->driver_private;
693 *poffset = bo->bo.addr_space_offset; 689 *poffset = drm_vma_node_offset_addr(&bo->bo.vma_node);
694 drm_gem_object_unreference_unlocked(gem); 690 drm_gem_object_unreference_unlocked(gem);
695 return 0; 691 return 0;
696 } 692 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 1ea3e4734b62..025c66f8e0ed 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -57,10 +57,12 @@ void nouveau_display_destroy(struct drm_device *dev);
57int nouveau_display_init(struct drm_device *dev); 57int nouveau_display_init(struct drm_device *dev);
58void nouveau_display_fini(struct drm_device *dev); 58void nouveau_display_fini(struct drm_device *dev);
59int nouveau_display_suspend(struct drm_device *dev); 59int nouveau_display_suspend(struct drm_device *dev);
60void nouveau_display_repin(struct drm_device *dev);
60void nouveau_display_resume(struct drm_device *dev); 61void nouveau_display_resume(struct drm_device *dev);
61 62
62int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, 63int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
63 struct drm_pending_vblank_event *event); 64 struct drm_pending_vblank_event *event,
65 uint32_t page_flip_flags);
64int nouveau_finish_page_flip(struct nouveau_channel *, 66int nouveau_finish_page_flip(struct nouveau_channel *,
65 struct nouveau_page_flip_state *); 67 struct nouveau_page_flip_state *);
66 68
@@ -68,11 +70,10 @@ int nouveau_display_dumb_create(struct drm_file *, struct drm_device *,
68 struct drm_mode_create_dumb *args); 70 struct drm_mode_create_dumb *args);
69int nouveau_display_dumb_map_offset(struct drm_file *, struct drm_device *, 71int nouveau_display_dumb_map_offset(struct drm_file *, struct drm_device *,
70 u32 handle, u64 *offset); 72 u32 handle, u64 *offset);
71int nouveau_display_dumb_destroy(struct drm_file *, struct drm_device *,
72 u32 handle);
73 73
74void nouveau_hdmi_mode_set(struct drm_encoder *, struct drm_display_mode *); 74void nouveau_hdmi_mode_set(struct drm_encoder *, struct drm_display_mode *);
75 75
76int nouveau_crtc_set_config(struct drm_mode_set *set);
76#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT 77#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
77extern int nouveau_backlight_init(struct drm_device *); 78extern int nouveau_backlight_init(struct drm_device *);
78extern void nouveau_backlight_exit(struct drm_device *); 79extern void nouveau_backlight_exit(struct drm_device *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 61972668fd05..62c6118e94c0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -25,7 +25,10 @@
25#include <linux/console.h> 25#include <linux/console.h>
26#include <linux/module.h> 26#include <linux/module.h>
27#include <linux/pci.h> 27#include <linux/pci.h>
28 28#include <linux/pm_runtime.h>
29#include <linux/vga_switcheroo.h>
30#include "drmP.h"
31#include "drm_crtc_helper.h"
29#include <core/device.h> 32#include <core/device.h>
30#include <core/client.h> 33#include <core/client.h>
31#include <core/gpuobj.h> 34#include <core/gpuobj.h>
@@ -69,6 +72,10 @@ MODULE_PARM_DESC(modeset, "enable driver (default: auto, "
69int nouveau_modeset = -1; 72int nouveau_modeset = -1;
70module_param_named(modeset, nouveau_modeset, int, 0400); 73module_param_named(modeset, nouveau_modeset, int, 0400);
71 74
75MODULE_PARM_DESC(runpm, "disable (0), force enable (1), optimus only default (-1)");
76int nouveau_runtime_pm = -1;
77module_param_named(runpm, nouveau_runtime_pm, int, 0400);
78
72static struct drm_driver driver; 79static struct drm_driver driver;
73 80
74static int 81static int
@@ -296,6 +303,31 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
296 return 0; 303 return 0;
297} 304}
298 305
306#define PCI_CLASS_MULTIMEDIA_HD_AUDIO 0x0403
307
308static void
309nouveau_get_hdmi_dev(struct drm_device *dev)
310{
311 struct nouveau_drm *drm = dev->dev_private;
312 struct pci_dev *pdev = dev->pdev;
313
314 /* subfunction one is a hdmi audio device? */
315 drm->hdmi_device = pci_get_bus_and_slot((unsigned int)pdev->bus->number,
316 PCI_DEVFN(PCI_SLOT(pdev->devfn), 1));
317
318 if (!drm->hdmi_device) {
319 DRM_INFO("hdmi device not found %d %d %d\n", pdev->bus->number, PCI_SLOT(pdev->devfn), 1);
320 return;
321 }
322
323 if ((drm->hdmi_device->class >> 8) != PCI_CLASS_MULTIMEDIA_HD_AUDIO) {
324 DRM_INFO("possible hdmi device not audio %d\n", drm->hdmi_device->class);
325 pci_dev_put(drm->hdmi_device);
326 drm->hdmi_device = NULL;
327 return;
328 }
329}
330
299static int 331static int
300nouveau_drm_load(struct drm_device *dev, unsigned long flags) 332nouveau_drm_load(struct drm_device *dev, unsigned long flags)
301{ 333{
@@ -314,6 +346,8 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
314 INIT_LIST_HEAD(&drm->clients); 346 INIT_LIST_HEAD(&drm->clients);
315 spin_lock_init(&drm->tile.lock); 347 spin_lock_init(&drm->tile.lock);
316 348
349 nouveau_get_hdmi_dev(dev);
350
317 /* make sure AGP controller is in a consistent state before we 351 /* make sure AGP controller is in a consistent state before we
318 * (possibly) execute vbios init tables (see nouveau_agp.h) 352 * (possibly) execute vbios init tables (see nouveau_agp.h)
319 */ 353 */
@@ -388,6 +422,15 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
388 422
389 nouveau_accel_init(drm); 423 nouveau_accel_init(drm);
390 nouveau_fbcon_init(dev); 424 nouveau_fbcon_init(dev);
425
426 if (nouveau_runtime_pm != 0) {
427 pm_runtime_use_autosuspend(dev->dev);
428 pm_runtime_set_autosuspend_delay(dev->dev, 5000);
429 pm_runtime_set_active(dev->dev);
430 pm_runtime_allow(dev->dev);
431 pm_runtime_mark_last_busy(dev->dev);
432 pm_runtime_put(dev->dev);
433 }
391 return 0; 434 return 0;
392 435
393fail_dispinit: 436fail_dispinit:
@@ -409,6 +452,7 @@ nouveau_drm_unload(struct drm_device *dev)
409{ 452{
410 struct nouveau_drm *drm = nouveau_drm(dev); 453 struct nouveau_drm *drm = nouveau_drm(dev);
411 454
455 pm_runtime_get_sync(dev->dev);
412 nouveau_fbcon_fini(dev); 456 nouveau_fbcon_fini(dev);
413 nouveau_accel_fini(drm); 457 nouveau_accel_fini(drm);
414 458
@@ -424,6 +468,8 @@ nouveau_drm_unload(struct drm_device *dev)
424 nouveau_agp_fini(drm); 468 nouveau_agp_fini(drm);
425 nouveau_vga_fini(drm); 469 nouveau_vga_fini(drm);
426 470
471 if (drm->hdmi_device)
472 pci_dev_put(drm->hdmi_device);
427 nouveau_cli_destroy(&drm->client); 473 nouveau_cli_destroy(&drm->client);
428 return 0; 474 return 0;
429} 475}
@@ -450,19 +496,16 @@ nouveau_do_suspend(struct drm_device *dev)
450 int ret; 496 int ret;
451 497
452 if (dev->mode_config.num_crtc) { 498 if (dev->mode_config.num_crtc) {
453 NV_INFO(drm, "suspending fbcon...\n"); 499 NV_SUSPEND(drm, "suspending display...\n");
454 nouveau_fbcon_set_suspend(dev, 1);
455
456 NV_INFO(drm, "suspending display...\n");
457 ret = nouveau_display_suspend(dev); 500 ret = nouveau_display_suspend(dev);
458 if (ret) 501 if (ret)
459 return ret; 502 return ret;
460 } 503 }
461 504
462 NV_INFO(drm, "evicting buffers...\n"); 505 NV_SUSPEND(drm, "evicting buffers...\n");
463 ttm_bo_evict_mm(&drm->ttm.bdev, TTM_PL_VRAM); 506 ttm_bo_evict_mm(&drm->ttm.bdev, TTM_PL_VRAM);
464 507
465 NV_INFO(drm, "waiting for kernel channels to go idle...\n"); 508 NV_SUSPEND(drm, "waiting for kernel channels to go idle...\n");
466 if (drm->cechan) { 509 if (drm->cechan) {
467 ret = nouveau_channel_idle(drm->cechan); 510 ret = nouveau_channel_idle(drm->cechan);
468 if (ret) 511 if (ret)
@@ -475,7 +518,7 @@ nouveau_do_suspend(struct drm_device *dev)
475 return ret; 518 return ret;
476 } 519 }
477 520
478 NV_INFO(drm, "suspending client object trees...\n"); 521 NV_SUSPEND(drm, "suspending client object trees...\n");
479 if (drm->fence && nouveau_fence(drm)->suspend) { 522 if (drm->fence && nouveau_fence(drm)->suspend) {
480 if (!nouveau_fence(drm)->suspend(drm)) 523 if (!nouveau_fence(drm)->suspend(drm))
481 return -ENOMEM; 524 return -ENOMEM;
@@ -487,7 +530,7 @@ nouveau_do_suspend(struct drm_device *dev)
487 goto fail_client; 530 goto fail_client;
488 } 531 }
489 532
490 NV_INFO(drm, "suspending kernel object tree...\n"); 533 NV_SUSPEND(drm, "suspending kernel object tree...\n");
491 ret = nouveau_client_fini(&drm->client.base, true); 534 ret = nouveau_client_fini(&drm->client.base, true);
492 if (ret) 535 if (ret)
493 goto fail_client; 536 goto fail_client;
@@ -501,7 +544,7 @@ fail_client:
501 } 544 }
502 545
503 if (dev->mode_config.num_crtc) { 546 if (dev->mode_config.num_crtc) {
504 NV_INFO(drm, "resuming display...\n"); 547 NV_SUSPEND(drm, "resuming display...\n");
505 nouveau_display_resume(dev); 548 nouveau_display_resume(dev);
506 } 549 }
507 return ret; 550 return ret;
@@ -513,9 +556,14 @@ int nouveau_pmops_suspend(struct device *dev)
513 struct drm_device *drm_dev = pci_get_drvdata(pdev); 556 struct drm_device *drm_dev = pci_get_drvdata(pdev);
514 int ret; 557 int ret;
515 558
516 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) 559 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
560 drm_dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
517 return 0; 561 return 0;
518 562
563 if (drm_dev->mode_config.num_crtc)
564 nouveau_fbcon_set_suspend(drm_dev, 1);
565
566 nv_suspend_set_printk_level(NV_DBG_INFO);
519 ret = nouveau_do_suspend(drm_dev); 567 ret = nouveau_do_suspend(drm_dev);
520 if (ret) 568 if (ret)
521 return ret; 569 return ret;
@@ -523,6 +571,7 @@ int nouveau_pmops_suspend(struct device *dev)
523 pci_save_state(pdev); 571 pci_save_state(pdev);
524 pci_disable_device(pdev); 572 pci_disable_device(pdev);
525 pci_set_power_state(pdev, PCI_D3hot); 573 pci_set_power_state(pdev, PCI_D3hot);
574 nv_suspend_set_printk_level(NV_DBG_DEBUG);
526 575
527 return 0; 576 return 0;
528} 577}
@@ -533,15 +582,15 @@ nouveau_do_resume(struct drm_device *dev)
533 struct nouveau_drm *drm = nouveau_drm(dev); 582 struct nouveau_drm *drm = nouveau_drm(dev);
534 struct nouveau_cli *cli; 583 struct nouveau_cli *cli;
535 584
536 NV_INFO(drm, "re-enabling device...\n"); 585 NV_SUSPEND(drm, "re-enabling device...\n");
537 586
538 nouveau_agp_reset(drm); 587 nouveau_agp_reset(drm);
539 588
540 NV_INFO(drm, "resuming kernel object tree...\n"); 589 NV_SUSPEND(drm, "resuming kernel object tree...\n");
541 nouveau_client_init(&drm->client.base); 590 nouveau_client_init(&drm->client.base);
542 nouveau_agp_init(drm); 591 nouveau_agp_init(drm);
543 592
544 NV_INFO(drm, "resuming client object trees...\n"); 593 NV_SUSPEND(drm, "resuming client object trees...\n");
545 if (drm->fence && nouveau_fence(drm)->resume) 594 if (drm->fence && nouveau_fence(drm)->resume)
546 nouveau_fence(drm)->resume(drm); 595 nouveau_fence(drm)->resume(drm);
547 596
@@ -553,9 +602,10 @@ nouveau_do_resume(struct drm_device *dev)
553 nouveau_pm_resume(dev); 602 nouveau_pm_resume(dev);
554 603
555 if (dev->mode_config.num_crtc) { 604 if (dev->mode_config.num_crtc) {
556 NV_INFO(drm, "resuming display...\n"); 605 NV_SUSPEND(drm, "resuming display...\n");
557 nouveau_display_resume(dev); 606 nouveau_display_repin(dev);
558 } 607 }
608
559 return 0; 609 return 0;
560} 610}
561 611
@@ -565,7 +615,8 @@ int nouveau_pmops_resume(struct device *dev)
565 struct drm_device *drm_dev = pci_get_drvdata(pdev); 615 struct drm_device *drm_dev = pci_get_drvdata(pdev);
566 int ret; 616 int ret;
567 617
568 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) 618 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
619 drm_dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
569 return 0; 620 return 0;
570 621
571 pci_set_power_state(pdev, PCI_D0); 622 pci_set_power_state(pdev, PCI_D0);
@@ -575,23 +626,54 @@ int nouveau_pmops_resume(struct device *dev)
575 return ret; 626 return ret;
576 pci_set_master(pdev); 627 pci_set_master(pdev);
577 628
578 return nouveau_do_resume(drm_dev); 629 nv_suspend_set_printk_level(NV_DBG_INFO);
630 ret = nouveau_do_resume(drm_dev);
631 if (ret) {
632 nv_suspend_set_printk_level(NV_DBG_DEBUG);
633 return ret;
634 }
635 if (drm_dev->mode_config.num_crtc)
636 nouveau_fbcon_set_suspend(drm_dev, 0);
637
638 nouveau_fbcon_zfill_all(drm_dev);
639 nouveau_display_resume(drm_dev);
640 nv_suspend_set_printk_level(NV_DBG_DEBUG);
641 return 0;
579} 642}
580 643
581static int nouveau_pmops_freeze(struct device *dev) 644static int nouveau_pmops_freeze(struct device *dev)
582{ 645{
583 struct pci_dev *pdev = to_pci_dev(dev); 646 struct pci_dev *pdev = to_pci_dev(dev);
584 struct drm_device *drm_dev = pci_get_drvdata(pdev); 647 struct drm_device *drm_dev = pci_get_drvdata(pdev);
648 int ret;
649
650 nv_suspend_set_printk_level(NV_DBG_INFO);
651 if (drm_dev->mode_config.num_crtc)
652 nouveau_fbcon_set_suspend(drm_dev, 1);
585 653
586 return nouveau_do_suspend(drm_dev); 654 ret = nouveau_do_suspend(drm_dev);
655 nv_suspend_set_printk_level(NV_DBG_DEBUG);
656 return ret;
587} 657}
588 658
589static int nouveau_pmops_thaw(struct device *dev) 659static int nouveau_pmops_thaw(struct device *dev)
590{ 660{
591 struct pci_dev *pdev = to_pci_dev(dev); 661 struct pci_dev *pdev = to_pci_dev(dev);
592 struct drm_device *drm_dev = pci_get_drvdata(pdev); 662 struct drm_device *drm_dev = pci_get_drvdata(pdev);
663 int ret;
593 664
594 return nouveau_do_resume(drm_dev); 665 nv_suspend_set_printk_level(NV_DBG_INFO);
666 ret = nouveau_do_resume(drm_dev);
667 if (ret) {
668 nv_suspend_set_printk_level(NV_DBG_DEBUG);
669 return ret;
670 }
671 if (drm_dev->mode_config.num_crtc)
672 nouveau_fbcon_set_suspend(drm_dev, 0);
673 nouveau_fbcon_zfill_all(drm_dev);
674 nouveau_display_resume(drm_dev);
675 nv_suspend_set_printk_level(NV_DBG_DEBUG);
676 return 0;
595} 677}
596 678
597 679
@@ -604,19 +686,24 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
604 char name[32], tmpname[TASK_COMM_LEN]; 686 char name[32], tmpname[TASK_COMM_LEN];
605 int ret; 687 int ret;
606 688
689 /* need to bring up power immediately if opening device */
690 ret = pm_runtime_get_sync(dev->dev);
691 if (ret < 0)
692 return ret;
693
607 get_task_comm(tmpname, current); 694 get_task_comm(tmpname, current);
608 snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid)); 695 snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid));
609 696
610 ret = nouveau_cli_create(pdev, name, sizeof(*cli), (void **)&cli); 697 ret = nouveau_cli_create(pdev, name, sizeof(*cli), (void **)&cli);
611 if (ret) 698 if (ret)
612 return ret; 699 goto out_suspend;
613 700
614 if (nv_device(drm->device)->card_type >= NV_50) { 701 if (nv_device(drm->device)->card_type >= NV_50) {
615 ret = nouveau_vm_new(nv_device(drm->device), 0, (1ULL << 40), 702 ret = nouveau_vm_new(nv_device(drm->device), 0, (1ULL << 40),
616 0x1000, &cli->base.vm); 703 0x1000, &cli->base.vm);
617 if (ret) { 704 if (ret) {
618 nouveau_cli_destroy(cli); 705 nouveau_cli_destroy(cli);
619 return ret; 706 goto out_suspend;
620 } 707 }
621 } 708 }
622 709
@@ -625,7 +712,12 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
625 mutex_lock(&drm->client.mutex); 712 mutex_lock(&drm->client.mutex);
626 list_add(&cli->head, &drm->clients); 713 list_add(&cli->head, &drm->clients);
627 mutex_unlock(&drm->client.mutex); 714 mutex_unlock(&drm->client.mutex);
628 return 0; 715
716out_suspend:
717 pm_runtime_mark_last_busy(dev->dev);
718 pm_runtime_put_autosuspend(dev->dev);
719
720 return ret;
629} 721}
630 722
631static void 723static void
@@ -634,12 +726,15 @@ nouveau_drm_preclose(struct drm_device *dev, struct drm_file *fpriv)
634 struct nouveau_cli *cli = nouveau_cli(fpriv); 726 struct nouveau_cli *cli = nouveau_cli(fpriv);
635 struct nouveau_drm *drm = nouveau_drm(dev); 727 struct nouveau_drm *drm = nouveau_drm(dev);
636 728
729 pm_runtime_get_sync(dev->dev);
730
637 if (cli->abi16) 731 if (cli->abi16)
638 nouveau_abi16_fini(cli->abi16); 732 nouveau_abi16_fini(cli->abi16);
639 733
640 mutex_lock(&drm->client.mutex); 734 mutex_lock(&drm->client.mutex);
641 list_del(&cli->head); 735 list_del(&cli->head);
642 mutex_unlock(&drm->client.mutex); 736 mutex_unlock(&drm->client.mutex);
737
643} 738}
644 739
645static void 740static void
@@ -647,9 +742,11 @@ nouveau_drm_postclose(struct drm_device *dev, struct drm_file *fpriv)
647{ 742{
648 struct nouveau_cli *cli = nouveau_cli(fpriv); 743 struct nouveau_cli *cli = nouveau_cli(fpriv);
649 nouveau_cli_destroy(cli); 744 nouveau_cli_destroy(cli);
745 pm_runtime_mark_last_busy(dev->dev);
746 pm_runtime_put_autosuspend(dev->dev);
650} 747}
651 748
652static struct drm_ioctl_desc 749static const struct drm_ioctl_desc
653nouveau_ioctls[] = { 750nouveau_ioctls[] = {
654 DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_abi16_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH), 751 DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_abi16_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH),
655 DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_abi16_ioctl_setparam, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 752 DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_abi16_ioctl_setparam, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -665,15 +762,32 @@ nouveau_ioctls[] = {
665 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH), 762 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH),
666}; 763};
667 764
765long nouveau_drm_ioctl(struct file *filp,
766 unsigned int cmd, unsigned long arg)
767{
768 struct drm_file *file_priv = filp->private_data;
769 struct drm_device *dev;
770 long ret;
771 dev = file_priv->minor->dev;
772
773 ret = pm_runtime_get_sync(dev->dev);
774 if (ret < 0)
775 return ret;
776
777 ret = drm_ioctl(filp, cmd, arg);
778
779 pm_runtime_mark_last_busy(dev->dev);
780 pm_runtime_put_autosuspend(dev->dev);
781 return ret;
782}
668static const struct file_operations 783static const struct file_operations
669nouveau_driver_fops = { 784nouveau_driver_fops = {
670 .owner = THIS_MODULE, 785 .owner = THIS_MODULE,
671 .open = drm_open, 786 .open = drm_open,
672 .release = drm_release, 787 .release = drm_release,
673 .unlocked_ioctl = drm_ioctl, 788 .unlocked_ioctl = nouveau_drm_ioctl,
674 .mmap = nouveau_ttm_mmap, 789 .mmap = nouveau_ttm_mmap,
675 .poll = drm_poll, 790 .poll = drm_poll,
676 .fasync = drm_fasync,
677 .read = drm_read, 791 .read = drm_read,
678#if defined(CONFIG_COMPAT) 792#if defined(CONFIG_COMPAT)
679 .compat_ioctl = nouveau_compat_ioctl, 793 .compat_ioctl = nouveau_compat_ioctl,
@@ -684,7 +798,7 @@ nouveau_driver_fops = {
684static struct drm_driver 798static struct drm_driver
685driver = { 799driver = {
686 .driver_features = 800 .driver_features =
687 DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG | 801 DRIVER_USE_AGP |
688 DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME, 802 DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME,
689 803
690 .load = nouveau_drm_load, 804 .load = nouveau_drm_load,
@@ -704,6 +818,7 @@ driver = {
704 .disable_vblank = nouveau_drm_vblank_disable, 818 .disable_vblank = nouveau_drm_vblank_disable,
705 819
706 .ioctls = nouveau_ioctls, 820 .ioctls = nouveau_ioctls,
821 .num_ioctls = ARRAY_SIZE(nouveau_ioctls),
707 .fops = &nouveau_driver_fops, 822 .fops = &nouveau_driver_fops,
708 823
709 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 824 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
@@ -724,7 +839,7 @@ driver = {
724 839
725 .dumb_create = nouveau_display_dumb_create, 840 .dumb_create = nouveau_display_dumb_create,
726 .dumb_map_offset = nouveau_display_dumb_map_offset, 841 .dumb_map_offset = nouveau_display_dumb_map_offset,
727 .dumb_destroy = nouveau_display_dumb_destroy, 842 .dumb_destroy = drm_gem_dumb_destroy,
728 843
729 .name = DRIVER_NAME, 844 .name = DRIVER_NAME,
730 .desc = DRIVER_DESC, 845 .desc = DRIVER_DESC,
@@ -753,6 +868,90 @@ nouveau_drm_pci_table[] = {
753 {} 868 {}
754}; 869};
755 870
871static int nouveau_pmops_runtime_suspend(struct device *dev)
872{
873 struct pci_dev *pdev = to_pci_dev(dev);
874 struct drm_device *drm_dev = pci_get_drvdata(pdev);
875 int ret;
876
877 if (nouveau_runtime_pm == 0)
878 return -EINVAL;
879
880 drm_kms_helper_poll_disable(drm_dev);
881 vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
882 nouveau_switcheroo_optimus_dsm();
883 ret = nouveau_do_suspend(drm_dev);
884 pci_save_state(pdev);
885 pci_disable_device(pdev);
886 pci_set_power_state(pdev, PCI_D3cold);
887 drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
888 return ret;
889}
890
891static int nouveau_pmops_runtime_resume(struct device *dev)
892{
893 struct pci_dev *pdev = to_pci_dev(dev);
894 struct drm_device *drm_dev = pci_get_drvdata(pdev);
895 struct nouveau_device *device = nouveau_dev(drm_dev);
896 int ret;
897
898 if (nouveau_runtime_pm == 0)
899 return -EINVAL;
900
901 pci_set_power_state(pdev, PCI_D0);
902 pci_restore_state(pdev);
903 ret = pci_enable_device(pdev);
904 if (ret)
905 return ret;
906 pci_set_master(pdev);
907
908 ret = nouveau_do_resume(drm_dev);
909 nouveau_display_resume(drm_dev);
910 drm_kms_helper_poll_enable(drm_dev);
911 /* do magic */
912 nv_mask(device, 0x88488, (1 << 25), (1 << 25));
913 vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
914 drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
915 return ret;
916}
917
918static int nouveau_pmops_runtime_idle(struct device *dev)
919{
920 struct pci_dev *pdev = to_pci_dev(dev);
921 struct drm_device *drm_dev = pci_get_drvdata(pdev);
922 struct nouveau_drm *drm = nouveau_drm(drm_dev);
923 struct drm_crtc *crtc;
924
925 if (nouveau_runtime_pm == 0)
926 return -EBUSY;
927
928 /* are we optimus enabled? */
929 if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
930 DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
931 return -EBUSY;
932 }
933
934 /* if we have a hdmi audio device - make sure it has a driver loaded */
935 if (drm->hdmi_device) {
936 if (!drm->hdmi_device->driver) {
937 DRM_DEBUG_DRIVER("failing to power off - no HDMI audio driver loaded\n");
938 pm_runtime_mark_last_busy(dev);
939 return -EBUSY;
940 }
941 }
942
943 list_for_each_entry(crtc, &drm->dev->mode_config.crtc_list, head) {
944 if (crtc->enabled) {
945 DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
946 return -EBUSY;
947 }
948 }
949 pm_runtime_mark_last_busy(dev);
950 pm_runtime_autosuspend(dev);
951 /* we don't want the main rpm_idle to call suspend - we want to autosuspend */
952 return 1;
953}
954
756static const struct dev_pm_ops nouveau_pm_ops = { 955static const struct dev_pm_ops nouveau_pm_ops = {
757 .suspend = nouveau_pmops_suspend, 956 .suspend = nouveau_pmops_suspend,
758 .resume = nouveau_pmops_resume, 957 .resume = nouveau_pmops_resume,
@@ -760,6 +959,9 @@ static const struct dev_pm_ops nouveau_pm_ops = {
760 .thaw = nouveau_pmops_thaw, 959 .thaw = nouveau_pmops_thaw,
761 .poweroff = nouveau_pmops_freeze, 960 .poweroff = nouveau_pmops_freeze,
762 .restore = nouveau_pmops_resume, 961 .restore = nouveau_pmops_resume,
962 .runtime_suspend = nouveau_pmops_runtime_suspend,
963 .runtime_resume = nouveau_pmops_runtime_resume,
964 .runtime_idle = nouveau_pmops_runtime_idle,
763}; 965};
764 966
765static struct pci_driver 967static struct pci_driver
@@ -774,8 +976,6 @@ nouveau_drm_pci_driver = {
774static int __init 976static int __init
775nouveau_drm_init(void) 977nouveau_drm_init(void)
776{ 978{
777 driver.num_ioctls = ARRAY_SIZE(nouveau_ioctls);
778
779 if (nouveau_modeset == -1) { 979 if (nouveau_modeset == -1) {
780#ifdef CONFIG_VGA_CONSOLE 980#ifdef CONFIG_VGA_CONSOLE
781 if (vgacon_text_force()) 981 if (vgacon_text_force())
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
index 41ff7e0d403a..994fd6ec373b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -70,6 +70,8 @@ nouveau_cli(struct drm_file *fpriv)
70 return fpriv ? fpriv->driver_priv : NULL; 70 return fpriv ? fpriv->driver_priv : NULL;
71} 71}
72 72
73extern int nouveau_runtime_pm;
74
73struct nouveau_drm { 75struct nouveau_drm {
74 struct nouveau_cli client; 76 struct nouveau_cli client;
75 struct drm_device *dev; 77 struct drm_device *dev;
@@ -129,6 +131,12 @@ struct nouveau_drm {
129 131
130 /* power management */ 132 /* power management */
131 struct nouveau_pm *pm; 133 struct nouveau_pm *pm;
134
135 /* display power reference */
136 bool have_disp_power_ref;
137
138 struct dev_pm_domain vga_pm_domain;
139 struct pci_dev *hdmi_device;
132}; 140};
133 141
134static inline struct nouveau_drm * 142static inline struct nouveau_drm *
@@ -146,6 +154,7 @@ nouveau_dev(struct drm_device *dev)
146int nouveau_pmops_suspend(struct device *); 154int nouveau_pmops_suspend(struct device *);
147int nouveau_pmops_resume(struct device *); 155int nouveau_pmops_resume(struct device *);
148 156
157#define NV_SUSPEND(cli, fmt, args...) nv_suspend((cli), fmt, ##args)
149#define NV_FATAL(cli, fmt, args...) nv_fatal((cli), fmt, ##args) 158#define NV_FATAL(cli, fmt, args...) nv_fatal((cli), fmt, ##args)
150#define NV_ERROR(cli, fmt, args...) nv_error((cli), fmt, ##args) 159#define NV_ERROR(cli, fmt, args...) nv_error((cli), fmt, ##args)
151#define NV_WARN(cli, fmt, args...) nv_warn((cli), fmt, ##args) 160#define NV_WARN(cli, fmt, args...) nv_warn((cli), fmt, ##args)
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 830cb7bad922..487242fb3fdc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -220,7 +220,7 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
220 } 220 }
221 221
222 rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT; 222 rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
223 rep->map_handle = nvbo->bo.addr_space_offset; 223 rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.vma_node);
224 rep->tile_mode = nvbo->tile_mode; 224 rep->tile_mode = nvbo->tile_mode;
225 rep->tile_flags = nvbo->tile_flags; 225 rep->tile_flags = nvbo->tile_flags;
226 return 0; 226 return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index 25d3495725eb..81638d7f2eff 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -32,6 +32,9 @@ nouveau_switcheroo_set_state(struct pci_dev *pdev,
32{ 32{
33 struct drm_device *dev = pci_get_drvdata(pdev); 33 struct drm_device *dev = pci_get_drvdata(pdev);
34 34
35 if ((nouveau_is_optimus() || nouveau_is_v1_dsm()) && state == VGA_SWITCHEROO_OFF)
36 return;
37
35 if (state == VGA_SWITCHEROO_ON) { 38 if (state == VGA_SWITCHEROO_ON) {
36 printk(KERN_ERR "VGA switcheroo: switched nouveau on\n"); 39 printk(KERN_ERR "VGA switcheroo: switched nouveau on\n");
37 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 40 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
@@ -78,8 +81,17 @@ void
78nouveau_vga_init(struct nouveau_drm *drm) 81nouveau_vga_init(struct nouveau_drm *drm)
79{ 82{
80 struct drm_device *dev = drm->dev; 83 struct drm_device *dev = drm->dev;
84 bool runtime = false;
81 vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode); 85 vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);
82 vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops); 86
87 if (nouveau_runtime_pm == 1)
88 runtime = true;
89 if ((nouveau_runtime_pm == -1) && (nouveau_is_optimus() || nouveau_is_v1_dsm()))
90 runtime = true;
91 vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops, runtime);
92
93 if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
94 vga_switcheroo_init_domain_pm_ops(drm->dev->dev, &drm->vga_pm_domain);
83} 95}
84 96
85void 97void
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 8b40a36c1b57..9d2092a5ed38 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -1326,7 +1326,7 @@ static const struct drm_crtc_funcs nv50_crtc_func = {
1326 .cursor_set = nv50_crtc_cursor_set, 1326 .cursor_set = nv50_crtc_cursor_set,
1327 .cursor_move = nv50_crtc_cursor_move, 1327 .cursor_move = nv50_crtc_cursor_move,
1328 .gamma_set = nv50_crtc_gamma_set, 1328 .gamma_set = nv50_crtc_gamma_set,
1329 .set_config = drm_crtc_helper_set_config, 1329 .set_config = nouveau_crtc_set_config,
1330 .destroy = nv50_crtc_destroy, 1330 .destroy = nv50_crtc_destroy,
1331 .page_flip = nouveau_crtc_page_flip, 1331 .page_flip = nouveau_crtc_page_flip,
1332}; 1332};
diff --git a/drivers/gpu/drm/omapdrm/Makefile b/drivers/gpu/drm/omapdrm/Makefile
index d85e058f2845..778372b062ad 100644
--- a/drivers/gpu/drm/omapdrm/Makefile
+++ b/drivers/gpu/drm/omapdrm/Makefile
@@ -18,7 +18,4 @@ omapdrm-y := omap_drv.o \
18 omap_dmm_tiler.o \ 18 omap_dmm_tiler.o \
19 tcm-sita.o 19 tcm-sita.o
20 20
21# temporary:
22omapdrm-y += omap_gem_helpers.o
23
24obj-$(CONFIG_DRM_OMAP) += omapdrm.o 21obj-$(CONFIG_DRM_OMAP) += omapdrm.o
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 11a5263a5e9f..0fd2eb139f6e 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -331,7 +331,8 @@ static void page_flip_cb(void *arg)
331 331
332static int omap_crtc_page_flip_locked(struct drm_crtc *crtc, 332static int omap_crtc_page_flip_locked(struct drm_crtc *crtc,
333 struct drm_framebuffer *fb, 333 struct drm_framebuffer *fb,
334 struct drm_pending_vblank_event *event) 334 struct drm_pending_vblank_event *event,
335 uint32_t page_flip_flags)
335{ 336{
336 struct drm_device *dev = crtc->dev; 337 struct drm_device *dev = crtc->dev;
337 struct omap_crtc *omap_crtc = to_omap_crtc(crtc); 338 struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index 9b794c933c81..acf667859cb6 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -871,7 +871,7 @@ int tiler_map_show(struct seq_file *s, void *arg)
871 goto error; 871 goto error;
872 872
873 for (lut_idx = 0; lut_idx < omap_dmm->num_lut; lut_idx++) { 873 for (lut_idx = 0; lut_idx < omap_dmm->num_lut; lut_idx++) {
874 memset(map, 0, sizeof(h_adj * sizeof(*map))); 874 memset(map, 0, h_adj * sizeof(*map));
875 memset(global_map, ' ', (w_adj + 1) * h_adj); 875 memset(global_map, ' ', (w_adj + 1) * h_adj);
876 876
877 for (i = 0; i < omap_dmm->container_height; i++) { 877 for (i = 0; i < omap_dmm->container_height; i++) {
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index a3004f12b9a3..2603d909f49c 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -419,7 +419,7 @@ static int ioctl_gem_info(struct drm_device *dev, void *data,
419 return ret; 419 return ret;
420} 420}
421 421
422static struct drm_ioctl_desc ioctls[DRM_COMMAND_END - DRM_COMMAND_BASE] = { 422static const struct drm_ioctl_desc ioctls[DRM_COMMAND_END - DRM_COMMAND_BASE] = {
423 DRM_IOCTL_DEF_DRV(OMAP_GET_PARAM, ioctl_get_param, DRM_UNLOCKED|DRM_AUTH), 423 DRM_IOCTL_DEF_DRV(OMAP_GET_PARAM, ioctl_get_param, DRM_UNLOCKED|DRM_AUTH),
424 DRM_IOCTL_DEF_DRV(OMAP_SET_PARAM, ioctl_set_param, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 424 DRM_IOCTL_DEF_DRV(OMAP_SET_PARAM, ioctl_set_param, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
425 DRM_IOCTL_DEF_DRV(OMAP_GEM_NEW, ioctl_gem_new, DRM_UNLOCKED|DRM_AUTH), 425 DRM_IOCTL_DEF_DRV(OMAP_GEM_NEW, ioctl_gem_new, DRM_UNLOCKED|DRM_AUTH),
@@ -524,12 +524,6 @@ static int dev_open(struct drm_device *dev, struct drm_file *file)
524 return 0; 524 return 0;
525} 525}
526 526
527static int dev_firstopen(struct drm_device *dev)
528{
529 DBG("firstopen: dev=%p", dev);
530 return 0;
531}
532
533/** 527/**
534 * lastclose - clean up after all DRM clients have exited 528 * lastclose - clean up after all DRM clients have exited
535 * @dev: DRM device 529 * @dev: DRM device
@@ -598,7 +592,6 @@ static const struct file_operations omapdriver_fops = {
598 .release = drm_release, 592 .release = drm_release,
599 .mmap = omap_gem_mmap, 593 .mmap = omap_gem_mmap,
600 .poll = drm_poll, 594 .poll = drm_poll,
601 .fasync = drm_fasync,
602 .read = drm_read, 595 .read = drm_read,
603 .llseek = noop_llseek, 596 .llseek = noop_llseek,
604}; 597};
@@ -609,7 +602,6 @@ static struct drm_driver omap_drm_driver = {
609 .load = dev_load, 602 .load = dev_load,
610 .unload = dev_unload, 603 .unload = dev_unload,
611 .open = dev_open, 604 .open = dev_open,
612 .firstopen = dev_firstopen,
613 .lastclose = dev_lastclose, 605 .lastclose = dev_lastclose,
614 .preclose = dev_preclose, 606 .preclose = dev_preclose,
615 .postclose = dev_postclose, 607 .postclose = dev_postclose,
@@ -633,7 +625,7 @@ static struct drm_driver omap_drm_driver = {
633 .gem_vm_ops = &omap_gem_vm_ops, 625 .gem_vm_ops = &omap_gem_vm_ops,
634 .dumb_create = omap_gem_dumb_create, 626 .dumb_create = omap_gem_dumb_create,
635 .dumb_map_offset = omap_gem_dumb_map_offset, 627 .dumb_map_offset = omap_gem_dumb_map_offset,
636 .dumb_destroy = omap_gem_dumb_destroy, 628 .dumb_destroy = drm_gem_dumb_destroy,
637 .ioctls = ioctls, 629 .ioctls = ioctls,
638 .num_ioctls = DRM_OMAP_NUM_IOCTLS, 630 .num_ioctls = DRM_OMAP_NUM_IOCTLS,
639 .fops = &omapdriver_fops, 631 .fops = &omapdriver_fops,
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 14f17da2ce25..30b95b736658 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -203,9 +203,8 @@ struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
203struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev, 203struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
204 struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos); 204 struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
205struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p); 205struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p);
206int omap_framebuffer_replace(struct drm_framebuffer *a, 206int omap_framebuffer_pin(struct drm_framebuffer *fb);
207 struct drm_framebuffer *b, void *arg, 207int omap_framebuffer_unpin(struct drm_framebuffer *fb);
208 void (*unpin)(void *arg, struct drm_gem_object *bo));
209void omap_framebuffer_update_scanout(struct drm_framebuffer *fb, 208void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
210 struct omap_drm_window *win, struct omap_overlay_info *info); 209 struct omap_drm_window *win, struct omap_overlay_info *info);
211struct drm_connector *omap_framebuffer_get_next_connector( 210struct drm_connector *omap_framebuffer_get_next_connector(
@@ -225,8 +224,6 @@ int omap_gem_init_object(struct drm_gem_object *obj);
225void *omap_gem_vaddr(struct drm_gem_object *obj); 224void *omap_gem_vaddr(struct drm_gem_object *obj);
226int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, 225int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
227 uint32_t handle, uint64_t *offset); 226 uint32_t handle, uint64_t *offset);
228int omap_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
229 uint32_t handle);
230int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev, 227int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
231 struct drm_mode_create_dumb *args); 228 struct drm_mode_create_dumb *args);
232int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma); 229int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma);
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index 8031402e7951..f2b8f0668c0c 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -237,55 +237,49 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
237 } 237 }
238} 238}
239 239
240/* Call for unpin 'a' (if not NULL), and pin 'b' (if not NULL). Although 240/* pin, prepare for scanout: */
241 * buffers to unpin are just pushed to the unpin fifo so that the 241int omap_framebuffer_pin(struct drm_framebuffer *fb)
242 * caller can defer unpin until vblank.
243 *
244 * Note if this fails (ie. something went very wrong!), all buffers are
245 * unpinned, and the caller disables the overlay. We could have tried
246 * to revert back to the previous set of pinned buffers but if things are
247 * hosed there is no guarantee that would succeed.
248 */
249int omap_framebuffer_replace(struct drm_framebuffer *a,
250 struct drm_framebuffer *b, void *arg,
251 void (*unpin)(void *arg, struct drm_gem_object *bo))
252{ 242{
253 int ret = 0, i, na, nb; 243 struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
254 struct omap_framebuffer *ofba = to_omap_framebuffer(a); 244 int ret, i, n = drm_format_num_planes(fb->pixel_format);
255 struct omap_framebuffer *ofbb = to_omap_framebuffer(b);
256 uint32_t pinned_mask = 0;
257 245
258 na = a ? drm_format_num_planes(a->pixel_format) : 0; 246 for (i = 0; i < n; i++) {
259 nb = b ? drm_format_num_planes(b->pixel_format) : 0; 247 struct plane *plane = &omap_fb->planes[i];
248 ret = omap_gem_get_paddr(plane->bo, &plane->paddr, true);
249 if (ret)
250 goto fail;
251 omap_gem_dma_sync(plane->bo, DMA_TO_DEVICE);
252 }
260 253
261 for (i = 0; i < max(na, nb); i++) { 254 return 0;
262 struct plane *pa, *pb;
263 255
264 pa = (i < na) ? &ofba->planes[i] : NULL; 256fail:
265 pb = (i < nb) ? &ofbb->planes[i] : NULL; 257 for (i--; i >= 0; i--) {
258 struct plane *plane = &omap_fb->planes[i];
259 omap_gem_put_paddr(plane->bo);
260 plane->paddr = 0;
261 }
266 262
267 if (pa) 263 return ret;
268 unpin(arg, pa->bo); 264}
269 265
270 if (pb && !ret) { 266/* unpin, no longer being scanned out: */
271 ret = omap_gem_get_paddr(pb->bo, &pb->paddr, true); 267int omap_framebuffer_unpin(struct drm_framebuffer *fb)
272 if (!ret) { 268{
273 omap_gem_dma_sync(pb->bo, DMA_TO_DEVICE); 269 struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
274 pinned_mask |= (1 << i); 270 int ret, i, n = drm_format_num_planes(fb->pixel_format);
275 }
276 }
277 }
278 271
279 if (ret) { 272 for (i = 0; i < n; i++) {
280 /* something went wrong.. unpin what has been pinned */ 273 struct plane *plane = &omap_fb->planes[i];
281 for (i = 0; i < nb; i++) { 274 ret = omap_gem_put_paddr(plane->bo);
282 if (pinned_mask & (1 << i)) { 275 if (ret)
283 struct plane *pb = &ofba->planes[i]; 276 goto fail;
284 unpin(arg, pb->bo); 277 plane->paddr = 0;
285 }
286 }
287 } 278 }
288 279
280 return 0;
281
282fail:
289 return ret; 283 return ret;
290} 284}
291 285
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index ebbdf4132e9c..533f6ebec531 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -20,6 +20,7 @@
20 20
21#include <linux/spinlock.h> 21#include <linux/spinlock.h>
22#include <linux/shmem_fs.h> 22#include <linux/shmem_fs.h>
23#include <drm/drm_vma_manager.h>
23 24
24#include "omap_drv.h" 25#include "omap_drv.h"
25#include "omap_dmm_tiler.h" 26#include "omap_dmm_tiler.h"
@@ -236,7 +237,7 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj)
236 * mapping_gfp_mask(mapping) which conflicts w/ GFP_DMA32.. probably 237 * mapping_gfp_mask(mapping) which conflicts w/ GFP_DMA32.. probably
237 * we actually want CMA memory for it all anyways.. 238 * we actually want CMA memory for it all anyways..
238 */ 239 */
239 pages = _drm_gem_get_pages(obj, GFP_KERNEL); 240 pages = drm_gem_get_pages(obj, GFP_KERNEL);
240 if (IS_ERR(pages)) { 241 if (IS_ERR(pages)) {
241 dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages)); 242 dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
242 return PTR_ERR(pages); 243 return PTR_ERR(pages);
@@ -270,7 +271,7 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj)
270 return 0; 271 return 0;
271 272
272free_pages: 273free_pages:
273 _drm_gem_put_pages(obj, pages, true, false); 274 drm_gem_put_pages(obj, pages, true, false);
274 275
275 return ret; 276 return ret;
276} 277}
@@ -294,7 +295,7 @@ static void omap_gem_detach_pages(struct drm_gem_object *obj)
294 kfree(omap_obj->addrs); 295 kfree(omap_obj->addrs);
295 omap_obj->addrs = NULL; 296 omap_obj->addrs = NULL;
296 297
297 _drm_gem_put_pages(obj, omap_obj->pages, true, false); 298 drm_gem_put_pages(obj, omap_obj->pages, true, false);
298 omap_obj->pages = NULL; 299 omap_obj->pages = NULL;
299} 300}
300 301
@@ -308,21 +309,20 @@ uint32_t omap_gem_flags(struct drm_gem_object *obj)
308static uint64_t mmap_offset(struct drm_gem_object *obj) 309static uint64_t mmap_offset(struct drm_gem_object *obj)
309{ 310{
310 struct drm_device *dev = obj->dev; 311 struct drm_device *dev = obj->dev;
312 int ret;
313 size_t size;
311 314
312 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 315 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
313 316
314 if (!obj->map_list.map) { 317 /* Make it mmapable */
315 /* Make it mmapable */ 318 size = omap_gem_mmap_size(obj);
316 size_t size = omap_gem_mmap_size(obj); 319 ret = drm_gem_create_mmap_offset_size(obj, size);
317 int ret = _drm_gem_create_mmap_offset_size(obj, size); 320 if (ret) {
318 321 dev_err(dev->dev, "could not allocate mmap offset\n");
319 if (ret) { 322 return 0;
320 dev_err(dev->dev, "could not allocate mmap offset\n");
321 return 0;
322 }
323 } 323 }
324 324
325 return (uint64_t)obj->map_list.hash.key << PAGE_SHIFT; 325 return drm_vma_node_offset_addr(&obj->vma_node);
326} 326}
327 327
328uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj) 328uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
@@ -629,21 +629,6 @@ int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
629} 629}
630 630
631/** 631/**
632 * omap_gem_dumb_destroy - destroy a dumb buffer
633 * @file: client file
634 * @dev: our DRM device
635 * @handle: the object handle
636 *
637 * Destroy a handle that was created via omap_gem_dumb_create.
638 */
639int omap_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
640 uint32_t handle)
641{
642 /* No special work needed, drop the reference and see what falls out */
643 return drm_gem_handle_delete(file, handle);
644}
645
646/**
647 * omap_gem_dumb_map - buffer mapping for dumb interface 632 * omap_gem_dumb_map - buffer mapping for dumb interface
648 * @file: our drm client file 633 * @file: our drm client file
649 * @dev: drm device 634 * @dev: drm device
@@ -997,12 +982,11 @@ void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
997{ 982{
998 struct drm_device *dev = obj->dev; 983 struct drm_device *dev = obj->dev;
999 struct omap_gem_object *omap_obj = to_omap_bo(obj); 984 struct omap_gem_object *omap_obj = to_omap_bo(obj);
1000 uint64_t off = 0; 985 uint64_t off;
1001 986
1002 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 987 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
1003 988
1004 if (obj->map_list.map) 989 off = drm_vma_node_start(&obj->vma_node);
1005 off = (uint64_t)obj->map_list.hash.key;
1006 990
1007 seq_printf(m, "%08x: %2d (%2d) %08llx %08Zx (%2d) %p %4d", 991 seq_printf(m, "%08x: %2d (%2d) %08llx %08Zx (%2d) %p %4d",
1008 omap_obj->flags, obj->name, obj->refcount.refcount.counter, 992 omap_obj->flags, obj->name, obj->refcount.refcount.counter,
@@ -1309,8 +1293,7 @@ void omap_gem_free_object(struct drm_gem_object *obj)
1309 1293
1310 list_del(&omap_obj->mm_list); 1294 list_del(&omap_obj->mm_list);
1311 1295
1312 if (obj->map_list.map) 1296 drm_gem_free_mmap_offset(obj);
1313 drm_gem_free_mmap_offset(obj);
1314 1297
1315 /* this means the object is still pinned.. which really should 1298 /* this means the object is still pinned.. which really should
1316 * not happen. I think.. 1299 * not happen. I think..
@@ -1427,8 +1410,9 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
1427 omap_obj->height = gsize.tiled.height; 1410 omap_obj->height = gsize.tiled.height;
1428 } 1411 }
1429 1412
1413 ret = 0;
1430 if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM)) 1414 if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM))
1431 ret = drm_gem_private_object_init(dev, obj, size); 1415 drm_gem_private_object_init(dev, obj, size);
1432 else 1416 else
1433 ret = drm_gem_object_init(dev, obj, size); 1417 ret = drm_gem_object_init(dev, obj, size);
1434 1418
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_helpers.c b/drivers/gpu/drm/omapdrm/omap_gem_helpers.c
deleted file mode 100644
index f9eb679eb79b..000000000000
--- a/drivers/gpu/drm/omapdrm/omap_gem_helpers.c
+++ /dev/null
@@ -1,169 +0,0 @@
1/*
2 * drivers/gpu/drm/omapdrm/omap_gem_helpers.c
3 *
4 * Copyright (C) 2011 Texas Instruments
5 * Author: Rob Clark <rob.clark@linaro.org>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 as published by
9 * the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20/* temporary copy of drm_gem_{get,put}_pages() until the
21 * "drm/gem: add functions to get/put pages" patch is merged..
22 */
23
24#include <linux/module.h>
25#include <linux/types.h>
26#include <linux/shmem_fs.h>
27
28#include <drm/drmP.h>
29
30/**
31 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
32 * @obj: obj in question
33 * @gfpmask: gfp mask of requested pages
34 */
35struct page **_drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
36{
37 struct inode *inode;
38 struct address_space *mapping;
39 struct page *p, **pages;
40 int i, npages;
41
42 /* This is the shared memory object that backs the GEM resource */
43 inode = file_inode(obj->filp);
44 mapping = inode->i_mapping;
45
46 npages = obj->size >> PAGE_SHIFT;
47
48 pages = drm_malloc_ab(npages, sizeof(struct page *));
49 if (pages == NULL)
50 return ERR_PTR(-ENOMEM);
51
52 gfpmask |= mapping_gfp_mask(mapping);
53
54 for (i = 0; i < npages; i++) {
55 p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
56 if (IS_ERR(p))
57 goto fail;
58 pages[i] = p;
59
60 /* There is a hypothetical issue w/ drivers that require
61 * buffer memory in the low 4GB.. if the pages are un-
62 * pinned, and swapped out, they can end up swapped back
63 * in above 4GB. If pages are already in memory, then
64 * shmem_read_mapping_page_gfp will ignore the gfpmask,
65 * even if the already in-memory page disobeys the mask.
66 *
67 * It is only a theoretical issue today, because none of
68 * the devices with this limitation can be populated with
69 * enough memory to trigger the issue. But this BUG_ON()
70 * is here as a reminder in case the problem with
71 * shmem_read_mapping_page_gfp() isn't solved by the time
72 * it does become a real issue.
73 *
74 * See this thread: http://lkml.org/lkml/2011/7/11/238
75 */
76 BUG_ON((gfpmask & __GFP_DMA32) &&
77 (page_to_pfn(p) >= 0x00100000UL));
78 }
79
80 return pages;
81
82fail:
83 while (i--)
84 page_cache_release(pages[i]);
85
86 drm_free_large(pages);
87 return ERR_CAST(p);
88}
89
90/**
91 * drm_gem_put_pages - helper to free backing pages for a GEM object
92 * @obj: obj in question
93 * @pages: pages to free
94 */
95void _drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
96 bool dirty, bool accessed)
97{
98 int i, npages;
99
100 npages = obj->size >> PAGE_SHIFT;
101
102 for (i = 0; i < npages; i++) {
103 if (dirty)
104 set_page_dirty(pages[i]);
105
106 if (accessed)
107 mark_page_accessed(pages[i]);
108
109 /* Undo the reference we took when populating the table */
110 page_cache_release(pages[i]);
111 }
112
113 drm_free_large(pages);
114}
115
116int
117_drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
118{
119 struct drm_device *dev = obj->dev;
120 struct drm_gem_mm *mm = dev->mm_private;
121 struct drm_map_list *list;
122 struct drm_local_map *map;
123 int ret = 0;
124
125 /* Set the object up for mmap'ing */
126 list = &obj->map_list;
127 list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
128 if (!list->map)
129 return -ENOMEM;
130
131 map = list->map;
132 map->type = _DRM_GEM;
133 map->size = size;
134 map->handle = obj;
135
136 /* Get a DRM GEM mmap offset allocated... */
137 list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
138 size / PAGE_SIZE, 0, 0);
139
140 if (!list->file_offset_node) {
141 DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
142 ret = -ENOSPC;
143 goto out_free_list;
144 }
145
146 list->file_offset_node = drm_mm_get_block(list->file_offset_node,
147 size / PAGE_SIZE, 0);
148 if (!list->file_offset_node) {
149 ret = -ENOMEM;
150 goto out_free_list;
151 }
152
153 list->hash.key = list->file_offset_node->start;
154 ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
155 if (ret) {
156 DRM_ERROR("failed to add to map hash\n");
157 goto out_free_mm;
158 }
159
160 return 0;
161
162out_free_mm:
163 drm_mm_put_block(list->file_offset_node);
164out_free_list:
165 kfree(list->map);
166 list->map = NULL;
167
168 return ret;
169}
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index 8d225d7ff4e3..046d5e660c04 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -17,7 +17,7 @@
17 * this program. If not, see <http://www.gnu.org/licenses/>. 17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */ 18 */
19 19
20#include <linux/kfifo.h> 20#include "drm_flip_work.h"
21 21
22#include "omap_drv.h" 22#include "omap_drv.h"
23#include "omap_dmm_tiler.h" 23#include "omap_dmm_tiler.h"
@@ -58,26 +58,23 @@ struct omap_plane {
58 58
59 struct omap_drm_irq error_irq; 59 struct omap_drm_irq error_irq;
60 60
61 /* set of bo's pending unpin until next post_apply() */ 61 /* for deferring bo unpin's until next post_apply(): */
62 DECLARE_KFIFO_PTR(unpin_fifo, struct drm_gem_object *); 62 struct drm_flip_work unpin_work;
63 63
64 // XXX maybe get rid of this and handle vblank in crtc too? 64 // XXX maybe get rid of this and handle vblank in crtc too?
65 struct callback apply_done_cb; 65 struct callback apply_done_cb;
66}; 66};
67 67
68static void unpin(void *arg, struct drm_gem_object *bo) 68static void unpin_worker(struct drm_flip_work *work, void *val)
69{ 69{
70 struct drm_plane *plane = arg; 70 struct omap_plane *omap_plane =
71 struct omap_plane *omap_plane = to_omap_plane(plane); 71 container_of(work, struct omap_plane, unpin_work);
72 struct drm_device *dev = omap_plane->base.dev;
72 73
73 if (kfifo_put(&omap_plane->unpin_fifo, 74 omap_framebuffer_unpin(val);
74 (const struct drm_gem_object **)&bo)) { 75 mutex_lock(&dev->mode_config.mutex);
75 /* also hold a ref so it isn't free'd while pinned */ 76 drm_framebuffer_unreference(val);
76 drm_gem_object_reference(bo); 77 mutex_unlock(&dev->mode_config.mutex);
77 } else {
78 dev_err(plane->dev->dev, "unpin fifo full!\n");
79 omap_gem_put_paddr(bo);
80 }
81} 78}
82 79
83/* update which fb (if any) is pinned for scanout */ 80/* update which fb (if any) is pinned for scanout */
@@ -87,23 +84,22 @@ static int update_pin(struct drm_plane *plane, struct drm_framebuffer *fb)
87 struct drm_framebuffer *pinned_fb = omap_plane->pinned_fb; 84 struct drm_framebuffer *pinned_fb = omap_plane->pinned_fb;
88 85
89 if (pinned_fb != fb) { 86 if (pinned_fb != fb) {
90 int ret; 87 int ret = 0;
91 88
92 DBG("%p -> %p", pinned_fb, fb); 89 DBG("%p -> %p", pinned_fb, fb);
93 90
94 if (fb) 91 if (fb) {
95 drm_framebuffer_reference(fb); 92 drm_framebuffer_reference(fb);
96 93 ret = omap_framebuffer_pin(fb);
97 ret = omap_framebuffer_replace(pinned_fb, fb, plane, unpin); 94 }
98 95
99 if (pinned_fb) 96 if (pinned_fb)
100 drm_framebuffer_unreference(pinned_fb); 97 drm_flip_work_queue(&omap_plane->unpin_work, pinned_fb);
101 98
102 if (ret) { 99 if (ret) {
103 dev_err(plane->dev->dev, "could not swap %p -> %p\n", 100 dev_err(plane->dev->dev, "could not swap %p -> %p\n",
104 omap_plane->pinned_fb, fb); 101 omap_plane->pinned_fb, fb);
105 if (fb) 102 drm_framebuffer_unreference(fb);
106 drm_framebuffer_unreference(fb);
107 omap_plane->pinned_fb = NULL; 103 omap_plane->pinned_fb = NULL;
108 return ret; 104 return ret;
109 } 105 }
@@ -170,17 +166,14 @@ static void omap_plane_post_apply(struct omap_drm_apply *apply)
170 struct omap_plane *omap_plane = 166 struct omap_plane *omap_plane =
171 container_of(apply, struct omap_plane, apply); 167 container_of(apply, struct omap_plane, apply);
172 struct drm_plane *plane = &omap_plane->base; 168 struct drm_plane *plane = &omap_plane->base;
169 struct omap_drm_private *priv = plane->dev->dev_private;
173 struct omap_overlay_info *info = &omap_plane->info; 170 struct omap_overlay_info *info = &omap_plane->info;
174 struct drm_gem_object *bo = NULL;
175 struct callback cb; 171 struct callback cb;
176 172
177 cb = omap_plane->apply_done_cb; 173 cb = omap_plane->apply_done_cb;
178 omap_plane->apply_done_cb.fxn = NULL; 174 omap_plane->apply_done_cb.fxn = NULL;
179 175
180 while (kfifo_get(&omap_plane->unpin_fifo, &bo)) { 176 drm_flip_work_commit(&omap_plane->unpin_work, priv->wq);
181 omap_gem_put_paddr(bo);
182 drm_gem_object_unreference_unlocked(bo);
183 }
184 177
185 if (cb.fxn) 178 if (cb.fxn)
186 cb.fxn(cb.arg); 179 cb.fxn(cb.arg);
@@ -277,8 +270,7 @@ static void omap_plane_destroy(struct drm_plane *plane)
277 omap_plane_disable(plane); 270 omap_plane_disable(plane);
278 drm_plane_cleanup(plane); 271 drm_plane_cleanup(plane);
279 272
280 WARN_ON(!kfifo_is_empty(&omap_plane->unpin_fifo)); 273 drm_flip_work_cleanup(&omap_plane->unpin_work);
281 kfifo_free(&omap_plane->unpin_fifo);
282 274
283 kfree(omap_plane); 275 kfree(omap_plane);
284} 276}
@@ -399,7 +391,8 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
399 if (!omap_plane) 391 if (!omap_plane)
400 goto fail; 392 goto fail;
401 393
402 ret = kfifo_alloc(&omap_plane->unpin_fifo, 16, GFP_KERNEL); 394 ret = drm_flip_work_init(&omap_plane->unpin_work, 16,
395 "unpin", unpin_worker);
403 if (ret) { 396 if (ret) {
404 dev_err(dev->dev, "could not allocate unpin FIFO\n"); 397 dev_err(dev->dev, "could not allocate unpin FIFO\n");
405 goto fail; 398 goto fail;
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index 93c2f2cceb51..eb89653a7a17 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -179,9 +179,10 @@ qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *relea
179 uint32_t type, bool interruptible) 179 uint32_t type, bool interruptible)
180{ 180{
181 struct qxl_command cmd; 181 struct qxl_command cmd;
182 struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
182 183
183 cmd.type = type; 184 cmd.type = type;
184 cmd.data = qxl_bo_physical_address(qdev, release->bos[0], release->release_offset); 185 cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset);
185 186
186 return qxl_ring_push(qdev->command_ring, &cmd, interruptible); 187 return qxl_ring_push(qdev->command_ring, &cmd, interruptible);
187} 188}
@@ -191,9 +192,10 @@ qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *releas
191 uint32_t type, bool interruptible) 192 uint32_t type, bool interruptible)
192{ 193{
193 struct qxl_command cmd; 194 struct qxl_command cmd;
195 struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
194 196
195 cmd.type = type; 197 cmd.type = type;
196 cmd.data = qxl_bo_physical_address(qdev, release->bos[0], release->release_offset); 198 cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset);
197 199
198 return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible); 200 return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible);
199} 201}
@@ -214,7 +216,6 @@ int qxl_garbage_collect(struct qxl_device *qdev)
214 struct qxl_release *release; 216 struct qxl_release *release;
215 uint64_t id, next_id; 217 uint64_t id, next_id;
216 int i = 0; 218 int i = 0;
217 int ret;
218 union qxl_release_info *info; 219 union qxl_release_info *info;
219 220
220 while (qxl_ring_pop(qdev->release_ring, &id)) { 221 while (qxl_ring_pop(qdev->release_ring, &id)) {
@@ -224,17 +225,10 @@ int qxl_garbage_collect(struct qxl_device *qdev)
224 if (release == NULL) 225 if (release == NULL)
225 break; 226 break;
226 227
227 ret = qxl_release_reserve(qdev, release, false);
228 if (ret) {
229 qxl_io_log(qdev, "failed to reserve release on garbage collect %lld\n", id);
230 DRM_ERROR("failed to reserve release %lld\n", id);
231 }
232
233 info = qxl_release_map(qdev, release); 228 info = qxl_release_map(qdev, release);
234 next_id = info->next; 229 next_id = info->next;
235 qxl_release_unmap(qdev, release, info); 230 qxl_release_unmap(qdev, release, info);
236 231
237 qxl_release_unreserve(qdev, release);
238 QXL_INFO(qdev, "popped %lld, next %lld\n", id, 232 QXL_INFO(qdev, "popped %lld, next %lld\n", id,
239 next_id); 233 next_id);
240 234
@@ -259,27 +253,29 @@ int qxl_garbage_collect(struct qxl_device *qdev)
259 return i; 253 return i;
260} 254}
261 255
262int qxl_alloc_bo_reserved(struct qxl_device *qdev, unsigned long size, 256int qxl_alloc_bo_reserved(struct qxl_device *qdev,
257 struct qxl_release *release,
258 unsigned long size,
263 struct qxl_bo **_bo) 259 struct qxl_bo **_bo)
264{ 260{
265 struct qxl_bo *bo; 261 struct qxl_bo *bo;
266 int ret; 262 int ret;
267 263
268 ret = qxl_bo_create(qdev, size, false /* not kernel - device */, 264 ret = qxl_bo_create(qdev, size, false /* not kernel - device */,
269 QXL_GEM_DOMAIN_VRAM, NULL, &bo); 265 false, QXL_GEM_DOMAIN_VRAM, NULL, &bo);
270 if (ret) { 266 if (ret) {
271 DRM_ERROR("failed to allocate VRAM BO\n"); 267 DRM_ERROR("failed to allocate VRAM BO\n");
272 return ret; 268 return ret;
273 } 269 }
274 ret = qxl_bo_reserve(bo, false); 270 ret = qxl_release_list_add(release, bo);
275 if (unlikely(ret != 0)) 271 if (ret)
276 goto out_unref; 272 goto out_unref;
277 273
278 *_bo = bo; 274 *_bo = bo;
279 return 0; 275 return 0;
280out_unref: 276out_unref:
281 qxl_bo_unref(&bo); 277 qxl_bo_unref(&bo);
282 return 0; 278 return ret;
283} 279}
284 280
285static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, bool intr) 281static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, bool intr)
@@ -503,6 +499,10 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev,
503 if (ret) 499 if (ret)
504 return ret; 500 return ret;
505 501
502 ret = qxl_release_reserve_list(release, true);
503 if (ret)
504 return ret;
505
506 cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release); 506 cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
507 cmd->type = QXL_SURFACE_CMD_CREATE; 507 cmd->type = QXL_SURFACE_CMD_CREATE;
508 cmd->u.surface_create.format = surf->surf.format; 508 cmd->u.surface_create.format = surf->surf.format;
@@ -524,14 +524,11 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev,
524 524
525 surf->surf_create = release; 525 surf->surf_create = release;
526 526
527 /* no need to add a release to the fence for this bo, 527 /* no need to add a release to the fence for this surface bo,
528 since it is only released when we ask to destroy the surface 528 since it is only released when we ask to destroy the surface
529 and it would never signal otherwise */ 529 and it would never signal otherwise */
530 qxl_fence_releaseable(qdev, release);
531
532 qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false); 530 qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
533 531 qxl_release_fence_buffer_objects(release);
534 qxl_release_unreserve(qdev, release);
535 532
536 surf->hw_surf_alloc = true; 533 surf->hw_surf_alloc = true;
537 spin_lock(&qdev->surf_id_idr_lock); 534 spin_lock(&qdev->surf_id_idr_lock);
@@ -573,12 +570,9 @@ int qxl_hw_surface_dealloc(struct qxl_device *qdev,
573 cmd->surface_id = id; 570 cmd->surface_id = id;
574 qxl_release_unmap(qdev, release, &cmd->release_info); 571 qxl_release_unmap(qdev, release, &cmd->release_info);
575 572
576 qxl_fence_releaseable(qdev, release);
577
578 qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false); 573 qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
579 574
580 qxl_release_unreserve(qdev, release); 575 qxl_release_fence_buffer_objects(release);
581
582 576
583 return 0; 577 return 0;
584} 578}
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index f76f5dd7bfc4..835caba026d3 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -179,7 +179,7 @@ static void qxl_crtc_destroy(struct drm_crtc *crtc)
179 kfree(qxl_crtc); 179 kfree(qxl_crtc);
180} 180}
181 181
182static void 182static int
183qxl_hide_cursor(struct qxl_device *qdev) 183qxl_hide_cursor(struct qxl_device *qdev)
184{ 184{
185 struct qxl_release *release; 185 struct qxl_release *release;
@@ -188,14 +188,22 @@ qxl_hide_cursor(struct qxl_device *qdev)
188 188
189 ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD, 189 ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD,
190 &release, NULL); 190 &release, NULL);
191 if (ret)
192 return ret;
193
194 ret = qxl_release_reserve_list(release, true);
195 if (ret) {
196 qxl_release_free(qdev, release);
197 return ret;
198 }
191 199
192 cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release); 200 cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
193 cmd->type = QXL_CURSOR_HIDE; 201 cmd->type = QXL_CURSOR_HIDE;
194 qxl_release_unmap(qdev, release, &cmd->release_info); 202 qxl_release_unmap(qdev, release, &cmd->release_info);
195 203
196 qxl_fence_releaseable(qdev, release);
197 qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); 204 qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
198 qxl_release_unreserve(qdev, release); 205 qxl_release_fence_buffer_objects(release);
206 return 0;
199} 207}
200 208
201static int qxl_crtc_cursor_set2(struct drm_crtc *crtc, 209static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
@@ -216,10 +224,8 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
216 224
217 int size = 64*64*4; 225 int size = 64*64*4;
218 int ret = 0; 226 int ret = 0;
219 if (!handle) { 227 if (!handle)
220 qxl_hide_cursor(qdev); 228 return qxl_hide_cursor(qdev);
221 return 0;
222 }
223 229
224 obj = drm_gem_object_lookup(crtc->dev, file_priv, handle); 230 obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
225 if (!obj) { 231 if (!obj) {
@@ -234,8 +240,9 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
234 goto out_unref; 240 goto out_unref;
235 241
236 ret = qxl_bo_pin(user_bo, QXL_GEM_DOMAIN_CPU, NULL); 242 ret = qxl_bo_pin(user_bo, QXL_GEM_DOMAIN_CPU, NULL);
243 qxl_bo_unreserve(user_bo);
237 if (ret) 244 if (ret)
238 goto out_unreserve; 245 goto out_unref;
239 246
240 ret = qxl_bo_kmap(user_bo, &user_ptr); 247 ret = qxl_bo_kmap(user_bo, &user_ptr);
241 if (ret) 248 if (ret)
@@ -246,14 +253,20 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
246 &release, NULL); 253 &release, NULL);
247 if (ret) 254 if (ret)
248 goto out_kunmap; 255 goto out_kunmap;
249 ret = qxl_alloc_bo_reserved(qdev, sizeof(struct qxl_cursor) + size, 256
250 &cursor_bo); 257 ret = qxl_alloc_bo_reserved(qdev, release, sizeof(struct qxl_cursor) + size,
258 &cursor_bo);
251 if (ret) 259 if (ret)
252 goto out_free_release; 260 goto out_free_release;
253 ret = qxl_bo_kmap(cursor_bo, (void **)&cursor); 261
262 ret = qxl_release_reserve_list(release, false);
254 if (ret) 263 if (ret)
255 goto out_free_bo; 264 goto out_free_bo;
256 265
266 ret = qxl_bo_kmap(cursor_bo, (void **)&cursor);
267 if (ret)
268 goto out_backoff;
269
257 cursor->header.unique = 0; 270 cursor->header.unique = 0;
258 cursor->header.type = SPICE_CURSOR_TYPE_ALPHA; 271 cursor->header.type = SPICE_CURSOR_TYPE_ALPHA;
259 cursor->header.width = 64; 272 cursor->header.width = 64;
@@ -269,11 +282,7 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
269 282
270 qxl_bo_kunmap(cursor_bo); 283 qxl_bo_kunmap(cursor_bo);
271 284
272 /* finish with the userspace bo */
273 qxl_bo_kunmap(user_bo); 285 qxl_bo_kunmap(user_bo);
274 qxl_bo_unpin(user_bo);
275 qxl_bo_unreserve(user_bo);
276 drm_gem_object_unreference_unlocked(obj);
277 286
278 cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release); 287 cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
279 cmd->type = QXL_CURSOR_SET; 288 cmd->type = QXL_CURSOR_SET;
@@ -281,30 +290,35 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
281 cmd->u.set.position.y = qcrtc->cur_y; 290 cmd->u.set.position.y = qcrtc->cur_y;
282 291
283 cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0); 292 cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0);
284 qxl_release_add_res(qdev, release, cursor_bo);
285 293
286 cmd->u.set.visible = 1; 294 cmd->u.set.visible = 1;
287 qxl_release_unmap(qdev, release, &cmd->release_info); 295 qxl_release_unmap(qdev, release, &cmd->release_info);
288 296
289 qxl_fence_releaseable(qdev, release);
290 qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); 297 qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
291 qxl_release_unreserve(qdev, release); 298 qxl_release_fence_buffer_objects(release);
299
300 /* finish with the userspace bo */
301 ret = qxl_bo_reserve(user_bo, false);
302 if (!ret) {
303 qxl_bo_unpin(user_bo);
304 qxl_bo_unreserve(user_bo);
305 }
306 drm_gem_object_unreference_unlocked(obj);
292 307
293 qxl_bo_unreserve(cursor_bo);
294 qxl_bo_unref(&cursor_bo); 308 qxl_bo_unref(&cursor_bo);
295 309
296 return ret; 310 return ret;
311
312out_backoff:
313 qxl_release_backoff_reserve_list(release);
297out_free_bo: 314out_free_bo:
298 qxl_bo_unref(&cursor_bo); 315 qxl_bo_unref(&cursor_bo);
299out_free_release: 316out_free_release:
300 qxl_release_unreserve(qdev, release);
301 qxl_release_free(qdev, release); 317 qxl_release_free(qdev, release);
302out_kunmap: 318out_kunmap:
303 qxl_bo_kunmap(user_bo); 319 qxl_bo_kunmap(user_bo);
304out_unpin: 320out_unpin:
305 qxl_bo_unpin(user_bo); 321 qxl_bo_unpin(user_bo);
306out_unreserve:
307 qxl_bo_unreserve(user_bo);
308out_unref: 322out_unref:
309 drm_gem_object_unreference_unlocked(obj); 323 drm_gem_object_unreference_unlocked(obj);
310 return ret; 324 return ret;
@@ -322,6 +336,14 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc,
322 336
323 ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD, 337 ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD,
324 &release, NULL); 338 &release, NULL);
339 if (ret)
340 return ret;
341
342 ret = qxl_release_reserve_list(release, true);
343 if (ret) {
344 qxl_release_free(qdev, release);
345 return ret;
346 }
325 347
326 qcrtc->cur_x = x; 348 qcrtc->cur_x = x;
327 qcrtc->cur_y = y; 349 qcrtc->cur_y = y;
@@ -332,9 +354,9 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc,
332 cmd->u.position.y = qcrtc->cur_y; 354 cmd->u.position.y = qcrtc->cur_y;
333 qxl_release_unmap(qdev, release, &cmd->release_info); 355 qxl_release_unmap(qdev, release, &cmd->release_info);
334 356
335 qxl_fence_releaseable(qdev, release);
336 qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); 357 qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
337 qxl_release_unreserve(qdev, release); 358 qxl_release_fence_buffer_objects(release);
359
338 return 0; 360 return 0;
339} 361}
340 362
diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c
index 3c8c3dbf9378..56e1d633875e 100644
--- a/drivers/gpu/drm/qxl/qxl_draw.c
+++ b/drivers/gpu/drm/qxl/qxl_draw.c
@@ -23,25 +23,29 @@
23#include "qxl_drv.h" 23#include "qxl_drv.h"
24#include "qxl_object.h" 24#include "qxl_object.h"
25 25
26static int alloc_clips(struct qxl_device *qdev,
27 struct qxl_release *release,
28 unsigned num_clips,
29 struct qxl_bo **clips_bo)
30{
31 int size = sizeof(struct qxl_clip_rects) + sizeof(struct qxl_rect) * num_clips;
32
33 return qxl_alloc_bo_reserved(qdev, release, size, clips_bo);
34}
35
26/* returns a pointer to the already allocated qxl_rect array inside 36/* returns a pointer to the already allocated qxl_rect array inside
27 * the qxl_clip_rects. This is *not* the same as the memory allocated 37 * the qxl_clip_rects. This is *not* the same as the memory allocated
28 * on the device, it is offset to qxl_clip_rects.chunk.data */ 38 * on the device, it is offset to qxl_clip_rects.chunk.data */
29static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev, 39static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev,
30 struct qxl_drawable *drawable, 40 struct qxl_drawable *drawable,
31 unsigned num_clips, 41 unsigned num_clips,
32 struct qxl_bo **clips_bo, 42 struct qxl_bo *clips_bo)
33 struct qxl_release *release)
34{ 43{
35 struct qxl_clip_rects *dev_clips; 44 struct qxl_clip_rects *dev_clips;
36 int ret; 45 int ret;
37 int size = sizeof(*dev_clips) + sizeof(struct qxl_rect) * num_clips;
38 ret = qxl_alloc_bo_reserved(qdev, size, clips_bo);
39 if (ret)
40 return NULL;
41 46
42 ret = qxl_bo_kmap(*clips_bo, (void **)&dev_clips); 47 ret = qxl_bo_kmap(clips_bo, (void **)&dev_clips);
43 if (ret) { 48 if (ret) {
44 qxl_bo_unref(clips_bo);
45 return NULL; 49 return NULL;
46 } 50 }
47 dev_clips->num_rects = num_clips; 51 dev_clips->num_rects = num_clips;
@@ -52,20 +56,34 @@ static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev,
52} 56}
53 57
54static int 58static int
59alloc_drawable(struct qxl_device *qdev, struct qxl_release **release)
60{
61 int ret;
62 ret = qxl_alloc_release_reserved(qdev, sizeof(struct qxl_drawable),
63 QXL_RELEASE_DRAWABLE, release,
64 NULL);
65 return ret;
66}
67
68static void
69free_drawable(struct qxl_device *qdev, struct qxl_release *release)
70{
71 qxl_release_free(qdev, release);
72}
73
74/* release needs to be reserved at this point */
75static int
55make_drawable(struct qxl_device *qdev, int surface, uint8_t type, 76make_drawable(struct qxl_device *qdev, int surface, uint8_t type,
56 const struct qxl_rect *rect, 77 const struct qxl_rect *rect,
57 struct qxl_release **release) 78 struct qxl_release *release)
58{ 79{
59 struct qxl_drawable *drawable; 80 struct qxl_drawable *drawable;
60 int i, ret; 81 int i;
61 82
62 ret = qxl_alloc_release_reserved(qdev, sizeof(*drawable), 83 drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
63 QXL_RELEASE_DRAWABLE, release, 84 if (!drawable)
64 NULL); 85 return -ENOMEM;
65 if (ret)
66 return ret;
67 86
68 drawable = (struct qxl_drawable *)qxl_release_map(qdev, *release);
69 drawable->type = type; 87 drawable->type = type;
70 88
71 drawable->surface_id = surface; /* Only primary for now */ 89 drawable->surface_id = surface; /* Only primary for now */
@@ -91,14 +109,23 @@ make_drawable(struct qxl_device *qdev, int surface, uint8_t type,
91 drawable->bbox = *rect; 109 drawable->bbox = *rect;
92 110
93 drawable->mm_time = qdev->rom->mm_clock; 111 drawable->mm_time = qdev->rom->mm_clock;
94 qxl_release_unmap(qdev, *release, &drawable->release_info); 112 qxl_release_unmap(qdev, release, &drawable->release_info);
95 return 0; 113 return 0;
96} 114}
97 115
98static int qxl_palette_create_1bit(struct qxl_bo **palette_bo, 116static int alloc_palette_object(struct qxl_device *qdev,
117 struct qxl_release *release,
118 struct qxl_bo **palette_bo)
119{
120 return qxl_alloc_bo_reserved(qdev, release,
121 sizeof(struct qxl_palette) + sizeof(uint32_t) * 2,
122 palette_bo);
123}
124
125static int qxl_palette_create_1bit(struct qxl_bo *palette_bo,
126 struct qxl_release *release,
99 const struct qxl_fb_image *qxl_fb_image) 127 const struct qxl_fb_image *qxl_fb_image)
100{ 128{
101 struct qxl_device *qdev = qxl_fb_image->qdev;
102 const struct fb_image *fb_image = &qxl_fb_image->fb_image; 129 const struct fb_image *fb_image = &qxl_fb_image->fb_image;
103 uint32_t visual = qxl_fb_image->visual; 130 uint32_t visual = qxl_fb_image->visual;
104 const uint32_t *pseudo_palette = qxl_fb_image->pseudo_palette; 131 const uint32_t *pseudo_palette = qxl_fb_image->pseudo_palette;
@@ -108,12 +135,7 @@ static int qxl_palette_create_1bit(struct qxl_bo **palette_bo,
108 static uint64_t unique; /* we make no attempt to actually set this 135 static uint64_t unique; /* we make no attempt to actually set this
109 * correctly globaly, since that would require 136 * correctly globaly, since that would require
110 * tracking all of our palettes. */ 137 * tracking all of our palettes. */
111 138 ret = qxl_bo_kmap(palette_bo, (void **)&pal);
112 ret = qxl_alloc_bo_reserved(qdev,
113 sizeof(struct qxl_palette) + sizeof(uint32_t) * 2,
114 palette_bo);
115
116 ret = qxl_bo_kmap(*palette_bo, (void **)&pal);
117 pal->num_ents = 2; 139 pal->num_ents = 2;
118 pal->unique = unique++; 140 pal->unique = unique++;
119 if (visual == FB_VISUAL_TRUECOLOR || visual == FB_VISUAL_DIRECTCOLOR) { 141 if (visual == FB_VISUAL_TRUECOLOR || visual == FB_VISUAL_DIRECTCOLOR) {
@@ -126,7 +148,7 @@ static int qxl_palette_create_1bit(struct qxl_bo **palette_bo,
126 } 148 }
127 pal->ents[0] = bgcolor; 149 pal->ents[0] = bgcolor;
128 pal->ents[1] = fgcolor; 150 pal->ents[1] = fgcolor;
129 qxl_bo_kunmap(*palette_bo); 151 qxl_bo_kunmap(palette_bo);
130 return 0; 152 return 0;
131} 153}
132 154
@@ -144,44 +166,63 @@ void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image,
144 const char *src = fb_image->data; 166 const char *src = fb_image->data;
145 int depth = fb_image->depth; 167 int depth = fb_image->depth;
146 struct qxl_release *release; 168 struct qxl_release *release;
147 struct qxl_bo *image_bo;
148 struct qxl_image *image; 169 struct qxl_image *image;
149 int ret; 170 int ret;
150 171 struct qxl_drm_image *dimage;
172 struct qxl_bo *palette_bo = NULL;
151 if (stride == 0) 173 if (stride == 0)
152 stride = depth * width / 8; 174 stride = depth * width / 8;
153 175
176 ret = alloc_drawable(qdev, &release);
177 if (ret)
178 return;
179
180 ret = qxl_image_alloc_objects(qdev, release,
181 &dimage,
182 height, stride);
183 if (ret)
184 goto out_free_drawable;
185
186 if (depth == 1) {
187 ret = alloc_palette_object(qdev, release, &palette_bo);
188 if (ret)
189 goto out_free_image;
190 }
191
192 /* do a reservation run over all the objects we just allocated */
193 ret = qxl_release_reserve_list(release, true);
194 if (ret)
195 goto out_free_palette;
196
154 rect.left = x; 197 rect.left = x;
155 rect.right = x + width; 198 rect.right = x + width;
156 rect.top = y; 199 rect.top = y;
157 rect.bottom = y + height; 200 rect.bottom = y + height;
158 201
159 ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &rect, &release); 202 ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &rect, release);
160 if (ret) 203 if (ret) {
161 return; 204 qxl_release_backoff_reserve_list(release);
205 goto out_free_palette;
206 }
162 207
163 ret = qxl_image_create(qdev, release, &image_bo, 208 ret = qxl_image_init(qdev, release, dimage,
164 (const uint8_t *)src, 0, 0, 209 (const uint8_t *)src, 0, 0,
165 width, height, depth, stride); 210 width, height, depth, stride);
166 if (ret) { 211 if (ret) {
167 qxl_release_unreserve(qdev, release); 212 qxl_release_backoff_reserve_list(release);
168 qxl_release_free(qdev, release); 213 qxl_release_free(qdev, release);
169 return; 214 return;
170 } 215 }
171 216
172 if (depth == 1) { 217 if (depth == 1) {
173 struct qxl_bo *palette_bo;
174 void *ptr; 218 void *ptr;
175 ret = qxl_palette_create_1bit(&palette_bo, qxl_fb_image); 219 ret = qxl_palette_create_1bit(palette_bo, release, qxl_fb_image);
176 qxl_release_add_res(qdev, release, palette_bo);
177 220
178 ptr = qxl_bo_kmap_atomic_page(qdev, image_bo, 0); 221 ptr = qxl_bo_kmap_atomic_page(qdev, dimage->bo, 0);
179 image = ptr; 222 image = ptr;
180 image->u.bitmap.palette = 223 image->u.bitmap.palette =
181 qxl_bo_physical_address(qdev, palette_bo, 0); 224 qxl_bo_physical_address(qdev, palette_bo, 0);
182 qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr); 225 qxl_bo_kunmap_atomic_page(qdev, dimage->bo, ptr);
183 qxl_bo_unreserve(palette_bo);
184 qxl_bo_unref(&palette_bo);
185 } 226 }
186 227
187 drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); 228 drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
@@ -199,16 +240,20 @@ void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image,
199 drawable->u.copy.mask.bitmap = 0; 240 drawable->u.copy.mask.bitmap = 0;
200 241
201 drawable->u.copy.src_bitmap = 242 drawable->u.copy.src_bitmap =
202 qxl_bo_physical_address(qdev, image_bo, 0); 243 qxl_bo_physical_address(qdev, dimage->bo, 0);
203 qxl_release_unmap(qdev, release, &drawable->release_info); 244 qxl_release_unmap(qdev, release, &drawable->release_info);
204 245
205 qxl_release_add_res(qdev, release, image_bo);
206 qxl_bo_unreserve(image_bo);
207 qxl_bo_unref(&image_bo);
208
209 qxl_fence_releaseable(qdev, release);
210 qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); 246 qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
211 qxl_release_unreserve(qdev, release); 247 qxl_release_fence_buffer_objects(release);
248
249out_free_palette:
250 if (palette_bo)
251 qxl_bo_unref(&palette_bo);
252out_free_image:
253 qxl_image_free_objects(qdev, dimage);
254out_free_drawable:
255 if (ret)
256 free_drawable(qdev, release);
212} 257}
213 258
214/* push a draw command using the given clipping rectangles as 259/* push a draw command using the given clipping rectangles as
@@ -243,10 +288,14 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
243 int depth = qxl_fb->base.bits_per_pixel; 288 int depth = qxl_fb->base.bits_per_pixel;
244 uint8_t *surface_base; 289 uint8_t *surface_base;
245 struct qxl_release *release; 290 struct qxl_release *release;
246 struct qxl_bo *image_bo;
247 struct qxl_bo *clips_bo; 291 struct qxl_bo *clips_bo;
292 struct qxl_drm_image *dimage;
248 int ret; 293 int ret;
249 294
295 ret = alloc_drawable(qdev, &release);
296 if (ret)
297 return;
298
250 left = clips->x1; 299 left = clips->x1;
251 right = clips->x2; 300 right = clips->x2;
252 top = clips->y1; 301 top = clips->y1;
@@ -263,36 +312,52 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
263 312
264 width = right - left; 313 width = right - left;
265 height = bottom - top; 314 height = bottom - top;
315
316 ret = alloc_clips(qdev, release, num_clips, &clips_bo);
317 if (ret)
318 goto out_free_drawable;
319
320 ret = qxl_image_alloc_objects(qdev, release,
321 &dimage,
322 height, stride);
323 if (ret)
324 goto out_free_clips;
325
326 /* do a reservation run over all the objects we just allocated */
327 ret = qxl_release_reserve_list(release, true);
328 if (ret)
329 goto out_free_image;
330
266 drawable_rect.left = left; 331 drawable_rect.left = left;
267 drawable_rect.right = right; 332 drawable_rect.right = right;
268 drawable_rect.top = top; 333 drawable_rect.top = top;
269 drawable_rect.bottom = bottom; 334 drawable_rect.bottom = bottom;
335
270 ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &drawable_rect, 336 ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &drawable_rect,
271 &release); 337 release);
272 if (ret) 338 if (ret)
273 return; 339 goto out_release_backoff;
274 340
275 ret = qxl_bo_kmap(bo, (void **)&surface_base); 341 ret = qxl_bo_kmap(bo, (void **)&surface_base);
276 if (ret) 342 if (ret)
277 goto out_unref; 343 goto out_release_backoff;
278 344
279 ret = qxl_image_create(qdev, release, &image_bo, surface_base, 345
280 left, top, width, height, depth, stride); 346 ret = qxl_image_init(qdev, release, dimage, surface_base,
347 left, top, width, height, depth, stride);
281 qxl_bo_kunmap(bo); 348 qxl_bo_kunmap(bo);
282 if (ret) 349 if (ret)
283 goto out_unref; 350 goto out_release_backoff;
351
352 rects = drawable_set_clipping(qdev, drawable, num_clips, clips_bo);
353 if (!rects)
354 goto out_release_backoff;
284 355
285 rects = drawable_set_clipping(qdev, drawable, num_clips, &clips_bo, release);
286 if (!rects) {
287 qxl_bo_unref(&image_bo);
288 goto out_unref;
289 }
290 drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); 356 drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
291 357
292 drawable->clip.type = SPICE_CLIP_TYPE_RECTS; 358 drawable->clip.type = SPICE_CLIP_TYPE_RECTS;
293 drawable->clip.data = qxl_bo_physical_address(qdev, 359 drawable->clip.data = qxl_bo_physical_address(qdev,
294 clips_bo, 0); 360 clips_bo, 0);
295 qxl_release_add_res(qdev, release, clips_bo);
296 361
297 drawable->u.copy.src_area.top = 0; 362 drawable->u.copy.src_area.top = 0;
298 drawable->u.copy.src_area.bottom = height; 363 drawable->u.copy.src_area.bottom = height;
@@ -306,11 +371,9 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
306 drawable->u.copy.mask.pos.y = 0; 371 drawable->u.copy.mask.pos.y = 0;
307 drawable->u.copy.mask.bitmap = 0; 372 drawable->u.copy.mask.bitmap = 0;
308 373
309 drawable->u.copy.src_bitmap = qxl_bo_physical_address(qdev, image_bo, 0); 374 drawable->u.copy.src_bitmap = qxl_bo_physical_address(qdev, dimage->bo, 0);
310 qxl_release_unmap(qdev, release, &drawable->release_info); 375 qxl_release_unmap(qdev, release, &drawable->release_info);
311 qxl_release_add_res(qdev, release, image_bo); 376
312 qxl_bo_unreserve(image_bo);
313 qxl_bo_unref(&image_bo);
314 clips_ptr = clips; 377 clips_ptr = clips;
315 for (i = 0; i < num_clips; i++, clips_ptr += inc) { 378 for (i = 0; i < num_clips; i++, clips_ptr += inc) {
316 rects[i].left = clips_ptr->x1; 379 rects[i].left = clips_ptr->x1;
@@ -319,17 +382,22 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
319 rects[i].bottom = clips_ptr->y2; 382 rects[i].bottom = clips_ptr->y2;
320 } 383 }
321 qxl_bo_kunmap(clips_bo); 384 qxl_bo_kunmap(clips_bo);
322 qxl_bo_unreserve(clips_bo);
323 qxl_bo_unref(&clips_bo);
324 385
325 qxl_fence_releaseable(qdev, release);
326 qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); 386 qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
327 qxl_release_unreserve(qdev, release); 387 qxl_release_fence_buffer_objects(release);
328 return; 388
389out_release_backoff:
390 if (ret)
391 qxl_release_backoff_reserve_list(release);
392out_free_image:
393 qxl_image_free_objects(qdev, dimage);
394out_free_clips:
395 qxl_bo_unref(&clips_bo);
396out_free_drawable:
397 /* only free drawable on error */
398 if (ret)
399 free_drawable(qdev, release);
329 400
330out_unref:
331 qxl_release_unreserve(qdev, release);
332 qxl_release_free(qdev, release);
333} 401}
334 402
335void qxl_draw_copyarea(struct qxl_device *qdev, 403void qxl_draw_copyarea(struct qxl_device *qdev,
@@ -342,22 +410,36 @@ void qxl_draw_copyarea(struct qxl_device *qdev,
342 struct qxl_release *release; 410 struct qxl_release *release;
343 int ret; 411 int ret;
344 412
413 ret = alloc_drawable(qdev, &release);
414 if (ret)
415 return;
416
417 /* do a reservation run over all the objects we just allocated */
418 ret = qxl_release_reserve_list(release, true);
419 if (ret)
420 goto out_free_release;
421
345 rect.left = dx; 422 rect.left = dx;
346 rect.top = dy; 423 rect.top = dy;
347 rect.right = dx + width; 424 rect.right = dx + width;
348 rect.bottom = dy + height; 425 rect.bottom = dy + height;
349 ret = make_drawable(qdev, 0, QXL_COPY_BITS, &rect, &release); 426 ret = make_drawable(qdev, 0, QXL_COPY_BITS, &rect, release);
350 if (ret) 427 if (ret) {
351 return; 428 qxl_release_backoff_reserve_list(release);
429 goto out_free_release;
430 }
352 431
353 drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); 432 drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
354 drawable->u.copy_bits.src_pos.x = sx; 433 drawable->u.copy_bits.src_pos.x = sx;
355 drawable->u.copy_bits.src_pos.y = sy; 434 drawable->u.copy_bits.src_pos.y = sy;
356
357 qxl_release_unmap(qdev, release, &drawable->release_info); 435 qxl_release_unmap(qdev, release, &drawable->release_info);
358 qxl_fence_releaseable(qdev, release); 436
359 qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); 437 qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
360 qxl_release_unreserve(qdev, release); 438 qxl_release_fence_buffer_objects(release);
439
440out_free_release:
441 if (ret)
442 free_drawable(qdev, release);
361} 443}
362 444
363void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec) 445void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec)
@@ -370,10 +452,21 @@ void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec)
370 struct qxl_release *release; 452 struct qxl_release *release;
371 int ret; 453 int ret;
372 454
373 ret = make_drawable(qdev, 0, QXL_DRAW_FILL, &rect, &release); 455 ret = alloc_drawable(qdev, &release);
374 if (ret) 456 if (ret)
375 return; 457 return;
376 458
459 /* do a reservation run over all the objects we just allocated */
460 ret = qxl_release_reserve_list(release, true);
461 if (ret)
462 goto out_free_release;
463
464 ret = make_drawable(qdev, 0, QXL_DRAW_FILL, &rect, release);
465 if (ret) {
466 qxl_release_backoff_reserve_list(release);
467 goto out_free_release;
468 }
469
377 drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); 470 drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
378 drawable->u.fill.brush.type = SPICE_BRUSH_TYPE_SOLID; 471 drawable->u.fill.brush.type = SPICE_BRUSH_TYPE_SOLID;
379 drawable->u.fill.brush.u.color = color; 472 drawable->u.fill.brush.u.color = color;
@@ -384,7 +477,11 @@ void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec)
384 drawable->u.fill.mask.bitmap = 0; 477 drawable->u.fill.mask.bitmap = 0;
385 478
386 qxl_release_unmap(qdev, release, &drawable->release_info); 479 qxl_release_unmap(qdev, release, &drawable->release_info);
387 qxl_fence_releaseable(qdev, release); 480
388 qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); 481 qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
389 qxl_release_unreserve(qdev, release); 482 qxl_release_fence_buffer_objects(release);
483
484out_free_release:
485 if (ret)
486 free_drawable(qdev, release);
390} 487}
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index df0b577a6608..514118ae72d4 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -84,7 +84,6 @@ static const struct file_operations qxl_fops = {
84 .release = drm_release, 84 .release = drm_release,
85 .unlocked_ioctl = drm_ioctl, 85 .unlocked_ioctl = drm_ioctl,
86 .poll = drm_poll, 86 .poll = drm_poll,
87 .fasync = drm_fasync,
88 .mmap = qxl_mmap, 87 .mmap = qxl_mmap,
89}; 88};
90 89
@@ -221,7 +220,7 @@ static struct drm_driver qxl_driver = {
221 220
222 .dumb_create = qxl_mode_dumb_create, 221 .dumb_create = qxl_mode_dumb_create,
223 .dumb_map_offset = qxl_mode_dumb_mmap, 222 .dumb_map_offset = qxl_mode_dumb_mmap,
224 .dumb_destroy = qxl_mode_dumb_destroy, 223 .dumb_destroy = drm_gem_dumb_destroy,
225#if defined(CONFIG_DEBUG_FS) 224#if defined(CONFIG_DEBUG_FS)
226 .debugfs_init = qxl_debugfs_init, 225 .debugfs_init = qxl_debugfs_init,
227 .debugfs_cleanup = qxl_debugfs_takedown, 226 .debugfs_cleanup = qxl_debugfs_takedown,
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index aacb791464a3..f7c9adde46a0 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -42,6 +42,9 @@
42#include <ttm/ttm_placement.h> 42#include <ttm/ttm_placement.h>
43#include <ttm/ttm_module.h> 43#include <ttm/ttm_module.h>
44 44
45/* just for ttm_validate_buffer */
46#include <ttm/ttm_execbuf_util.h>
47
45#include <drm/qxl_drm.h> 48#include <drm/qxl_drm.h>
46#include "qxl_dev.h" 49#include "qxl_dev.h"
47 50
@@ -118,9 +121,9 @@ struct qxl_bo {
118 uint32_t surface_id; 121 uint32_t surface_id;
119 struct qxl_fence fence; /* per bo fence - list of releases */ 122 struct qxl_fence fence; /* per bo fence - list of releases */
120 struct qxl_release *surf_create; 123 struct qxl_release *surf_create;
121 atomic_t reserve_count;
122}; 124};
123#define gem_to_qxl_bo(gobj) container_of((gobj), struct qxl_bo, gem_base) 125#define gem_to_qxl_bo(gobj) container_of((gobj), struct qxl_bo, gem_base)
126#define to_qxl_bo(tobj) container_of((tobj), struct qxl_bo, tbo)
124 127
125struct qxl_gem { 128struct qxl_gem {
126 struct mutex mutex; 129 struct mutex mutex;
@@ -128,12 +131,7 @@ struct qxl_gem {
128}; 131};
129 132
130struct qxl_bo_list { 133struct qxl_bo_list {
131 struct list_head lhead; 134 struct ttm_validate_buffer tv;
132 struct qxl_bo *bo;
133};
134
135struct qxl_reloc_list {
136 struct list_head bos;
137}; 135};
138 136
139struct qxl_crtc { 137struct qxl_crtc {
@@ -195,10 +193,20 @@ enum {
195struct qxl_release { 193struct qxl_release {
196 int id; 194 int id;
197 int type; 195 int type;
198 int bo_count;
199 uint32_t release_offset; 196 uint32_t release_offset;
200 uint32_t surface_release_id; 197 uint32_t surface_release_id;
201 struct qxl_bo *bos[QXL_MAX_RES]; 198 struct ww_acquire_ctx ticket;
199 struct list_head bos;
200};
201
202struct qxl_drm_chunk {
203 struct list_head head;
204 struct qxl_bo *bo;
205};
206
207struct qxl_drm_image {
208 struct qxl_bo *bo;
209 struct list_head chunk_list;
202}; 210};
203 211
204struct qxl_fb_image { 212struct qxl_fb_image {
@@ -314,12 +322,13 @@ struct qxl_device {
314 struct workqueue_struct *gc_queue; 322 struct workqueue_struct *gc_queue;
315 struct work_struct gc_work; 323 struct work_struct gc_work;
316 324
325 struct work_struct fb_work;
317}; 326};
318 327
319/* forward declaration for QXL_INFO_IO */ 328/* forward declaration for QXL_INFO_IO */
320void qxl_io_log(struct qxl_device *qdev, const char *fmt, ...); 329void qxl_io_log(struct qxl_device *qdev, const char *fmt, ...);
321 330
322extern struct drm_ioctl_desc qxl_ioctls[]; 331extern const struct drm_ioctl_desc qxl_ioctls[];
323extern int qxl_max_ioctl; 332extern int qxl_max_ioctl;
324 333
325int qxl_driver_load(struct drm_device *dev, unsigned long flags); 334int qxl_driver_load(struct drm_device *dev, unsigned long flags);
@@ -396,9 +405,6 @@ int qxl_gem_object_create(struct qxl_device *qdev, int size,
396 bool discardable, bool kernel, 405 bool discardable, bool kernel,
397 struct qxl_surface *surf, 406 struct qxl_surface *surf,
398 struct drm_gem_object **obj); 407 struct drm_gem_object **obj);
399int qxl_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
400 uint64_t *gpu_addr);
401void qxl_gem_object_unpin(struct drm_gem_object *obj);
402int qxl_gem_object_create_with_handle(struct qxl_device *qdev, 408int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
403 struct drm_file *file_priv, 409 struct drm_file *file_priv,
404 u32 domain, 410 u32 domain,
@@ -418,9 +424,6 @@ int qxl_bo_kmap(struct qxl_bo *bo, void **ptr);
418int qxl_mode_dumb_create(struct drm_file *file_priv, 424int qxl_mode_dumb_create(struct drm_file *file_priv,
419 struct drm_device *dev, 425 struct drm_device *dev,
420 struct drm_mode_create_dumb *args); 426 struct drm_mode_create_dumb *args);
421int qxl_mode_dumb_destroy(struct drm_file *file_priv,
422 struct drm_device *dev,
423 uint32_t handle);
424int qxl_mode_dumb_mmap(struct drm_file *filp, 427int qxl_mode_dumb_mmap(struct drm_file *filp,
425 struct drm_device *dev, 428 struct drm_device *dev,
426 uint32_t handle, uint64_t *offset_p); 429 uint32_t handle, uint64_t *offset_p);
@@ -433,12 +436,19 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma);
433 436
434/* qxl image */ 437/* qxl image */
435 438
436int qxl_image_create(struct qxl_device *qdev, 439int qxl_image_init(struct qxl_device *qdev,
437 struct qxl_release *release, 440 struct qxl_release *release,
438 struct qxl_bo **image_bo, 441 struct qxl_drm_image *dimage,
439 const uint8_t *data, 442 const uint8_t *data,
440 int x, int y, int width, int height, 443 int x, int y, int width, int height,
441 int depth, int stride); 444 int depth, int stride);
445int
446qxl_image_alloc_objects(struct qxl_device *qdev,
447 struct qxl_release *release,
448 struct qxl_drm_image **image_ptr,
449 int height, int stride);
450void qxl_image_free_objects(struct qxl_device *qdev, struct qxl_drm_image *dimage);
451
442void qxl_update_screen(struct qxl_device *qxl); 452void qxl_update_screen(struct qxl_device *qxl);
443 453
444/* qxl io operations (qxl_cmd.c) */ 454/* qxl io operations (qxl_cmd.c) */
@@ -459,20 +469,15 @@ int qxl_ring_push(struct qxl_ring *ring, const void *new_elt, bool interruptible
459void qxl_io_flush_release(struct qxl_device *qdev); 469void qxl_io_flush_release(struct qxl_device *qdev);
460void qxl_io_flush_surfaces(struct qxl_device *qdev); 470void qxl_io_flush_surfaces(struct qxl_device *qdev);
461 471
462int qxl_release_reserve(struct qxl_device *qdev,
463 struct qxl_release *release, bool no_wait);
464void qxl_release_unreserve(struct qxl_device *qdev,
465 struct qxl_release *release);
466union qxl_release_info *qxl_release_map(struct qxl_device *qdev, 472union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
467 struct qxl_release *release); 473 struct qxl_release *release);
468void qxl_release_unmap(struct qxl_device *qdev, 474void qxl_release_unmap(struct qxl_device *qdev,
469 struct qxl_release *release, 475 struct qxl_release *release,
470 union qxl_release_info *info); 476 union qxl_release_info *info);
471/* 477int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo);
472 * qxl_bo_add_resource. 478int qxl_release_reserve_list(struct qxl_release *release, bool no_intr);
473 * 479void qxl_release_backoff_reserve_list(struct qxl_release *release);
474 */ 480void qxl_release_fence_buffer_objects(struct qxl_release *release);
475void qxl_bo_add_resource(struct qxl_bo *main_bo, struct qxl_bo *resource);
476 481
477int qxl_alloc_surface_release_reserved(struct qxl_device *qdev, 482int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
478 enum qxl_surface_cmd_type surface_cmd_type, 483 enum qxl_surface_cmd_type surface_cmd_type,
@@ -481,15 +486,16 @@ int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
481int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size, 486int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
482 int type, struct qxl_release **release, 487 int type, struct qxl_release **release,
483 struct qxl_bo **rbo); 488 struct qxl_bo **rbo);
484int qxl_fence_releaseable(struct qxl_device *qdev, 489
485 struct qxl_release *release);
486int 490int
487qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release, 491qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release,
488 uint32_t type, bool interruptible); 492 uint32_t type, bool interruptible);
489int 493int
490qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release, 494qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release,
491 uint32_t type, bool interruptible); 495 uint32_t type, bool interruptible);
492int qxl_alloc_bo_reserved(struct qxl_device *qdev, unsigned long size, 496int qxl_alloc_bo_reserved(struct qxl_device *qdev,
497 struct qxl_release *release,
498 unsigned long size,
493 struct qxl_bo **_bo); 499 struct qxl_bo **_bo);
494/* qxl drawing commands */ 500/* qxl drawing commands */
495 501
@@ -510,15 +516,9 @@ void qxl_draw_copyarea(struct qxl_device *qdev,
510 u32 sx, u32 sy, 516 u32 sx, u32 sy,
511 u32 dx, u32 dy); 517 u32 dx, u32 dy);
512 518
513uint64_t
514qxl_release_alloc(struct qxl_device *qdev, int type,
515 struct qxl_release **ret);
516
517void qxl_release_free(struct qxl_device *qdev, 519void qxl_release_free(struct qxl_device *qdev,
518 struct qxl_release *release); 520 struct qxl_release *release);
519void qxl_release_add_res(struct qxl_device *qdev, 521
520 struct qxl_release *release,
521 struct qxl_bo *bo);
522/* used by qxl_debugfs_release */ 522/* used by qxl_debugfs_release */
523struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev, 523struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
524 uint64_t id); 524 uint64_t id);
@@ -561,7 +561,7 @@ void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool freein
561int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf); 561int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf);
562 562
563/* qxl_fence.c */ 563/* qxl_fence.c */
564int qxl_fence_add_release(struct qxl_fence *qfence, uint32_t rel_id); 564void qxl_fence_add_release_locked(struct qxl_fence *qfence, uint32_t rel_id);
565int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id); 565int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id);
566int qxl_fence_init(struct qxl_device *qdev, struct qxl_fence *qfence); 566int qxl_fence_init(struct qxl_device *qdev, struct qxl_fence *qfence);
567void qxl_fence_fini(struct qxl_fence *qfence); 567void qxl_fence_fini(struct qxl_fence *qfence);
diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c
index 847c4ee798f7..d34bb4130ff0 100644
--- a/drivers/gpu/drm/qxl/qxl_dumb.c
+++ b/drivers/gpu/drm/qxl/qxl_dumb.c
@@ -68,13 +68,6 @@ int qxl_mode_dumb_create(struct drm_file *file_priv,
68 return 0; 68 return 0;
69} 69}
70 70
71int qxl_mode_dumb_destroy(struct drm_file *file_priv,
72 struct drm_device *dev,
73 uint32_t handle)
74{
75 return drm_gem_handle_delete(file_priv, handle);
76}
77
78int qxl_mode_dumb_mmap(struct drm_file *file_priv, 71int qxl_mode_dumb_mmap(struct drm_file *file_priv,
79 struct drm_device *dev, 72 struct drm_device *dev,
80 uint32_t handle, uint64_t *offset_p) 73 uint32_t handle, uint64_t *offset_p)
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index 76f39d88d684..88722f233430 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -37,12 +37,29 @@
37 37
38#define QXL_DIRTY_DELAY (HZ / 30) 38#define QXL_DIRTY_DELAY (HZ / 30)
39 39
40#define QXL_FB_OP_FILLRECT 1
41#define QXL_FB_OP_COPYAREA 2
42#define QXL_FB_OP_IMAGEBLIT 3
43
44struct qxl_fb_op {
45 struct list_head head;
46 int op_type;
47 union {
48 struct fb_fillrect fr;
49 struct fb_copyarea ca;
50 struct fb_image ib;
51 } op;
52 void *img_data;
53};
54
40struct qxl_fbdev { 55struct qxl_fbdev {
41 struct drm_fb_helper helper; 56 struct drm_fb_helper helper;
42 struct qxl_framebuffer qfb; 57 struct qxl_framebuffer qfb;
43 struct list_head fbdev_list; 58 struct list_head fbdev_list;
44 struct qxl_device *qdev; 59 struct qxl_device *qdev;
45 60
61 spinlock_t delayed_ops_lock;
62 struct list_head delayed_ops;
46 void *shadow; 63 void *shadow;
47 int size; 64 int size;
48 65
@@ -164,8 +181,69 @@ static struct fb_deferred_io qxl_defio = {
164 .deferred_io = qxl_deferred_io, 181 .deferred_io = qxl_deferred_io,
165}; 182};
166 183
167static void qxl_fb_fillrect(struct fb_info *info, 184static void qxl_fb_delayed_fillrect(struct qxl_fbdev *qfbdev,
168 const struct fb_fillrect *fb_rect) 185 const struct fb_fillrect *fb_rect)
186{
187 struct qxl_fb_op *op;
188 unsigned long flags;
189
190 op = kmalloc(sizeof(struct qxl_fb_op), GFP_ATOMIC | __GFP_NOWARN);
191 if (!op)
192 return;
193
194 op->op.fr = *fb_rect;
195 op->img_data = NULL;
196 op->op_type = QXL_FB_OP_FILLRECT;
197
198 spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
199 list_add_tail(&op->head, &qfbdev->delayed_ops);
200 spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
201}
202
203static void qxl_fb_delayed_copyarea(struct qxl_fbdev *qfbdev,
204 const struct fb_copyarea *fb_copy)
205{
206 struct qxl_fb_op *op;
207 unsigned long flags;
208
209 op = kmalloc(sizeof(struct qxl_fb_op), GFP_ATOMIC | __GFP_NOWARN);
210 if (!op)
211 return;
212
213 op->op.ca = *fb_copy;
214 op->img_data = NULL;
215 op->op_type = QXL_FB_OP_COPYAREA;
216
217 spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
218 list_add_tail(&op->head, &qfbdev->delayed_ops);
219 spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
220}
221
222static void qxl_fb_delayed_imageblit(struct qxl_fbdev *qfbdev,
223 const struct fb_image *fb_image)
224{
225 struct qxl_fb_op *op;
226 unsigned long flags;
227 uint32_t size = fb_image->width * fb_image->height * (fb_image->depth >= 8 ? fb_image->depth / 8 : 1);
228
229 op = kmalloc(sizeof(struct qxl_fb_op) + size, GFP_ATOMIC | __GFP_NOWARN);
230 if (!op)
231 return;
232
233 op->op.ib = *fb_image;
234 op->img_data = (void *)(op + 1);
235 op->op_type = QXL_FB_OP_IMAGEBLIT;
236
237 memcpy(op->img_data, fb_image->data, size);
238
239 op->op.ib.data = op->img_data;
240 spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
241 list_add_tail(&op->head, &qfbdev->delayed_ops);
242 spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
243}
244
245static void qxl_fb_fillrect_internal(struct fb_info *info,
246 const struct fb_fillrect *fb_rect)
169{ 247{
170 struct qxl_fbdev *qfbdev = info->par; 248 struct qxl_fbdev *qfbdev = info->par;
171 struct qxl_device *qdev = qfbdev->qdev; 249 struct qxl_device *qdev = qfbdev->qdev;
@@ -203,17 +281,28 @@ static void qxl_fb_fillrect(struct fb_info *info,
203 qxl_draw_fill_rec.rect = rect; 281 qxl_draw_fill_rec.rect = rect;
204 qxl_draw_fill_rec.color = color; 282 qxl_draw_fill_rec.color = color;
205 qxl_draw_fill_rec.rop = rop; 283 qxl_draw_fill_rec.rop = rop;
284
285 qxl_draw_fill(&qxl_draw_fill_rec);
286}
287
288static void qxl_fb_fillrect(struct fb_info *info,
289 const struct fb_fillrect *fb_rect)
290{
291 struct qxl_fbdev *qfbdev = info->par;
292 struct qxl_device *qdev = qfbdev->qdev;
293
206 if (!drm_can_sleep()) { 294 if (!drm_can_sleep()) {
207 qxl_io_log(qdev, 295 qxl_fb_delayed_fillrect(qfbdev, fb_rect);
208 "%s: TODO use RCU, mysterious locks with spin_lock\n", 296 schedule_work(&qdev->fb_work);
209 __func__);
210 return; 297 return;
211 } 298 }
212 qxl_draw_fill(&qxl_draw_fill_rec); 299 /* make sure any previous work is done */
300 flush_work(&qdev->fb_work);
301 qxl_fb_fillrect_internal(info, fb_rect);
213} 302}
214 303
215static void qxl_fb_copyarea(struct fb_info *info, 304static void qxl_fb_copyarea_internal(struct fb_info *info,
216 const struct fb_copyarea *region) 305 const struct fb_copyarea *region)
217{ 306{
218 struct qxl_fbdev *qfbdev = info->par; 307 struct qxl_fbdev *qfbdev = info->par;
219 308
@@ -223,37 +312,89 @@ static void qxl_fb_copyarea(struct fb_info *info,
223 region->dx, region->dy); 312 region->dx, region->dy);
224} 313}
225 314
315static void qxl_fb_copyarea(struct fb_info *info,
316 const struct fb_copyarea *region)
317{
318 struct qxl_fbdev *qfbdev = info->par;
319 struct qxl_device *qdev = qfbdev->qdev;
320
321 if (!drm_can_sleep()) {
322 qxl_fb_delayed_copyarea(qfbdev, region);
323 schedule_work(&qdev->fb_work);
324 return;
325 }
326 /* make sure any previous work is done */
327 flush_work(&qdev->fb_work);
328 qxl_fb_copyarea_internal(info, region);
329}
330
226static void qxl_fb_imageblit_safe(struct qxl_fb_image *qxl_fb_image) 331static void qxl_fb_imageblit_safe(struct qxl_fb_image *qxl_fb_image)
227{ 332{
228 qxl_draw_opaque_fb(qxl_fb_image, 0); 333 qxl_draw_opaque_fb(qxl_fb_image, 0);
229} 334}
230 335
336static void qxl_fb_imageblit_internal(struct fb_info *info,
337 const struct fb_image *image)
338{
339 struct qxl_fbdev *qfbdev = info->par;
340 struct qxl_fb_image qxl_fb_image;
341
342 /* ensure proper order rendering operations - TODO: must do this
343 * for everything. */
344 qxl_fb_image_init(&qxl_fb_image, qfbdev->qdev, info, image);
345 qxl_fb_imageblit_safe(&qxl_fb_image);
346}
347
231static void qxl_fb_imageblit(struct fb_info *info, 348static void qxl_fb_imageblit(struct fb_info *info,
232 const struct fb_image *image) 349 const struct fb_image *image)
233{ 350{
234 struct qxl_fbdev *qfbdev = info->par; 351 struct qxl_fbdev *qfbdev = info->par;
235 struct qxl_device *qdev = qfbdev->qdev; 352 struct qxl_device *qdev = qfbdev->qdev;
236 struct qxl_fb_image qxl_fb_image;
237 353
238 if (!drm_can_sleep()) { 354 if (!drm_can_sleep()) {
239 /* we cannot do any ttm_bo allocation since that will fail on 355 qxl_fb_delayed_imageblit(qfbdev, image);
240 * ioremap_wc..__get_vm_area_node, so queue the work item 356 schedule_work(&qdev->fb_work);
241 * instead This can happen from printk inside an interrupt
242 * context, i.e.: smp_apic_timer_interrupt..check_cpu_stall */
243 qxl_io_log(qdev,
244 "%s: TODO use RCU, mysterious locks with spin_lock\n",
245 __func__);
246 return; 357 return;
247 } 358 }
359 /* make sure any previous work is done */
360 flush_work(&qdev->fb_work);
361 qxl_fb_imageblit_internal(info, image);
362}
248 363
249 /* ensure proper order of rendering operations - TODO: must do this 364static void qxl_fb_work(struct work_struct *work)
250 * for everything. */ 365{
251 qxl_fb_image_init(&qxl_fb_image, qfbdev->qdev, info, image); 366 struct qxl_device *qdev = container_of(work, struct qxl_device, fb_work);
252 qxl_fb_imageblit_safe(&qxl_fb_image); 367 unsigned long flags;
368 struct qxl_fb_op *entry, *tmp;
369 struct qxl_fbdev *qfbdev = qdev->mode_info.qfbdev;
370
371 /* since the irq context just adds entries to the end of the
372 list dropping the lock should be fine, as entry isn't modified
373 in the operation code */
374 spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
375 list_for_each_entry_safe(entry, tmp, &qfbdev->delayed_ops, head) {
376 spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
377 switch (entry->op_type) {
378 case QXL_FB_OP_FILLRECT:
379 qxl_fb_fillrect_internal(qfbdev->helper.fbdev, &entry->op.fr);
380 break;
381 case QXL_FB_OP_COPYAREA:
382 qxl_fb_copyarea_internal(qfbdev->helper.fbdev, &entry->op.ca);
383 break;
384 case QXL_FB_OP_IMAGEBLIT:
385 qxl_fb_imageblit_internal(qfbdev->helper.fbdev, &entry->op.ib);
386 break;
387 }
388 spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
389 list_del(&entry->head);
390 kfree(entry);
391 }
392 spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
253} 393}
254 394
255int qxl_fb_init(struct qxl_device *qdev) 395int qxl_fb_init(struct qxl_device *qdev)
256{ 396{
397 INIT_WORK(&qdev->fb_work, qxl_fb_work);
257 return 0; 398 return 0;
258} 399}
259 400
@@ -536,7 +677,8 @@ int qxl_fbdev_init(struct qxl_device *qdev)
536 qfbdev->qdev = qdev; 677 qfbdev->qdev = qdev;
537 qdev->mode_info.qfbdev = qfbdev; 678 qdev->mode_info.qfbdev = qfbdev;
538 qfbdev->helper.funcs = &qxl_fb_helper_funcs; 679 qfbdev->helper.funcs = &qxl_fb_helper_funcs;
539 680 spin_lock_init(&qfbdev->delayed_ops_lock);
681 INIT_LIST_HEAD(&qfbdev->delayed_ops);
540 ret = drm_fb_helper_init(qdev->ddev, &qfbdev->helper, 682 ret = drm_fb_helper_init(qdev->ddev, &qfbdev->helper,
541 qxl_num_crtc /* num_crtc - QXL supports just 1 */, 683 qxl_num_crtc /* num_crtc - QXL supports just 1 */,
542 QXLFB_CONN_LIMIT); 684 QXLFB_CONN_LIMIT);
diff --git a/drivers/gpu/drm/qxl/qxl_fence.c b/drivers/gpu/drm/qxl/qxl_fence.c
index 63c6715ad385..ae59e91cfb9a 100644
--- a/drivers/gpu/drm/qxl/qxl_fence.c
+++ b/drivers/gpu/drm/qxl/qxl_fence.c
@@ -49,17 +49,11 @@
49 49
50 For some reason every so often qxl hw fails to release, things go wrong. 50 For some reason every so often qxl hw fails to release, things go wrong.
51*/ 51*/
52 52/* must be called with the fence lock held */
53 53void qxl_fence_add_release_locked(struct qxl_fence *qfence, uint32_t rel_id)
54int qxl_fence_add_release(struct qxl_fence *qfence, uint32_t rel_id)
55{ 54{
56 struct qxl_bo *bo = container_of(qfence, struct qxl_bo, fence);
57
58 spin_lock(&bo->tbo.bdev->fence_lock);
59 radix_tree_insert(&qfence->tree, rel_id, qfence); 55 radix_tree_insert(&qfence->tree, rel_id, qfence);
60 qfence->num_active_releases++; 56 qfence->num_active_releases++;
61 spin_unlock(&bo->tbo.bdev->fence_lock);
62 return 0;
63} 57}
64 58
65int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id) 59int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id)
diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
index a235693aabba..1648e4125af7 100644
--- a/drivers/gpu/drm/qxl/qxl_gem.c
+++ b/drivers/gpu/drm/qxl/qxl_gem.c
@@ -55,7 +55,7 @@ int qxl_gem_object_create(struct qxl_device *qdev, int size,
55 /* At least align on page size */ 55 /* At least align on page size */
56 if (alignment < PAGE_SIZE) 56 if (alignment < PAGE_SIZE)
57 alignment = PAGE_SIZE; 57 alignment = PAGE_SIZE;
58 r = qxl_bo_create(qdev, size, kernel, initial_domain, surf, &qbo); 58 r = qxl_bo_create(qdev, size, kernel, false, initial_domain, surf, &qbo);
59 if (r) { 59 if (r) {
60 if (r != -ERESTARTSYS) 60 if (r != -ERESTARTSYS)
61 DRM_ERROR( 61 DRM_ERROR(
@@ -101,32 +101,6 @@ int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
101 return 0; 101 return 0;
102} 102}
103 103
104int qxl_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
105 uint64_t *gpu_addr)
106{
107 struct qxl_bo *qobj = obj->driver_private;
108 int r;
109
110 r = qxl_bo_reserve(qobj, false);
111 if (unlikely(r != 0))
112 return r;
113 r = qxl_bo_pin(qobj, pin_domain, gpu_addr);
114 qxl_bo_unreserve(qobj);
115 return r;
116}
117
118void qxl_gem_object_unpin(struct drm_gem_object *obj)
119{
120 struct qxl_bo *qobj = obj->driver_private;
121 int r;
122
123 r = qxl_bo_reserve(qobj, false);
124 if (likely(r == 0)) {
125 qxl_bo_unpin(qobj);
126 qxl_bo_unreserve(qobj);
127 }
128}
129
130int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv) 104int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
131{ 105{
132 return 0; 106 return 0;
diff --git a/drivers/gpu/drm/qxl/qxl_image.c b/drivers/gpu/drm/qxl/qxl_image.c
index cf856206996b..7fbcc35e8ad3 100644
--- a/drivers/gpu/drm/qxl/qxl_image.c
+++ b/drivers/gpu/drm/qxl/qxl_image.c
@@ -30,31 +30,100 @@
30#include "qxl_object.h" 30#include "qxl_object.h"
31 31
32static int 32static int
33qxl_image_create_helper(struct qxl_device *qdev, 33qxl_allocate_chunk(struct qxl_device *qdev,
34 struct qxl_release *release,
35 struct qxl_drm_image *image,
36 unsigned int chunk_size)
37{
38 struct qxl_drm_chunk *chunk;
39 int ret;
40
41 chunk = kmalloc(sizeof(struct qxl_drm_chunk), GFP_KERNEL);
42 if (!chunk)
43 return -ENOMEM;
44
45 ret = qxl_alloc_bo_reserved(qdev, release, chunk_size, &chunk->bo);
46 if (ret) {
47 kfree(chunk);
48 return ret;
49 }
50
51 list_add_tail(&chunk->head, &image->chunk_list);
52 return 0;
53}
54
55int
56qxl_image_alloc_objects(struct qxl_device *qdev,
34 struct qxl_release *release, 57 struct qxl_release *release,
35 struct qxl_bo **image_bo, 58 struct qxl_drm_image **image_ptr,
36 const uint8_t *data, 59 int height, int stride)
37 int width, int height, 60{
38 int depth, unsigned int hash, 61 struct qxl_drm_image *image;
39 int stride) 62 int ret;
63
64 image = kmalloc(sizeof(struct qxl_drm_image), GFP_KERNEL);
65 if (!image)
66 return -ENOMEM;
67
68 INIT_LIST_HEAD(&image->chunk_list);
69
70 ret = qxl_alloc_bo_reserved(qdev, release, sizeof(struct qxl_image), &image->bo);
71 if (ret) {
72 kfree(image);
73 return ret;
74 }
75
76 ret = qxl_allocate_chunk(qdev, release, image, sizeof(struct qxl_data_chunk) + stride * height);
77 if (ret) {
78 qxl_bo_unref(&image->bo);
79 kfree(image);
80 return ret;
81 }
82 *image_ptr = image;
83 return 0;
84}
85
86void qxl_image_free_objects(struct qxl_device *qdev, struct qxl_drm_image *dimage)
40{ 87{
88 struct qxl_drm_chunk *chunk, *tmp;
89
90 list_for_each_entry_safe(chunk, tmp, &dimage->chunk_list, head) {
91 qxl_bo_unref(&chunk->bo);
92 kfree(chunk);
93 }
94
95 qxl_bo_unref(&dimage->bo);
96 kfree(dimage);
97}
98
99static int
100qxl_image_init_helper(struct qxl_device *qdev,
101 struct qxl_release *release,
102 struct qxl_drm_image *dimage,
103 const uint8_t *data,
104 int width, int height,
105 int depth, unsigned int hash,
106 int stride)
107{
108 struct qxl_drm_chunk *drv_chunk;
41 struct qxl_image *image; 109 struct qxl_image *image;
42 struct qxl_data_chunk *chunk; 110 struct qxl_data_chunk *chunk;
43 int i; 111 int i;
44 int chunk_stride; 112 int chunk_stride;
45 int linesize = width * depth / 8; 113 int linesize = width * depth / 8;
46 struct qxl_bo *chunk_bo; 114 struct qxl_bo *chunk_bo, *image_bo;
47 int ret;
48 void *ptr; 115 void *ptr;
49 /* Chunk */ 116 /* Chunk */
50 /* FIXME: Check integer overflow */ 117 /* FIXME: Check integer overflow */
51 /* TODO: variable number of chunks */ 118 /* TODO: variable number of chunks */
119
120 drv_chunk = list_first_entry(&dimage->chunk_list, struct qxl_drm_chunk, head);
121
122 chunk_bo = drv_chunk->bo;
52 chunk_stride = stride; /* TODO: should use linesize, but it renders 123 chunk_stride = stride; /* TODO: should use linesize, but it renders
53 wrong (check the bitmaps are sent correctly 124 wrong (check the bitmaps are sent correctly
54 first) */ 125 first) */
55 ret = qxl_alloc_bo_reserved(qdev, sizeof(*chunk) + height * chunk_stride, 126
56 &chunk_bo);
57
58 ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, 0); 127 ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, 0);
59 chunk = ptr; 128 chunk = ptr;
60 chunk->data_size = height * chunk_stride; 129 chunk->data_size = height * chunk_stride;
@@ -102,7 +171,6 @@ qxl_image_create_helper(struct qxl_device *qdev,
102 while (remain > 0) { 171 while (remain > 0) {
103 page_base = out_offset & PAGE_MASK; 172 page_base = out_offset & PAGE_MASK;
104 page_offset = offset_in_page(out_offset); 173 page_offset = offset_in_page(out_offset);
105
106 size = min((int)(PAGE_SIZE - page_offset), remain); 174 size = min((int)(PAGE_SIZE - page_offset), remain);
107 175
108 ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page_base); 176 ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page_base);
@@ -116,14 +184,10 @@ qxl_image_create_helper(struct qxl_device *qdev,
116 } 184 }
117 } 185 }
118 } 186 }
119
120
121 qxl_bo_kunmap(chunk_bo); 187 qxl_bo_kunmap(chunk_bo);
122 188
123 /* Image */ 189 image_bo = dimage->bo;
124 ret = qxl_alloc_bo_reserved(qdev, sizeof(*image), image_bo); 190 ptr = qxl_bo_kmap_atomic_page(qdev, image_bo, 0);
125
126 ptr = qxl_bo_kmap_atomic_page(qdev, *image_bo, 0);
127 image = ptr; 191 image = ptr;
128 192
129 image->descriptor.id = 0; 193 image->descriptor.id = 0;
@@ -154,23 +218,20 @@ qxl_image_create_helper(struct qxl_device *qdev,
154 image->u.bitmap.stride = chunk_stride; 218 image->u.bitmap.stride = chunk_stride;
155 image->u.bitmap.palette = 0; 219 image->u.bitmap.palette = 0;
156 image->u.bitmap.data = qxl_bo_physical_address(qdev, chunk_bo, 0); 220 image->u.bitmap.data = qxl_bo_physical_address(qdev, chunk_bo, 0);
157 qxl_release_add_res(qdev, release, chunk_bo);
158 qxl_bo_unreserve(chunk_bo);
159 qxl_bo_unref(&chunk_bo);
160 221
161 qxl_bo_kunmap_atomic_page(qdev, *image_bo, ptr); 222 qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);
162 223
163 return 0; 224 return 0;
164} 225}
165 226
166int qxl_image_create(struct qxl_device *qdev, 227int qxl_image_init(struct qxl_device *qdev,
167 struct qxl_release *release, 228 struct qxl_release *release,
168 struct qxl_bo **image_bo, 229 struct qxl_drm_image *dimage,
169 const uint8_t *data, 230 const uint8_t *data,
170 int x, int y, int width, int height, 231 int x, int y, int width, int height,
171 int depth, int stride) 232 int depth, int stride)
172{ 233{
173 data += y * stride + x * (depth / 8); 234 data += y * stride + x * (depth / 8);
174 return qxl_image_create_helper(qdev, release, image_bo, data, 235 return qxl_image_init_helper(qdev, release, dimage, data,
175 width, height, depth, 0, stride); 236 width, height, depth, 0, stride);
176} 237}
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 27f45e49250d..7b95c75e9626 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -68,55 +68,60 @@ static int qxl_map_ioctl(struct drm_device *dev, void *data,
68 &qxl_map->offset); 68 &qxl_map->offset);
69} 69}
70 70
71struct qxl_reloc_info {
72 int type;
73 struct qxl_bo *dst_bo;
74 uint32_t dst_offset;
75 struct qxl_bo *src_bo;
76 int src_offset;
77};
78
71/* 79/*
72 * dst must be validated, i.e. whole bo on vram/surfacesram (right now all bo's 80 * dst must be validated, i.e. whole bo on vram/surfacesram (right now all bo's
73 * are on vram). 81 * are on vram).
74 * *(dst + dst_off) = qxl_bo_physical_address(src, src_off) 82 * *(dst + dst_off) = qxl_bo_physical_address(src, src_off)
75 */ 83 */
76static void 84static void
77apply_reloc(struct qxl_device *qdev, struct qxl_bo *dst, uint64_t dst_off, 85apply_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
78 struct qxl_bo *src, uint64_t src_off)
79{ 86{
80 void *reloc_page; 87 void *reloc_page;
81 88 reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
82 reloc_page = qxl_bo_kmap_atomic_page(qdev, dst, dst_off & PAGE_MASK); 89 *(uint64_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = qxl_bo_physical_address(qdev,
83 *(uint64_t *)(reloc_page + (dst_off & ~PAGE_MASK)) = qxl_bo_physical_address(qdev, 90 info->src_bo,
84 src, src_off); 91 info->src_offset);
85 qxl_bo_kunmap_atomic_page(qdev, dst, reloc_page); 92 qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
86} 93}
87 94
88static void 95static void
89apply_surf_reloc(struct qxl_device *qdev, struct qxl_bo *dst, uint64_t dst_off, 96apply_surf_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
90 struct qxl_bo *src)
91{ 97{
92 uint32_t id = 0; 98 uint32_t id = 0;
93 void *reloc_page; 99 void *reloc_page;
94 100
95 if (src && !src->is_primary) 101 if (info->src_bo && !info->src_bo->is_primary)
96 id = src->surface_id; 102 id = info->src_bo->surface_id;
97 103
98 reloc_page = qxl_bo_kmap_atomic_page(qdev, dst, dst_off & PAGE_MASK); 104 reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
99 *(uint32_t *)(reloc_page + (dst_off & ~PAGE_MASK)) = id; 105 *(uint32_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = id;
100 qxl_bo_kunmap_atomic_page(qdev, dst, reloc_page); 106 qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
101} 107}
102 108
103/* return holding the reference to this object */ 109/* return holding the reference to this object */
104static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev, 110static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev,
105 struct drm_file *file_priv, uint64_t handle, 111 struct drm_file *file_priv, uint64_t handle,
106 struct qxl_reloc_list *reloc_list) 112 struct qxl_release *release)
107{ 113{
108 struct drm_gem_object *gobj; 114 struct drm_gem_object *gobj;
109 struct qxl_bo *qobj; 115 struct qxl_bo *qobj;
110 int ret; 116 int ret;
111 117
112 gobj = drm_gem_object_lookup(qdev->ddev, file_priv, handle); 118 gobj = drm_gem_object_lookup(qdev->ddev, file_priv, handle);
113 if (!gobj) { 119 if (!gobj)
114 DRM_ERROR("bad bo handle %lld\n", handle);
115 return NULL; 120 return NULL;
116 } 121
117 qobj = gem_to_qxl_bo(gobj); 122 qobj = gem_to_qxl_bo(gobj);
118 123
119 ret = qxl_bo_list_add(reloc_list, qobj); 124 ret = qxl_release_list_add(release, qobj);
120 if (ret) 125 if (ret)
121 return NULL; 126 return NULL;
122 127
@@ -129,151 +134,177 @@ static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev,
129 * However, the command as passed from user space must *not* contain the initial 134 * However, the command as passed from user space must *not* contain the initial
130 * QXLReleaseInfo struct (first XXX bytes) 135 * QXLReleaseInfo struct (first XXX bytes)
131 */ 136 */
132static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data, 137static int qxl_process_single_command(struct qxl_device *qdev,
133 struct drm_file *file_priv) 138 struct drm_qxl_command *cmd,
139 struct drm_file *file_priv)
134{ 140{
135 struct qxl_device *qdev = dev->dev_private; 141 struct qxl_reloc_info *reloc_info;
136 struct drm_qxl_execbuffer *execbuffer = data; 142 int release_type;
137 struct drm_qxl_command user_cmd; 143 struct qxl_release *release;
138 int cmd_num; 144 struct qxl_bo *cmd_bo;
139 struct qxl_bo *reloc_src_bo;
140 struct qxl_bo *reloc_dst_bo;
141 struct drm_qxl_reloc reloc;
142 void *fb_cmd; 145 void *fb_cmd;
143 int i, ret; 146 int i, j, ret, num_relocs;
144 struct qxl_reloc_list reloc_list;
145 int unwritten; 147 int unwritten;
146 uint32_t reloc_dst_offset;
147 INIT_LIST_HEAD(&reloc_list.bos);
148 148
149 for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) { 149 switch (cmd->type) {
150 struct qxl_release *release; 150 case QXL_CMD_DRAW:
151 struct qxl_bo *cmd_bo; 151 release_type = QXL_RELEASE_DRAWABLE;
152 int release_type; 152 break;
153 struct drm_qxl_command *commands = 153 case QXL_CMD_SURFACE:
154 (struct drm_qxl_command *)(uintptr_t)execbuffer->commands; 154 case QXL_CMD_CURSOR:
155 default:
156 DRM_DEBUG("Only draw commands in execbuffers\n");
157 return -EINVAL;
158 break;
159 }
155 160
156 if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num], 161 if (cmd->command_size > PAGE_SIZE - sizeof(union qxl_release_info))
157 sizeof(user_cmd))) 162 return -EINVAL;
158 return -EFAULT;
159 switch (user_cmd.type) {
160 case QXL_CMD_DRAW:
161 release_type = QXL_RELEASE_DRAWABLE;
162 break;
163 case QXL_CMD_SURFACE:
164 case QXL_CMD_CURSOR:
165 default:
166 DRM_DEBUG("Only draw commands in execbuffers\n");
167 return -EINVAL;
168 break;
169 }
170 163
171 if (user_cmd.command_size > PAGE_SIZE - sizeof(union qxl_release_info)) 164 if (!access_ok(VERIFY_READ,
172 return -EINVAL; 165 (void *)(unsigned long)cmd->command,
166 cmd->command_size))
167 return -EFAULT;
173 168
174 if (!access_ok(VERIFY_READ, 169 reloc_info = kmalloc(sizeof(struct qxl_reloc_info) * cmd->relocs_num, GFP_KERNEL);
175 (void *)(unsigned long)user_cmd.command, 170 if (!reloc_info)
176 user_cmd.command_size)) 171 return -ENOMEM;
177 return -EFAULT;
178 172
179 ret = qxl_alloc_release_reserved(qdev, 173 ret = qxl_alloc_release_reserved(qdev,
180 sizeof(union qxl_release_info) + 174 sizeof(union qxl_release_info) +
181 user_cmd.command_size, 175 cmd->command_size,
182 release_type, 176 release_type,
183 &release, 177 &release,
184 &cmd_bo); 178 &cmd_bo);
185 if (ret) 179 if (ret)
186 return ret; 180 goto out_free_reloc;
187 181
188 /* TODO copy slow path code from i915 */ 182 /* TODO copy slow path code from i915 */
189 fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE)); 183 fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
190 unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)user_cmd.command, user_cmd.command_size); 184 unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)cmd->command, cmd->command_size);
191 185
192 { 186 {
193 struct qxl_drawable *draw = fb_cmd; 187 struct qxl_drawable *draw = fb_cmd;
188 draw->mm_time = qdev->rom->mm_clock;
189 }
194 190
195 draw->mm_time = qdev->rom->mm_clock; 191 qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd);
196 } 192 if (unwritten) {
197 qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd); 193 DRM_ERROR("got unwritten %d\n", unwritten);
198 if (unwritten) { 194 ret = -EFAULT;
199 DRM_ERROR("got unwritten %d\n", unwritten); 195 goto out_free_release;
200 qxl_release_unreserve(qdev, release); 196 }
201 qxl_release_free(qdev, release); 197
202 return -EFAULT; 198 /* fill out reloc info structs */
199 num_relocs = 0;
200 for (i = 0; i < cmd->relocs_num; ++i) {
201 struct drm_qxl_reloc reloc;
202
203 if (DRM_COPY_FROM_USER(&reloc,
204 &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i],
205 sizeof(reloc))) {
206 ret = -EFAULT;
207 goto out_free_bos;
203 } 208 }
204 209
205 for (i = 0 ; i < user_cmd.relocs_num; ++i) { 210 /* add the bos to the list of bos to validate -
206 if (DRM_COPY_FROM_USER(&reloc, 211 need to validate first then process relocs? */
207 &((struct drm_qxl_reloc *)(uintptr_t)user_cmd.relocs)[i], 212 if (reloc.reloc_type != QXL_RELOC_TYPE_BO && reloc.reloc_type != QXL_RELOC_TYPE_SURF) {
208 sizeof(reloc))) { 213 DRM_DEBUG("unknown reloc type %d\n", reloc_info[i].type);
209 qxl_bo_list_unreserve(&reloc_list, true);
210 qxl_release_unreserve(qdev, release);
211 qxl_release_free(qdev, release);
212 return -EFAULT;
213 }
214 214
215 /* add the bos to the list of bos to validate - 215 ret = -EINVAL;
216 need to validate first then process relocs? */ 216 goto out_free_bos;
217 if (reloc.dst_handle) { 217 }
218 reloc_dst_bo = qxlhw_handle_to_bo(qdev, file_priv, 218 reloc_info[i].type = reloc.reloc_type;
219 reloc.dst_handle, &reloc_list); 219
220 if (!reloc_dst_bo) { 220 if (reloc.dst_handle) {
221 qxl_bo_list_unreserve(&reloc_list, true); 221 reloc_info[i].dst_bo = qxlhw_handle_to_bo(qdev, file_priv,
222 qxl_release_unreserve(qdev, release); 222 reloc.dst_handle, release);
223 qxl_release_free(qdev, release); 223 if (!reloc_info[i].dst_bo) {
224 return -EINVAL; 224 ret = -EINVAL;
225 } 225 reloc_info[i].src_bo = NULL;
226 reloc_dst_offset = 0; 226 goto out_free_bos;
227 } else {
228 reloc_dst_bo = cmd_bo;
229 reloc_dst_offset = release->release_offset;
230 } 227 }
231 228 reloc_info[i].dst_offset = reloc.dst_offset;
232 /* reserve and validate the reloc dst bo */ 229 } else {
233 if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle > 0) { 230 reloc_info[i].dst_bo = cmd_bo;
234 reloc_src_bo = 231 reloc_info[i].dst_offset = reloc.dst_offset + release->release_offset;
235 qxlhw_handle_to_bo(qdev, file_priv, 232 }
236 reloc.src_handle, &reloc_list); 233 num_relocs++;
237 if (!reloc_src_bo) { 234
238 if (reloc_dst_bo != cmd_bo) 235 /* reserve and validate the reloc dst bo */
239 drm_gem_object_unreference_unlocked(&reloc_dst_bo->gem_base); 236 if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle > 0) {
240 qxl_bo_list_unreserve(&reloc_list, true); 237 reloc_info[i].src_bo =
241 qxl_release_unreserve(qdev, release); 238 qxlhw_handle_to_bo(qdev, file_priv,
242 qxl_release_free(qdev, release); 239 reloc.src_handle, release);
243 return -EINVAL; 240 if (!reloc_info[i].src_bo) {
244 } 241 if (reloc_info[i].dst_bo != cmd_bo)
245 } else 242 drm_gem_object_unreference_unlocked(&reloc_info[i].dst_bo->gem_base);
246 reloc_src_bo = NULL; 243 ret = -EINVAL;
247 if (reloc.reloc_type == QXL_RELOC_TYPE_BO) { 244 goto out_free_bos;
248 apply_reloc(qdev, reloc_dst_bo, reloc_dst_offset + reloc.dst_offset,
249 reloc_src_bo, reloc.src_offset);
250 } else if (reloc.reloc_type == QXL_RELOC_TYPE_SURF) {
251 apply_surf_reloc(qdev, reloc_dst_bo, reloc_dst_offset + reloc.dst_offset, reloc_src_bo);
252 } else {
253 DRM_ERROR("unknown reloc type %d\n", reloc.reloc_type);
254 return -EINVAL;
255 } 245 }
246 reloc_info[i].src_offset = reloc.src_offset;
247 } else {
248 reloc_info[i].src_bo = NULL;
249 reloc_info[i].src_offset = 0;
250 }
251 }
256 252
257 if (reloc_src_bo && reloc_src_bo != cmd_bo) { 253 /* validate all buffers */
258 qxl_release_add_res(qdev, release, reloc_src_bo); 254 ret = qxl_release_reserve_list(release, false);
259 drm_gem_object_unreference_unlocked(&reloc_src_bo->gem_base); 255 if (ret)
260 } 256 goto out_free_bos;
261 257
262 if (reloc_dst_bo != cmd_bo) 258 for (i = 0; i < cmd->relocs_num; ++i) {
263 drm_gem_object_unreference_unlocked(&reloc_dst_bo->gem_base); 259 if (reloc_info[i].type == QXL_RELOC_TYPE_BO)
264 } 260 apply_reloc(qdev, &reloc_info[i]);
265 qxl_fence_releaseable(qdev, release); 261 else if (reloc_info[i].type == QXL_RELOC_TYPE_SURF)
262 apply_surf_reloc(qdev, &reloc_info[i]);
263 }
266 264
267 ret = qxl_push_command_ring_release(qdev, release, user_cmd.type, true); 265 ret = qxl_push_command_ring_release(qdev, release, cmd->type, true);
268 if (ret == -ERESTARTSYS) { 266 if (ret)
269 qxl_release_unreserve(qdev, release); 267 qxl_release_backoff_reserve_list(release);
270 qxl_release_free(qdev, release); 268 else
271 qxl_bo_list_unreserve(&reloc_list, true); 269 qxl_release_fence_buffer_objects(release);
270
271out_free_bos:
272 for (j = 0; j < num_relocs; j++) {
273 if (reloc_info[j].dst_bo != cmd_bo)
274 drm_gem_object_unreference_unlocked(&reloc_info[j].dst_bo->gem_base);
275 if (reloc_info[j].src_bo && reloc_info[j].src_bo != cmd_bo)
276 drm_gem_object_unreference_unlocked(&reloc_info[j].src_bo->gem_base);
277 }
278out_free_release:
279 if (ret)
280 qxl_release_free(qdev, release);
281out_free_reloc:
282 kfree(reloc_info);
283 return ret;
284}
285
286static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
287 struct drm_file *file_priv)
288{
289 struct qxl_device *qdev = dev->dev_private;
290 struct drm_qxl_execbuffer *execbuffer = data;
291 struct drm_qxl_command user_cmd;
292 int cmd_num;
293 int ret;
294
295 for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
296
297 struct drm_qxl_command *commands =
298 (struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
299
300 if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num],
301 sizeof(user_cmd)))
302 return -EFAULT;
303
304 ret = qxl_process_single_command(qdev, &user_cmd, file_priv);
305 if (ret)
272 return ret; 306 return ret;
273 }
274 qxl_release_unreserve(qdev, release);
275 } 307 }
276 qxl_bo_list_unreserve(&reloc_list, 0);
277 return 0; 308 return 0;
278} 309}
279 310
@@ -305,7 +336,7 @@ static int qxl_update_area_ioctl(struct drm_device *dev, void *data,
305 goto out; 336 goto out;
306 337
307 if (!qobj->pin_count) { 338 if (!qobj->pin_count) {
308 qxl_ttm_placement_from_domain(qobj, qobj->type); 339 qxl_ttm_placement_from_domain(qobj, qobj->type, false);
309 ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, 340 ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
310 true, false); 341 true, false);
311 if (unlikely(ret)) 342 if (unlikely(ret))
@@ -402,7 +433,7 @@ static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data,
402 return ret; 433 return ret;
403} 434}
404 435
405struct drm_ioctl_desc qxl_ioctls[] = { 436const struct drm_ioctl_desc qxl_ioctls[] = {
406 DRM_IOCTL_DEF_DRV(QXL_ALLOC, qxl_alloc_ioctl, DRM_AUTH|DRM_UNLOCKED), 437 DRM_IOCTL_DEF_DRV(QXL_ALLOC, qxl_alloc_ioctl, DRM_AUTH|DRM_UNLOCKED),
407 438
408 DRM_IOCTL_DEF_DRV(QXL_MAP, qxl_map_ioctl, DRM_AUTH|DRM_UNLOCKED), 439 DRM_IOCTL_DEF_DRV(QXL_MAP, qxl_map_ioctl, DRM_AUTH|DRM_UNLOCKED),
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index 1191fe7788c9..8691c76c5ef0 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -51,20 +51,21 @@ bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
51 return false; 51 return false;
52} 52}
53 53
54void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain) 54void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
55{ 55{
56 u32 c = 0; 56 u32 c = 0;
57 u32 pflag = pinned ? TTM_PL_FLAG_NO_EVICT : 0;
57 58
58 qbo->placement.fpfn = 0; 59 qbo->placement.fpfn = 0;
59 qbo->placement.lpfn = 0; 60 qbo->placement.lpfn = 0;
60 qbo->placement.placement = qbo->placements; 61 qbo->placement.placement = qbo->placements;
61 qbo->placement.busy_placement = qbo->placements; 62 qbo->placement.busy_placement = qbo->placements;
62 if (domain == QXL_GEM_DOMAIN_VRAM) 63 if (domain == QXL_GEM_DOMAIN_VRAM)
63 qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM; 64 qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag;
64 if (domain == QXL_GEM_DOMAIN_SURFACE) 65 if (domain == QXL_GEM_DOMAIN_SURFACE)
65 qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0; 66 qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0 | pflag;
66 if (domain == QXL_GEM_DOMAIN_CPU) 67 if (domain == QXL_GEM_DOMAIN_CPU)
67 qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; 68 qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM | pflag;
68 if (!c) 69 if (!c)
69 qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; 70 qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
70 qbo->placement.num_placement = c; 71 qbo->placement.num_placement = c;
@@ -73,7 +74,7 @@ void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain)
73 74
74 75
75int qxl_bo_create(struct qxl_device *qdev, 76int qxl_bo_create(struct qxl_device *qdev,
76 unsigned long size, bool kernel, u32 domain, 77 unsigned long size, bool kernel, bool pinned, u32 domain,
77 struct qxl_surface *surf, 78 struct qxl_surface *surf,
78 struct qxl_bo **bo_ptr) 79 struct qxl_bo **bo_ptr)
79{ 80{
@@ -97,17 +98,16 @@ int qxl_bo_create(struct qxl_device *qdev,
97 kfree(bo); 98 kfree(bo);
98 return r; 99 return r;
99 } 100 }
100 bo->gem_base.driver_private = NULL;
101 bo->type = domain; 101 bo->type = domain;
102 bo->pin_count = 0; 102 bo->pin_count = pinned ? 1 : 0;
103 bo->surface_id = 0; 103 bo->surface_id = 0;
104 qxl_fence_init(qdev, &bo->fence); 104 qxl_fence_init(qdev, &bo->fence);
105 INIT_LIST_HEAD(&bo->list); 105 INIT_LIST_HEAD(&bo->list);
106 atomic_set(&bo->reserve_count, 0); 106
107 if (surf) 107 if (surf)
108 bo->surf = *surf; 108 bo->surf = *surf;
109 109
110 qxl_ttm_placement_from_domain(bo, domain); 110 qxl_ttm_placement_from_domain(bo, domain, pinned);
111 111
112 r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type, 112 r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type,
113 &bo->placement, 0, !kernel, NULL, size, 113 &bo->placement, 0, !kernel, NULL, size,
@@ -228,7 +228,7 @@ struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
228int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr) 228int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
229{ 229{
230 struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private; 230 struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
231 int r, i; 231 int r;
232 232
233 if (bo->pin_count) { 233 if (bo->pin_count) {
234 bo->pin_count++; 234 bo->pin_count++;
@@ -236,9 +236,7 @@ int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
236 *gpu_addr = qxl_bo_gpu_offset(bo); 236 *gpu_addr = qxl_bo_gpu_offset(bo);
237 return 0; 237 return 0;
238 } 238 }
239 qxl_ttm_placement_from_domain(bo, domain); 239 qxl_ttm_placement_from_domain(bo, domain, true);
240 for (i = 0; i < bo->placement.num_placement; i++)
241 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
242 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); 240 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
243 if (likely(r == 0)) { 241 if (likely(r == 0)) {
244 bo->pin_count = 1; 242 bo->pin_count = 1;
@@ -317,53 +315,6 @@ int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
317 return 0; 315 return 0;
318} 316}
319 317
320void qxl_bo_list_unreserve(struct qxl_reloc_list *reloc_list, bool failed)
321{
322 struct qxl_bo_list *entry, *sf;
323
324 list_for_each_entry_safe(entry, sf, &reloc_list->bos, lhead) {
325 qxl_bo_unreserve(entry->bo);
326 list_del(&entry->lhead);
327 kfree(entry);
328 }
329}
330
331int qxl_bo_list_add(struct qxl_reloc_list *reloc_list, struct qxl_bo *bo)
332{
333 struct qxl_bo_list *entry;
334 int ret;
335
336 list_for_each_entry(entry, &reloc_list->bos, lhead) {
337 if (entry->bo == bo)
338 return 0;
339 }
340
341 entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
342 if (!entry)
343 return -ENOMEM;
344
345 entry->bo = bo;
346 list_add(&entry->lhead, &reloc_list->bos);
347
348 ret = qxl_bo_reserve(bo, false);
349 if (ret)
350 return ret;
351
352 if (!bo->pin_count) {
353 qxl_ttm_placement_from_domain(bo, bo->type);
354 ret = ttm_bo_validate(&bo->tbo, &bo->placement,
355 true, false);
356 if (ret)
357 return ret;
358 }
359
360 /* allocate a surface for reserved + validated buffers */
361 ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo);
362 if (ret)
363 return ret;
364 return 0;
365}
366
367int qxl_surf_evict(struct qxl_device *qdev) 318int qxl_surf_evict(struct qxl_device *qdev)
368{ 319{
369 return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV0); 320 return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV0);
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
index ee7ad79ce781..d458a140c024 100644
--- a/drivers/gpu/drm/qxl/qxl_object.h
+++ b/drivers/gpu/drm/qxl/qxl_object.h
@@ -59,7 +59,7 @@ static inline unsigned long qxl_bo_size(struct qxl_bo *bo)
59 59
60static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo) 60static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo)
61{ 61{
62 return bo->tbo.addr_space_offset; 62 return drm_vma_node_offset_addr(&bo->tbo.vma_node);
63} 63}
64 64
65static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type, 65static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
@@ -88,7 +88,7 @@ static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
88 88
89extern int qxl_bo_create(struct qxl_device *qdev, 89extern int qxl_bo_create(struct qxl_device *qdev,
90 unsigned long size, 90 unsigned long size,
91 bool kernel, u32 domain, 91 bool kernel, bool pinned, u32 domain,
92 struct qxl_surface *surf, 92 struct qxl_surface *surf,
93 struct qxl_bo **bo_ptr); 93 struct qxl_bo **bo_ptr);
94extern int qxl_bo_kmap(struct qxl_bo *bo, void **ptr); 94extern int qxl_bo_kmap(struct qxl_bo *bo, void **ptr);
@@ -99,9 +99,7 @@ extern struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo);
99extern void qxl_bo_unref(struct qxl_bo **bo); 99extern void qxl_bo_unref(struct qxl_bo **bo);
100extern int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr); 100extern int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr);
101extern int qxl_bo_unpin(struct qxl_bo *bo); 101extern int qxl_bo_unpin(struct qxl_bo *bo);
102extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain); 102extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned);
103extern bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo); 103extern bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo);
104 104
105extern int qxl_bo_list_add(struct qxl_reloc_list *reloc_list, struct qxl_bo *bo);
106extern void qxl_bo_list_unreserve(struct qxl_reloc_list *reloc_list, bool failed);
107#endif 105#endif
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index b443d6751d5f..0109a9644cb2 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -38,7 +38,8 @@
38 38
39static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE }; 39static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
40static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO }; 40static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };
41uint64_t 41
42static uint64_t
42qxl_release_alloc(struct qxl_device *qdev, int type, 43qxl_release_alloc(struct qxl_device *qdev, int type,
43 struct qxl_release **ret) 44 struct qxl_release **ret)
44{ 45{
@@ -53,9 +54,9 @@ qxl_release_alloc(struct qxl_device *qdev, int type,
53 return 0; 54 return 0;
54 } 55 }
55 release->type = type; 56 release->type = type;
56 release->bo_count = 0;
57 release->release_offset = 0; 57 release->release_offset = 0;
58 release->surface_release_id = 0; 58 release->surface_release_id = 0;
59 INIT_LIST_HEAD(&release->bos);
59 60
60 idr_preload(GFP_KERNEL); 61 idr_preload(GFP_KERNEL);
61 spin_lock(&qdev->release_idr_lock); 62 spin_lock(&qdev->release_idr_lock);
@@ -77,20 +78,20 @@ void
77qxl_release_free(struct qxl_device *qdev, 78qxl_release_free(struct qxl_device *qdev,
78 struct qxl_release *release) 79 struct qxl_release *release)
79{ 80{
80 int i; 81 struct qxl_bo_list *entry, *tmp;
81 82 QXL_INFO(qdev, "release %d, type %d\n", release->id,
82 QXL_INFO(qdev, "release %d, type %d, %d bos\n", release->id, 83 release->type);
83 release->type, release->bo_count);
84 84
85 if (release->surface_release_id) 85 if (release->surface_release_id)
86 qxl_surface_id_dealloc(qdev, release->surface_release_id); 86 qxl_surface_id_dealloc(qdev, release->surface_release_id);
87 87
88 for (i = 0 ; i < release->bo_count; ++i) { 88 list_for_each_entry_safe(entry, tmp, &release->bos, tv.head) {
89 struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
89 QXL_INFO(qdev, "release %llx\n", 90 QXL_INFO(qdev, "release %llx\n",
90 release->bos[i]->tbo.addr_space_offset 91 drm_vma_node_offset_addr(&entry->tv.bo->vma_node)
91 - DRM_FILE_OFFSET); 92 - DRM_FILE_OFFSET);
92 qxl_fence_remove_release(&release->bos[i]->fence, release->id); 93 qxl_fence_remove_release(&bo->fence, release->id);
93 qxl_bo_unref(&release->bos[i]); 94 qxl_bo_unref(&bo);
94 } 95 }
95 spin_lock(&qdev->release_idr_lock); 96 spin_lock(&qdev->release_idr_lock);
96 idr_remove(&qdev->release_idr, release->id); 97 idr_remove(&qdev->release_idr, release->id);
@@ -98,83 +99,117 @@ qxl_release_free(struct qxl_device *qdev,
98 kfree(release); 99 kfree(release);
99} 100}
100 101
101void
102qxl_release_add_res(struct qxl_device *qdev, struct qxl_release *release,
103 struct qxl_bo *bo)
104{
105 int i;
106 for (i = 0; i < release->bo_count; i++)
107 if (release->bos[i] == bo)
108 return;
109
110 if (release->bo_count >= QXL_MAX_RES) {
111 DRM_ERROR("exceeded max resource on a qxl_release item\n");
112 return;
113 }
114 release->bos[release->bo_count++] = qxl_bo_ref(bo);
115}
116
117static int qxl_release_bo_alloc(struct qxl_device *qdev, 102static int qxl_release_bo_alloc(struct qxl_device *qdev,
118 struct qxl_bo **bo) 103 struct qxl_bo **bo)
119{ 104{
120 int ret; 105 int ret;
121 ret = qxl_bo_create(qdev, PAGE_SIZE, false, QXL_GEM_DOMAIN_VRAM, NULL, 106 /* pin releases bo's they are too messy to evict */
107 ret = qxl_bo_create(qdev, PAGE_SIZE, false, true,
108 QXL_GEM_DOMAIN_VRAM, NULL,
122 bo); 109 bo);
123 return ret; 110 return ret;
124} 111}
125 112
126int qxl_release_reserve(struct qxl_device *qdev, 113int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
127 struct qxl_release *release, bool no_wait) 114{
115 struct qxl_bo_list *entry;
116
117 list_for_each_entry(entry, &release->bos, tv.head) {
118 if (entry->tv.bo == &bo->tbo)
119 return 0;
120 }
121
122 entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
123 if (!entry)
124 return -ENOMEM;
125
126 qxl_bo_ref(bo);
127 entry->tv.bo = &bo->tbo;
128 list_add_tail(&entry->tv.head, &release->bos);
129 return 0;
130}
131
132static int qxl_release_validate_bo(struct qxl_bo *bo)
128{ 133{
129 int ret; 134 int ret;
130 if (atomic_inc_return(&release->bos[0]->reserve_count) == 1) { 135
131 ret = qxl_bo_reserve(release->bos[0], no_wait); 136 if (!bo->pin_count) {
137 qxl_ttm_placement_from_domain(bo, bo->type, false);
138 ret = ttm_bo_validate(&bo->tbo, &bo->placement,
139 true, false);
132 if (ret) 140 if (ret)
133 return ret; 141 return ret;
134 } 142 }
143
144 /* allocate a surface for reserved + validated buffers */
145 ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo);
146 if (ret)
147 return ret;
148 return 0;
149}
150
151int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
152{
153 int ret;
154 struct qxl_bo_list *entry;
155
156 /* if only one object on the release its the release itself
157 since these objects are pinned no need to reserve */
158 if (list_is_singular(&release->bos))
159 return 0;
160
161 ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos);
162 if (ret)
163 return ret;
164
165 list_for_each_entry(entry, &release->bos, tv.head) {
166 struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
167
168 ret = qxl_release_validate_bo(bo);
169 if (ret) {
170 ttm_eu_backoff_reservation(&release->ticket, &release->bos);
171 return ret;
172 }
173 }
135 return 0; 174 return 0;
136} 175}
137 176
138void qxl_release_unreserve(struct qxl_device *qdev, 177void qxl_release_backoff_reserve_list(struct qxl_release *release)
139 struct qxl_release *release)
140{ 178{
141 if (atomic_dec_and_test(&release->bos[0]->reserve_count)) 179 /* if only one object on the release its the release itself
142 qxl_bo_unreserve(release->bos[0]); 180 since these objects are pinned no need to reserve */
181 if (list_is_singular(&release->bos))
182 return;
183
184 ttm_eu_backoff_reservation(&release->ticket, &release->bos);
143} 185}
144 186
187
145int qxl_alloc_surface_release_reserved(struct qxl_device *qdev, 188int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
146 enum qxl_surface_cmd_type surface_cmd_type, 189 enum qxl_surface_cmd_type surface_cmd_type,
147 struct qxl_release *create_rel, 190 struct qxl_release *create_rel,
148 struct qxl_release **release) 191 struct qxl_release **release)
149{ 192{
150 int ret;
151
152 if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) { 193 if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
153 int idr_ret; 194 int idr_ret;
195 struct qxl_bo_list *entry = list_first_entry(&create_rel->bos, struct qxl_bo_list, tv.head);
154 struct qxl_bo *bo; 196 struct qxl_bo *bo;
155 union qxl_release_info *info; 197 union qxl_release_info *info;
156 198
157 /* stash the release after the create command */ 199 /* stash the release after the create command */
158 idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release); 200 idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
159 bo = qxl_bo_ref(create_rel->bos[0]); 201 bo = qxl_bo_ref(to_qxl_bo(entry->tv.bo));
160 202
161 (*release)->release_offset = create_rel->release_offset + 64; 203 (*release)->release_offset = create_rel->release_offset + 64;
162 204
163 qxl_release_add_res(qdev, *release, bo); 205 qxl_release_list_add(*release, bo);
164 206
165 ret = qxl_release_reserve(qdev, *release, false);
166 if (ret) {
167 DRM_ERROR("release reserve failed\n");
168 goto out_unref;
169 }
170 info = qxl_release_map(qdev, *release); 207 info = qxl_release_map(qdev, *release);
171 info->id = idr_ret; 208 info->id = idr_ret;
172 qxl_release_unmap(qdev, *release, info); 209 qxl_release_unmap(qdev, *release, info);
173 210
174
175out_unref:
176 qxl_bo_unref(&bo); 211 qxl_bo_unref(&bo);
177 return ret; 212 return 0;
178 } 213 }
179 214
180 return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd), 215 return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd),
@@ -187,7 +222,7 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
187{ 222{
188 struct qxl_bo *bo; 223 struct qxl_bo *bo;
189 int idr_ret; 224 int idr_ret;
190 int ret; 225 int ret = 0;
191 union qxl_release_info *info; 226 union qxl_release_info *info;
192 int cur_idx; 227 int cur_idx;
193 228
@@ -216,11 +251,6 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
216 mutex_unlock(&qdev->release_mutex); 251 mutex_unlock(&qdev->release_mutex);
217 return ret; 252 return ret;
218 } 253 }
219
220 /* pin releases bo's they are too messy to evict */
221 ret = qxl_bo_reserve(qdev->current_release_bo[cur_idx], false);
222 qxl_bo_pin(qdev->current_release_bo[cur_idx], QXL_GEM_DOMAIN_VRAM, NULL);
223 qxl_bo_unreserve(qdev->current_release_bo[cur_idx]);
224 } 254 }
225 255
226 bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]); 256 bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);
@@ -231,36 +261,18 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
231 if (rbo) 261 if (rbo)
232 *rbo = bo; 262 *rbo = bo;
233 263
234 qxl_release_add_res(qdev, *release, bo);
235
236 ret = qxl_release_reserve(qdev, *release, false);
237 mutex_unlock(&qdev->release_mutex); 264 mutex_unlock(&qdev->release_mutex);
238 if (ret) 265
239 goto out_unref; 266 qxl_release_list_add(*release, bo);
240 267
241 info = qxl_release_map(qdev, *release); 268 info = qxl_release_map(qdev, *release);
242 info->id = idr_ret; 269 info->id = idr_ret;
243 qxl_release_unmap(qdev, *release, info); 270 qxl_release_unmap(qdev, *release, info);
244 271
245out_unref:
246 qxl_bo_unref(&bo); 272 qxl_bo_unref(&bo);
247 return ret; 273 return ret;
248} 274}
249 275
250int qxl_fence_releaseable(struct qxl_device *qdev,
251 struct qxl_release *release)
252{
253 int i, ret;
254 for (i = 0; i < release->bo_count; i++) {
255 if (!release->bos[i]->tbo.sync_obj)
256 release->bos[i]->tbo.sync_obj = &release->bos[i]->fence;
257 ret = qxl_fence_add_release(&release->bos[i]->fence, release->id);
258 if (ret)
259 return ret;
260 }
261 return 0;
262}
263
264struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev, 276struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
265 uint64_t id) 277 uint64_t id)
266{ 278{
@@ -273,10 +285,7 @@ struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
273 DRM_ERROR("failed to find id in release_idr\n"); 285 DRM_ERROR("failed to find id in release_idr\n");
274 return NULL; 286 return NULL;
275 } 287 }
276 if (release->bo_count < 1) { 288
277 DRM_ERROR("read a released resource with 0 bos\n");
278 return NULL;
279 }
280 return release; 289 return release;
281} 290}
282 291
@@ -285,9 +294,12 @@ union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
285{ 294{
286 void *ptr; 295 void *ptr;
287 union qxl_release_info *info; 296 union qxl_release_info *info;
288 struct qxl_bo *bo = release->bos[0]; 297 struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
298 struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
289 299
290 ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_SIZE); 300 ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_SIZE);
301 if (!ptr)
302 return NULL;
291 info = ptr + (release->release_offset & ~PAGE_SIZE); 303 info = ptr + (release->release_offset & ~PAGE_SIZE);
292 return info; 304 return info;
293} 305}
@@ -296,9 +308,51 @@ void qxl_release_unmap(struct qxl_device *qdev,
296 struct qxl_release *release, 308 struct qxl_release *release,
297 union qxl_release_info *info) 309 union qxl_release_info *info)
298{ 310{
299 struct qxl_bo *bo = release->bos[0]; 311 struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
312 struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
300 void *ptr; 313 void *ptr;
301 314
302 ptr = ((void *)info) - (release->release_offset & ~PAGE_SIZE); 315 ptr = ((void *)info) - (release->release_offset & ~PAGE_SIZE);
303 qxl_bo_kunmap_atomic_page(qdev, bo, ptr); 316 qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
304} 317}
318
319void qxl_release_fence_buffer_objects(struct qxl_release *release)
320{
321 struct ttm_validate_buffer *entry;
322 struct ttm_buffer_object *bo;
323 struct ttm_bo_global *glob;
324 struct ttm_bo_device *bdev;
325 struct ttm_bo_driver *driver;
326 struct qxl_bo *qbo;
327
328 /* if only one object on the release its the release itself
329 since these objects are pinned no need to reserve */
330 if (list_is_singular(&release->bos))
331 return;
332
333 bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo;
334 bdev = bo->bdev;
335 driver = bdev->driver;
336 glob = bo->glob;
337
338 spin_lock(&glob->lru_lock);
339 spin_lock(&bdev->fence_lock);
340
341 list_for_each_entry(entry, &release->bos, head) {
342 bo = entry->bo;
343 qbo = to_qxl_bo(bo);
344
345 if (!entry->bo->sync_obj)
346 entry->bo->sync_obj = &qbo->fence;
347
348 qxl_fence_add_release_locked(&qbo->fence, release->id);
349
350 ttm_bo_add_to_lru(bo);
351 ww_mutex_unlock(&bo->resv->lock);
352 entry->reserved = false;
353 }
354 spin_unlock(&bdev->fence_lock);
355 spin_unlock(&glob->lru_lock);
356 ww_acquire_fini(&release->ticket);
357}
358
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 489cb8cece4d..037786d7c1dc 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -206,13 +206,15 @@ static void qxl_evict_flags(struct ttm_buffer_object *bo,
206 return; 206 return;
207 } 207 }
208 qbo = container_of(bo, struct qxl_bo, tbo); 208 qbo = container_of(bo, struct qxl_bo, tbo);
209 qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU); 209 qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU, false);
210 *placement = qbo->placement; 210 *placement = qbo->placement;
211} 211}
212 212
213static int qxl_verify_access(struct ttm_buffer_object *bo, struct file *filp) 213static int qxl_verify_access(struct ttm_buffer_object *bo, struct file *filp)
214{ 214{
215 return 0; 215 struct qxl_bo *qbo = to_qxl_bo(bo);
216
217 return drm_vma_node_verify_access(&qbo->gem_base.vma_node, filp);
216} 218}
217 219
218static int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev, 220static int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
index d4660cf942a5..c451257f08fb 100644
--- a/drivers/gpu/drm/r128/r128_cce.c
+++ b/drivers/gpu/drm/r128/r128_cce.c
@@ -540,7 +540,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
540 dev_priv->ring.end = ((u32 *) dev_priv->cce_ring->handle 540 dev_priv->ring.end = ((u32 *) dev_priv->cce_ring->handle
541 + init->ring_size / sizeof(u32)); 541 + init->ring_size / sizeof(u32));
542 dev_priv->ring.size = init->ring_size; 542 dev_priv->ring.size = init->ring_size;
543 dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8); 543 dev_priv->ring.size_l2qw = order_base_2(init->ring_size / 8);
544 544
545 dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1; 545 dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
546 546
diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c
index 472c38fe123f..5bd307cd8da1 100644
--- a/drivers/gpu/drm/r128/r128_drv.c
+++ b/drivers/gpu/drm/r128/r128_drv.c
@@ -48,7 +48,6 @@ static const struct file_operations r128_driver_fops = {
48 .unlocked_ioctl = drm_ioctl, 48 .unlocked_ioctl = drm_ioctl,
49 .mmap = drm_mmap, 49 .mmap = drm_mmap,
50 .poll = drm_poll, 50 .poll = drm_poll,
51 .fasync = drm_fasync,
52#ifdef CONFIG_COMPAT 51#ifdef CONFIG_COMPAT
53 .compat_ioctl = r128_compat_ioctl, 52 .compat_ioctl = r128_compat_ioctl,
54#endif 53#endif
@@ -57,7 +56,7 @@ static const struct file_operations r128_driver_fops = {
57 56
58static struct drm_driver driver = { 57static struct drm_driver driver = {
59 .driver_features = 58 .driver_features =
60 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | 59 DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
61 DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED, 60 DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
62 .dev_priv_size = sizeof(drm_r128_buf_priv_t), 61 .dev_priv_size = sizeof(drm_r128_buf_priv_t),
63 .load = r128_driver_load, 62 .load = r128_driver_load,
diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
index 930c71b2fb5e..56eb5e3f5439 100644
--- a/drivers/gpu/drm/r128/r128_drv.h
+++ b/drivers/gpu/drm/r128/r128_drv.h
@@ -131,7 +131,7 @@ typedef struct drm_r128_buf_priv {
131 drm_r128_freelist_t *list_entry; 131 drm_r128_freelist_t *list_entry;
132} drm_r128_buf_priv_t; 132} drm_r128_buf_priv_t;
133 133
134extern struct drm_ioctl_desc r128_ioctls[]; 134extern const struct drm_ioctl_desc r128_ioctls[];
135extern int r128_max_ioctl; 135extern int r128_max_ioctl;
136 136
137 /* r128_cce.c */ 137 /* r128_cce.c */
diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
index 19bb7e6f3d9a..01dd9aef9f0e 100644
--- a/drivers/gpu/drm/r128/r128_state.c
+++ b/drivers/gpu/drm/r128/r128_state.c
@@ -1643,7 +1643,7 @@ void r128_driver_lastclose(struct drm_device *dev)
1643 r128_do_cleanup_cce(dev); 1643 r128_do_cleanup_cce(dev);
1644} 1644}
1645 1645
1646struct drm_ioctl_desc r128_ioctls[] = { 1646const struct drm_ioctl_desc r128_ioctls[] = {
1647 DRM_IOCTL_DEF_DRV(R128_INIT, r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1647 DRM_IOCTL_DEF_DRV(R128_INIT, r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1648 DRM_IOCTL_DEF_DRV(R128_CCE_START, r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1648 DRM_IOCTL_DEF_DRV(R128_CCE_START, r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1649 DRM_IOCTL_DEF_DRV(R128_CCE_STOP, r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1649 DRM_IOCTL_DEF_DRV(R128_CCE_STOP, r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 064023bed480..3569d89b9e41 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -44,6 +44,41 @@ static char *pre_emph_names[] = {
44}; 44};
45 45
46/***** radeon AUX functions *****/ 46/***** radeon AUX functions *****/
47
48/* Atom needs data in little endian format
49 * so swap as appropriate when copying data to
50 * or from atom. Note that atom operates on
51 * dw units.
52 */
53static void radeon_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
54{
55#ifdef __BIG_ENDIAN
56 u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */
57 u32 *dst32, *src32;
58 int i;
59
60 memcpy(src_tmp, src, num_bytes);
61 src32 = (u32 *)src_tmp;
62 dst32 = (u32 *)dst_tmp;
63 if (to_le) {
64 for (i = 0; i < ((num_bytes + 3) / 4); i++)
65 dst32[i] = cpu_to_le32(src32[i]);
66 memcpy(dst, dst_tmp, num_bytes);
67 } else {
68 u8 dws = num_bytes & ~3;
69 for (i = 0; i < ((num_bytes + 3) / 4); i++)
70 dst32[i] = le32_to_cpu(src32[i]);
71 memcpy(dst, dst_tmp, dws);
72 if (num_bytes % 4) {
73 for (i = 0; i < (num_bytes % 4); i++)
74 dst[dws+i] = dst_tmp[dws+i];
75 }
76 }
77#else
78 memcpy(dst, src, num_bytes);
79#endif
80}
81
47union aux_channel_transaction { 82union aux_channel_transaction {
48 PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1; 83 PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1;
49 PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2; 84 PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2;
@@ -65,10 +100,10 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
65 100
66 base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1); 101 base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1);
67 102
68 memcpy(base, send, send_bytes); 103 radeon_copy_swap(base, send, send_bytes, true);
69 104
70 args.v1.lpAuxRequest = 0 + 4; 105 args.v1.lpAuxRequest = cpu_to_le16((u16)(0 + 4));
71 args.v1.lpDataOut = 16 + 4; 106 args.v1.lpDataOut = cpu_to_le16((u16)(16 + 4));
72 args.v1.ucDataOutLen = 0; 107 args.v1.ucDataOutLen = 0;
73 args.v1.ucChannelID = chan->rec.i2c_id; 108 args.v1.ucChannelID = chan->rec.i2c_id;
74 args.v1.ucDelay = delay / 10; 109 args.v1.ucDelay = delay / 10;
@@ -102,7 +137,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
102 recv_bytes = recv_size; 137 recv_bytes = recv_size;
103 138
104 if (recv && recv_size) 139 if (recv && recv_size)
105 memcpy(recv, base + 16, recv_bytes); 140 radeon_copy_swap(recv, base + 16, recv_bytes, false);
106 141
107 return recv_bytes; 142 return recv_bytes;
108} 143}
@@ -550,7 +585,7 @@ static bool radeon_dp_get_link_status(struct radeon_connector *radeon_connector,
550 return false; 585 return false;
551 } 586 }
552 587
553 DRM_DEBUG_KMS("link status %*ph\n", 6, link_status); 588 DRM_DEBUG_KMS("link status %6ph\n", link_status);
554 return true; 589 return true;
555} 590}
556 591
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 6dacec4e2090..6adbc998349e 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -2535,8 +2535,8 @@ static int cik_cp_gfx_resume(struct radeon_device *rdev)
2535 /* ring 0 - compute and gfx */ 2535 /* ring 0 - compute and gfx */
2536 /* Set ring buffer size */ 2536 /* Set ring buffer size */
2537 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 2537 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2538 rb_bufsz = drm_order(ring->ring_size / 8); 2538 rb_bufsz = order_base_2(ring->ring_size / 8);
2539 tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; 2539 tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
2540#ifdef __BIG_ENDIAN 2540#ifdef __BIG_ENDIAN
2541 tmp |= BUF_SWAP_32BIT; 2541 tmp |= BUF_SWAP_32BIT;
2542#endif 2542#endif
@@ -2915,7 +2915,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
2915 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ 2915 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
2916 tmp = RREG32(CP_HPD_EOP_CONTROL); 2916 tmp = RREG32(CP_HPD_EOP_CONTROL);
2917 tmp &= ~EOP_SIZE_MASK; 2917 tmp &= ~EOP_SIZE_MASK;
2918 tmp |= drm_order(MEC_HPD_SIZE / 8); 2918 tmp |= order_base_2(MEC_HPD_SIZE / 8);
2919 WREG32(CP_HPD_EOP_CONTROL, tmp); 2919 WREG32(CP_HPD_EOP_CONTROL, tmp);
2920 } 2920 }
2921 cik_srbm_select(rdev, 0, 0, 0, 0); 2921 cik_srbm_select(rdev, 0, 0, 0, 0);
@@ -3030,9 +3030,9 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
3030 ~(QUEUE_SIZE_MASK | RPTR_BLOCK_SIZE_MASK); 3030 ~(QUEUE_SIZE_MASK | RPTR_BLOCK_SIZE_MASK);
3031 3031
3032 mqd->queue_state.cp_hqd_pq_control |= 3032 mqd->queue_state.cp_hqd_pq_control |=
3033 drm_order(rdev->ring[idx].ring_size / 8); 3033 order_base_2(rdev->ring[idx].ring_size / 8);
3034 mqd->queue_state.cp_hqd_pq_control |= 3034 mqd->queue_state.cp_hqd_pq_control |=
3035 (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8); 3035 (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8);
3036#ifdef __BIG_ENDIAN 3036#ifdef __BIG_ENDIAN
3037 mqd->queue_state.cp_hqd_pq_control |= BUF_SWAP_32BIT; 3037 mqd->queue_state.cp_hqd_pq_control |= BUF_SWAP_32BIT;
3038#endif 3038#endif
@@ -3375,7 +3375,7 @@ static int cik_sdma_gfx_resume(struct radeon_device *rdev)
3375 WREG32(SDMA0_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0); 3375 WREG32(SDMA0_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);
3376 3376
3377 /* Set ring buffer size in dwords */ 3377 /* Set ring buffer size in dwords */
3378 rb_bufsz = drm_order(ring->ring_size / 4); 3378 rb_bufsz = order_base_2(ring->ring_size / 4);
3379 rb_cntl = rb_bufsz << 1; 3379 rb_cntl = rb_bufsz << 1;
3380#ifdef __BIG_ENDIAN 3380#ifdef __BIG_ENDIAN
3381 rb_cntl |= SDMA_RB_SWAP_ENABLE | SDMA_RPTR_WRITEBACK_SWAP_ENABLE; 3381 rb_cntl |= SDMA_RB_SWAP_ENABLE | SDMA_RPTR_WRITEBACK_SWAP_ENABLE;
@@ -5030,7 +5030,7 @@ static int cik_irq_init(struct radeon_device *rdev)
5030 WREG32(INTERRUPT_CNTL, interrupt_cntl); 5030 WREG32(INTERRUPT_CNTL, interrupt_cntl);
5031 5031
5032 WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8); 5032 WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
5033 rb_bufsz = drm_order(rdev->ih.ring_size / 4); 5033 rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
5034 5034
5035 ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE | 5035 ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
5036 IH_WPTR_OVERFLOW_CLEAR | 5036 IH_WPTR_OVERFLOW_CLEAR |
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 038dcac7670c..b67c9ec7f690 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2881,8 +2881,8 @@ static int evergreen_cp_resume(struct radeon_device *rdev)
2881 RREG32(GRBM_SOFT_RESET); 2881 RREG32(GRBM_SOFT_RESET);
2882 2882
2883 /* Set ring buffer size */ 2883 /* Set ring buffer size */
2884 rb_bufsz = drm_order(ring->ring_size / 8); 2884 rb_bufsz = order_base_2(ring->ring_size / 8);
2885 tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; 2885 tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
2886#ifdef __BIG_ENDIAN 2886#ifdef __BIG_ENDIAN
2887 tmp |= BUF_SWAP_32BIT; 2887 tmp |= BUF_SWAP_32BIT;
2888#endif 2888#endif
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 56bd4f3be4fe..5b6e47765656 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1560,8 +1560,8 @@ static int cayman_cp_resume(struct radeon_device *rdev)
1560 1560
1561 /* Set ring buffer size */ 1561 /* Set ring buffer size */
1562 ring = &rdev->ring[ridx[i]]; 1562 ring = &rdev->ring[ridx[i]];
1563 rb_cntl = drm_order(ring->ring_size / 8); 1563 rb_cntl = order_base_2(ring->ring_size / 8);
1564 rb_cntl |= drm_order(RADEON_GPU_PAGE_SIZE/8) << 8; 1564 rb_cntl |= order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8;
1565#ifdef __BIG_ENDIAN 1565#ifdef __BIG_ENDIAN
1566 rb_cntl |= BUF_SWAP_32BIT; 1566 rb_cntl |= BUF_SWAP_32BIT;
1567#endif 1567#endif
@@ -1720,7 +1720,7 @@ int cayman_dma_resume(struct radeon_device *rdev)
1720 WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0); 1720 WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);
1721 1721
1722 /* Set ring buffer size in dwords */ 1722 /* Set ring buffer size in dwords */
1723 rb_bufsz = drm_order(ring->ring_size / 4); 1723 rb_bufsz = order_base_2(ring->ring_size / 4);
1724 rb_cntl = rb_bufsz << 1; 1724 rb_cntl = rb_bufsz << 1;
1725#ifdef __BIG_ENDIAN 1725#ifdef __BIG_ENDIAN
1726 rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE; 1726 rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 75349cdaa84b..5625cf706f0c 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1097,7 +1097,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
1097 } 1097 }
1098 1098
1099 /* Align ring size */ 1099 /* Align ring size */
1100 rb_bufsz = drm_order(ring_size / 8); 1100 rb_bufsz = order_base_2(ring_size / 8);
1101 ring_size = (1 << (rb_bufsz + 1)) * 4; 1101 ring_size = (1 << (rb_bufsz + 1)) * 4;
1102 r100_cp_load_microcode(rdev); 1102 r100_cp_load_microcode(rdev);
1103 r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET, 1103 r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET,
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 393880a09412..cfc1d28ade39 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2413,8 +2413,8 @@ int r600_cp_resume(struct radeon_device *rdev)
2413 WREG32(GRBM_SOFT_RESET, 0); 2413 WREG32(GRBM_SOFT_RESET, 0);
2414 2414
2415 /* Set ring buffer size */ 2415 /* Set ring buffer size */
2416 rb_bufsz = drm_order(ring->ring_size / 8); 2416 rb_bufsz = order_base_2(ring->ring_size / 8);
2417 tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; 2417 tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
2418#ifdef __BIG_ENDIAN 2418#ifdef __BIG_ENDIAN
2419 tmp |= BUF_SWAP_32BIT; 2419 tmp |= BUF_SWAP_32BIT;
2420#endif 2420#endif
@@ -2467,7 +2467,7 @@ void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsign
2467 int r; 2467 int r;
2468 2468
2469 /* Align ring size */ 2469 /* Align ring size */
2470 rb_bufsz = drm_order(ring_size / 8); 2470 rb_bufsz = order_base_2(ring_size / 8);
2471 ring_size = (1 << (rb_bufsz + 1)) * 4; 2471 ring_size = (1 << (rb_bufsz + 1)) * 4;
2472 ring->ring_size = ring_size; 2472 ring->ring_size = ring_size;
2473 ring->align_mask = 16 - 1; 2473 ring->align_mask = 16 - 1;
@@ -2547,7 +2547,7 @@ int r600_dma_resume(struct radeon_device *rdev)
2547 WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0); 2547 WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
2548 2548
2549 /* Set ring buffer size in dwords */ 2549 /* Set ring buffer size in dwords */
2550 rb_bufsz = drm_order(ring->ring_size / 4); 2550 rb_bufsz = order_base_2(ring->ring_size / 4);
2551 rb_cntl = rb_bufsz << 1; 2551 rb_cntl = rb_bufsz << 1;
2552#ifdef __BIG_ENDIAN 2552#ifdef __BIG_ENDIAN
2553 rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE; 2553 rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
@@ -2656,7 +2656,7 @@ int r600_uvd_rbc_start(struct radeon_device *rdev)
2656 WREG32(UVD_RBC_RB_BASE, ring->gpu_addr); 2656 WREG32(UVD_RBC_RB_BASE, ring->gpu_addr);
2657 2657
2658 /* Set ring buffer size */ 2658 /* Set ring buffer size */
2659 rb_bufsz = drm_order(ring->ring_size); 2659 rb_bufsz = order_base_2(ring->ring_size);
2660 rb_bufsz = (0x1 << 8) | rb_bufsz; 2660 rb_bufsz = (0x1 << 8) | rb_bufsz;
2661 WREG32(UVD_RBC_RB_CNTL, rb_bufsz); 2661 WREG32(UVD_RBC_RB_CNTL, rb_bufsz);
2662 2662
@@ -3166,7 +3166,7 @@ int r600_copy_cpdma(struct radeon_device *rdev,
3166 3166
3167 size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT); 3167 size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
3168 num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff); 3168 num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
3169 r = radeon_ring_lock(rdev, ring, num_loops * 6 + 21); 3169 r = radeon_ring_lock(rdev, ring, num_loops * 6 + 24);
3170 if (r) { 3170 if (r) {
3171 DRM_ERROR("radeon: moving bo (%d).\n", r); 3171 DRM_ERROR("radeon: moving bo (%d).\n", r);
3172 radeon_semaphore_free(rdev, &sem, NULL); 3172 radeon_semaphore_free(rdev, &sem, NULL);
@@ -3181,6 +3181,9 @@ int r600_copy_cpdma(struct radeon_device *rdev,
3181 radeon_semaphore_free(rdev, &sem, NULL); 3181 radeon_semaphore_free(rdev, &sem, NULL);
3182 } 3182 }
3183 3183
3184 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
3185 radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
3186 radeon_ring_write(ring, WAIT_3D_IDLE_bit);
3184 for (i = 0; i < num_loops; i++) { 3187 for (i = 0; i < num_loops; i++) {
3185 cur_size_in_bytes = size_in_bytes; 3188 cur_size_in_bytes = size_in_bytes;
3186 if (cur_size_in_bytes > 0x1fffff) 3189 if (cur_size_in_bytes > 0x1fffff)
@@ -3812,7 +3815,7 @@ void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
3812 u32 rb_bufsz; 3815 u32 rb_bufsz;
3813 3816
3814 /* Align ring size */ 3817 /* Align ring size */
3815 rb_bufsz = drm_order(ring_size / 4); 3818 rb_bufsz = order_base_2(ring_size / 4);
3816 ring_size = (1 << rb_bufsz) * 4; 3819 ring_size = (1 << rb_bufsz) * 4;
3817 rdev->ih.ring_size = ring_size; 3820 rdev->ih.ring_size = ring_size;
3818 rdev->ih.ptr_mask = rdev->ih.ring_size - 1; 3821 rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
@@ -4049,7 +4052,7 @@ int r600_irq_init(struct radeon_device *rdev)
4049 WREG32(INTERRUPT_CNTL, interrupt_cntl); 4052 WREG32(INTERRUPT_CNTL, interrupt_cntl);
4050 4053
4051 WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8); 4054 WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
4052 rb_bufsz = drm_order(rdev->ih.ring_size / 4); 4055 rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
4053 4056
4054 ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE | 4057 ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
4055 IH_WPTR_OVERFLOW_CLEAR | 4058 IH_WPTR_OVERFLOW_CLEAR |
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 1c51c08b1fde..d8eb48bff0ed 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -2200,13 +2200,13 @@ int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
2200 dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle 2200 dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle
2201 + init->ring_size / sizeof(u32)); 2201 + init->ring_size / sizeof(u32));
2202 dev_priv->ring.size = init->ring_size; 2202 dev_priv->ring.size = init->ring_size;
2203 dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8); 2203 dev_priv->ring.size_l2qw = order_base_2(init->ring_size / 8);
2204 2204
2205 dev_priv->ring.rptr_update = /* init->rptr_update */ 4096; 2205 dev_priv->ring.rptr_update = /* init->rptr_update */ 4096;
2206 dev_priv->ring.rptr_update_l2qw = drm_order(/* init->rptr_update */ 4096 / 8); 2206 dev_priv->ring.rptr_update_l2qw = order_base_2(/* init->rptr_update */ 4096 / 8);
2207 2207
2208 dev_priv->ring.fetch_size = /* init->fetch_size */ 32; 2208 dev_priv->ring.fetch_size = /* init->fetch_size */ 32;
2209 dev_priv->ring.fetch_size_l2ow = drm_order(/* init->fetch_size */ 32 / 16); 2209 dev_priv->ring.fetch_size_l2ow = order_base_2(/* init->fetch_size */ 32 / 16);
2210 2210
2211 dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1; 2211 dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
2212 2212
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index b88f54b134ab..e5c860f4ccbe 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -278,9 +278,9 @@ bool r600_dynamicpm_enabled(struct radeon_device *rdev)
278void r600_enable_sclk_control(struct radeon_device *rdev, bool enable) 278void r600_enable_sclk_control(struct radeon_device *rdev, bool enable)
279{ 279{
280 if (enable) 280 if (enable)
281 WREG32_P(GENERAL_PWRMGT, 0, ~SCLK_PWRMGT_OFF); 281 WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF);
282 else 282 else
283 WREG32_P(GENERAL_PWRMGT, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF); 283 WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);
284} 284}
285 285
286void r600_enable_mclk_control(struct radeon_device *rdev, bool enable) 286void r600_enable_mclk_control(struct radeon_device *rdev, bool enable)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 2f08219c39b6..19066d1dcb7d 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -492,9 +492,6 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
492int radeon_mode_dumb_mmap(struct drm_file *filp, 492int radeon_mode_dumb_mmap(struct drm_file *filp,
493 struct drm_device *dev, 493 struct drm_device *dev,
494 uint32_t handle, uint64_t *offset_p); 494 uint32_t handle, uint64_t *offset_p);
495int radeon_mode_dumb_destroy(struct drm_file *file_priv,
496 struct drm_device *dev,
497 uint32_t handle);
498 495
499/* 496/*
500 * Semaphores. 497 * Semaphores.
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 78bec1a58ed1..f8f8b3113ddd 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -1161,6 +1161,7 @@ static struct radeon_asic rv6xx_asic = {
1161 .get_mclk = &rv6xx_dpm_get_mclk, 1161 .get_mclk = &rv6xx_dpm_get_mclk,
1162 .print_power_state = &rv6xx_dpm_print_power_state, 1162 .print_power_state = &rv6xx_dpm_print_power_state,
1163 .debugfs_print_current_performance_level = &rv6xx_dpm_debugfs_print_current_performance_level, 1163 .debugfs_print_current_performance_level = &rv6xx_dpm_debugfs_print_current_performance_level,
1164 .force_performance_level = &rv6xx_dpm_force_performance_level,
1164 }, 1165 },
1165 .pflip = { 1166 .pflip = {
1166 .pre_page_flip = &rs600_pre_page_flip, 1167 .pre_page_flip = &rs600_pre_page_flip,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index ca1895709908..902479fa737f 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -421,6 +421,8 @@ void rv6xx_dpm_print_power_state(struct radeon_device *rdev,
421 struct radeon_ps *ps); 421 struct radeon_ps *ps);
422void rv6xx_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, 422void rv6xx_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
423 struct seq_file *m); 423 struct seq_file *m);
424int rv6xx_dpm_force_performance_level(struct radeon_device *rdev,
425 enum radeon_dpm_forced_level level);
424/* rs780 dpm */ 426/* rs780 dpm */
425int rs780_dpm_init(struct radeon_device *rdev); 427int rs780_dpm_init(struct radeon_device *rdev);
426int rs780_dpm_enable(struct radeon_device *rdev); 428int rs780_dpm_enable(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 78edadc9e86b..68ce36056019 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -147,7 +147,7 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
147 enum radeon_combios_table_offset table) 147 enum radeon_combios_table_offset table)
148{ 148{
149 struct radeon_device *rdev = dev->dev_private; 149 struct radeon_device *rdev = dev->dev_private;
150 int rev; 150 int rev, size;
151 uint16_t offset = 0, check_offset; 151 uint16_t offset = 0, check_offset;
152 152
153 if (!rdev->bios) 153 if (!rdev->bios)
@@ -156,174 +156,106 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
156 switch (table) { 156 switch (table) {
157 /* absolute offset tables */ 157 /* absolute offset tables */
158 case COMBIOS_ASIC_INIT_1_TABLE: 158 case COMBIOS_ASIC_INIT_1_TABLE:
159 check_offset = RBIOS16(rdev->bios_header_start + 0xc); 159 check_offset = 0xc;
160 if (check_offset)
161 offset = check_offset;
162 break; 160 break;
163 case COMBIOS_BIOS_SUPPORT_TABLE: 161 case COMBIOS_BIOS_SUPPORT_TABLE:
164 check_offset = RBIOS16(rdev->bios_header_start + 0x14); 162 check_offset = 0x14;
165 if (check_offset)
166 offset = check_offset;
167 break; 163 break;
168 case COMBIOS_DAC_PROGRAMMING_TABLE: 164 case COMBIOS_DAC_PROGRAMMING_TABLE:
169 check_offset = RBIOS16(rdev->bios_header_start + 0x2a); 165 check_offset = 0x2a;
170 if (check_offset)
171 offset = check_offset;
172 break; 166 break;
173 case COMBIOS_MAX_COLOR_DEPTH_TABLE: 167 case COMBIOS_MAX_COLOR_DEPTH_TABLE:
174 check_offset = RBIOS16(rdev->bios_header_start + 0x2c); 168 check_offset = 0x2c;
175 if (check_offset)
176 offset = check_offset;
177 break; 169 break;
178 case COMBIOS_CRTC_INFO_TABLE: 170 case COMBIOS_CRTC_INFO_TABLE:
179 check_offset = RBIOS16(rdev->bios_header_start + 0x2e); 171 check_offset = 0x2e;
180 if (check_offset)
181 offset = check_offset;
182 break; 172 break;
183 case COMBIOS_PLL_INFO_TABLE: 173 case COMBIOS_PLL_INFO_TABLE:
184 check_offset = RBIOS16(rdev->bios_header_start + 0x30); 174 check_offset = 0x30;
185 if (check_offset)
186 offset = check_offset;
187 break; 175 break;
188 case COMBIOS_TV_INFO_TABLE: 176 case COMBIOS_TV_INFO_TABLE:
189 check_offset = RBIOS16(rdev->bios_header_start + 0x32); 177 check_offset = 0x32;
190 if (check_offset)
191 offset = check_offset;
192 break; 178 break;
193 case COMBIOS_DFP_INFO_TABLE: 179 case COMBIOS_DFP_INFO_TABLE:
194 check_offset = RBIOS16(rdev->bios_header_start + 0x34); 180 check_offset = 0x34;
195 if (check_offset)
196 offset = check_offset;
197 break; 181 break;
198 case COMBIOS_HW_CONFIG_INFO_TABLE: 182 case COMBIOS_HW_CONFIG_INFO_TABLE:
199 check_offset = RBIOS16(rdev->bios_header_start + 0x36); 183 check_offset = 0x36;
200 if (check_offset)
201 offset = check_offset;
202 break; 184 break;
203 case COMBIOS_MULTIMEDIA_INFO_TABLE: 185 case COMBIOS_MULTIMEDIA_INFO_TABLE:
204 check_offset = RBIOS16(rdev->bios_header_start + 0x38); 186 check_offset = 0x38;
205 if (check_offset)
206 offset = check_offset;
207 break; 187 break;
208 case COMBIOS_TV_STD_PATCH_TABLE: 188 case COMBIOS_TV_STD_PATCH_TABLE:
209 check_offset = RBIOS16(rdev->bios_header_start + 0x3e); 189 check_offset = 0x3e;
210 if (check_offset)
211 offset = check_offset;
212 break; 190 break;
213 case COMBIOS_LCD_INFO_TABLE: 191 case COMBIOS_LCD_INFO_TABLE:
214 check_offset = RBIOS16(rdev->bios_header_start + 0x40); 192 check_offset = 0x40;
215 if (check_offset)
216 offset = check_offset;
217 break; 193 break;
218 case COMBIOS_MOBILE_INFO_TABLE: 194 case COMBIOS_MOBILE_INFO_TABLE:
219 check_offset = RBIOS16(rdev->bios_header_start + 0x42); 195 check_offset = 0x42;
220 if (check_offset)
221 offset = check_offset;
222 break; 196 break;
223 case COMBIOS_PLL_INIT_TABLE: 197 case COMBIOS_PLL_INIT_TABLE:
224 check_offset = RBIOS16(rdev->bios_header_start + 0x46); 198 check_offset = 0x46;
225 if (check_offset)
226 offset = check_offset;
227 break; 199 break;
228 case COMBIOS_MEM_CONFIG_TABLE: 200 case COMBIOS_MEM_CONFIG_TABLE:
229 check_offset = RBIOS16(rdev->bios_header_start + 0x48); 201 check_offset = 0x48;
230 if (check_offset)
231 offset = check_offset;
232 break; 202 break;
233 case COMBIOS_SAVE_MASK_TABLE: 203 case COMBIOS_SAVE_MASK_TABLE:
234 check_offset = RBIOS16(rdev->bios_header_start + 0x4a); 204 check_offset = 0x4a;
235 if (check_offset)
236 offset = check_offset;
237 break; 205 break;
238 case COMBIOS_HARDCODED_EDID_TABLE: 206 case COMBIOS_HARDCODED_EDID_TABLE:
239 check_offset = RBIOS16(rdev->bios_header_start + 0x4c); 207 check_offset = 0x4c;
240 if (check_offset)
241 offset = check_offset;
242 break; 208 break;
243 case COMBIOS_ASIC_INIT_2_TABLE: 209 case COMBIOS_ASIC_INIT_2_TABLE:
244 check_offset = RBIOS16(rdev->bios_header_start + 0x4e); 210 check_offset = 0x4e;
245 if (check_offset)
246 offset = check_offset;
247 break; 211 break;
248 case COMBIOS_CONNECTOR_INFO_TABLE: 212 case COMBIOS_CONNECTOR_INFO_TABLE:
249 check_offset = RBIOS16(rdev->bios_header_start + 0x50); 213 check_offset = 0x50;
250 if (check_offset)
251 offset = check_offset;
252 break; 214 break;
253 case COMBIOS_DYN_CLK_1_TABLE: 215 case COMBIOS_DYN_CLK_1_TABLE:
254 check_offset = RBIOS16(rdev->bios_header_start + 0x52); 216 check_offset = 0x52;
255 if (check_offset)
256 offset = check_offset;
257 break; 217 break;
258 case COMBIOS_RESERVED_MEM_TABLE: 218 case COMBIOS_RESERVED_MEM_TABLE:
259 check_offset = RBIOS16(rdev->bios_header_start + 0x54); 219 check_offset = 0x54;
260 if (check_offset)
261 offset = check_offset;
262 break; 220 break;
263 case COMBIOS_EXT_TMDS_INFO_TABLE: 221 case COMBIOS_EXT_TMDS_INFO_TABLE:
264 check_offset = RBIOS16(rdev->bios_header_start + 0x58); 222 check_offset = 0x58;
265 if (check_offset)
266 offset = check_offset;
267 break; 223 break;
268 case COMBIOS_MEM_CLK_INFO_TABLE: 224 case COMBIOS_MEM_CLK_INFO_TABLE:
269 check_offset = RBIOS16(rdev->bios_header_start + 0x5a); 225 check_offset = 0x5a;
270 if (check_offset)
271 offset = check_offset;
272 break; 226 break;
273 case COMBIOS_EXT_DAC_INFO_TABLE: 227 case COMBIOS_EXT_DAC_INFO_TABLE:
274 check_offset = RBIOS16(rdev->bios_header_start + 0x5c); 228 check_offset = 0x5c;
275 if (check_offset)
276 offset = check_offset;
277 break; 229 break;
278 case COMBIOS_MISC_INFO_TABLE: 230 case COMBIOS_MISC_INFO_TABLE:
279 check_offset = RBIOS16(rdev->bios_header_start + 0x5e); 231 check_offset = 0x5e;
280 if (check_offset)
281 offset = check_offset;
282 break; 232 break;
283 case COMBIOS_CRT_INFO_TABLE: 233 case COMBIOS_CRT_INFO_TABLE:
284 check_offset = RBIOS16(rdev->bios_header_start + 0x60); 234 check_offset = 0x60;
285 if (check_offset)
286 offset = check_offset;
287 break; 235 break;
288 case COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE: 236 case COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE:
289 check_offset = RBIOS16(rdev->bios_header_start + 0x62); 237 check_offset = 0x62;
290 if (check_offset)
291 offset = check_offset;
292 break; 238 break;
293 case COMBIOS_COMPONENT_VIDEO_INFO_TABLE: 239 case COMBIOS_COMPONENT_VIDEO_INFO_TABLE:
294 check_offset = RBIOS16(rdev->bios_header_start + 0x64); 240 check_offset = 0x64;
295 if (check_offset)
296 offset = check_offset;
297 break; 241 break;
298 case COMBIOS_FAN_SPEED_INFO_TABLE: 242 case COMBIOS_FAN_SPEED_INFO_TABLE:
299 check_offset = RBIOS16(rdev->bios_header_start + 0x66); 243 check_offset = 0x66;
300 if (check_offset)
301 offset = check_offset;
302 break; 244 break;
303 case COMBIOS_OVERDRIVE_INFO_TABLE: 245 case COMBIOS_OVERDRIVE_INFO_TABLE:
304 check_offset = RBIOS16(rdev->bios_header_start + 0x68); 246 check_offset = 0x68;
305 if (check_offset)
306 offset = check_offset;
307 break; 247 break;
308 case COMBIOS_OEM_INFO_TABLE: 248 case COMBIOS_OEM_INFO_TABLE:
309 check_offset = RBIOS16(rdev->bios_header_start + 0x6a); 249 check_offset = 0x6a;
310 if (check_offset)
311 offset = check_offset;
312 break; 250 break;
313 case COMBIOS_DYN_CLK_2_TABLE: 251 case COMBIOS_DYN_CLK_2_TABLE:
314 check_offset = RBIOS16(rdev->bios_header_start + 0x6c); 252 check_offset = 0x6c;
315 if (check_offset)
316 offset = check_offset;
317 break; 253 break;
318 case COMBIOS_POWER_CONNECTOR_INFO_TABLE: 254 case COMBIOS_POWER_CONNECTOR_INFO_TABLE:
319 check_offset = RBIOS16(rdev->bios_header_start + 0x6e); 255 check_offset = 0x6e;
320 if (check_offset)
321 offset = check_offset;
322 break; 256 break;
323 case COMBIOS_I2C_INFO_TABLE: 257 case COMBIOS_I2C_INFO_TABLE:
324 check_offset = RBIOS16(rdev->bios_header_start + 0x70); 258 check_offset = 0x70;
325 if (check_offset)
326 offset = check_offset;
327 break; 259 break;
328 /* relative offset tables */ 260 /* relative offset tables */
329 case COMBIOS_ASIC_INIT_3_TABLE: /* offset from misc info */ 261 case COMBIOS_ASIC_INIT_3_TABLE: /* offset from misc info */
@@ -439,11 +371,16 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
439 } 371 }
440 break; 372 break;
441 default: 373 default:
374 check_offset = 0;
442 break; 375 break;
443 } 376 }
444 377
445 return offset; 378 size = RBIOS8(rdev->bios_header_start + 0x6);
379 /* check absolute offset tables */
380 if (table < COMBIOS_ASIC_INIT_3_TABLE && check_offset && check_offset < size)
381 offset = RBIOS16(rdev->bios_header_start + check_offset);
446 382
383 return offset;
447} 384}
448 385
449bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev) 386bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
@@ -965,16 +902,22 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
965 dac = RBIOS8(dac_info + 0x3) & 0xf; 902 dac = RBIOS8(dac_info + 0x3) & 0xf;
966 p_dac->ps2_pdac_adj = (bg << 8) | (dac); 903 p_dac->ps2_pdac_adj = (bg << 8) | (dac);
967 } 904 }
968 /* if the values are all zeros, use the table */ 905 /* if the values are zeros, use the table */
969 if (p_dac->ps2_pdac_adj) 906 if ((dac == 0) || (bg == 0))
907 found = 0;
908 else
970 found = 1; 909 found = 1;
971 } 910 }
972 911
973 /* quirks */ 912 /* quirks */
913 /* Radeon 7000 (RV100) */
914 if (((dev->pdev->device == 0x5159) &&
915 (dev->pdev->subsystem_vendor == 0x174B) &&
916 (dev->pdev->subsystem_device == 0x7c28)) ||
974 /* Radeon 9100 (R200) */ 917 /* Radeon 9100 (R200) */
975 if ((dev->pdev->device == 0x514D) && 918 ((dev->pdev->device == 0x514D) &&
976 (dev->pdev->subsystem_vendor == 0x174B) && 919 (dev->pdev->subsystem_vendor == 0x174B) &&
977 (dev->pdev->subsystem_device == 0x7149)) { 920 (dev->pdev->subsystem_device == 0x7149))) {
978 /* vbios value is bad, use the default */ 921 /* vbios value is bad, use the default */
979 found = 0; 922 found = 0;
980 } 923 }
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index efc4f6441ef4..3cae2bbc1854 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -1444,13 +1444,13 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
1444 dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle 1444 dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle
1445 + init->ring_size / sizeof(u32)); 1445 + init->ring_size / sizeof(u32));
1446 dev_priv->ring.size = init->ring_size; 1446 dev_priv->ring.size = init->ring_size;
1447 dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8); 1447 dev_priv->ring.size_l2qw = order_base_2(init->ring_size / 8);
1448 1448
1449 dev_priv->ring.rptr_update = /* init->rptr_update */ 4096; 1449 dev_priv->ring.rptr_update = /* init->rptr_update */ 4096;
1450 dev_priv->ring.rptr_update_l2qw = drm_order( /* init->rptr_update */ 4096 / 8); 1450 dev_priv->ring.rptr_update_l2qw = order_base_2( /* init->rptr_update */ 4096 / 8);
1451 1451
1452 dev_priv->ring.fetch_size = /* init->fetch_size */ 32; 1452 dev_priv->ring.fetch_size = /* init->fetch_size */ 32;
1453 dev_priv->ring.fetch_size_l2ow = drm_order( /* init->fetch_size */ 32 / 16); 1453 dev_priv->ring.fetch_size_l2ow = order_base_2( /* init->fetch_size */ 32 / 16);
1454 dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1; 1454 dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
1455 1455
1456 dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK; 1456 dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 82335e38ec4f..0610ca4fb6a3 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1269,7 +1269,7 @@ int radeon_device_init(struct radeon_device *rdev,
1269 /* this will fail for cards that aren't VGA class devices, just 1269 /* this will fail for cards that aren't VGA class devices, just
1270 * ignore it */ 1270 * ignore it */
1271 vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode); 1271 vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
1272 vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops); 1272 vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, false);
1273 1273
1274 r = radeon_init(rdev); 1274 r = radeon_init(rdev);
1275 if (r) 1275 if (r)
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index c2b67b4e1ac2..358bd96c06c5 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -345,7 +345,8 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
345 345
346static int radeon_crtc_page_flip(struct drm_crtc *crtc, 346static int radeon_crtc_page_flip(struct drm_crtc *crtc,
347 struct drm_framebuffer *fb, 347 struct drm_framebuffer *fb,
348 struct drm_pending_vblank_event *event) 348 struct drm_pending_vblank_event *event,
349 uint32_t page_flip_flags)
349{ 350{
350 struct drm_device *dev = crtc->dev; 351 struct drm_device *dev = crtc->dev;
351 struct radeon_device *rdev = dev->dev_private; 352 struct radeon_device *rdev = dev->dev_private;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 29876b1be8ec..1f93dd503646 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -81,7 +81,6 @@
81#define KMS_DRIVER_PATCHLEVEL 0 81#define KMS_DRIVER_PATCHLEVEL 0
82int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); 82int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
83int radeon_driver_unload_kms(struct drm_device *dev); 83int radeon_driver_unload_kms(struct drm_device *dev);
84int radeon_driver_firstopen_kms(struct drm_device *dev);
85void radeon_driver_lastclose_kms(struct drm_device *dev); 84void radeon_driver_lastclose_kms(struct drm_device *dev);
86int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv); 85int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
87void radeon_driver_postclose_kms(struct drm_device *dev, 86void radeon_driver_postclose_kms(struct drm_device *dev,
@@ -101,8 +100,6 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev);
101int radeon_driver_irq_postinstall_kms(struct drm_device *dev); 100int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
102void radeon_driver_irq_uninstall_kms(struct drm_device *dev); 101void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
103irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS); 102irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS);
104int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
105 struct drm_file *file_priv);
106int radeon_gem_object_init(struct drm_gem_object *obj); 103int radeon_gem_object_init(struct drm_gem_object *obj);
107void radeon_gem_object_free(struct drm_gem_object *obj); 104void radeon_gem_object_free(struct drm_gem_object *obj);
108int radeon_gem_object_open(struct drm_gem_object *obj, 105int radeon_gem_object_open(struct drm_gem_object *obj,
@@ -111,7 +108,7 @@ void radeon_gem_object_close(struct drm_gem_object *obj,
111 struct drm_file *file_priv); 108 struct drm_file *file_priv);
112extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, 109extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
113 int *vpos, int *hpos); 110 int *vpos, int *hpos);
114extern struct drm_ioctl_desc radeon_ioctls_kms[]; 111extern const struct drm_ioctl_desc radeon_ioctls_kms[];
115extern int radeon_max_kms_ioctl; 112extern int radeon_max_kms_ioctl;
116int radeon_mmap(struct file *filp, struct vm_area_struct *vma); 113int radeon_mmap(struct file *filp, struct vm_area_struct *vma);
117int radeon_mode_dumb_mmap(struct drm_file *filp, 114int radeon_mode_dumb_mmap(struct drm_file *filp,
@@ -120,9 +117,6 @@ int radeon_mode_dumb_mmap(struct drm_file *filp,
120int radeon_mode_dumb_create(struct drm_file *file_priv, 117int radeon_mode_dumb_create(struct drm_file *file_priv,
121 struct drm_device *dev, 118 struct drm_device *dev,
122 struct drm_mode_create_dumb *args); 119 struct drm_mode_create_dumb *args);
123int radeon_mode_dumb_destroy(struct drm_file *file_priv,
124 struct drm_device *dev,
125 uint32_t handle);
126struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj); 120struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj);
127struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev, 121struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
128 size_t size, 122 size_t size,
@@ -272,7 +266,6 @@ static const struct file_operations radeon_driver_old_fops = {
272 .unlocked_ioctl = drm_ioctl, 266 .unlocked_ioctl = drm_ioctl,
273 .mmap = drm_mmap, 267 .mmap = drm_mmap,
274 .poll = drm_poll, 268 .poll = drm_poll,
275 .fasync = drm_fasync,
276 .read = drm_read, 269 .read = drm_read,
277#ifdef CONFIG_COMPAT 270#ifdef CONFIG_COMPAT
278 .compat_ioctl = radeon_compat_ioctl, 271 .compat_ioctl = radeon_compat_ioctl,
@@ -282,7 +275,7 @@ static const struct file_operations radeon_driver_old_fops = {
282 275
283static struct drm_driver driver_old = { 276static struct drm_driver driver_old = {
284 .driver_features = 277 .driver_features =
285 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | 278 DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
286 DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED, 279 DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED,
287 .dev_priv_size = sizeof(drm_radeon_buf_priv_t), 280 .dev_priv_size = sizeof(drm_radeon_buf_priv_t),
288 .load = radeon_driver_load, 281 .load = radeon_driver_load,
@@ -381,7 +374,6 @@ static const struct file_operations radeon_driver_kms_fops = {
381 .unlocked_ioctl = drm_ioctl, 374 .unlocked_ioctl = drm_ioctl,
382 .mmap = radeon_mmap, 375 .mmap = radeon_mmap,
383 .poll = drm_poll, 376 .poll = drm_poll,
384 .fasync = drm_fasync,
385 .read = drm_read, 377 .read = drm_read,
386#ifdef CONFIG_COMPAT 378#ifdef CONFIG_COMPAT
387 .compat_ioctl = radeon_kms_compat_ioctl, 379 .compat_ioctl = radeon_kms_compat_ioctl,
@@ -390,12 +382,11 @@ static const struct file_operations radeon_driver_kms_fops = {
390 382
391static struct drm_driver kms_driver = { 383static struct drm_driver kms_driver = {
392 .driver_features = 384 .driver_features =
393 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | 385 DRIVER_USE_AGP |
394 DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED | DRIVER_GEM | 386 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
395 DRIVER_PRIME, 387 DRIVER_PRIME,
396 .dev_priv_size = 0, 388 .dev_priv_size = 0,
397 .load = radeon_driver_load_kms, 389 .load = radeon_driver_load_kms,
398 .firstopen = radeon_driver_firstopen_kms,
399 .open = radeon_driver_open_kms, 390 .open = radeon_driver_open_kms,
400 .preclose = radeon_driver_preclose_kms, 391 .preclose = radeon_driver_preclose_kms,
401 .postclose = radeon_driver_postclose_kms, 392 .postclose = radeon_driver_postclose_kms,
@@ -421,10 +412,9 @@ static struct drm_driver kms_driver = {
421 .gem_free_object = radeon_gem_object_free, 412 .gem_free_object = radeon_gem_object_free,
422 .gem_open_object = radeon_gem_object_open, 413 .gem_open_object = radeon_gem_object_open,
423 .gem_close_object = radeon_gem_object_close, 414 .gem_close_object = radeon_gem_object_close,
424 .dma_ioctl = radeon_dma_ioctl_kms,
425 .dumb_create = radeon_mode_dumb_create, 415 .dumb_create = radeon_mode_dumb_create,
426 .dumb_map_offset = radeon_mode_dumb_mmap, 416 .dumb_map_offset = radeon_mode_dumb_mmap,
427 .dumb_destroy = radeon_mode_dumb_destroy, 417 .dumb_destroy = drm_gem_dumb_destroy,
428 .fops = &radeon_driver_kms_fops, 418 .fops = &radeon_driver_kms_fops,
429 419
430 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 420 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index d9d31a383276..6a51d943ccf4 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -466,7 +466,7 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
466 size += rdev->vm_manager.max_pfn * 8; 466 size += rdev->vm_manager.max_pfn * 8;
467 size *= 2; 467 size *= 2;
468 r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager, 468 r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
469 RADEON_VM_PTB_ALIGN(size), 469 RADEON_GPU_PAGE_ALIGN(size),
470 RADEON_VM_PTB_ALIGN_SIZE, 470 RADEON_VM_PTB_ALIGN_SIZE,
471 RADEON_GEM_DOMAIN_VRAM); 471 RADEON_GEM_DOMAIN_VRAM);
472 if (r) { 472 if (r) {
@@ -621,7 +621,7 @@ int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
621 } 621 }
622 622
623retry: 623retry:
624 pd_size = RADEON_VM_PTB_ALIGN(radeon_vm_directory_size(rdev)); 624 pd_size = radeon_vm_directory_size(rdev);
625 r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, 625 r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
626 &vm->page_directory, pd_size, 626 &vm->page_directory, pd_size,
627 RADEON_VM_PTB_ALIGN_SIZE, false); 627 RADEON_VM_PTB_ALIGN_SIZE, false);
@@ -953,8 +953,8 @@ static int radeon_vm_update_pdes(struct radeon_device *rdev,
953retry: 953retry:
954 r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, 954 r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
955 &vm->page_tables[pt_idx], 955 &vm->page_tables[pt_idx],
956 RADEON_VM_PTB_ALIGN(RADEON_VM_PTE_COUNT * 8), 956 RADEON_VM_PTE_COUNT * 8,
957 RADEON_VM_PTB_ALIGN_SIZE, false); 957 RADEON_GPU_PAGE_SIZE, false);
958 958
959 if (r == -ENOMEM) { 959 if (r == -ENOMEM) {
960 r = radeon_vm_evict(rdev, vm); 960 r = radeon_vm_evict(rdev, vm);
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index aa796031ab65..dce99c8a5835 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -570,13 +570,6 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
570 return 0; 570 return 0;
571} 571}
572 572
573int radeon_mode_dumb_destroy(struct drm_file *file_priv,
574 struct drm_device *dev,
575 uint32_t handle)
576{
577 return drm_gem_handle_delete(file_priv, handle);
578}
579
580#if defined(CONFIG_DEBUG_FS) 573#if defined(CONFIG_DEBUG_FS)
581static int radeon_debugfs_gem_info(struct seq_file *m, void *data) 574static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
582{ 575{
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 49ff3d1a6102..b46a5616664a 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -449,19 +449,6 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
449 * Outdated mess for old drm with Xorg being in charge (void function now). 449 * Outdated mess for old drm with Xorg being in charge (void function now).
450 */ 450 */
451/** 451/**
452 * radeon_driver_firstopen_kms - drm callback for first open
453 *
454 * @dev: drm dev pointer
455 *
456 * Nothing to be done for KMS (all asics).
457 * Returns 0 on success.
458 */
459int radeon_driver_firstopen_kms(struct drm_device *dev)
460{
461 return 0;
462}
463
464/**
465 * radeon_driver_firstopen_kms - drm callback for last close 452 * radeon_driver_firstopen_kms - drm callback for last close
466 * 453 *
467 * @dev: drm dev pointer 454 * @dev: drm dev pointer
@@ -683,16 +670,6 @@ int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
683 drmcrtc); 670 drmcrtc);
684} 671}
685 672
686/*
687 * IOCTL.
688 */
689int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
690 struct drm_file *file_priv)
691{
692 /* Not valid in KMS. */
693 return -EINVAL;
694}
695
696#define KMS_INVALID_IOCTL(name) \ 673#define KMS_INVALID_IOCTL(name) \
697int name(struct drm_device *dev, void *data, struct drm_file *file_priv)\ 674int name(struct drm_device *dev, void *data, struct drm_file *file_priv)\
698{ \ 675{ \
@@ -732,7 +709,7 @@ KMS_INVALID_IOCTL(radeon_surface_alloc_kms)
732KMS_INVALID_IOCTL(radeon_surface_free_kms) 709KMS_INVALID_IOCTL(radeon_surface_free_kms)
733 710
734 711
735struct drm_ioctl_desc radeon_ioctls_kms[] = { 712const struct drm_ioctl_desc radeon_ioctls_kms[] = {
736 DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 713 DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
737 DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 714 DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
738 DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 715 DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 2020bf4a3830..c0fa4aa9ceea 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -142,7 +142,6 @@ int radeon_bo_create(struct radeon_device *rdev,
142 return r; 142 return r;
143 } 143 }
144 bo->rdev = rdev; 144 bo->rdev = rdev;
145 bo->gem_base.driver_private = NULL;
146 bo->surface_reg = -1; 145 bo->surface_reg = -1;
147 INIT_LIST_HEAD(&bo->list); 146 INIT_LIST_HEAD(&bo->list);
148 INIT_LIST_HEAD(&bo->va); 147 INIT_LIST_HEAD(&bo->va);
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 49c82c480013..209b11150263 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -113,13 +113,10 @@ static inline unsigned radeon_bo_gpu_page_alignment(struct radeon_bo *bo)
113 * @bo: radeon object for which we query the offset 113 * @bo: radeon object for which we query the offset
114 * 114 *
115 * Returns mmap offset of the object. 115 * Returns mmap offset of the object.
116 *
117 * Note: addr_space_offset is constant after ttm bo init thus isn't protected
118 * by any lock.
119 */ 116 */
120static inline u64 radeon_bo_mmap_offset(struct radeon_bo *bo) 117static inline u64 radeon_bo_mmap_offset(struct radeon_bo *bo)
121{ 118{
122 return bo->tbo.addr_space_offset; 119 return drm_vma_node_offset_addr(&bo->tbo.vma_node);
123} 120}
124 121
125extern int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, 122extern int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index 65b9eabd5a2f..20074560fc25 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -68,7 +68,6 @@ struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
68 RADEON_GEM_DOMAIN_GTT, sg, &bo); 68 RADEON_GEM_DOMAIN_GTT, sg, &bo);
69 if (ret) 69 if (ret)
70 return ERR_PTR(ret); 70 return ERR_PTR(ret);
71 bo->gem_base.driver_private = bo;
72 71
73 mutex_lock(&rdev->gem.mutex); 72 mutex_lock(&rdev->gem.mutex);
74 list_add_tail(&bo->list, &rdev->gem.objects); 73 list_add_tail(&bo->list, &rdev->gem.objects);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 6c0ce8915fac..71245d6f34a2 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -203,7 +203,9 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
203 203
204static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp) 204static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
205{ 205{
206 return 0; 206 struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
207
208 return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
207} 209}
208 210
209static void radeon_move_null(struct ttm_buffer_object *bo, 211static void radeon_move_null(struct ttm_buffer_object *bo,
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c
index 65e33f387341..363018c60412 100644
--- a/drivers/gpu/drm/radeon/rv6xx_dpm.c
+++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c
@@ -819,7 +819,7 @@ static void rv6xx_program_memory_timing_parameters(struct radeon_device *rdev)
819 POWERMODE1(calculate_memory_refresh_rate(rdev, 819 POWERMODE1(calculate_memory_refresh_rate(rdev,
820 pi->hw.sclks[R600_POWER_LEVEL_MEDIUM])) | 820 pi->hw.sclks[R600_POWER_LEVEL_MEDIUM])) |
821 POWERMODE2(calculate_memory_refresh_rate(rdev, 821 POWERMODE2(calculate_memory_refresh_rate(rdev,
822 pi->hw.sclks[R600_POWER_LEVEL_MEDIUM])) | 822 pi->hw.sclks[R600_POWER_LEVEL_HIGH])) |
823 POWERMODE3(calculate_memory_refresh_rate(rdev, 823 POWERMODE3(calculate_memory_refresh_rate(rdev,
824 pi->hw.sclks[R600_POWER_LEVEL_HIGH]))); 824 pi->hw.sclks[R600_POWER_LEVEL_HIGH])));
825 WREG32(ARB_RFSH_RATE, arb_refresh_rate); 825 WREG32(ARB_RFSH_RATE, arb_refresh_rate);
@@ -1182,10 +1182,10 @@ static void rv6xx_program_display_gap(struct radeon_device *rdev)
1182 u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL); 1182 u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL);
1183 1183
1184 tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK); 1184 tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK);
1185 if (RREG32(AVIVO_D1CRTC_CONTROL) & AVIVO_CRTC_EN) { 1185 if (rdev->pm.dpm.new_active_crtcs & 1) {
1186 tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK); 1186 tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK);
1187 tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE); 1187 tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE);
1188 } else if (RREG32(AVIVO_D2CRTC_CONTROL) & AVIVO_CRTC_EN) { 1188 } else if (rdev->pm.dpm.new_active_crtcs & 2) {
1189 tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE); 1189 tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE);
1190 tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK); 1190 tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK);
1191 } else { 1191 } else {
@@ -1670,6 +1670,8 @@ int rv6xx_dpm_set_power_state(struct radeon_device *rdev)
1670 struct radeon_ps *old_ps = rdev->pm.dpm.current_ps; 1670 struct radeon_ps *old_ps = rdev->pm.dpm.current_ps;
1671 int ret; 1671 int ret;
1672 1672
1673 pi->restricted_levels = 0;
1674
1673 rv6xx_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps); 1675 rv6xx_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);
1674 1676
1675 rv6xx_clear_vc(rdev); 1677 rv6xx_clear_vc(rdev);
@@ -1756,6 +1758,8 @@ int rv6xx_dpm_set_power_state(struct radeon_device *rdev)
1756 1758
1757 rv6xx_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps); 1759 rv6xx_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
1758 1760
1761 rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
1762
1759 return 0; 1763 return 0;
1760} 1764}
1761 1765
@@ -2085,3 +2089,34 @@ u32 rv6xx_dpm_get_mclk(struct radeon_device *rdev, bool low)
2085 else 2089 else
2086 return requested_state->high.mclk; 2090 return requested_state->high.mclk;
2087} 2091}
2092
2093int rv6xx_dpm_force_performance_level(struct radeon_device *rdev,
2094 enum radeon_dpm_forced_level level)
2095{
2096 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
2097
2098 if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
2099 pi->restricted_levels = 3;
2100 } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
2101 pi->restricted_levels = 2;
2102 } else {
2103 pi->restricted_levels = 0;
2104 }
2105
2106 rv6xx_clear_vc(rdev);
2107 r600_power_level_enable(rdev, R600_POWER_LEVEL_LOW, true);
2108 r600_set_at(rdev, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF);
2109 r600_wait_for_power_level(rdev, R600_POWER_LEVEL_LOW);
2110 r600_power_level_enable(rdev, R600_POWER_LEVEL_HIGH, false);
2111 r600_power_level_enable(rdev, R600_POWER_LEVEL_MEDIUM, false);
2112 rv6xx_enable_medium(rdev);
2113 rv6xx_enable_high(rdev);
2114 if (pi->restricted_levels == 3)
2115 r600_power_level_enable(rdev, R600_POWER_LEVEL_LOW, false);
2116 rv6xx_program_vc(rdev);
2117 rv6xx_program_at(rdev);
2118
2119 rdev->pm.dpm.forced_level = level;
2120
2121 return 0;
2122}
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index d325280e2f9f..d71037f4f68f 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -3383,8 +3383,8 @@ static int si_cp_resume(struct radeon_device *rdev)
3383 /* ring 0 - compute and gfx */ 3383 /* ring 0 - compute and gfx */
3384 /* Set ring buffer size */ 3384 /* Set ring buffer size */
3385 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 3385 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3386 rb_bufsz = drm_order(ring->ring_size / 8); 3386 rb_bufsz = order_base_2(ring->ring_size / 8);
3387 tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; 3387 tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
3388#ifdef __BIG_ENDIAN 3388#ifdef __BIG_ENDIAN
3389 tmp |= BUF_SWAP_32BIT; 3389 tmp |= BUF_SWAP_32BIT;
3390#endif 3390#endif
@@ -3416,8 +3416,8 @@ static int si_cp_resume(struct radeon_device *rdev)
3416 /* ring1 - compute only */ 3416 /* ring1 - compute only */
3417 /* Set ring buffer size */ 3417 /* Set ring buffer size */
3418 ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; 3418 ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
3419 rb_bufsz = drm_order(ring->ring_size / 8); 3419 rb_bufsz = order_base_2(ring->ring_size / 8);
3420 tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; 3420 tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
3421#ifdef __BIG_ENDIAN 3421#ifdef __BIG_ENDIAN
3422 tmp |= BUF_SWAP_32BIT; 3422 tmp |= BUF_SWAP_32BIT;
3423#endif 3423#endif
@@ -3442,8 +3442,8 @@ static int si_cp_resume(struct radeon_device *rdev)
3442 /* ring2 - compute only */ 3442 /* ring2 - compute only */
3443 /* Set ring buffer size */ 3443 /* Set ring buffer size */
3444 ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; 3444 ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
3445 rb_bufsz = drm_order(ring->ring_size / 8); 3445 rb_bufsz = order_base_2(ring->ring_size / 8);
3446 tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; 3446 tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
3447#ifdef __BIG_ENDIAN 3447#ifdef __BIG_ENDIAN
3448 tmp |= BUF_SWAP_32BIT; 3448 tmp |= BUF_SWAP_32BIT;
3449#endif 3449#endif
@@ -5651,7 +5651,7 @@ static int si_irq_init(struct radeon_device *rdev)
5651 WREG32(INTERRUPT_CNTL, interrupt_cntl); 5651 WREG32(INTERRUPT_CNTL, interrupt_cntl);
5652 5652
5653 WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8); 5653 WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
5654 rb_bufsz = drm_order(rdev->ih.ring_size / 4); 5654 rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
5655 5655
5656 ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE | 5656 ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
5657 IH_WPTR_OVERFLOW_CLEAR | 5657 IH_WPTR_OVERFLOW_CLEAR |
diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig
index 72887df8dd76..c590cd9dca0b 100644
--- a/drivers/gpu/drm/rcar-du/Kconfig
+++ b/drivers/gpu/drm/rcar-du/Kconfig
@@ -7,3 +7,10 @@ config DRM_RCAR_DU
7 help 7 help
8 Choose this option if you have an R-Car chipset. 8 Choose this option if you have an R-Car chipset.
9 If M is selected the module will be called rcar-du-drm. 9 If M is selected the module will be called rcar-du-drm.
10
11config DRM_RCAR_LVDS
12 bool "R-Car DU LVDS Encoder Support"
13 depends on DRM_RCAR_DU
14 help
15 Enable support the R-Car Display Unit embedded LVDS encoders
16 (currently only on R8A7790).
diff --git a/drivers/gpu/drm/rcar-du/Makefile b/drivers/gpu/drm/rcar-du/Makefile
index 7333c0094015..12b8d4477835 100644
--- a/drivers/gpu/drm/rcar-du/Makefile
+++ b/drivers/gpu/drm/rcar-du/Makefile
@@ -1,8 +1,12 @@
1rcar-du-drm-y := rcar_du_crtc.o \ 1rcar-du-drm-y := rcar_du_crtc.o \
2 rcar_du_drv.o \ 2 rcar_du_drv.o \
3 rcar_du_encoder.o \
4 rcar_du_group.o \
3 rcar_du_kms.o \ 5 rcar_du_kms.o \
4 rcar_du_lvds.o \ 6 rcar_du_lvdscon.o \
5 rcar_du_plane.o \ 7 rcar_du_plane.o \
6 rcar_du_vga.o 8 rcar_du_vgacon.o
7 9
8obj-$(CONFIG_DRM_RCAR_DU) += rcar-du-drm.o 10rcar-du-drm-$(CONFIG_DRM_RCAR_LVDS) += rcar_du_lvdsenc.o
11
12obj-$(CONFIG_DRM_RCAR_DU) += rcar-du-drm.o
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 24183fb93592..a9d24e4bf792 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -23,30 +23,26 @@
23#include "rcar_du_crtc.h" 23#include "rcar_du_crtc.h"
24#include "rcar_du_drv.h" 24#include "rcar_du_drv.h"
25#include "rcar_du_kms.h" 25#include "rcar_du_kms.h"
26#include "rcar_du_lvds.h"
27#include "rcar_du_plane.h" 26#include "rcar_du_plane.h"
28#include "rcar_du_regs.h" 27#include "rcar_du_regs.h"
29#include "rcar_du_vga.h"
30
31#define to_rcar_crtc(c) container_of(c, struct rcar_du_crtc, crtc)
32 28
33static u32 rcar_du_crtc_read(struct rcar_du_crtc *rcrtc, u32 reg) 29static u32 rcar_du_crtc_read(struct rcar_du_crtc *rcrtc, u32 reg)
34{ 30{
35 struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private; 31 struct rcar_du_device *rcdu = rcrtc->group->dev;
36 32
37 return rcar_du_read(rcdu, rcrtc->mmio_offset + reg); 33 return rcar_du_read(rcdu, rcrtc->mmio_offset + reg);
38} 34}
39 35
40static void rcar_du_crtc_write(struct rcar_du_crtc *rcrtc, u32 reg, u32 data) 36static void rcar_du_crtc_write(struct rcar_du_crtc *rcrtc, u32 reg, u32 data)
41{ 37{
42 struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private; 38 struct rcar_du_device *rcdu = rcrtc->group->dev;
43 39
44 rcar_du_write(rcdu, rcrtc->mmio_offset + reg, data); 40 rcar_du_write(rcdu, rcrtc->mmio_offset + reg, data);
45} 41}
46 42
47static void rcar_du_crtc_clr(struct rcar_du_crtc *rcrtc, u32 reg, u32 clr) 43static void rcar_du_crtc_clr(struct rcar_du_crtc *rcrtc, u32 reg, u32 clr)
48{ 44{
49 struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private; 45 struct rcar_du_device *rcdu = rcrtc->group->dev;
50 46
51 rcar_du_write(rcdu, rcrtc->mmio_offset + reg, 47 rcar_du_write(rcdu, rcrtc->mmio_offset + reg,
52 rcar_du_read(rcdu, rcrtc->mmio_offset + reg) & ~clr); 48 rcar_du_read(rcdu, rcrtc->mmio_offset + reg) & ~clr);
@@ -54,7 +50,7 @@ static void rcar_du_crtc_clr(struct rcar_du_crtc *rcrtc, u32 reg, u32 clr)
54 50
55static void rcar_du_crtc_set(struct rcar_du_crtc *rcrtc, u32 reg, u32 set) 51static void rcar_du_crtc_set(struct rcar_du_crtc *rcrtc, u32 reg, u32 set)
56{ 52{
57 struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private; 53 struct rcar_du_device *rcdu = rcrtc->group->dev;
58 54
59 rcar_du_write(rcdu, rcrtc->mmio_offset + reg, 55 rcar_du_write(rcdu, rcrtc->mmio_offset + reg,
60 rcar_du_read(rcdu, rcrtc->mmio_offset + reg) | set); 56 rcar_du_read(rcdu, rcrtc->mmio_offset + reg) | set);
@@ -63,29 +59,48 @@ static void rcar_du_crtc_set(struct rcar_du_crtc *rcrtc, u32 reg, u32 set)
63static void rcar_du_crtc_clr_set(struct rcar_du_crtc *rcrtc, u32 reg, 59static void rcar_du_crtc_clr_set(struct rcar_du_crtc *rcrtc, u32 reg,
64 u32 clr, u32 set) 60 u32 clr, u32 set)
65{ 61{
66 struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private; 62 struct rcar_du_device *rcdu = rcrtc->group->dev;
67 u32 value = rcar_du_read(rcdu, rcrtc->mmio_offset + reg); 63 u32 value = rcar_du_read(rcdu, rcrtc->mmio_offset + reg);
68 64
69 rcar_du_write(rcdu, rcrtc->mmio_offset + reg, (value & ~clr) | set); 65 rcar_du_write(rcdu, rcrtc->mmio_offset + reg, (value & ~clr) | set);
70} 66}
71 67
68static int rcar_du_crtc_get(struct rcar_du_crtc *rcrtc)
69{
70 int ret;
71
72 ret = clk_prepare_enable(rcrtc->clock);
73 if (ret < 0)
74 return ret;
75
76 ret = rcar_du_group_get(rcrtc->group);
77 if (ret < 0)
78 clk_disable_unprepare(rcrtc->clock);
79
80 return ret;
81}
82
83static void rcar_du_crtc_put(struct rcar_du_crtc *rcrtc)
84{
85 rcar_du_group_put(rcrtc->group);
86 clk_disable_unprepare(rcrtc->clock);
87}
88
72static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) 89static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
73{ 90{
74 struct drm_crtc *crtc = &rcrtc->crtc; 91 const struct drm_display_mode *mode = &rcrtc->crtc.mode;
75 struct rcar_du_device *rcdu = crtc->dev->dev_private;
76 const struct drm_display_mode *mode = &crtc->mode;
77 unsigned long clk; 92 unsigned long clk;
78 u32 value; 93 u32 value;
79 u32 div; 94 u32 div;
80 95
81 /* Dot clock */ 96 /* Dot clock */
82 clk = clk_get_rate(rcdu->clock); 97 clk = clk_get_rate(rcrtc->clock);
83 div = DIV_ROUND_CLOSEST(clk, mode->clock * 1000); 98 div = DIV_ROUND_CLOSEST(clk, mode->clock * 1000);
84 div = clamp(div, 1U, 64U) - 1; 99 div = clamp(div, 1U, 64U) - 1;
85 100
86 rcar_du_write(rcdu, rcrtc->index ? ESCR2 : ESCR, 101 rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? ESCR2 : ESCR,
87 ESCR_DCLKSEL_CLKS | div); 102 ESCR_DCLKSEL_CLKS | div);
88 rcar_du_write(rcdu, rcrtc->index ? OTAR2 : OTAR, 0); 103 rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? OTAR2 : OTAR, 0);
89 104
90 /* Signal polarities */ 105 /* Signal polarities */
91 value = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? 0 : DSMR_VSL) 106 value = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? 0 : DSMR_VSL)
@@ -112,68 +127,25 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
112 rcar_du_crtc_write(rcrtc, DEWR, mode->hdisplay); 127 rcar_du_crtc_write(rcrtc, DEWR, mode->hdisplay);
113} 128}
114 129
115static void rcar_du_crtc_set_routing(struct rcar_du_crtc *rcrtc) 130void rcar_du_crtc_route_output(struct drm_crtc *crtc,
116{ 131 enum rcar_du_output output)
117 struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private;
118 u32 dorcr = rcar_du_read(rcdu, DORCR);
119
120 dorcr &= ~(DORCR_PG2T | DORCR_DK2S | DORCR_PG2D_MASK);
121
122 /* Set the DU1 pins sources. Select CRTC 0 if explicitly requested and
123 * CRTC 1 in all other cases to avoid cloning CRTC 0 to DU0 and DU1 by
124 * default.
125 */
126 if (rcrtc->outputs & (1 << 1) && rcrtc->index == 0)
127 dorcr |= DORCR_PG2D_DS1;
128 else
129 dorcr |= DORCR_PG2T | DORCR_DK2S | DORCR_PG2D_DS2;
130
131 rcar_du_write(rcdu, DORCR, dorcr);
132}
133
134static void __rcar_du_start_stop(struct rcar_du_device *rcdu, bool start)
135{
136 rcar_du_write(rcdu, DSYSR,
137 (rcar_du_read(rcdu, DSYSR) & ~(DSYSR_DRES | DSYSR_DEN)) |
138 (start ? DSYSR_DEN : DSYSR_DRES));
139}
140
141static void rcar_du_start_stop(struct rcar_du_device *rcdu, bool start)
142{
143 /* Many of the configuration bits are only updated when the display
144 * reset (DRES) bit in DSYSR is set to 1, disabling *both* CRTCs. Some
145 * of those bits could be pre-configured, but others (especially the
146 * bits related to plane assignment to display timing controllers) need
147 * to be modified at runtime.
148 *
149 * Restart the display controller if a start is requested. Sorry for the
150 * flicker. It should be possible to move most of the "DRES-update" bits
151 * setup to driver initialization time and minimize the number of cases
152 * when the display controller will have to be restarted.
153 */
154 if (start) {
155 if (rcdu->used_crtcs++ != 0)
156 __rcar_du_start_stop(rcdu, false);
157 __rcar_du_start_stop(rcdu, true);
158 } else {
159 if (--rcdu->used_crtcs == 0)
160 __rcar_du_start_stop(rcdu, false);
161 }
162}
163
164void rcar_du_crtc_route_output(struct drm_crtc *crtc, unsigned int output)
165{ 132{
166 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); 133 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
134 struct rcar_du_device *rcdu = rcrtc->group->dev;
167 135
168 /* Store the route from the CRTC output to the DU output. The DU will be 136 /* Store the route from the CRTC output to the DU output. The DU will be
169 * configured when starting the CRTC. 137 * configured when starting the CRTC.
170 */ 138 */
171 rcrtc->outputs |= 1 << output; 139 rcrtc->outputs |= BIT(output);
140
141 /* Store RGB routing to DPAD0 for R8A7790. */
142 if (rcar_du_has(rcdu, RCAR_DU_FEATURE_DEFR8) &&
143 output == RCAR_DU_OUTPUT_DPAD0)
144 rcdu->dpad0_source = rcrtc->index;
172} 145}
173 146
174void rcar_du_crtc_update_planes(struct drm_crtc *crtc) 147void rcar_du_crtc_update_planes(struct drm_crtc *crtc)
175{ 148{
176 struct rcar_du_device *rcdu = crtc->dev->dev_private;
177 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); 149 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
178 struct rcar_du_plane *planes[RCAR_DU_NUM_HW_PLANES]; 150 struct rcar_du_plane *planes[RCAR_DU_NUM_HW_PLANES];
179 unsigned int num_planes = 0; 151 unsigned int num_planes = 0;
@@ -182,8 +154,8 @@ void rcar_du_crtc_update_planes(struct drm_crtc *crtc)
182 u32 dptsr = 0; 154 u32 dptsr = 0;
183 u32 dspr = 0; 155 u32 dspr = 0;
184 156
185 for (i = 0; i < ARRAY_SIZE(rcdu->planes.planes); ++i) { 157 for (i = 0; i < ARRAY_SIZE(rcrtc->group->planes.planes); ++i) {
186 struct rcar_du_plane *plane = &rcdu->planes.planes[i]; 158 struct rcar_du_plane *plane = &rcrtc->group->planes.planes[i];
187 unsigned int j; 159 unsigned int j;
188 160
189 if (plane->crtc != &rcrtc->crtc || !plane->enabled) 161 if (plane->crtc != &rcrtc->crtc || !plane->enabled)
@@ -220,8 +192,8 @@ void rcar_du_crtc_update_planes(struct drm_crtc *crtc)
220 /* Select display timing and dot clock generator 2 for planes associated 192 /* Select display timing and dot clock generator 2 for planes associated
221 * with superposition controller 2. 193 * with superposition controller 2.
222 */ 194 */
223 if (rcrtc->index) { 195 if (rcrtc->index % 2) {
224 u32 value = rcar_du_read(rcdu, DPTSR); 196 u32 value = rcar_du_group_read(rcrtc->group, DPTSR);
225 197
226 /* The DPTSR register is updated when the display controller is 198 /* The DPTSR register is updated when the display controller is
227 * stopped. We thus need to restart the DU. Once again, sorry 199 * stopped. We thus need to restart the DU. Once again, sorry
@@ -231,21 +203,19 @@ void rcar_du_crtc_update_planes(struct drm_crtc *crtc)
231 * occur only if we need to break the pre-association. 203 * occur only if we need to break the pre-association.
232 */ 204 */
233 if (value != dptsr) { 205 if (value != dptsr) {
234 rcar_du_write(rcdu, DPTSR, dptsr); 206 rcar_du_group_write(rcrtc->group, DPTSR, dptsr);
235 if (rcdu->used_crtcs) { 207 if (rcrtc->group->used_crtcs)
236 __rcar_du_start_stop(rcdu, false); 208 rcar_du_group_restart(rcrtc->group);
237 __rcar_du_start_stop(rcdu, true);
238 }
239 } 209 }
240 } 210 }
241 211
242 rcar_du_write(rcdu, rcrtc->index ? DS2PR : DS1PR, dspr); 212 rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR,
213 dspr);
243} 214}
244 215
245static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc) 216static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
246{ 217{
247 struct drm_crtc *crtc = &rcrtc->crtc; 218 struct drm_crtc *crtc = &rcrtc->crtc;
248 struct rcar_du_device *rcdu = crtc->dev->dev_private;
249 unsigned int i; 219 unsigned int i;
250 220
251 if (rcrtc->started) 221 if (rcrtc->started)
@@ -260,16 +230,16 @@ static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
260 230
261 /* Configure display timings and output routing */ 231 /* Configure display timings and output routing */
262 rcar_du_crtc_set_display_timing(rcrtc); 232 rcar_du_crtc_set_display_timing(rcrtc);
263 rcar_du_crtc_set_routing(rcrtc); 233 rcar_du_group_set_routing(rcrtc->group);
264 234
265 mutex_lock(&rcdu->planes.lock); 235 mutex_lock(&rcrtc->group->planes.lock);
266 rcrtc->plane->enabled = true; 236 rcrtc->plane->enabled = true;
267 rcar_du_crtc_update_planes(crtc); 237 rcar_du_crtc_update_planes(crtc);
268 mutex_unlock(&rcdu->planes.lock); 238 mutex_unlock(&rcrtc->group->planes.lock);
269 239
270 /* Setup planes. */ 240 /* Setup planes. */
271 for (i = 0; i < ARRAY_SIZE(rcdu->planes.planes); ++i) { 241 for (i = 0; i < ARRAY_SIZE(rcrtc->group->planes.planes); ++i) {
272 struct rcar_du_plane *plane = &rcdu->planes.planes[i]; 242 struct rcar_du_plane *plane = &rcrtc->group->planes.planes[i];
273 243
274 if (plane->crtc != crtc || !plane->enabled) 244 if (plane->crtc != crtc || !plane->enabled)
275 continue; 245 continue;
@@ -283,7 +253,7 @@ static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
283 */ 253 */
284 rcar_du_crtc_clr_set(rcrtc, DSYSR, DSYSR_TVM_MASK, DSYSR_TVM_MASTER); 254 rcar_du_crtc_clr_set(rcrtc, DSYSR, DSYSR_TVM_MASK, DSYSR_TVM_MASTER);
285 255
286 rcar_du_start_stop(rcdu, true); 256 rcar_du_group_start_stop(rcrtc->group, true);
287 257
288 rcrtc->started = true; 258 rcrtc->started = true;
289} 259}
@@ -291,42 +261,37 @@ static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
291static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc) 261static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc)
292{ 262{
293 struct drm_crtc *crtc = &rcrtc->crtc; 263 struct drm_crtc *crtc = &rcrtc->crtc;
294 struct rcar_du_device *rcdu = crtc->dev->dev_private;
295 264
296 if (!rcrtc->started) 265 if (!rcrtc->started)
297 return; 266 return;
298 267
299 mutex_lock(&rcdu->planes.lock); 268 mutex_lock(&rcrtc->group->planes.lock);
300 rcrtc->plane->enabled = false; 269 rcrtc->plane->enabled = false;
301 rcar_du_crtc_update_planes(crtc); 270 rcar_du_crtc_update_planes(crtc);
302 mutex_unlock(&rcdu->planes.lock); 271 mutex_unlock(&rcrtc->group->planes.lock);
303 272
304 /* Select switch sync mode. This stops display operation and configures 273 /* Select switch sync mode. This stops display operation and configures
305 * the HSYNC and VSYNC signals as inputs. 274 * the HSYNC and VSYNC signals as inputs.
306 */ 275 */
307 rcar_du_crtc_clr_set(rcrtc, DSYSR, DSYSR_TVM_MASK, DSYSR_TVM_SWITCH); 276 rcar_du_crtc_clr_set(rcrtc, DSYSR, DSYSR_TVM_MASK, DSYSR_TVM_SWITCH);
308 277
309 rcar_du_start_stop(rcdu, false); 278 rcar_du_group_start_stop(rcrtc->group, false);
310 279
311 rcrtc->started = false; 280 rcrtc->started = false;
312} 281}
313 282
314void rcar_du_crtc_suspend(struct rcar_du_crtc *rcrtc) 283void rcar_du_crtc_suspend(struct rcar_du_crtc *rcrtc)
315{ 284{
316 struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private;
317
318 rcar_du_crtc_stop(rcrtc); 285 rcar_du_crtc_stop(rcrtc);
319 rcar_du_put(rcdu); 286 rcar_du_crtc_put(rcrtc);
320} 287}
321 288
322void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc) 289void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc)
323{ 290{
324 struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private;
325
326 if (rcrtc->dpms != DRM_MODE_DPMS_ON) 291 if (rcrtc->dpms != DRM_MODE_DPMS_ON)
327 return; 292 return;
328 293
329 rcar_du_get(rcdu); 294 rcar_du_crtc_get(rcrtc);
330 rcar_du_crtc_start(rcrtc); 295 rcar_du_crtc_start(rcrtc);
331} 296}
332 297
@@ -340,18 +305,17 @@ static void rcar_du_crtc_update_base(struct rcar_du_crtc *rcrtc)
340 305
341static void rcar_du_crtc_dpms(struct drm_crtc *crtc, int mode) 306static void rcar_du_crtc_dpms(struct drm_crtc *crtc, int mode)
342{ 307{
343 struct rcar_du_device *rcdu = crtc->dev->dev_private;
344 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); 308 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
345 309
346 if (rcrtc->dpms == mode) 310 if (rcrtc->dpms == mode)
347 return; 311 return;
348 312
349 if (mode == DRM_MODE_DPMS_ON) { 313 if (mode == DRM_MODE_DPMS_ON) {
350 rcar_du_get(rcdu); 314 rcar_du_crtc_get(rcrtc);
351 rcar_du_crtc_start(rcrtc); 315 rcar_du_crtc_start(rcrtc);
352 } else { 316 } else {
353 rcar_du_crtc_stop(rcrtc); 317 rcar_du_crtc_stop(rcrtc);
354 rcar_du_put(rcdu); 318 rcar_du_crtc_put(rcrtc);
355 } 319 }
356 320
357 rcrtc->dpms = mode; 321 rcrtc->dpms = mode;
@@ -367,13 +331,12 @@ static bool rcar_du_crtc_mode_fixup(struct drm_crtc *crtc,
367 331
368static void rcar_du_crtc_mode_prepare(struct drm_crtc *crtc) 332static void rcar_du_crtc_mode_prepare(struct drm_crtc *crtc)
369{ 333{
370 struct rcar_du_device *rcdu = crtc->dev->dev_private;
371 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); 334 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
372 335
373 /* We need to access the hardware during mode set, acquire a reference 336 /* We need to access the hardware during mode set, acquire a reference
374 * to the DU. 337 * to the CRTC.
375 */ 338 */
376 rcar_du_get(rcdu); 339 rcar_du_crtc_get(rcrtc);
377 340
378 /* Stop the CRTC and release the plane. Force the DPMS mode to off as a 341 /* Stop the CRTC and release the plane. Force the DPMS mode to off as a
379 * result. 342 * result.
@@ -390,8 +353,8 @@ static int rcar_du_crtc_mode_set(struct drm_crtc *crtc,
390 int x, int y, 353 int x, int y,
391 struct drm_framebuffer *old_fb) 354 struct drm_framebuffer *old_fb)
392{ 355{
393 struct rcar_du_device *rcdu = crtc->dev->dev_private;
394 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); 356 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
357 struct rcar_du_device *rcdu = rcrtc->group->dev;
395 const struct rcar_du_format_info *format; 358 const struct rcar_du_format_info *format;
396 int ret; 359 int ret;
397 360
@@ -423,10 +386,10 @@ static int rcar_du_crtc_mode_set(struct drm_crtc *crtc,
423 386
424error: 387error:
425 /* There's no rollback/abort operation to clean up in case of error. We 388 /* There's no rollback/abort operation to clean up in case of error. We
426 * thus need to release the reference to the DU acquired in prepare() 389 * thus need to release the reference to the CRTC acquired in prepare()
427 * here. 390 * here.
428 */ 391 */
429 rcar_du_put(rcdu); 392 rcar_du_crtc_put(rcrtc);
430 return ret; 393 return ret;
431} 394}
432 395
@@ -514,9 +477,28 @@ static void rcar_du_crtc_finish_page_flip(struct rcar_du_crtc *rcrtc)
514 drm_vblank_put(dev, rcrtc->index); 477 drm_vblank_put(dev, rcrtc->index);
515} 478}
516 479
480static irqreturn_t rcar_du_crtc_irq(int irq, void *arg)
481{
482 struct rcar_du_crtc *rcrtc = arg;
483 irqreturn_t ret = IRQ_NONE;
484 u32 status;
485
486 status = rcar_du_crtc_read(rcrtc, DSSR);
487 rcar_du_crtc_write(rcrtc, DSRCR, status & DSRCR_MASK);
488
489 if (status & DSSR_VBK) {
490 drm_handle_vblank(rcrtc->crtc.dev, rcrtc->index);
491 rcar_du_crtc_finish_page_flip(rcrtc);
492 ret = IRQ_HANDLED;
493 }
494
495 return ret;
496}
497
517static int rcar_du_crtc_page_flip(struct drm_crtc *crtc, 498static int rcar_du_crtc_page_flip(struct drm_crtc *crtc,
518 struct drm_framebuffer *fb, 499 struct drm_framebuffer *fb,
519 struct drm_pending_vblank_event *event) 500 struct drm_pending_vblank_event *event,
501 uint32_t page_flip_flags)
520{ 502{
521 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); 503 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
522 struct drm_device *dev = rcrtc->crtc.dev; 504 struct drm_device *dev = rcrtc->crtc.dev;
@@ -549,16 +531,41 @@ static const struct drm_crtc_funcs crtc_funcs = {
549 .page_flip = rcar_du_crtc_page_flip, 531 .page_flip = rcar_du_crtc_page_flip,
550}; 532};
551 533
552int rcar_du_crtc_create(struct rcar_du_device *rcdu, unsigned int index) 534int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index)
553{ 535{
536 static const unsigned int mmio_offsets[] = {
537 DU0_REG_OFFSET, DU1_REG_OFFSET, DU2_REG_OFFSET
538 };
539
540 struct rcar_du_device *rcdu = rgrp->dev;
541 struct platform_device *pdev = to_platform_device(rcdu->dev);
554 struct rcar_du_crtc *rcrtc = &rcdu->crtcs[index]; 542 struct rcar_du_crtc *rcrtc = &rcdu->crtcs[index];
555 struct drm_crtc *crtc = &rcrtc->crtc; 543 struct drm_crtc *crtc = &rcrtc->crtc;
544 unsigned int irqflags;
545 char clk_name[5];
546 char *name;
547 int irq;
556 int ret; 548 int ret;
557 549
558 rcrtc->mmio_offset = index ? DISP2_REG_OFFSET : 0; 550 /* Get the CRTC clock. */
551 if (rcar_du_has(rcdu, RCAR_DU_FEATURE_CRTC_IRQ_CLOCK)) {
552 sprintf(clk_name, "du.%u", index);
553 name = clk_name;
554 } else {
555 name = NULL;
556 }
557
558 rcrtc->clock = devm_clk_get(rcdu->dev, name);
559 if (IS_ERR(rcrtc->clock)) {
560 dev_err(rcdu->dev, "no clock for CRTC %u\n", index);
561 return PTR_ERR(rcrtc->clock);
562 }
563
564 rcrtc->group = rgrp;
565 rcrtc->mmio_offset = mmio_offsets[index];
559 rcrtc->index = index; 566 rcrtc->index = index;
560 rcrtc->dpms = DRM_MODE_DPMS_OFF; 567 rcrtc->dpms = DRM_MODE_DPMS_OFF;
561 rcrtc->plane = &rcdu->planes.planes[index]; 568 rcrtc->plane = &rgrp->planes.planes[index % 2];
562 569
563 rcrtc->plane->crtc = crtc; 570 rcrtc->plane->crtc = crtc;
564 571
@@ -568,6 +575,28 @@ int rcar_du_crtc_create(struct rcar_du_device *rcdu, unsigned int index)
568 575
569 drm_crtc_helper_add(crtc, &crtc_helper_funcs); 576 drm_crtc_helper_add(crtc, &crtc_helper_funcs);
570 577
578 /* Register the interrupt handler. */
579 if (rcar_du_has(rcdu, RCAR_DU_FEATURE_CRTC_IRQ_CLOCK)) {
580 irq = platform_get_irq(pdev, index);
581 irqflags = 0;
582 } else {
583 irq = platform_get_irq(pdev, 0);
584 irqflags = IRQF_SHARED;
585 }
586
587 if (irq < 0) {
588 dev_err(rcdu->dev, "no IRQ for CRTC %u\n", index);
589 return ret;
590 }
591
592 ret = devm_request_irq(rcdu->dev, irq, rcar_du_crtc_irq, irqflags,
593 dev_name(rcdu->dev), rcrtc);
594 if (ret < 0) {
595 dev_err(rcdu->dev,
596 "failed to register IRQ for CRTC %u\n", index);
597 return ret;
598 }
599
571 return 0; 600 return 0;
572} 601}
573 602
@@ -580,16 +609,3 @@ void rcar_du_crtc_enable_vblank(struct rcar_du_crtc *rcrtc, bool enable)
580 rcar_du_crtc_clr(rcrtc, DIER, DIER_VBE); 609 rcar_du_crtc_clr(rcrtc, DIER, DIER_VBE);
581 } 610 }
582} 611}
583
584void rcar_du_crtc_irq(struct rcar_du_crtc *rcrtc)
585{
586 u32 status;
587
588 status = rcar_du_crtc_read(rcrtc, DSSR);
589 rcar_du_crtc_write(rcrtc, DSRCR, status & DSRCR_MASK);
590
591 if (status & DSSR_VBK) {
592 drm_handle_vblank(rcrtc->crtc.dev, rcrtc->index);
593 rcar_du_crtc_finish_page_flip(rcrtc);
594 }
595}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
index 2a0365bcbd14..43e7575c700c 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
@@ -15,16 +15,18 @@
15#define __RCAR_DU_CRTC_H__ 15#define __RCAR_DU_CRTC_H__
16 16
17#include <linux/mutex.h> 17#include <linux/mutex.h>
18#include <linux/platform_data/rcar-du.h>
18 19
19#include <drm/drmP.h> 20#include <drm/drmP.h>
20#include <drm/drm_crtc.h> 21#include <drm/drm_crtc.h>
21 22
22struct rcar_du_device; 23struct rcar_du_group;
23struct rcar_du_plane; 24struct rcar_du_plane;
24 25
25struct rcar_du_crtc { 26struct rcar_du_crtc {
26 struct drm_crtc crtc; 27 struct drm_crtc crtc;
27 28
29 struct clk *clock;
28 unsigned int mmio_offset; 30 unsigned int mmio_offset;
29 unsigned int index; 31 unsigned int index;
30 bool started; 32 bool started;
@@ -33,18 +35,21 @@ struct rcar_du_crtc {
33 unsigned int outputs; 35 unsigned int outputs;
34 int dpms; 36 int dpms;
35 37
38 struct rcar_du_group *group;
36 struct rcar_du_plane *plane; 39 struct rcar_du_plane *plane;
37}; 40};
38 41
39int rcar_du_crtc_create(struct rcar_du_device *rcdu, unsigned int index); 42#define to_rcar_crtc(c) container_of(c, struct rcar_du_crtc, crtc)
43
44int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index);
40void rcar_du_crtc_enable_vblank(struct rcar_du_crtc *rcrtc, bool enable); 45void rcar_du_crtc_enable_vblank(struct rcar_du_crtc *rcrtc, bool enable);
41void rcar_du_crtc_irq(struct rcar_du_crtc *rcrtc);
42void rcar_du_crtc_cancel_page_flip(struct rcar_du_crtc *rcrtc, 46void rcar_du_crtc_cancel_page_flip(struct rcar_du_crtc *rcrtc,
43 struct drm_file *file); 47 struct drm_file *file);
44void rcar_du_crtc_suspend(struct rcar_du_crtc *rcrtc); 48void rcar_du_crtc_suspend(struct rcar_du_crtc *rcrtc);
45void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc); 49void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc);
46 50
47void rcar_du_crtc_route_output(struct drm_crtc *crtc, unsigned int output); 51void rcar_du_crtc_route_output(struct drm_crtc *crtc,
52 enum rcar_du_output output);
48void rcar_du_crtc_update_planes(struct drm_crtc *crtc); 53void rcar_du_crtc_update_planes(struct drm_crtc *crtc);
49 54
50#endif /* __RCAR_DU_CRTC_H__ */ 55#endif /* __RCAR_DU_CRTC_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index dc0fe09b2ba1..0023f9719cf1 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -21,6 +21,7 @@
21 21
22#include <drm/drmP.h> 22#include <drm/drmP.h>
23#include <drm/drm_crtc_helper.h> 23#include <drm/drm_crtc_helper.h>
24#include <drm/drm_fb_cma_helper.h>
24#include <drm/drm_gem_cma_helper.h> 25#include <drm/drm_gem_cma_helper.h>
25 26
26#include "rcar_du_crtc.h" 27#include "rcar_du_crtc.h"
@@ -29,74 +30,21 @@
29#include "rcar_du_regs.h" 30#include "rcar_du_regs.h"
30 31
31/* ----------------------------------------------------------------------------- 32/* -----------------------------------------------------------------------------
32 * Core device operations
33 */
34
35/*
36 * rcar_du_get - Acquire a reference to the DU
37 *
38 * Acquiring a reference enables the device clock and setup core registers. A
39 * reference must be held before accessing any hardware registers.
40 *
41 * This function must be called with the DRM mode_config lock held.
42 *
43 * Return 0 in case of success or a negative error code otherwise.
44 */
45int rcar_du_get(struct rcar_du_device *rcdu)
46{
47 int ret;
48
49 if (rcdu->use_count)
50 goto done;
51
52 /* Enable clocks before accessing the hardware. */
53 ret = clk_prepare_enable(rcdu->clock);
54 if (ret < 0)
55 return ret;
56
57 /* Enable extended features */
58 rcar_du_write(rcdu, DEFR, DEFR_CODE | DEFR_DEFE);
59 rcar_du_write(rcdu, DEFR2, DEFR2_CODE | DEFR2_DEFE2G);
60 rcar_du_write(rcdu, DEFR3, DEFR3_CODE | DEFR3_DEFE3);
61 rcar_du_write(rcdu, DEFR4, DEFR4_CODE);
62 rcar_du_write(rcdu, DEFR5, DEFR5_CODE | DEFR5_DEFE5);
63
64 /* Use DS1PR and DS2PR to configure planes priorities and connects the
65 * superposition 0 to DU0 pins. DU1 pins will be configured dynamically.
66 */
67 rcar_du_write(rcdu, DORCR, DORCR_PG1D_DS1 | DORCR_DPRS);
68
69done:
70 rcdu->use_count++;
71 return 0;
72}
73
74/*
75 * rcar_du_put - Release a reference to the DU
76 *
77 * Releasing the last reference disables the device clock.
78 *
79 * This function must be called with the DRM mode_config lock held.
80 */
81void rcar_du_put(struct rcar_du_device *rcdu)
82{
83 if (--rcdu->use_count)
84 return;
85
86 clk_disable_unprepare(rcdu->clock);
87}
88
89/* -----------------------------------------------------------------------------
90 * DRM operations 33 * DRM operations
91 */ 34 */
92 35
93static int rcar_du_unload(struct drm_device *dev) 36static int rcar_du_unload(struct drm_device *dev)
94{ 37{
38 struct rcar_du_device *rcdu = dev->dev_private;
39
40 if (rcdu->fbdev)
41 drm_fbdev_cma_fini(rcdu->fbdev);
42
95 drm_kms_helper_poll_fini(dev); 43 drm_kms_helper_poll_fini(dev);
96 drm_mode_config_cleanup(dev); 44 drm_mode_config_cleanup(dev);
97 drm_vblank_cleanup(dev); 45 drm_vblank_cleanup(dev);
98 drm_irq_uninstall(dev);
99 46
47 dev->irq_enabled = 0;
100 dev->dev_private = NULL; 48 dev->dev_private = NULL;
101 49
102 return 0; 50 return 0;
@@ -107,7 +55,6 @@ static int rcar_du_load(struct drm_device *dev, unsigned long flags)
107 struct platform_device *pdev = dev->platformdev; 55 struct platform_device *pdev = dev->platformdev;
108 struct rcar_du_platform_data *pdata = pdev->dev.platform_data; 56 struct rcar_du_platform_data *pdata = pdev->dev.platform_data;
109 struct rcar_du_device *rcdu; 57 struct rcar_du_device *rcdu;
110 struct resource *ioarea;
111 struct resource *mem; 58 struct resource *mem;
112 int ret; 59 int ret;
113 60
@@ -124,35 +71,15 @@ static int rcar_du_load(struct drm_device *dev, unsigned long flags)
124 71
125 rcdu->dev = &pdev->dev; 72 rcdu->dev = &pdev->dev;
126 rcdu->pdata = pdata; 73 rcdu->pdata = pdata;
74 rcdu->info = (struct rcar_du_device_info *)pdev->id_entry->driver_data;
127 rcdu->ddev = dev; 75 rcdu->ddev = dev;
128 dev->dev_private = rcdu; 76 dev->dev_private = rcdu;
129 77
130 /* I/O resources and clocks */ 78 /* I/O resources */
131 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 79 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
132 if (mem == NULL) { 80 rcdu->mmio = devm_ioremap_resource(&pdev->dev, mem);
133 dev_err(&pdev->dev, "failed to get memory resource\n"); 81 if (IS_ERR(rcdu->mmio))
134 return -EINVAL; 82 return PTR_ERR(rcdu->mmio);
135 }
136
137 ioarea = devm_request_mem_region(&pdev->dev, mem->start,
138 resource_size(mem), pdev->name);
139 if (ioarea == NULL) {
140 dev_err(&pdev->dev, "failed to request memory region\n");
141 return -EBUSY;
142 }
143
144 rcdu->mmio = devm_ioremap_nocache(&pdev->dev, ioarea->start,
145 resource_size(ioarea));
146 if (rcdu->mmio == NULL) {
147 dev_err(&pdev->dev, "failed to remap memory resource\n");
148 return -ENOMEM;
149 }
150
151 rcdu->clock = devm_clk_get(&pdev->dev, NULL);
152 if (IS_ERR(rcdu->clock)) {
153 dev_err(&pdev->dev, "failed to get clock\n");
154 return -ENOENT;
155 }
156 83
157 /* DRM/KMS objects */ 84 /* DRM/KMS objects */
158 ret = rcar_du_modeset_init(rcdu); 85 ret = rcar_du_modeset_init(rcdu);
@@ -161,18 +88,14 @@ static int rcar_du_load(struct drm_device *dev, unsigned long flags)
161 goto done; 88 goto done;
162 } 89 }
163 90
164 /* IRQ and vblank handling */ 91 /* vblank handling */
165 ret = drm_vblank_init(dev, (1 << rcdu->num_crtcs) - 1); 92 ret = drm_vblank_init(dev, (1 << rcdu->num_crtcs) - 1);
166 if (ret < 0) { 93 if (ret < 0) {
167 dev_err(&pdev->dev, "failed to initialize vblank\n"); 94 dev_err(&pdev->dev, "failed to initialize vblank\n");
168 goto done; 95 goto done;
169 } 96 }
170 97
171 ret = drm_irq_install(dev); 98 dev->irq_enabled = 1;
172 if (ret < 0) {
173 dev_err(&pdev->dev, "failed to install IRQ handler\n");
174 goto done;
175 }
176 99
177 platform_set_drvdata(pdev, rcdu); 100 platform_set_drvdata(pdev, rcdu);
178 101
@@ -188,20 +111,15 @@ static void rcar_du_preclose(struct drm_device *dev, struct drm_file *file)
188 struct rcar_du_device *rcdu = dev->dev_private; 111 struct rcar_du_device *rcdu = dev->dev_private;
189 unsigned int i; 112 unsigned int i;
190 113
191 for (i = 0; i < ARRAY_SIZE(rcdu->crtcs); ++i) 114 for (i = 0; i < rcdu->num_crtcs; ++i)
192 rcar_du_crtc_cancel_page_flip(&rcdu->crtcs[i], file); 115 rcar_du_crtc_cancel_page_flip(&rcdu->crtcs[i], file);
193} 116}
194 117
195static irqreturn_t rcar_du_irq(int irq, void *arg) 118static void rcar_du_lastclose(struct drm_device *dev)
196{ 119{
197 struct drm_device *dev = arg;
198 struct rcar_du_device *rcdu = dev->dev_private; 120 struct rcar_du_device *rcdu = dev->dev_private;
199 unsigned int i;
200
201 for (i = 0; i < ARRAY_SIZE(rcdu->crtcs); ++i)
202 rcar_du_crtc_irq(&rcdu->crtcs[i]);
203 121
204 return IRQ_HANDLED; 122 drm_fbdev_cma_restore_mode(rcdu->fbdev);
205} 123}
206 124
207static int rcar_du_enable_vblank(struct drm_device *dev, int crtc) 125static int rcar_du_enable_vblank(struct drm_device *dev, int crtc)
@@ -230,18 +148,16 @@ static const struct file_operations rcar_du_fops = {
230#endif 148#endif
231 .poll = drm_poll, 149 .poll = drm_poll,
232 .read = drm_read, 150 .read = drm_read,
233 .fasync = drm_fasync,
234 .llseek = no_llseek, 151 .llseek = no_llseek,
235 .mmap = drm_gem_cma_mmap, 152 .mmap = drm_gem_cma_mmap,
236}; 153};
237 154
238static struct drm_driver rcar_du_driver = { 155static struct drm_driver rcar_du_driver = {
239 .driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET 156 .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME,
240 | DRIVER_PRIME,
241 .load = rcar_du_load, 157 .load = rcar_du_load,
242 .unload = rcar_du_unload, 158 .unload = rcar_du_unload,
243 .preclose = rcar_du_preclose, 159 .preclose = rcar_du_preclose,
244 .irq_handler = rcar_du_irq, 160 .lastclose = rcar_du_lastclose,
245 .get_vblank_counter = drm_vblank_count, 161 .get_vblank_counter = drm_vblank_count,
246 .enable_vblank = rcar_du_enable_vblank, 162 .enable_vblank = rcar_du_enable_vblank,
247 .disable_vblank = rcar_du_disable_vblank, 163 .disable_vblank = rcar_du_disable_vblank,
@@ -258,7 +174,7 @@ static struct drm_driver rcar_du_driver = {
258 .gem_prime_mmap = drm_gem_cma_prime_mmap, 174 .gem_prime_mmap = drm_gem_cma_prime_mmap,
259 .dumb_create = rcar_du_dumb_create, 175 .dumb_create = rcar_du_dumb_create,
260 .dumb_map_offset = drm_gem_cma_dumb_map_offset, 176 .dumb_map_offset = drm_gem_cma_dumb_map_offset,
261 .dumb_destroy = drm_gem_cma_dumb_destroy, 177 .dumb_destroy = drm_gem_dumb_destroy,
262 .fops = &rcar_du_fops, 178 .fops = &rcar_du_fops,
263 .name = "rcar-du", 179 .name = "rcar-du",
264 .desc = "Renesas R-Car Display Unit", 180 .desc = "Renesas R-Car Display Unit",
@@ -313,6 +229,57 @@ static int rcar_du_remove(struct platform_device *pdev)
313 return 0; 229 return 0;
314} 230}
315 231
232static const struct rcar_du_device_info rcar_du_r8a7779_info = {
233 .features = 0,
234 .num_crtcs = 2,
235 .routes = {
236 /* R8A7779 has two RGB outputs and one (currently unsupported)
237 * TCON output.
238 */
239 [RCAR_DU_OUTPUT_DPAD0] = {
240 .possible_crtcs = BIT(0),
241 .encoder_type = DRM_MODE_ENCODER_NONE,
242 },
243 [RCAR_DU_OUTPUT_DPAD1] = {
244 .possible_crtcs = BIT(1) | BIT(0),
245 .encoder_type = DRM_MODE_ENCODER_NONE,
246 },
247 },
248 .num_lvds = 0,
249};
250
251static const struct rcar_du_device_info rcar_du_r8a7790_info = {
252 .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK | RCAR_DU_FEATURE_ALIGN_128B
253 | RCAR_DU_FEATURE_DEFR8,
254 .num_crtcs = 3,
255 .routes = {
256 /* R8A7790 has one RGB output, two LVDS outputs and one
257 * (currently unsupported) TCON output.
258 */
259 [RCAR_DU_OUTPUT_DPAD0] = {
260 .possible_crtcs = BIT(2) | BIT(1) | BIT(0),
261 .encoder_type = DRM_MODE_ENCODER_NONE,
262 },
263 [RCAR_DU_OUTPUT_LVDS0] = {
264 .possible_crtcs = BIT(0),
265 .encoder_type = DRM_MODE_ENCODER_LVDS,
266 },
267 [RCAR_DU_OUTPUT_LVDS1] = {
268 .possible_crtcs = BIT(2) | BIT(1),
269 .encoder_type = DRM_MODE_ENCODER_LVDS,
270 },
271 },
272 .num_lvds = 2,
273};
274
275static const struct platform_device_id rcar_du_id_table[] = {
276 { "rcar-du-r8a7779", (kernel_ulong_t)&rcar_du_r8a7779_info },
277 { "rcar-du-r8a7790", (kernel_ulong_t)&rcar_du_r8a7790_info },
278 { }
279};
280
281MODULE_DEVICE_TABLE(platform, rcar_du_id_table);
282
316static struct platform_driver rcar_du_platform_driver = { 283static struct platform_driver rcar_du_platform_driver = {
317 .probe = rcar_du_probe, 284 .probe = rcar_du_probe,
318 .remove = rcar_du_remove, 285 .remove = rcar_du_remove,
@@ -321,6 +288,7 @@ static struct platform_driver rcar_du_platform_driver = {
321 .name = "rcar-du", 288 .name = "rcar-du",
322 .pm = &rcar_du_pm_ops, 289 .pm = &rcar_du_pm_ops,
323 }, 290 },
291 .id_table = rcar_du_id_table,
324}; 292};
325 293
326module_platform_driver(rcar_du_platform_driver); 294module_platform_driver(rcar_du_platform_driver);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
index 193cc59d495c..65d2d636b002 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
@@ -15,43 +15,74 @@
15#define __RCAR_DU_DRV_H__ 15#define __RCAR_DU_DRV_H__
16 16
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/mutex.h>
19#include <linux/platform_data/rcar-du.h> 18#include <linux/platform_data/rcar-du.h>
20 19
21#include "rcar_du_crtc.h" 20#include "rcar_du_crtc.h"
22#include "rcar_du_plane.h" 21#include "rcar_du_group.h"
23 22
24struct clk; 23struct clk;
25struct device; 24struct device;
26struct drm_device; 25struct drm_device;
26struct drm_fbdev_cma;
27struct rcar_du_device;
28struct rcar_du_lvdsenc;
29
30#define RCAR_DU_FEATURE_CRTC_IRQ_CLOCK (1 << 0) /* Per-CRTC IRQ and clock */
31#define RCAR_DU_FEATURE_ALIGN_128B (1 << 1) /* Align pitches to 128 bytes */
32#define RCAR_DU_FEATURE_DEFR8 (1 << 2) /* Has DEFR8 register */
33
34/*
35 * struct rcar_du_output_routing - Output routing specification
36 * @possible_crtcs: bitmask of possible CRTCs for the output
37 * @encoder_type: DRM type of the internal encoder associated with the output
38 *
39 * The DU has 5 possible outputs (DPAD0/1, LVDS0/1, TCON). Output routing data
40 * specify the valid SoC outputs, which CRTCs can drive the output, and the type
41 * of in-SoC encoder for the output.
42 */
43struct rcar_du_output_routing {
44 unsigned int possible_crtcs;
45 unsigned int encoder_type;
46};
47
48/*
49 * struct rcar_du_device_info - DU model-specific information
50 * @features: device features (RCAR_DU_FEATURE_*)
51 * @num_crtcs: total number of CRTCs
52 * @routes: array of CRTC to output routes, indexed by output (RCAR_DU_OUTPUT_*)
53 * @num_lvds: number of internal LVDS encoders
54 */
55struct rcar_du_device_info {
56 unsigned int features;
57 unsigned int num_crtcs;
58 struct rcar_du_output_routing routes[RCAR_DU_OUTPUT_MAX];
59 unsigned int num_lvds;
60};
27 61
28struct rcar_du_device { 62struct rcar_du_device {
29 struct device *dev; 63 struct device *dev;
30 const struct rcar_du_platform_data *pdata; 64 const struct rcar_du_platform_data *pdata;
65 const struct rcar_du_device_info *info;
31 66
32 void __iomem *mmio; 67 void __iomem *mmio;
33 struct clk *clock;
34 unsigned int use_count;
35 68
36 struct drm_device *ddev; 69 struct drm_device *ddev;
70 struct drm_fbdev_cma *fbdev;
37 71
38 struct rcar_du_crtc crtcs[2]; 72 struct rcar_du_crtc crtcs[3];
39 unsigned int used_crtcs;
40 unsigned int num_crtcs; 73 unsigned int num_crtcs;
41 74
42 struct { 75 struct rcar_du_group groups[2];
43 struct rcar_du_plane planes[RCAR_DU_NUM_SW_PLANES];
44 unsigned int free;
45 struct mutex lock;
46 76
47 struct drm_property *alpha; 77 unsigned int dpad0_source;
48 struct drm_property *colorkey; 78 struct rcar_du_lvdsenc *lvds[2];
49 struct drm_property *zpos;
50 } planes;
51}; 79};
52 80
53int rcar_du_get(struct rcar_du_device *rcdu); 81static inline bool rcar_du_has(struct rcar_du_device *rcdu,
54void rcar_du_put(struct rcar_du_device *rcdu); 82 unsigned int feature)
83{
84 return rcdu->info->features & feature;
85}
55 86
56static inline u32 rcar_du_read(struct rcar_du_device *rcdu, u32 reg) 87static inline u32 rcar_du_read(struct rcar_du_device *rcdu, u32 reg)
57{ 88{
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
new file mode 100644
index 000000000000..3daa7a168dc6
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -0,0 +1,202 @@
1/*
2 * rcar_du_encoder.c -- R-Car Display Unit Encoder
3 *
4 * Copyright (C) 2013 Renesas Corporation
5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#include <linux/export.h>
15
16#include <drm/drmP.h>
17#include <drm/drm_crtc.h>
18#include <drm/drm_crtc_helper.h>
19
20#include "rcar_du_drv.h"
21#include "rcar_du_encoder.h"
22#include "rcar_du_kms.h"
23#include "rcar_du_lvdscon.h"
24#include "rcar_du_lvdsenc.h"
25#include "rcar_du_vgacon.h"
26
27/* -----------------------------------------------------------------------------
28 * Common connector functions
29 */
30
31struct drm_encoder *
32rcar_du_connector_best_encoder(struct drm_connector *connector)
33{
34 struct rcar_du_connector *rcon = to_rcar_connector(connector);
35
36 return &rcon->encoder->encoder;
37}
38
39/* -----------------------------------------------------------------------------
40 * Encoder
41 */
42
43static void rcar_du_encoder_dpms(struct drm_encoder *encoder, int mode)
44{
45 struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
46
47 if (renc->lvds)
48 rcar_du_lvdsenc_dpms(renc->lvds, encoder->crtc, mode);
49}
50
51static bool rcar_du_encoder_mode_fixup(struct drm_encoder *encoder,
52 const struct drm_display_mode *mode,
53 struct drm_display_mode *adjusted_mode)
54{
55 struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
56 const struct drm_display_mode *panel_mode;
57 struct drm_device *dev = encoder->dev;
58 struct drm_connector *connector;
59 bool found = false;
60
61 /* DAC encoders have currently no restriction on the mode. */
62 if (encoder->encoder_type == DRM_MODE_ENCODER_DAC)
63 return true;
64
65 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
66 if (connector->encoder == encoder) {
67 found = true;
68 break;
69 }
70 }
71
72 if (!found) {
73 dev_dbg(dev->dev, "mode_fixup: no connector found\n");
74 return false;
75 }
76
77 if (list_empty(&connector->modes)) {
78 dev_dbg(dev->dev, "mode_fixup: empty modes list\n");
79 return false;
80 }
81
82 panel_mode = list_first_entry(&connector->modes,
83 struct drm_display_mode, head);
84
85 /* We're not allowed to modify the resolution. */
86 if (mode->hdisplay != panel_mode->hdisplay ||
87 mode->vdisplay != panel_mode->vdisplay)
88 return false;
89
90 /* The flat panel mode is fixed, just copy it to the adjusted mode. */
91 drm_mode_copy(adjusted_mode, panel_mode);
92
93 /* The internal LVDS encoder has a clock frequency operating range of
94 * 30MHz to 150MHz. Clamp the clock accordingly.
95 */
96 if (renc->lvds)
97 adjusted_mode->clock = clamp(adjusted_mode->clock,
98 30000, 150000);
99
100 return true;
101}
102
103static void rcar_du_encoder_mode_prepare(struct drm_encoder *encoder)
104{
105 struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
106
107 if (renc->lvds)
108 rcar_du_lvdsenc_dpms(renc->lvds, encoder->crtc,
109 DRM_MODE_DPMS_OFF);
110}
111
112static void rcar_du_encoder_mode_commit(struct drm_encoder *encoder)
113{
114 struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
115
116 if (renc->lvds)
117 rcar_du_lvdsenc_dpms(renc->lvds, encoder->crtc,
118 DRM_MODE_DPMS_ON);
119}
120
121static void rcar_du_encoder_mode_set(struct drm_encoder *encoder,
122 struct drm_display_mode *mode,
123 struct drm_display_mode *adjusted_mode)
124{
125 struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
126
127 rcar_du_crtc_route_output(encoder->crtc, renc->output);
128}
129
130static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
131 .dpms = rcar_du_encoder_dpms,
132 .mode_fixup = rcar_du_encoder_mode_fixup,
133 .prepare = rcar_du_encoder_mode_prepare,
134 .commit = rcar_du_encoder_mode_commit,
135 .mode_set = rcar_du_encoder_mode_set,
136};
137
138static const struct drm_encoder_funcs encoder_funcs = {
139 .destroy = drm_encoder_cleanup,
140};
141
142int rcar_du_encoder_init(struct rcar_du_device *rcdu,
143 enum rcar_du_encoder_type type,
144 enum rcar_du_output output,
145 const struct rcar_du_encoder_data *data)
146{
147 struct rcar_du_encoder *renc;
148 unsigned int encoder_type;
149 int ret;
150
151 renc = devm_kzalloc(rcdu->dev, sizeof(*renc), GFP_KERNEL);
152 if (renc == NULL)
153 return -ENOMEM;
154
155 renc->output = output;
156
157 switch (output) {
158 case RCAR_DU_OUTPUT_LVDS0:
159 renc->lvds = rcdu->lvds[0];
160 break;
161
162 case RCAR_DU_OUTPUT_LVDS1:
163 renc->lvds = rcdu->lvds[1];
164 break;
165
166 default:
167 break;
168 }
169
170 switch (type) {
171 case RCAR_DU_ENCODER_VGA:
172 encoder_type = DRM_MODE_ENCODER_DAC;
173 break;
174 case RCAR_DU_ENCODER_LVDS:
175 encoder_type = DRM_MODE_ENCODER_LVDS;
176 break;
177 case RCAR_DU_ENCODER_NONE:
178 default:
179 /* No external encoder, use the internal encoder type. */
180 encoder_type = rcdu->info->routes[output].encoder_type;
181 break;
182 }
183
184 ret = drm_encoder_init(rcdu->ddev, &renc->encoder, &encoder_funcs,
185 encoder_type);
186 if (ret < 0)
187 return ret;
188
189 drm_encoder_helper_add(&renc->encoder, &encoder_helper_funcs);
190
191 switch (encoder_type) {
192 case DRM_MODE_ENCODER_LVDS:
193 return rcar_du_lvds_connector_init(rcdu, renc,
194 &data->connector.lvds.panel);
195
196 case DRM_MODE_ENCODER_DAC:
197 return rcar_du_vga_connector_init(rcdu, renc);
198
199 default:
200 return -EINVAL;
201 }
202}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
new file mode 100644
index 000000000000..0e5a65e45d0e
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
@@ -0,0 +1,49 @@
1/*
2 * rcar_du_encoder.h -- R-Car Display Unit Encoder
3 *
4 * Copyright (C) 2013 Renesas Corporation
5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#ifndef __RCAR_DU_ENCODER_H__
15#define __RCAR_DU_ENCODER_H__
16
17#include <linux/platform_data/rcar-du.h>
18
19#include <drm/drm_crtc.h>
20
21struct rcar_du_device;
22struct rcar_du_lvdsenc;
23
24struct rcar_du_encoder {
25 struct drm_encoder encoder;
26 enum rcar_du_output output;
27 struct rcar_du_lvdsenc *lvds;
28};
29
30#define to_rcar_encoder(e) \
31 container_of(e, struct rcar_du_encoder, encoder)
32
33struct rcar_du_connector {
34 struct drm_connector connector;
35 struct rcar_du_encoder *encoder;
36};
37
38#define to_rcar_connector(c) \
39 container_of(c, struct rcar_du_connector, connector)
40
41struct drm_encoder *
42rcar_du_connector_best_encoder(struct drm_connector *connector);
43
44int rcar_du_encoder_init(struct rcar_du_device *rcdu,
45 enum rcar_du_encoder_type type,
46 enum rcar_du_output output,
47 const struct rcar_du_encoder_data *data);
48
49#endif /* __RCAR_DU_ENCODER_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.c b/drivers/gpu/drm/rcar-du/rcar_du_group.c
new file mode 100644
index 000000000000..eb53cd97e8c6
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_group.c
@@ -0,0 +1,187 @@
1/*
2 * rcar_du_group.c -- R-Car Display Unit Channels Pair
3 *
4 * Copyright (C) 2013 Renesas Corporation
5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14/*
15 * The R8A7779 DU is split in per-CRTC resources (scan-out engine, blending
16 * unit, timings generator, ...) and device-global resources (start/stop
17 * control, planes, ...) shared between the two CRTCs.
18 *
19 * The R8A7790 introduced a third CRTC with its own set of global resources.
20 * This would be modeled as two separate DU device instances if it wasn't for
21 * a handful or resources that are shared between the three CRTCs (mostly
22 * related to input and output routing). For this reason the R8A7790 DU must be
23 * modeled as a single device with three CRTCs, two sets of "semi-global"
24 * resources, and a few device-global resources.
25 *
26 * The rcar_du_group object is a driver specific object, without any real
27 * counterpart in the DU documentation, that models those semi-global resources.
28 */
29
30#include <linux/clk.h>
31#include <linux/io.h>
32
33#include "rcar_du_drv.h"
34#include "rcar_du_group.h"
35#include "rcar_du_regs.h"
36
37u32 rcar_du_group_read(struct rcar_du_group *rgrp, u32 reg)
38{
39 return rcar_du_read(rgrp->dev, rgrp->mmio_offset + reg);
40}
41
42void rcar_du_group_write(struct rcar_du_group *rgrp, u32 reg, u32 data)
43{
44 rcar_du_write(rgrp->dev, rgrp->mmio_offset + reg, data);
45}
46
47static void rcar_du_group_setup_defr8(struct rcar_du_group *rgrp)
48{
49 u32 defr8 = DEFR8_CODE | DEFR8_DEFE8;
50
51 if (!rcar_du_has(rgrp->dev, RCAR_DU_FEATURE_DEFR8))
52 return;
53
54 /* The DEFR8 register for the first group also controls RGB output
55 * routing to DPAD0
56 */
57 if (rgrp->index == 0)
58 defr8 |= DEFR8_DRGBS_DU(rgrp->dev->dpad0_source);
59
60 rcar_du_group_write(rgrp, DEFR8, defr8);
61}
62
63static void rcar_du_group_setup(struct rcar_du_group *rgrp)
64{
65 /* Enable extended features */
66 rcar_du_group_write(rgrp, DEFR, DEFR_CODE | DEFR_DEFE);
67 rcar_du_group_write(rgrp, DEFR2, DEFR2_CODE | DEFR2_DEFE2G);
68 rcar_du_group_write(rgrp, DEFR3, DEFR3_CODE | DEFR3_DEFE3);
69 rcar_du_group_write(rgrp, DEFR4, DEFR4_CODE);
70 rcar_du_group_write(rgrp, DEFR5, DEFR5_CODE | DEFR5_DEFE5);
71
72 rcar_du_group_setup_defr8(rgrp);
73
74 /* Use DS1PR and DS2PR to configure planes priorities and connects the
75 * superposition 0 to DU0 pins. DU1 pins will be configured dynamically.
76 */
77 rcar_du_group_write(rgrp, DORCR, DORCR_PG1D_DS1 | DORCR_DPRS);
78}
79
80/*
81 * rcar_du_group_get - Acquire a reference to the DU channels group
82 *
83 * Acquiring the first reference setups core registers. A reference must be held
84 * before accessing any hardware registers.
85 *
86 * This function must be called with the DRM mode_config lock held.
87 *
88 * Return 0 in case of success or a negative error code otherwise.
89 */
90int rcar_du_group_get(struct rcar_du_group *rgrp)
91{
92 if (rgrp->use_count)
93 goto done;
94
95 rcar_du_group_setup(rgrp);
96
97done:
98 rgrp->use_count++;
99 return 0;
100}
101
102/*
103 * rcar_du_group_put - Release a reference to the DU
104 *
105 * This function must be called with the DRM mode_config lock held.
106 */
107void rcar_du_group_put(struct rcar_du_group *rgrp)
108{
109 --rgrp->use_count;
110}
111
112static void __rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start)
113{
114 rcar_du_group_write(rgrp, DSYSR,
115 (rcar_du_group_read(rgrp, DSYSR) & ~(DSYSR_DRES | DSYSR_DEN)) |
116 (start ? DSYSR_DEN : DSYSR_DRES));
117}
118
119void rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start)
120{
121 /* Many of the configuration bits are only updated when the display
122 * reset (DRES) bit in DSYSR is set to 1, disabling *both* CRTCs. Some
123 * of those bits could be pre-configured, but others (especially the
124 * bits related to plane assignment to display timing controllers) need
125 * to be modified at runtime.
126 *
127 * Restart the display controller if a start is requested. Sorry for the
128 * flicker. It should be possible to move most of the "DRES-update" bits
129 * setup to driver initialization time and minimize the number of cases
130 * when the display controller will have to be restarted.
131 */
132 if (start) {
133 if (rgrp->used_crtcs++ != 0)
134 __rcar_du_group_start_stop(rgrp, false);
135 __rcar_du_group_start_stop(rgrp, true);
136 } else {
137 if (--rgrp->used_crtcs == 0)
138 __rcar_du_group_start_stop(rgrp, false);
139 }
140}
141
142void rcar_du_group_restart(struct rcar_du_group *rgrp)
143{
144 __rcar_du_group_start_stop(rgrp, false);
145 __rcar_du_group_start_stop(rgrp, true);
146}
147
148static int rcar_du_set_dpad0_routing(struct rcar_du_device *rcdu)
149{
150 int ret;
151
152 /* RGB output routing to DPAD0 is configured in the DEFR8 register of
153 * the first group. As this function can be called with the DU0 and DU1
154 * CRTCs disabled, we need to enable the first group clock before
155 * accessing the register.
156 */
157 ret = clk_prepare_enable(rcdu->crtcs[0].clock);
158 if (ret < 0)
159 return ret;
160
161 rcar_du_group_setup_defr8(&rcdu->groups[0]);
162
163 clk_disable_unprepare(rcdu->crtcs[0].clock);
164
165 return 0;
166}
167
168int rcar_du_group_set_routing(struct rcar_du_group *rgrp)
169{
170 struct rcar_du_crtc *crtc0 = &rgrp->dev->crtcs[rgrp->index * 2];
171 u32 dorcr = rcar_du_group_read(rgrp, DORCR);
172
173 dorcr &= ~(DORCR_PG2T | DORCR_DK2S | DORCR_PG2D_MASK);
174
175 /* Set the DPAD1 pins sources. Select CRTC 0 if explicitly requested and
176 * CRTC 1 in all other cases to avoid cloning CRTC 0 to DPAD0 and DPAD1
177 * by default.
178 */
179 if (crtc0->outputs & BIT(RCAR_DU_OUTPUT_DPAD1))
180 dorcr |= DORCR_PG2D_DS1;
181 else
182 dorcr |= DORCR_PG2T | DORCR_DK2S | DORCR_PG2D_DS2;
183
184 rcar_du_group_write(rgrp, DORCR, dorcr);
185
186 return rcar_du_set_dpad0_routing(rgrp->dev);
187}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.h b/drivers/gpu/drm/rcar-du/rcar_du_group.h
new file mode 100644
index 000000000000..5025930972ec
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_group.h
@@ -0,0 +1,50 @@
1/*
2 * rcar_du_group.c -- R-Car Display Unit Planes and CRTCs Group
3 *
4 * Copyright (C) 2013 Renesas Corporation
5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#ifndef __RCAR_DU_GROUP_H__
15#define __RCAR_DU_GROUP_H__
16
17#include "rcar_du_plane.h"
18
19struct rcar_du_device;
20
21/*
22 * struct rcar_du_group - CRTCs and planes group
23 * @dev: the DU device
24 * @mmio_offset: registers offset in the device memory map
25 * @index: group index
26 * @use_count: number of users of the group (rcar_du_group_(get|put))
27 * @used_crtcs: number of CRTCs currently in use
28 * @planes: planes handled by the group
29 */
30struct rcar_du_group {
31 struct rcar_du_device *dev;
32 unsigned int mmio_offset;
33 unsigned int index;
34
35 unsigned int use_count;
36 unsigned int used_crtcs;
37
38 struct rcar_du_planes planes;
39};
40
41u32 rcar_du_group_read(struct rcar_du_group *rgrp, u32 reg);
42void rcar_du_group_write(struct rcar_du_group *rgrp, u32 reg, u32 data);
43
44int rcar_du_group_get(struct rcar_du_group *rgrp);
45void rcar_du_group_put(struct rcar_du_group *rgrp);
46void rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start);
47void rcar_du_group_restart(struct rcar_du_group *rgrp);
48int rcar_du_group_set_routing(struct rcar_du_group *rgrp);
49
50#endif /* __RCAR_DU_GROUP_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index d30c2e29bee2..b31ac080c4a7 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -19,10 +19,10 @@
19 19
20#include "rcar_du_crtc.h" 20#include "rcar_du_crtc.h"
21#include "rcar_du_drv.h" 21#include "rcar_du_drv.h"
22#include "rcar_du_encoder.h"
22#include "rcar_du_kms.h" 23#include "rcar_du_kms.h"
23#include "rcar_du_lvds.h" 24#include "rcar_du_lvdsenc.h"
24#include "rcar_du_regs.h" 25#include "rcar_du_regs.h"
25#include "rcar_du_vga.h"
26 26
27/* ----------------------------------------------------------------------------- 27/* -----------------------------------------------------------------------------
28 * Format helpers 28 * Format helpers
@@ -106,46 +106,24 @@ const struct rcar_du_format_info *rcar_du_format_info(u32 fourcc)
106} 106}
107 107
108/* ----------------------------------------------------------------------------- 108/* -----------------------------------------------------------------------------
109 * Common connector and encoder functions
110 */
111
112struct drm_encoder *
113rcar_du_connector_best_encoder(struct drm_connector *connector)
114{
115 struct rcar_du_connector *rcon = to_rcar_connector(connector);
116
117 return &rcon->encoder->encoder;
118}
119
120void rcar_du_encoder_mode_prepare(struct drm_encoder *encoder)
121{
122}
123
124void rcar_du_encoder_mode_set(struct drm_encoder *encoder,
125 struct drm_display_mode *mode,
126 struct drm_display_mode *adjusted_mode)
127{
128 struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
129
130 rcar_du_crtc_route_output(encoder->crtc, renc->output);
131}
132
133void rcar_du_encoder_mode_commit(struct drm_encoder *encoder)
134{
135}
136
137/* -----------------------------------------------------------------------------
138 * Frame buffer 109 * Frame buffer
139 */ 110 */
140 111
141int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev, 112int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev,
142 struct drm_mode_create_dumb *args) 113 struct drm_mode_create_dumb *args)
143{ 114{
115 struct rcar_du_device *rcdu = dev->dev_private;
144 unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8); 116 unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
145 unsigned int align; 117 unsigned int align;
146 118
147 /* The pitch must be aligned to a 16 pixels boundary. */ 119 /* The R8A7779 DU requires a 16 pixels pitch alignment as documented,
148 align = 16 * args->bpp / 8; 120 * but the R8A7790 DU seems to require a 128 bytes pitch alignment.
121 */
122 if (rcar_du_has(rcdu, RCAR_DU_FEATURE_ALIGN_128B))
123 align = 128;
124 else
125 align = 16 * args->bpp / 8;
126
149 args->pitch = roundup(max(args->pitch, min_pitch), align); 127 args->pitch = roundup(max(args->pitch, min_pitch), align);
150 128
151 return drm_gem_cma_dumb_create(file, dev, args); 129 return drm_gem_cma_dumb_create(file, dev, args);
@@ -155,6 +133,7 @@ static struct drm_framebuffer *
155rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv, 133rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
156 struct drm_mode_fb_cmd2 *mode_cmd) 134 struct drm_mode_fb_cmd2 *mode_cmd)
157{ 135{
136 struct rcar_du_device *rcdu = dev->dev_private;
158 const struct rcar_du_format_info *format; 137 const struct rcar_du_format_info *format;
159 unsigned int align; 138 unsigned int align;
160 139
@@ -165,7 +144,10 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
165 return ERR_PTR(-EINVAL); 144 return ERR_PTR(-EINVAL);
166 } 145 }
167 146
168 align = 16 * format->bpp / 8; 147 if (rcar_du_has(rcdu, RCAR_DU_FEATURE_ALIGN_128B))
148 align = 128;
149 else
150 align = 16 * format->bpp / 8;
169 151
170 if (mode_cmd->pitches[0] & (align - 1) || 152 if (mode_cmd->pitches[0] & (align - 1) ||
171 mode_cmd->pitches[0] >= 8192) { 153 mode_cmd->pitches[0] >= 8192) {
@@ -185,81 +167,124 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
185 return drm_fb_cma_create(dev, file_priv, mode_cmd); 167 return drm_fb_cma_create(dev, file_priv, mode_cmd);
186} 168}
187 169
170static void rcar_du_output_poll_changed(struct drm_device *dev)
171{
172 struct rcar_du_device *rcdu = dev->dev_private;
173
174 drm_fbdev_cma_hotplug_event(rcdu->fbdev);
175}
176
188static const struct drm_mode_config_funcs rcar_du_mode_config_funcs = { 177static const struct drm_mode_config_funcs rcar_du_mode_config_funcs = {
189 .fb_create = rcar_du_fb_create, 178 .fb_create = rcar_du_fb_create,
179 .output_poll_changed = rcar_du_output_poll_changed,
190}; 180};
191 181
192int rcar_du_modeset_init(struct rcar_du_device *rcdu) 182int rcar_du_modeset_init(struct rcar_du_device *rcdu)
193{ 183{
184 static const unsigned int mmio_offsets[] = {
185 DU0_REG_OFFSET, DU2_REG_OFFSET
186 };
187
194 struct drm_device *dev = rcdu->ddev; 188 struct drm_device *dev = rcdu->ddev;
195 struct drm_encoder *encoder; 189 struct drm_encoder *encoder;
190 struct drm_fbdev_cma *fbdev;
191 unsigned int num_groups;
196 unsigned int i; 192 unsigned int i;
197 int ret; 193 int ret;
198 194
199 drm_mode_config_init(rcdu->ddev); 195 drm_mode_config_init(dev);
200 196
201 rcdu->ddev->mode_config.min_width = 0; 197 dev->mode_config.min_width = 0;
202 rcdu->ddev->mode_config.min_height = 0; 198 dev->mode_config.min_height = 0;
203 rcdu->ddev->mode_config.max_width = 4095; 199 dev->mode_config.max_width = 4095;
204 rcdu->ddev->mode_config.max_height = 2047; 200 dev->mode_config.max_height = 2047;
205 rcdu->ddev->mode_config.funcs = &rcar_du_mode_config_funcs; 201 dev->mode_config.funcs = &rcar_du_mode_config_funcs;
206 202
207 ret = rcar_du_plane_init(rcdu); 203 rcdu->num_crtcs = rcdu->info->num_crtcs;
208 if (ret < 0) 204
209 return ret; 205 /* Initialize the groups. */
206 num_groups = DIV_ROUND_UP(rcdu->num_crtcs, 2);
207
208 for (i = 0; i < num_groups; ++i) {
209 struct rcar_du_group *rgrp = &rcdu->groups[i];
210
211 rgrp->dev = rcdu;
212 rgrp->mmio_offset = mmio_offsets[i];
213 rgrp->index = i;
214
215 ret = rcar_du_planes_init(rgrp);
216 if (ret < 0)
217 return ret;
218 }
219
220 /* Create the CRTCs. */
221 for (i = 0; i < rcdu->num_crtcs; ++i) {
222 struct rcar_du_group *rgrp = &rcdu->groups[i / 2];
210 223
211 for (i = 0; i < ARRAY_SIZE(rcdu->crtcs); ++i) { 224 ret = rcar_du_crtc_create(rgrp, i);
212 ret = rcar_du_crtc_create(rcdu, i);
213 if (ret < 0) 225 if (ret < 0)
214 return ret; 226 return ret;
215 } 227 }
216 228
217 rcdu->used_crtcs = 0; 229 /* Initialize the encoders. */
218 rcdu->num_crtcs = i; 230 ret = rcar_du_lvdsenc_init(rcdu);
231 if (ret < 0)
232 return ret;
219 233
220 for (i = 0; i < rcdu->pdata->num_encoders; ++i) { 234 for (i = 0; i < rcdu->pdata->num_encoders; ++i) {
221 const struct rcar_du_encoder_data *pdata = 235 const struct rcar_du_encoder_data *pdata =
222 &rcdu->pdata->encoders[i]; 236 &rcdu->pdata->encoders[i];
237 const struct rcar_du_output_routing *route =
238 &rcdu->info->routes[pdata->output];
239
240 if (pdata->type == RCAR_DU_ENCODER_UNUSED)
241 continue;
223 242
224 if (pdata->output >= ARRAY_SIZE(rcdu->crtcs)) { 243 if (pdata->output >= RCAR_DU_OUTPUT_MAX ||
244 route->possible_crtcs == 0) {
225 dev_warn(rcdu->dev, 245 dev_warn(rcdu->dev,
226 "encoder %u references unexisting output %u, skipping\n", 246 "encoder %u references unexisting output %u, skipping\n",
227 i, pdata->output); 247 i, pdata->output);
228 continue; 248 continue;
229 } 249 }
230 250
231 switch (pdata->encoder) { 251 rcar_du_encoder_init(rcdu, pdata->type, pdata->output, pdata);
232 case RCAR_DU_ENCODER_VGA:
233 rcar_du_vga_init(rcdu, &pdata->u.vga, pdata->output);
234 break;
235
236 case RCAR_DU_ENCODER_LVDS:
237 rcar_du_lvds_init(rcdu, &pdata->u.lvds, pdata->output);
238 break;
239
240 default:
241 break;
242 }
243 } 252 }
244 253
245 /* Set the possible CRTCs and possible clones. All encoders can be 254 /* Set the possible CRTCs and possible clones. There's always at least
246 * driven by the CRTC associated with the output they're connected to, 255 * one way for all encoders to clone each other, set all bits in the
247 * as well as by CRTC 0. 256 * possible clones field.
248 */ 257 */
249 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 258 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
250 struct rcar_du_encoder *renc = to_rcar_encoder(encoder); 259 struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
260 const struct rcar_du_output_routing *route =
261 &rcdu->info->routes[renc->output];
251 262
252 encoder->possible_crtcs = (1 << 0) | (1 << renc->output); 263 encoder->possible_crtcs = route->possible_crtcs;
253 encoder->possible_clones = 1 << 0; 264 encoder->possible_clones = (1 << rcdu->pdata->num_encoders) - 1;
254 } 265 }
255 266
256 ret = rcar_du_plane_register(rcdu); 267 /* Now that the CRTCs have been initialized register the planes. */
257 if (ret < 0) 268 for (i = 0; i < num_groups; ++i) {
258 return ret; 269 ret = rcar_du_planes_register(&rcdu->groups[i]);
270 if (ret < 0)
271 return ret;
272 }
273
274 drm_kms_helper_poll_init(dev);
275
276 drm_helper_disable_unused_functions(dev);
277
278 fbdev = drm_fbdev_cma_init(dev, 32, dev->mode_config.num_crtc,
279 dev->mode_config.num_connector);
280 if (IS_ERR(fbdev))
281 return PTR_ERR(fbdev);
259 282
260 drm_kms_helper_poll_init(rcdu->ddev); 283#ifndef CONFIG_FRAMEBUFFER_CONSOLE
284 drm_fbdev_cma_restore_mode(fbdev);
285#endif
261 286
262 drm_helper_disable_unused_functions(rcdu->ddev); 287 rcdu->fbdev = fbdev;
263 288
264 return 0; 289 return 0;
265} 290}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.h b/drivers/gpu/drm/rcar-du/rcar_du_kms.h
index dba472263486..5750e6af5655 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.h
@@ -16,8 +16,9 @@
16 16
17#include <linux/types.h> 17#include <linux/types.h>
18 18
19#include <drm/drm_crtc.h> 19struct drm_file;
20 20struct drm_device;
21struct drm_mode_create_dumb;
21struct rcar_du_device; 22struct rcar_du_device;
22 23
23struct rcar_du_format_info { 24struct rcar_du_format_info {
@@ -28,32 +29,8 @@ struct rcar_du_format_info {
28 unsigned int edf; 29 unsigned int edf;
29}; 30};
30 31
31struct rcar_du_encoder {
32 struct drm_encoder encoder;
33 unsigned int output;
34};
35
36#define to_rcar_encoder(e) \
37 container_of(e, struct rcar_du_encoder, encoder)
38
39struct rcar_du_connector {
40 struct drm_connector connector;
41 struct rcar_du_encoder *encoder;
42};
43
44#define to_rcar_connector(c) \
45 container_of(c, struct rcar_du_connector, connector)
46
47const struct rcar_du_format_info *rcar_du_format_info(u32 fourcc); 32const struct rcar_du_format_info *rcar_du_format_info(u32 fourcc);
48 33
49struct drm_encoder *
50rcar_du_connector_best_encoder(struct drm_connector *connector);
51void rcar_du_encoder_mode_prepare(struct drm_encoder *encoder);
52void rcar_du_encoder_mode_set(struct drm_encoder *encoder,
53 struct drm_display_mode *mode,
54 struct drm_display_mode *adjusted_mode);
55void rcar_du_encoder_mode_commit(struct drm_encoder *encoder);
56
57int rcar_du_modeset_init(struct rcar_du_device *rcdu); 34int rcar_du_modeset_init(struct rcar_du_device *rcdu);
58 35
59int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev, 36int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvds.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
index 7aefe7267e1d..4f3ba93cd91d 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvds.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * rcar_du_lvds.c -- R-Car Display Unit LVDS Encoder and Connector 2 * rcar_du_lvdscon.c -- R-Car Display Unit LVDS Connector
3 * 3 *
4 * Copyright (C) 2013 Renesas Corporation 4 * Copyright (C) 2013 Renesas Corporation
5 * 5 *
@@ -16,8 +16,9 @@
16#include <drm/drm_crtc_helper.h> 16#include <drm/drm_crtc_helper.h>
17 17
18#include "rcar_du_drv.h" 18#include "rcar_du_drv.h"
19#include "rcar_du_encoder.h"
19#include "rcar_du_kms.h" 20#include "rcar_du_kms.h"
20#include "rcar_du_lvds.h" 21#include "rcar_du_lvdscon.h"
21 22
22struct rcar_du_lvds_connector { 23struct rcar_du_lvds_connector {
23 struct rcar_du_connector connector; 24 struct rcar_du_connector connector;
@@ -28,13 +29,10 @@ struct rcar_du_lvds_connector {
28#define to_rcar_lvds_connector(c) \ 29#define to_rcar_lvds_connector(c) \
29 container_of(c, struct rcar_du_lvds_connector, connector.connector) 30 container_of(c, struct rcar_du_lvds_connector, connector.connector)
30 31
31/* -----------------------------------------------------------------------------
32 * Connector
33 */
34
35static int rcar_du_lvds_connector_get_modes(struct drm_connector *connector) 32static int rcar_du_lvds_connector_get_modes(struct drm_connector *connector)
36{ 33{
37 struct rcar_du_lvds_connector *lvdscon = to_rcar_lvds_connector(connector); 34 struct rcar_du_lvds_connector *lvdscon =
35 to_rcar_lvds_connector(connector);
38 struct drm_display_mode *mode; 36 struct drm_display_mode *mode;
39 37
40 mode = drm_mode_create(connector->dev); 38 mode = drm_mode_create(connector->dev);
@@ -90,9 +88,9 @@ static const struct drm_connector_funcs connector_funcs = {
90 .destroy = rcar_du_lvds_connector_destroy, 88 .destroy = rcar_du_lvds_connector_destroy,
91}; 89};
92 90
93static int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu, 91int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
94 struct rcar_du_encoder *renc, 92 struct rcar_du_encoder *renc,
95 const struct rcar_du_panel_data *panel) 93 const struct rcar_du_panel_data *panel)
96{ 94{
97 struct rcar_du_lvds_connector *lvdscon; 95 struct rcar_du_lvds_connector *lvdscon;
98 struct drm_connector *connector; 96 struct drm_connector *connector;
@@ -131,86 +129,3 @@ static int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
131 129
132 return 0; 130 return 0;
133} 131}
134
135/* -----------------------------------------------------------------------------
136 * Encoder
137 */
138
139static void rcar_du_lvds_encoder_dpms(struct drm_encoder *encoder, int mode)
140{
141}
142
143static bool rcar_du_lvds_encoder_mode_fixup(struct drm_encoder *encoder,
144 const struct drm_display_mode *mode,
145 struct drm_display_mode *adjusted_mode)
146{
147 const struct drm_display_mode *panel_mode;
148 struct drm_device *dev = encoder->dev;
149 struct drm_connector *connector;
150 bool found = false;
151
152 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
153 if (connector->encoder == encoder) {
154 found = true;
155 break;
156 }
157 }
158
159 if (!found) {
160 dev_dbg(dev->dev, "mode_fixup: no connector found\n");
161 return false;
162 }
163
164 if (list_empty(&connector->modes)) {
165 dev_dbg(dev->dev, "mode_fixup: empty modes list\n");
166 return false;
167 }
168
169 panel_mode = list_first_entry(&connector->modes,
170 struct drm_display_mode, head);
171
172 /* We're not allowed to modify the resolution. */
173 if (mode->hdisplay != panel_mode->hdisplay ||
174 mode->vdisplay != panel_mode->vdisplay)
175 return false;
176
177 /* The flat panel mode is fixed, just copy it to the adjusted mode. */
178 drm_mode_copy(adjusted_mode, panel_mode);
179
180 return true;
181}
182
183static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
184 .dpms = rcar_du_lvds_encoder_dpms,
185 .mode_fixup = rcar_du_lvds_encoder_mode_fixup,
186 .prepare = rcar_du_encoder_mode_prepare,
187 .commit = rcar_du_encoder_mode_commit,
188 .mode_set = rcar_du_encoder_mode_set,
189};
190
191static const struct drm_encoder_funcs encoder_funcs = {
192 .destroy = drm_encoder_cleanup,
193};
194
195int rcar_du_lvds_init(struct rcar_du_device *rcdu,
196 const struct rcar_du_encoder_lvds_data *data,
197 unsigned int output)
198{
199 struct rcar_du_encoder *renc;
200 int ret;
201
202 renc = devm_kzalloc(rcdu->dev, sizeof(*renc), GFP_KERNEL);
203 if (renc == NULL)
204 return -ENOMEM;
205
206 renc->output = output;
207
208 ret = drm_encoder_init(rcdu->ddev, &renc->encoder, &encoder_funcs,
209 DRM_MODE_ENCODER_LVDS);
210 if (ret < 0)
211 return ret;
212
213 drm_encoder_helper_add(&renc->encoder, &encoder_helper_funcs);
214
215 return rcar_du_lvds_connector_init(rcdu, renc, &data->panel);
216}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvds.h b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h
index b47f8328e103..bff8683699ca 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvds.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * rcar_du_lvds.h -- R-Car Display Unit LVDS Encoder and Connector 2 * rcar_du_lvdscon.h -- R-Car Display Unit LVDS Connector
3 * 3 *
4 * Copyright (C) 2013 Renesas Corporation 4 * Copyright (C) 2013 Renesas Corporation
5 * 5 *
@@ -11,14 +11,15 @@
11 * (at your option) any later version. 11 * (at your option) any later version.
12 */ 12 */
13 13
14#ifndef __RCAR_DU_LVDS_H__ 14#ifndef __RCAR_DU_LVDSCON_H__
15#define __RCAR_DU_LVDS_H__ 15#define __RCAR_DU_LVDSCON_H__
16 16
17struct rcar_du_device; 17struct rcar_du_device;
18struct rcar_du_encoder_lvds_data; 18struct rcar_du_encoder;
19struct rcar_du_panel_data;
19 20
20int rcar_du_lvds_init(struct rcar_du_device *rcdu, 21int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
21 const struct rcar_du_encoder_lvds_data *data, 22 struct rcar_du_encoder *renc,
22 unsigned int output); 23 const struct rcar_du_panel_data *panel);
23 24
24#endif /* __RCAR_DU_LVDS_H__ */ 25#endif /* __RCAR_DU_LVDSCON_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
new file mode 100644
index 000000000000..a0f6a1781925
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
@@ -0,0 +1,196 @@
1/*
2 * rcar_du_lvdsenc.c -- R-Car Display Unit LVDS Encoder
3 *
4 * Copyright (C) 2013 Renesas Corporation
5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#include <linux/clk.h>
15#include <linux/delay.h>
16#include <linux/io.h>
17#include <linux/platform_device.h>
18#include <linux/slab.h>
19
20#include "rcar_du_drv.h"
21#include "rcar_du_encoder.h"
22#include "rcar_du_lvdsenc.h"
23#include "rcar_lvds_regs.h"
24
25struct rcar_du_lvdsenc {
26 struct rcar_du_device *dev;
27
28 unsigned int index;
29 void __iomem *mmio;
30 struct clk *clock;
31 int dpms;
32
33 enum rcar_lvds_input input;
34};
35
36static void rcar_lvds_write(struct rcar_du_lvdsenc *lvds, u32 reg, u32 data)
37{
38 iowrite32(data, lvds->mmio + reg);
39}
40
41static int rcar_du_lvdsenc_start(struct rcar_du_lvdsenc *lvds,
42 struct rcar_du_crtc *rcrtc)
43{
44 const struct drm_display_mode *mode = &rcrtc->crtc.mode;
45 unsigned int freq = mode->clock;
46 u32 lvdcr0;
47 u32 pllcr;
48 int ret;
49
50 if (lvds->dpms == DRM_MODE_DPMS_ON)
51 return 0;
52
53 ret = clk_prepare_enable(lvds->clock);
54 if (ret < 0)
55 return ret;
56
57 /* PLL clock configuration */
58 if (freq <= 38000)
59 pllcr = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_38M;
60 else if (freq <= 60000)
61 pllcr = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_60M;
62 else if (freq <= 121000)
63 pllcr = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_121M;
64 else
65 pllcr = LVDPLLCR_PLLDLYCNT_150M;
66
67 rcar_lvds_write(lvds, LVDPLLCR, pllcr);
68
69 /* Hardcode the channels and control signals routing for now.
70 *
71 * HSYNC -> CTRL0
72 * VSYNC -> CTRL1
73 * DISP -> CTRL2
74 * 0 -> CTRL3
75 *
76 * Channels 1 and 3 are switched on ES1.
77 */
78 rcar_lvds_write(lvds, LVDCTRCR, LVDCTRCR_CTR3SEL_ZERO |
79 LVDCTRCR_CTR2SEL_DISP | LVDCTRCR_CTR1SEL_VSYNC |
80 LVDCTRCR_CTR0SEL_HSYNC);
81 rcar_lvds_write(lvds, LVDCHCR,
82 LVDCHCR_CHSEL_CH(0, 0) | LVDCHCR_CHSEL_CH(1, 3) |
83 LVDCHCR_CHSEL_CH(2, 2) | LVDCHCR_CHSEL_CH(3, 1));
84
85 /* Select the input, hardcode mode 0, enable LVDS operation and turn
86 * bias circuitry on.
87 */
88 lvdcr0 = LVDCR0_BEN | LVDCR0_LVEN;
89 if (rcrtc->index == 2)
90 lvdcr0 |= LVDCR0_DUSEL;
91 rcar_lvds_write(lvds, LVDCR0, lvdcr0);
92
93 /* Turn all the channels on. */
94 rcar_lvds_write(lvds, LVDCR1, LVDCR1_CHSTBY(3) | LVDCR1_CHSTBY(2) |
95 LVDCR1_CHSTBY(1) | LVDCR1_CHSTBY(0) | LVDCR1_CLKSTBY);
96
97 /* Turn the PLL on, wait for the startup delay, and turn the output
98 * on.
99 */
100 lvdcr0 |= LVDCR0_PLLEN;
101 rcar_lvds_write(lvds, LVDCR0, lvdcr0);
102
103 usleep_range(100, 150);
104
105 lvdcr0 |= LVDCR0_LVRES;
106 rcar_lvds_write(lvds, LVDCR0, lvdcr0);
107
108 lvds->dpms = DRM_MODE_DPMS_ON;
109 return 0;
110}
111
112static void rcar_du_lvdsenc_stop(struct rcar_du_lvdsenc *lvds)
113{
114 if (lvds->dpms == DRM_MODE_DPMS_OFF)
115 return;
116
117 rcar_lvds_write(lvds, LVDCR0, 0);
118 rcar_lvds_write(lvds, LVDCR1, 0);
119
120 clk_disable_unprepare(lvds->clock);
121
122 lvds->dpms = DRM_MODE_DPMS_OFF;
123}
124
125int rcar_du_lvdsenc_dpms(struct rcar_du_lvdsenc *lvds,
126 struct drm_crtc *crtc, int mode)
127{
128 if (mode == DRM_MODE_DPMS_OFF) {
129 rcar_du_lvdsenc_stop(lvds);
130 return 0;
131 } else if (crtc) {
132 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
133 return rcar_du_lvdsenc_start(lvds, rcrtc);
134 } else
135 return -EINVAL;
136}
137
138static int rcar_du_lvdsenc_get_resources(struct rcar_du_lvdsenc *lvds,
139 struct platform_device *pdev)
140{
141 struct resource *mem;
142 char name[7];
143
144 sprintf(name, "lvds.%u", lvds->index);
145
146 mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
147 if (mem == NULL) {
148 dev_err(&pdev->dev, "failed to get memory resource for %s\n",
149 name);
150 return -EINVAL;
151 }
152
153 lvds->mmio = devm_ioremap_resource(&pdev->dev, mem);
154 if (lvds->mmio == NULL) {
155 dev_err(&pdev->dev, "failed to remap memory resource for %s\n",
156 name);
157 return -ENOMEM;
158 }
159
160 lvds->clock = devm_clk_get(&pdev->dev, name);
161 if (IS_ERR(lvds->clock)) {
162 dev_err(&pdev->dev, "failed to get clock for %s\n", name);
163 return PTR_ERR(lvds->clock);
164 }
165
166 return 0;
167}
168
169int rcar_du_lvdsenc_init(struct rcar_du_device *rcdu)
170{
171 struct platform_device *pdev = to_platform_device(rcdu->dev);
172 struct rcar_du_lvdsenc *lvds;
173 unsigned int i;
174 int ret;
175
176 for (i = 0; i < rcdu->info->num_lvds; ++i) {
177 lvds = devm_kzalloc(&pdev->dev, sizeof(*lvds), GFP_KERNEL);
178 if (lvds == NULL) {
179 dev_err(&pdev->dev, "failed to allocate private data\n");
180 return -ENOMEM;
181 }
182
183 lvds->dev = rcdu;
184 lvds->index = i;
185 lvds->input = i ? RCAR_LVDS_INPUT_DU1 : RCAR_LVDS_INPUT_DU0;
186 lvds->dpms = DRM_MODE_DPMS_OFF;
187
188 ret = rcar_du_lvdsenc_get_resources(lvds, pdev);
189 if (ret < 0)
190 return ret;
191
192 rcdu->lvds[i] = lvds;
193 }
194
195 return 0;
196}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h
new file mode 100644
index 000000000000..7051c6de19ae
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h
@@ -0,0 +1,46 @@
1/*
2 * rcar_du_lvdsenc.h -- R-Car Display Unit LVDS Encoder
3 *
4 * Copyright (C) 2013 Renesas Corporation
5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#ifndef __RCAR_DU_LVDSENC_H__
15#define __RCAR_DU_LVDSENC_H__
16
17#include <linux/io.h>
18#include <linux/module.h>
19#include <linux/platform_data/rcar-du.h>
20
21struct rcar_drm_crtc;
22struct rcar_du_lvdsenc;
23
24enum rcar_lvds_input {
25 RCAR_LVDS_INPUT_DU0,
26 RCAR_LVDS_INPUT_DU1,
27 RCAR_LVDS_INPUT_DU2,
28};
29
30#if IS_ENABLED(CONFIG_DRM_RCAR_LVDS)
31int rcar_du_lvdsenc_init(struct rcar_du_device *rcdu);
32int rcar_du_lvdsenc_dpms(struct rcar_du_lvdsenc *lvds,
33 struct drm_crtc *crtc, int mode);
34#else
35static inline int rcar_du_lvdsenc_init(struct rcar_du_device *rcdu)
36{
37 return 0;
38}
39static inline int rcar_du_lvdsenc_dpms(struct rcar_du_lvdsenc *lvds,
40 struct drm_crtc *crtc, int mode)
41{
42 return 0;
43}
44#endif
45
46#endif /* __RCAR_DU_LVDSENC_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
index a65f81ddf51d..53000644733f 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
@@ -36,90 +36,95 @@ static inline struct rcar_du_plane *to_rcar_plane(struct drm_plane *plane)
36 return container_of(plane, struct rcar_du_kms_plane, plane)->hwplane; 36 return container_of(plane, struct rcar_du_kms_plane, plane)->hwplane;
37} 37}
38 38
39static u32 rcar_du_plane_read(struct rcar_du_device *rcdu, 39static u32 rcar_du_plane_read(struct rcar_du_group *rgrp,
40 unsigned int index, u32 reg) 40 unsigned int index, u32 reg)
41{ 41{
42 return rcar_du_read(rcdu, index * PLANE_OFF + reg); 42 return rcar_du_read(rgrp->dev,
43 rgrp->mmio_offset + index * PLANE_OFF + reg);
43} 44}
44 45
45static void rcar_du_plane_write(struct rcar_du_device *rcdu, 46static void rcar_du_plane_write(struct rcar_du_group *rgrp,
46 unsigned int index, u32 reg, u32 data) 47 unsigned int index, u32 reg, u32 data)
47{ 48{
48 rcar_du_write(rcdu, index * PLANE_OFF + reg, data); 49 rcar_du_write(rgrp->dev, rgrp->mmio_offset + index * PLANE_OFF + reg,
50 data);
49} 51}
50 52
51int rcar_du_plane_reserve(struct rcar_du_plane *plane, 53int rcar_du_plane_reserve(struct rcar_du_plane *plane,
52 const struct rcar_du_format_info *format) 54 const struct rcar_du_format_info *format)
53{ 55{
54 struct rcar_du_device *rcdu = plane->dev; 56 struct rcar_du_group *rgrp = plane->group;
55 unsigned int i; 57 unsigned int i;
56 int ret = -EBUSY; 58 int ret = -EBUSY;
57 59
58 mutex_lock(&rcdu->planes.lock); 60 mutex_lock(&rgrp->planes.lock);
59 61
60 for (i = 0; i < ARRAY_SIZE(rcdu->planes.planes); ++i) { 62 for (i = 0; i < ARRAY_SIZE(rgrp->planes.planes); ++i) {
61 if (!(rcdu->planes.free & (1 << i))) 63 if (!(rgrp->planes.free & (1 << i)))
62 continue; 64 continue;
63 65
64 if (format->planes == 1 || 66 if (format->planes == 1 ||
65 rcdu->planes.free & (1 << ((i + 1) % 8))) 67 rgrp->planes.free & (1 << ((i + 1) % 8)))
66 break; 68 break;
67 } 69 }
68 70
69 if (i == ARRAY_SIZE(rcdu->planes.planes)) 71 if (i == ARRAY_SIZE(rgrp->planes.planes))
70 goto done; 72 goto done;
71 73
72 rcdu->planes.free &= ~(1 << i); 74 rgrp->planes.free &= ~(1 << i);
73 if (format->planes == 2) 75 if (format->planes == 2)
74 rcdu->planes.free &= ~(1 << ((i + 1) % 8)); 76 rgrp->planes.free &= ~(1 << ((i + 1) % 8));
75 77
76 plane->hwindex = i; 78 plane->hwindex = i;
77 79
78 ret = 0; 80 ret = 0;
79 81
80done: 82done:
81 mutex_unlock(&rcdu->planes.lock); 83 mutex_unlock(&rgrp->planes.lock);
82 return ret; 84 return ret;
83} 85}
84 86
85void rcar_du_plane_release(struct rcar_du_plane *plane) 87void rcar_du_plane_release(struct rcar_du_plane *plane)
86{ 88{
87 struct rcar_du_device *rcdu = plane->dev; 89 struct rcar_du_group *rgrp = plane->group;
88 90
89 if (plane->hwindex == -1) 91 if (plane->hwindex == -1)
90 return; 92 return;
91 93
92 mutex_lock(&rcdu->planes.lock); 94 mutex_lock(&rgrp->planes.lock);
93 rcdu->planes.free |= 1 << plane->hwindex; 95 rgrp->planes.free |= 1 << plane->hwindex;
94 if (plane->format->planes == 2) 96 if (plane->format->planes == 2)
95 rcdu->planes.free |= 1 << ((plane->hwindex + 1) % 8); 97 rgrp->planes.free |= 1 << ((plane->hwindex + 1) % 8);
96 mutex_unlock(&rcdu->planes.lock); 98 mutex_unlock(&rgrp->planes.lock);
97 99
98 plane->hwindex = -1; 100 plane->hwindex = -1;
99} 101}
100 102
101void rcar_du_plane_update_base(struct rcar_du_plane *plane) 103void rcar_du_plane_update_base(struct rcar_du_plane *plane)
102{ 104{
103 struct rcar_du_device *rcdu = plane->dev; 105 struct rcar_du_group *rgrp = plane->group;
104 unsigned int index = plane->hwindex; 106 unsigned int index = plane->hwindex;
105 107
106 /* According to the datasheet the Y position is expressed in raster line 108 /* The Y position is expressed in raster line units and must be doubled
107 * units. However, 32bpp formats seem to require a doubled Y position 109 * for 32bpp formats, according to the R8A7790 datasheet. No mention of
108 * value. Similarly, for the second plane, NV12 and NV21 formats seem to 110 * doubling the Y position is found in the R8A7779 datasheet, but the
111 * rule seems to apply there as well.
112 *
113 * Similarly, for the second plane, NV12 and NV21 formats seem to
109 * require a halved Y position value. 114 * require a halved Y position value.
110 */ 115 */
111 rcar_du_plane_write(rcdu, index, PnSPXR, plane->src_x); 116 rcar_du_plane_write(rgrp, index, PnSPXR, plane->src_x);
112 rcar_du_plane_write(rcdu, index, PnSPYR, plane->src_y * 117 rcar_du_plane_write(rgrp, index, PnSPYR, plane->src_y *
113 (plane->format->bpp == 32 ? 2 : 1)); 118 (plane->format->bpp == 32 ? 2 : 1));
114 rcar_du_plane_write(rcdu, index, PnDSA0R, plane->dma[0]); 119 rcar_du_plane_write(rgrp, index, PnDSA0R, plane->dma[0]);
115 120
116 if (plane->format->planes == 2) { 121 if (plane->format->planes == 2) {
117 index = (index + 1) % 8; 122 index = (index + 1) % 8;
118 123
119 rcar_du_plane_write(rcdu, index, PnSPXR, plane->src_x); 124 rcar_du_plane_write(rgrp, index, PnSPXR, plane->src_x);
120 rcar_du_plane_write(rcdu, index, PnSPYR, plane->src_y * 125 rcar_du_plane_write(rgrp, index, PnSPYR, plane->src_y *
121 (plane->format->bpp == 16 ? 2 : 1) / 2); 126 (plane->format->bpp == 16 ? 2 : 1) / 2);
122 rcar_du_plane_write(rcdu, index, PnDSA0R, plane->dma[1]); 127 rcar_du_plane_write(rgrp, index, PnDSA0R, plane->dma[1]);
123 } 128 }
124} 129}
125 130
@@ -140,7 +145,7 @@ void rcar_du_plane_compute_base(struct rcar_du_plane *plane,
140static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane, 145static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane,
141 unsigned int index) 146 unsigned int index)
142{ 147{
143 struct rcar_du_device *rcdu = plane->dev; 148 struct rcar_du_group *rgrp = plane->group;
144 u32 colorkey; 149 u32 colorkey;
145 u32 pnmr; 150 u32 pnmr;
146 151
@@ -154,9 +159,9 @@ static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane,
154 * enable alpha-blending regardless of the X bit value. 159 * enable alpha-blending regardless of the X bit value.
155 */ 160 */
156 if (plane->format->fourcc != DRM_FORMAT_XRGB1555) 161 if (plane->format->fourcc != DRM_FORMAT_XRGB1555)
157 rcar_du_plane_write(rcdu, index, PnALPHAR, PnALPHAR_ABIT_0); 162 rcar_du_plane_write(rgrp, index, PnALPHAR, PnALPHAR_ABIT_0);
158 else 163 else
159 rcar_du_plane_write(rcdu, index, PnALPHAR, 164 rcar_du_plane_write(rgrp, index, PnALPHAR,
160 PnALPHAR_ABIT_X | plane->alpha); 165 PnALPHAR_ABIT_X | plane->alpha);
161 166
162 pnmr = PnMR_BM_MD | plane->format->pnmr; 167 pnmr = PnMR_BM_MD | plane->format->pnmr;
@@ -172,14 +177,14 @@ static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane,
172 if (plane->format->fourcc == DRM_FORMAT_YUYV) 177 if (plane->format->fourcc == DRM_FORMAT_YUYV)
173 pnmr |= PnMR_YCDF_YUYV; 178 pnmr |= PnMR_YCDF_YUYV;
174 179
175 rcar_du_plane_write(rcdu, index, PnMR, pnmr); 180 rcar_du_plane_write(rgrp, index, PnMR, pnmr);
176 181
177 switch (plane->format->fourcc) { 182 switch (plane->format->fourcc) {
178 case DRM_FORMAT_RGB565: 183 case DRM_FORMAT_RGB565:
179 colorkey = ((plane->colorkey & 0xf80000) >> 8) 184 colorkey = ((plane->colorkey & 0xf80000) >> 8)
180 | ((plane->colorkey & 0x00fc00) >> 5) 185 | ((plane->colorkey & 0x00fc00) >> 5)
181 | ((plane->colorkey & 0x0000f8) >> 3); 186 | ((plane->colorkey & 0x0000f8) >> 3);
182 rcar_du_plane_write(rcdu, index, PnTC2R, colorkey); 187 rcar_du_plane_write(rgrp, index, PnTC2R, colorkey);
183 break; 188 break;
184 189
185 case DRM_FORMAT_ARGB1555: 190 case DRM_FORMAT_ARGB1555:
@@ -187,12 +192,12 @@ static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane,
187 colorkey = ((plane->colorkey & 0xf80000) >> 9) 192 colorkey = ((plane->colorkey & 0xf80000) >> 9)
188 | ((plane->colorkey & 0x00f800) >> 6) 193 | ((plane->colorkey & 0x00f800) >> 6)
189 | ((plane->colorkey & 0x0000f8) >> 3); 194 | ((plane->colorkey & 0x0000f8) >> 3);
190 rcar_du_plane_write(rcdu, index, PnTC2R, colorkey); 195 rcar_du_plane_write(rgrp, index, PnTC2R, colorkey);
191 break; 196 break;
192 197
193 case DRM_FORMAT_XRGB8888: 198 case DRM_FORMAT_XRGB8888:
194 case DRM_FORMAT_ARGB8888: 199 case DRM_FORMAT_ARGB8888:
195 rcar_du_plane_write(rcdu, index, PnTC3R, 200 rcar_du_plane_write(rgrp, index, PnTC3R,
196 PnTC3R_CODE | (plane->colorkey & 0xffffff)); 201 PnTC3R_CODE | (plane->colorkey & 0xffffff));
197 break; 202 break;
198 } 203 }
@@ -201,7 +206,7 @@ static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane,
201static void __rcar_du_plane_setup(struct rcar_du_plane *plane, 206static void __rcar_du_plane_setup(struct rcar_du_plane *plane,
202 unsigned int index) 207 unsigned int index)
203{ 208{
204 struct rcar_du_device *rcdu = plane->dev; 209 struct rcar_du_group *rgrp = plane->group;
205 u32 ddcr2 = PnDDCR2_CODE; 210 u32 ddcr2 = PnDDCR2_CODE;
206 u32 ddcr4; 211 u32 ddcr4;
207 u32 mwr; 212 u32 mwr;
@@ -211,7 +216,7 @@ static void __rcar_du_plane_setup(struct rcar_du_plane *plane,
211 * The data format is selected by the DDDF field in PnMR and the EDF 216 * The data format is selected by the DDDF field in PnMR and the EDF
212 * field in DDCR4. 217 * field in DDCR4.
213 */ 218 */
214 ddcr4 = rcar_du_plane_read(rcdu, index, PnDDCR4); 219 ddcr4 = rcar_du_plane_read(rgrp, index, PnDDCR4);
215 ddcr4 &= ~PnDDCR4_EDF_MASK; 220 ddcr4 &= ~PnDDCR4_EDF_MASK;
216 ddcr4 |= plane->format->edf | PnDDCR4_CODE; 221 ddcr4 |= plane->format->edf | PnDDCR4_CODE;
217 222
@@ -232,8 +237,8 @@ static void __rcar_du_plane_setup(struct rcar_du_plane *plane,
232 } 237 }
233 } 238 }
234 239
235 rcar_du_plane_write(rcdu, index, PnDDCR2, ddcr2); 240 rcar_du_plane_write(rgrp, index, PnDDCR2, ddcr2);
236 rcar_du_plane_write(rcdu, index, PnDDCR4, ddcr4); 241 rcar_du_plane_write(rgrp, index, PnDDCR4, ddcr4);
237 242
238 /* Memory pitch (expressed in pixels) */ 243 /* Memory pitch (expressed in pixels) */
239 if (plane->format->planes == 2) 244 if (plane->format->planes == 2)
@@ -241,19 +246,19 @@ static void __rcar_du_plane_setup(struct rcar_du_plane *plane,
241 else 246 else
242 mwr = plane->pitch * 8 / plane->format->bpp; 247 mwr = plane->pitch * 8 / plane->format->bpp;
243 248
244 rcar_du_plane_write(rcdu, index, PnMWR, mwr); 249 rcar_du_plane_write(rgrp, index, PnMWR, mwr);
245 250
246 /* Destination position and size */ 251 /* Destination position and size */
247 rcar_du_plane_write(rcdu, index, PnDSXR, plane->width); 252 rcar_du_plane_write(rgrp, index, PnDSXR, plane->width);
248 rcar_du_plane_write(rcdu, index, PnDSYR, plane->height); 253 rcar_du_plane_write(rgrp, index, PnDSYR, plane->height);
249 rcar_du_plane_write(rcdu, index, PnDPXR, plane->dst_x); 254 rcar_du_plane_write(rgrp, index, PnDPXR, plane->dst_x);
250 rcar_du_plane_write(rcdu, index, PnDPYR, plane->dst_y); 255 rcar_du_plane_write(rgrp, index, PnDPYR, plane->dst_y);
251 256
252 /* Wrap-around and blinking, disabled */ 257 /* Wrap-around and blinking, disabled */
253 rcar_du_plane_write(rcdu, index, PnWASPR, 0); 258 rcar_du_plane_write(rgrp, index, PnWASPR, 0);
254 rcar_du_plane_write(rcdu, index, PnWAMWR, 4095); 259 rcar_du_plane_write(rgrp, index, PnWAMWR, 4095);
255 rcar_du_plane_write(rcdu, index, PnBTR, 0); 260 rcar_du_plane_write(rgrp, index, PnBTR, 0);
256 rcar_du_plane_write(rcdu, index, PnMLR, 0); 261 rcar_du_plane_write(rgrp, index, PnMLR, 0);
257} 262}
258 263
259void rcar_du_plane_setup(struct rcar_du_plane *plane) 264void rcar_du_plane_setup(struct rcar_du_plane *plane)
@@ -273,7 +278,7 @@ rcar_du_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
273 uint32_t src_w, uint32_t src_h) 278 uint32_t src_w, uint32_t src_h)
274{ 279{
275 struct rcar_du_plane *rplane = to_rcar_plane(plane); 280 struct rcar_du_plane *rplane = to_rcar_plane(plane);
276 struct rcar_du_device *rcdu = plane->dev->dev_private; 281 struct rcar_du_device *rcdu = rplane->group->dev;
277 const struct rcar_du_format_info *format; 282 const struct rcar_du_format_info *format;
278 unsigned int nplanes; 283 unsigned int nplanes;
279 int ret; 284 int ret;
@@ -316,26 +321,25 @@ rcar_du_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
316 rcar_du_plane_compute_base(rplane, fb); 321 rcar_du_plane_compute_base(rplane, fb);
317 rcar_du_plane_setup(rplane); 322 rcar_du_plane_setup(rplane);
318 323
319 mutex_lock(&rcdu->planes.lock); 324 mutex_lock(&rplane->group->planes.lock);
320 rplane->enabled = true; 325 rplane->enabled = true;
321 rcar_du_crtc_update_planes(rplane->crtc); 326 rcar_du_crtc_update_planes(rplane->crtc);
322 mutex_unlock(&rcdu->planes.lock); 327 mutex_unlock(&rplane->group->planes.lock);
323 328
324 return 0; 329 return 0;
325} 330}
326 331
327static int rcar_du_plane_disable(struct drm_plane *plane) 332static int rcar_du_plane_disable(struct drm_plane *plane)
328{ 333{
329 struct rcar_du_device *rcdu = plane->dev->dev_private;
330 struct rcar_du_plane *rplane = to_rcar_plane(plane); 334 struct rcar_du_plane *rplane = to_rcar_plane(plane);
331 335
332 if (!rplane->enabled) 336 if (!rplane->enabled)
333 return 0; 337 return 0;
334 338
335 mutex_lock(&rcdu->planes.lock); 339 mutex_lock(&rplane->group->planes.lock);
336 rplane->enabled = false; 340 rplane->enabled = false;
337 rcar_du_crtc_update_planes(rplane->crtc); 341 rcar_du_crtc_update_planes(rplane->crtc);
338 mutex_unlock(&rcdu->planes.lock); 342 mutex_unlock(&rplane->group->planes.lock);
339 343
340 rcar_du_plane_release(rplane); 344 rcar_du_plane_release(rplane);
341 345
@@ -377,9 +381,7 @@ static void rcar_du_plane_set_colorkey(struct rcar_du_plane *plane,
377static void rcar_du_plane_set_zpos(struct rcar_du_plane *plane, 381static void rcar_du_plane_set_zpos(struct rcar_du_plane *plane,
378 unsigned int zpos) 382 unsigned int zpos)
379{ 383{
380 struct rcar_du_device *rcdu = plane->dev; 384 mutex_lock(&plane->group->planes.lock);
381
382 mutex_lock(&rcdu->planes.lock);
383 if (plane->zpos == zpos) 385 if (plane->zpos == zpos)
384 goto done; 386 goto done;
385 387
@@ -390,21 +392,21 @@ static void rcar_du_plane_set_zpos(struct rcar_du_plane *plane,
390 rcar_du_crtc_update_planes(plane->crtc); 392 rcar_du_crtc_update_planes(plane->crtc);
391 393
392done: 394done:
393 mutex_unlock(&rcdu->planes.lock); 395 mutex_unlock(&plane->group->planes.lock);
394} 396}
395 397
396static int rcar_du_plane_set_property(struct drm_plane *plane, 398static int rcar_du_plane_set_property(struct drm_plane *plane,
397 struct drm_property *property, 399 struct drm_property *property,
398 uint64_t value) 400 uint64_t value)
399{ 401{
400 struct rcar_du_device *rcdu = plane->dev->dev_private;
401 struct rcar_du_plane *rplane = to_rcar_plane(plane); 402 struct rcar_du_plane *rplane = to_rcar_plane(plane);
403 struct rcar_du_group *rgrp = rplane->group;
402 404
403 if (property == rcdu->planes.alpha) 405 if (property == rgrp->planes.alpha)
404 rcar_du_plane_set_alpha(rplane, value); 406 rcar_du_plane_set_alpha(rplane, value);
405 else if (property == rcdu->planes.colorkey) 407 else if (property == rgrp->planes.colorkey)
406 rcar_du_plane_set_colorkey(rplane, value); 408 rcar_du_plane_set_colorkey(rplane, value);
407 else if (property == rcdu->planes.zpos) 409 else if (property == rgrp->planes.zpos)
408 rcar_du_plane_set_zpos(rplane, value); 410 rcar_du_plane_set_zpos(rplane, value);
409 else 411 else
410 return -EINVAL; 412 return -EINVAL;
@@ -432,37 +434,39 @@ static const uint32_t formats[] = {
432 DRM_FORMAT_NV16, 434 DRM_FORMAT_NV16,
433}; 435};
434 436
435int rcar_du_plane_init(struct rcar_du_device *rcdu) 437int rcar_du_planes_init(struct rcar_du_group *rgrp)
436{ 438{
439 struct rcar_du_planes *planes = &rgrp->planes;
440 struct rcar_du_device *rcdu = rgrp->dev;
437 unsigned int i; 441 unsigned int i;
438 442
439 mutex_init(&rcdu->planes.lock); 443 mutex_init(&planes->lock);
440 rcdu->planes.free = 0xff; 444 planes->free = 0xff;
441 445
442 rcdu->planes.alpha = 446 planes->alpha =
443 drm_property_create_range(rcdu->ddev, 0, "alpha", 0, 255); 447 drm_property_create_range(rcdu->ddev, 0, "alpha", 0, 255);
444 if (rcdu->planes.alpha == NULL) 448 if (planes->alpha == NULL)
445 return -ENOMEM; 449 return -ENOMEM;
446 450
447 /* The color key is expressed as an RGB888 triplet stored in a 32-bit 451 /* The color key is expressed as an RGB888 triplet stored in a 32-bit
448 * integer in XRGB8888 format. Bit 24 is used as a flag to disable (0) 452 * integer in XRGB8888 format. Bit 24 is used as a flag to disable (0)
449 * or enable source color keying (1). 453 * or enable source color keying (1).
450 */ 454 */
451 rcdu->planes.colorkey = 455 planes->colorkey =
452 drm_property_create_range(rcdu->ddev, 0, "colorkey", 456 drm_property_create_range(rcdu->ddev, 0, "colorkey",
453 0, 0x01ffffff); 457 0, 0x01ffffff);
454 if (rcdu->planes.colorkey == NULL) 458 if (planes->colorkey == NULL)
455 return -ENOMEM; 459 return -ENOMEM;
456 460
457 rcdu->planes.zpos = 461 planes->zpos =
458 drm_property_create_range(rcdu->ddev, 0, "zpos", 1, 7); 462 drm_property_create_range(rcdu->ddev, 0, "zpos", 1, 7);
459 if (rcdu->planes.zpos == NULL) 463 if (planes->zpos == NULL)
460 return -ENOMEM; 464 return -ENOMEM;
461 465
462 for (i = 0; i < ARRAY_SIZE(rcdu->planes.planes); ++i) { 466 for (i = 0; i < ARRAY_SIZE(planes->planes); ++i) {
463 struct rcar_du_plane *plane = &rcdu->planes.planes[i]; 467 struct rcar_du_plane *plane = &planes->planes[i];
464 468
465 plane->dev = rcdu; 469 plane->group = rgrp;
466 plane->hwindex = -1; 470 plane->hwindex = -1;
467 plane->alpha = 255; 471 plane->alpha = 255;
468 plane->colorkey = RCAR_DU_COLORKEY_NONE; 472 plane->colorkey = RCAR_DU_COLORKEY_NONE;
@@ -472,11 +476,16 @@ int rcar_du_plane_init(struct rcar_du_device *rcdu)
472 return 0; 476 return 0;
473} 477}
474 478
475int rcar_du_plane_register(struct rcar_du_device *rcdu) 479int rcar_du_planes_register(struct rcar_du_group *rgrp)
476{ 480{
481 struct rcar_du_planes *planes = &rgrp->planes;
482 struct rcar_du_device *rcdu = rgrp->dev;
483 unsigned int crtcs;
477 unsigned int i; 484 unsigned int i;
478 int ret; 485 int ret;
479 486
487 crtcs = ((1 << rcdu->num_crtcs) - 1) & (3 << (2 * rgrp->index));
488
480 for (i = 0; i < RCAR_DU_NUM_KMS_PLANES; ++i) { 489 for (i = 0; i < RCAR_DU_NUM_KMS_PLANES; ++i) {
481 struct rcar_du_kms_plane *plane; 490 struct rcar_du_kms_plane *plane;
482 491
@@ -484,23 +493,22 @@ int rcar_du_plane_register(struct rcar_du_device *rcdu)
484 if (plane == NULL) 493 if (plane == NULL)
485 return -ENOMEM; 494 return -ENOMEM;
486 495
487 plane->hwplane = &rcdu->planes.planes[i + 2]; 496 plane->hwplane = &planes->planes[i + 2];
488 plane->hwplane->zpos = 1; 497 plane->hwplane->zpos = 1;
489 498
490 ret = drm_plane_init(rcdu->ddev, &plane->plane, 499 ret = drm_plane_init(rcdu->ddev, &plane->plane, crtcs,
491 (1 << rcdu->num_crtcs) - 1,
492 &rcar_du_plane_funcs, formats, 500 &rcar_du_plane_funcs, formats,
493 ARRAY_SIZE(formats), false); 501 ARRAY_SIZE(formats), false);
494 if (ret < 0) 502 if (ret < 0)
495 return ret; 503 return ret;
496 504
497 drm_object_attach_property(&plane->plane.base, 505 drm_object_attach_property(&plane->plane.base,
498 rcdu->planes.alpha, 255); 506 planes->alpha, 255);
499 drm_object_attach_property(&plane->plane.base, 507 drm_object_attach_property(&plane->plane.base,
500 rcdu->planes.colorkey, 508 planes->colorkey,
501 RCAR_DU_COLORKEY_NONE); 509 RCAR_DU_COLORKEY_NONE);
502 drm_object_attach_property(&plane->plane.base, 510 drm_object_attach_property(&plane->plane.base,
503 rcdu->planes.zpos, 1); 511 planes->zpos, 1);
504 } 512 }
505 513
506 return 0; 514 return 0;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.h b/drivers/gpu/drm/rcar-du/rcar_du_plane.h
index 5397dba2fe57..f94f9ce84998 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.h
@@ -14,10 +14,13 @@
14#ifndef __RCAR_DU_PLANE_H__ 14#ifndef __RCAR_DU_PLANE_H__
15#define __RCAR_DU_PLANE_H__ 15#define __RCAR_DU_PLANE_H__
16 16
17struct drm_crtc; 17#include <linux/mutex.h>
18struct drm_framebuffer; 18
19struct rcar_du_device; 19#include <drm/drmP.h>
20#include <drm/drm_crtc.h>
21
20struct rcar_du_format_info; 22struct rcar_du_format_info;
23struct rcar_du_group;
21 24
22/* The RCAR DU has 8 hardware planes, shared between KMS planes and CRTCs. As 25/* The RCAR DU has 8 hardware planes, shared between KMS planes and CRTCs. As
23 * using KMS planes requires at least one of the CRTCs being enabled, no more 26 * using KMS planes requires at least one of the CRTCs being enabled, no more
@@ -30,7 +33,7 @@ struct rcar_du_format_info;
30#define RCAR_DU_NUM_SW_PLANES 9 33#define RCAR_DU_NUM_SW_PLANES 9
31 34
32struct rcar_du_plane { 35struct rcar_du_plane {
33 struct rcar_du_device *dev; 36 struct rcar_du_group *group;
34 struct drm_crtc *crtc; 37 struct drm_crtc *crtc;
35 38
36 bool enabled; 39 bool enabled;
@@ -54,8 +57,19 @@ struct rcar_du_plane {
54 unsigned int dst_y; 57 unsigned int dst_y;
55}; 58};
56 59
57int rcar_du_plane_init(struct rcar_du_device *rcdu); 60struct rcar_du_planes {
58int rcar_du_plane_register(struct rcar_du_device *rcdu); 61 struct rcar_du_plane planes[RCAR_DU_NUM_SW_PLANES];
62 unsigned int free;
63 struct mutex lock;
64
65 struct drm_property *alpha;
66 struct drm_property *colorkey;
67 struct drm_property *zpos;
68};
69
70int rcar_du_planes_init(struct rcar_du_group *rgrp);
71int rcar_du_planes_register(struct rcar_du_group *rgrp);
72
59void rcar_du_plane_setup(struct rcar_du_plane *plane); 73void rcar_du_plane_setup(struct rcar_du_plane *plane);
60void rcar_du_plane_update_base(struct rcar_du_plane *plane); 74void rcar_du_plane_update_base(struct rcar_du_plane *plane);
61void rcar_du_plane_compute_base(struct rcar_du_plane *plane, 75void rcar_du_plane_compute_base(struct rcar_du_plane *plane,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_regs.h b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
index 69f21f19b51c..73f7347f740b 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_regs.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
@@ -13,14 +13,15 @@
13#ifndef __RCAR_DU_REGS_H__ 13#ifndef __RCAR_DU_REGS_H__
14#define __RCAR_DU_REGS_H__ 14#define __RCAR_DU_REGS_H__
15 15
16#define DISP2_REG_OFFSET 0x30000 16#define DU0_REG_OFFSET 0x00000
17#define DU1_REG_OFFSET 0x30000
18#define DU2_REG_OFFSET 0x40000
17 19
18/* ----------------------------------------------------------------------------- 20/* -----------------------------------------------------------------------------
19 * Display Control Registers 21 * Display Control Registers
20 */ 22 */
21 23
22#define DSYSR 0x00000 /* display 1 */ 24#define DSYSR 0x00000 /* display 1 */
23#define D2SYSR 0x30000 /* display 2 */
24#define DSYSR_ILTS (1 << 29) 25#define DSYSR_ILTS (1 << 29)
25#define DSYSR_DSEC (1 << 20) 26#define DSYSR_DSEC (1 << 20)
26#define DSYSR_IUPD (1 << 16) 27#define DSYSR_IUPD (1 << 16)
@@ -35,7 +36,6 @@
35#define DSYSR_SCM_INT_VIDEO (3 << 4) 36#define DSYSR_SCM_INT_VIDEO (3 << 4)
36 37
37#define DSMR 0x00004 38#define DSMR 0x00004
38#define D2SMR 0x30004
39#define DSMR_VSPM (1 << 28) 39#define DSMR_VSPM (1 << 28)
40#define DSMR_ODPM (1 << 27) 40#define DSMR_ODPM (1 << 27)
41#define DSMR_DIPM_DISP (0 << 25) 41#define DSMR_DIPM_DISP (0 << 25)
@@ -60,7 +60,6 @@
60#define DSMR_CSY_MASK (3 << 6) 60#define DSMR_CSY_MASK (3 << 6)
61 61
62#define DSSR 0x00008 62#define DSSR 0x00008
63#define D2SSR 0x30008
64#define DSSR_VC1FB_DSA0 (0 << 30) 63#define DSSR_VC1FB_DSA0 (0 << 30)
65#define DSSR_VC1FB_DSA1 (1 << 30) 64#define DSSR_VC1FB_DSA1 (1 << 30)
66#define DSSR_VC1FB_DSA2 (2 << 30) 65#define DSSR_VC1FB_DSA2 (2 << 30)
@@ -80,7 +79,6 @@
80#define DSSR_ADC(n) (1 << ((n)-1)) 79#define DSSR_ADC(n) (1 << ((n)-1))
81 80
82#define DSRCR 0x0000c 81#define DSRCR 0x0000c
83#define D2SRCR 0x3000c
84#define DSRCR_TVCL (1 << 15) 82#define DSRCR_TVCL (1 << 15)
85#define DSRCR_FRCL (1 << 14) 83#define DSRCR_FRCL (1 << 14)
86#define DSRCR_VBCL (1 << 11) 84#define DSRCR_VBCL (1 << 11)
@@ -90,7 +88,6 @@
90#define DSRCR_MASK 0x0000cbff 88#define DSRCR_MASK 0x0000cbff
91 89
92#define DIER 0x00010 90#define DIER 0x00010
93#define D2IER 0x30010
94#define DIER_TVE (1 << 15) 91#define DIER_TVE (1 << 15)
95#define DIER_FRE (1 << 14) 92#define DIER_FRE (1 << 14)
96#define DIER_VBE (1 << 11) 93#define DIER_VBE (1 << 11)
@@ -114,7 +111,6 @@
114#define DPPR_BPP32 (DPPR_BPP32_P1 | DPPR_BPP32_P2) /* plane1 & 2 */ 111#define DPPR_BPP32 (DPPR_BPP32_P1 | DPPR_BPP32_P2) /* plane1 & 2 */
115 112
116#define DEFR 0x00020 113#define DEFR 0x00020
117#define D2EFR 0x30020
118#define DEFR_CODE (0x7773 << 16) 114#define DEFR_CODE (0x7773 << 16)
119#define DEFR_EXSL (1 << 12) 115#define DEFR_EXSL (1 << 12)
120#define DEFR_EXVL (1 << 11) 116#define DEFR_EXVL (1 << 11)
@@ -137,12 +133,10 @@
137#define DCPCR_DCE (1 << 0) 133#define DCPCR_DCE (1 << 0)
138 134
139#define DEFR2 0x00034 135#define DEFR2 0x00034
140#define D2EFR2 0x30034
141#define DEFR2_CODE (0x7775 << 16) 136#define DEFR2_CODE (0x7775 << 16)
142#define DEFR2_DEFE2G (1 << 0) 137#define DEFR2_DEFE2G (1 << 0)
143 138
144#define DEFR3 0x00038 139#define DEFR3 0x00038
145#define D2EFR3 0x30038
146#define DEFR3_CODE (0x7776 << 16) 140#define DEFR3_CODE (0x7776 << 16)
147#define DEFR3_EVDA (1 << 14) 141#define DEFR3_EVDA (1 << 14)
148#define DEFR3_EVDM_1 (1 << 12) 142#define DEFR3_EVDM_1 (1 << 12)
@@ -153,7 +147,6 @@
153#define DEFR3_DEFE3 (1 << 0) 147#define DEFR3_DEFE3 (1 << 0)
154 148
155#define DEFR4 0x0003c 149#define DEFR4 0x0003c
156#define D2EFR4 0x3003c
157#define DEFR4_CODE (0x7777 << 16) 150#define DEFR4_CODE (0x7777 << 16)
158#define DEFR4_LRUO (1 << 5) 151#define DEFR4_LRUO (1 << 5)
159#define DEFR4_SPCE (1 << 4) 152#define DEFR4_SPCE (1 << 4)
@@ -205,6 +198,68 @@
205#define DEFR6_DEFAULT (DEFR6_CODE | DEFR6_TCNE2) 198#define DEFR6_DEFAULT (DEFR6_CODE | DEFR6_TCNE2)
206 199
207/* ----------------------------------------------------------------------------- 200/* -----------------------------------------------------------------------------
201 * R8A7790-only Control Registers
202 */
203
204#define DD1SSR 0x20008
205#define DD1SSR_TVR (1 << 15)
206#define DD1SSR_FRM (1 << 14)
207#define DD1SSR_BUF (1 << 12)
208#define DD1SSR_VBK (1 << 11)
209#define DD1SSR_RINT (1 << 9)
210#define DD1SSR_HBK (1 << 8)
211#define DD1SSR_ADC(n) (1 << ((n)-1))
212
213#define DD1SRCR 0x2000c
214#define DD1SRCR_TVR (1 << 15)
215#define DD1SRCR_FRM (1 << 14)
216#define DD1SRCR_BUF (1 << 12)
217#define DD1SRCR_VBK (1 << 11)
218#define DD1SRCR_RINT (1 << 9)
219#define DD1SRCR_HBK (1 << 8)
220#define DD1SRCR_ADC(n) (1 << ((n)-1))
221
222#define DD1IER 0x20010
223#define DD1IER_TVR (1 << 15)
224#define DD1IER_FRM (1 << 14)
225#define DD1IER_BUF (1 << 12)
226#define DD1IER_VBK (1 << 11)
227#define DD1IER_RINT (1 << 9)
228#define DD1IER_HBK (1 << 8)
229#define DD1IER_ADC(n) (1 << ((n)-1))
230
231#define DEFR8 0x20020
232#define DEFR8_CODE (0x7790 << 16)
233#define DEFR8_VSCS (1 << 6)
234#define DEFR8_DRGBS_DU(n) ((n) << 4)
235#define DEFR8_DRGBS_MASK (3 << 4)
236#define DEFR8_DEFE8 (1 << 0)
237
238#define DOFLR 0x20024
239#define DOFLR_CODE (0x7790 << 16)
240#define DOFLR_HSYCFL1 (1 << 13)
241#define DOFLR_VSYCFL1 (1 << 12)
242#define DOFLR_ODDFL1 (1 << 11)
243#define DOFLR_DISPFL1 (1 << 10)
244#define DOFLR_CDEFL1 (1 << 9)
245#define DOFLR_RGBFL1 (1 << 8)
246#define DOFLR_HSYCFL0 (1 << 5)
247#define DOFLR_VSYCFL0 (1 << 4)
248#define DOFLR_ODDFL0 (1 << 3)
249#define DOFLR_DISPFL0 (1 << 2)
250#define DOFLR_CDEFL0 (1 << 1)
251#define DOFLR_RGBFL0 (1 << 0)
252
253#define DIDSR 0x20028
254#define DIDSR_CODE (0x7790 << 16)
255#define DIDSR_LCDS_DCLKIN(n) (0 << (8 + (n) * 2))
256#define DIDSR_LCDS_LVDS0(n) (2 << (8 + (n) * 2))
257#define DIDSR_LCDS_LVDS1(n) (3 << (8 + (n) * 2))
258#define DIDSR_LCDS_MASK(n) (3 << (8 + (n) * 2))
259#define DIDSR_PCDS_CLK(n, clk) (clk << ((n) * 2))
260#define DIDSR_PCDS_MASK(n) (3 << ((n) * 2))
261
262/* -----------------------------------------------------------------------------
208 * Display Timing Generation Registers 263 * Display Timing Generation Registers
209 */ 264 */
210 265
@@ -349,21 +404,34 @@
349#define APnMR_BM_AD (2 << 4) /* Auto Display Change Mode */ 404#define APnMR_BM_AD (2 << 4) /* Auto Display Change Mode */
350 405
351#define APnMWR 0x0a104 406#define APnMWR 0x0a104
407
408#define APnDSXR 0x0a110
409#define APnDSYR 0x0a114
410#define APnDPXR 0x0a118
411#define APnDPYR 0x0a11c
412
352#define APnDSA0R 0x0a120 413#define APnDSA0R 0x0a120
353#define APnDSA1R 0x0a124 414#define APnDSA1R 0x0a124
354#define APnDSA2R 0x0a128 415#define APnDSA2R 0x0a128
416
417#define APnSPXR 0x0a130
418#define APnSPYR 0x0a134
419#define APnWASPR 0x0a138
420#define APnWAMWR 0x0a13c
421
422#define APnBTR 0x0a140
423
355#define APnMLR 0x0a150 424#define APnMLR 0x0a150
425#define APnSWAPR 0x0a180
356 426
357/* ----------------------------------------------------------------------------- 427/* -----------------------------------------------------------------------------
358 * Display Capture Registers 428 * Display Capture Registers
359 */ 429 */
360 430
431#define DCMR 0x0c100
361#define DCMWR 0x0c104 432#define DCMWR 0x0c104
362#define DC2MWR 0x0c204
363#define DCSAR 0x0c120 433#define DCSAR 0x0c120
364#define DC2SAR 0x0c220
365#define DCMLR 0x0c150 434#define DCMLR 0x0c150
366#define DC2MLR 0x0c250
367 435
368/* ----------------------------------------------------------------------------- 436/* -----------------------------------------------------------------------------
369 * Color Palette Registers 437 * Color Palette Registers
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vga.c b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
index 327289ec380d..41d563adfeaa 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vga.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * rcar_du_vga.c -- R-Car Display Unit VGA DAC and Connector 2 * rcar_du_vgacon.c -- R-Car Display Unit VGA Connector
3 * 3 *
4 * Copyright (C) 2013 Renesas Corporation 4 * Copyright (C) 2013 Renesas Corporation
5 * 5 *
@@ -16,12 +16,9 @@
16#include <drm/drm_crtc_helper.h> 16#include <drm/drm_crtc_helper.h>
17 17
18#include "rcar_du_drv.h" 18#include "rcar_du_drv.h"
19#include "rcar_du_encoder.h"
19#include "rcar_du_kms.h" 20#include "rcar_du_kms.h"
20#include "rcar_du_vga.h" 21#include "rcar_du_vgacon.h"
21
22/* -----------------------------------------------------------------------------
23 * Connector
24 */
25 22
26static int rcar_du_vga_connector_get_modes(struct drm_connector *connector) 23static int rcar_du_vga_connector_get_modes(struct drm_connector *connector)
27{ 24{
@@ -49,7 +46,7 @@ static void rcar_du_vga_connector_destroy(struct drm_connector *connector)
49static enum drm_connector_status 46static enum drm_connector_status
50rcar_du_vga_connector_detect(struct drm_connector *connector, bool force) 47rcar_du_vga_connector_detect(struct drm_connector *connector, bool force)
51{ 48{
52 return connector_status_unknown; 49 return connector_status_connected;
53} 50}
54 51
55static const struct drm_connector_funcs connector_funcs = { 52static const struct drm_connector_funcs connector_funcs = {
@@ -59,8 +56,8 @@ static const struct drm_connector_funcs connector_funcs = {
59 .destroy = rcar_du_vga_connector_destroy, 56 .destroy = rcar_du_vga_connector_destroy,
60}; 57};
61 58
62static int rcar_du_vga_connector_init(struct rcar_du_device *rcdu, 59int rcar_du_vga_connector_init(struct rcar_du_device *rcdu,
63 struct rcar_du_encoder *renc) 60 struct rcar_du_encoder *renc)
64{ 61{
65 struct rcar_du_connector *rcon; 62 struct rcar_du_connector *rcon;
66 struct drm_connector *connector; 63 struct drm_connector *connector;
@@ -97,53 +94,3 @@ static int rcar_du_vga_connector_init(struct rcar_du_device *rcdu,
97 94
98 return 0; 95 return 0;
99} 96}
100
101/* -----------------------------------------------------------------------------
102 * Encoder
103 */
104
105static void rcar_du_vga_encoder_dpms(struct drm_encoder *encoder, int mode)
106{
107}
108
109static bool rcar_du_vga_encoder_mode_fixup(struct drm_encoder *encoder,
110 const struct drm_display_mode *mode,
111 struct drm_display_mode *adjusted_mode)
112{
113 return true;
114}
115
116static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
117 .dpms = rcar_du_vga_encoder_dpms,
118 .mode_fixup = rcar_du_vga_encoder_mode_fixup,
119 .prepare = rcar_du_encoder_mode_prepare,
120 .commit = rcar_du_encoder_mode_commit,
121 .mode_set = rcar_du_encoder_mode_set,
122};
123
124static const struct drm_encoder_funcs encoder_funcs = {
125 .destroy = drm_encoder_cleanup,
126};
127
128int rcar_du_vga_init(struct rcar_du_device *rcdu,
129 const struct rcar_du_encoder_vga_data *data,
130 unsigned int output)
131{
132 struct rcar_du_encoder *renc;
133 int ret;
134
135 renc = devm_kzalloc(rcdu->dev, sizeof(*renc), GFP_KERNEL);
136 if (renc == NULL)
137 return -ENOMEM;
138
139 renc->output = output;
140
141 ret = drm_encoder_init(rcdu->ddev, &renc->encoder, &encoder_funcs,
142 DRM_MODE_ENCODER_DAC);
143 if (ret < 0)
144 return ret;
145
146 drm_encoder_helper_add(&renc->encoder, &encoder_helper_funcs);
147
148 return rcar_du_vga_connector_init(rcdu, renc);
149}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vga.h b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.h
index 66b4d2d7190d..b12b0cf7f117 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vga.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * rcar_du_vga.h -- R-Car Display Unit VGA DAC and Connector 2 * rcar_du_vgacon.h -- R-Car Display Unit VGA Connector
3 * 3 *
4 * Copyright (C) 2013 Renesas Corporation 4 * Copyright (C) 2013 Renesas Corporation
5 * 5 *
@@ -11,14 +11,13 @@
11 * (at your option) any later version. 11 * (at your option) any later version.
12 */ 12 */
13 13
14#ifndef __RCAR_DU_VGA_H__ 14#ifndef __RCAR_DU_VGACON_H__
15#define __RCAR_DU_VGA_H__ 15#define __RCAR_DU_VGACON_H__
16 16
17struct rcar_du_device; 17struct rcar_du_device;
18struct rcar_du_encoder_vga_data; 18struct rcar_du_encoder;
19 19
20int rcar_du_vga_init(struct rcar_du_device *rcdu, 20int rcar_du_vga_connector_init(struct rcar_du_device *rcdu,
21 const struct rcar_du_encoder_vga_data *data, 21 struct rcar_du_encoder *renc);
22 unsigned int output);
23 22
24#endif /* __RCAR_DU_VGA_H__ */ 23#endif /* __RCAR_DU_VGACON_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h b/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h
new file mode 100644
index 000000000000..77cf9289ab65
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h
@@ -0,0 +1,69 @@
1/*
2 * rcar_lvds_regs.h -- R-Car LVDS Interface Registers Definitions
3 *
4 * Copyright (C) 2013 Renesas Electronics Corporation
5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2
10 * as published by the Free Software Foundation.
11 */
12
13#ifndef __RCAR_LVDS_REGS_H__
14#define __RCAR_LVDS_REGS_H__
15
16#define LVDCR0 0x0000
17#define LVDCR0_DUSEL (1 << 15)
18#define LVDCR0_DMD (1 << 12)
19#define LVDCR0_LVMD_MASK (0xf << 8)
20#define LVDCR0_LVMD_SHIFT 8
21#define LVDCR0_PLLEN (1 << 4)
22#define LVDCR0_BEN (1 << 2)
23#define LVDCR0_LVEN (1 << 1)
24#define LVDCR0_LVRES (1 << 0)
25
26#define LVDCR1 0x0004
27#define LVDCR1_CKSEL (1 << 15)
28#define LVDCR1_CHSTBY(n) (3 << (2 + (n) * 2))
29#define LVDCR1_CLKSTBY (3 << 0)
30
31#define LVDPLLCR 0x0008
32#define LVDPLLCR_CEEN (1 << 14)
33#define LVDPLLCR_FBEN (1 << 13)
34#define LVDPLLCR_COSEL (1 << 12)
35#define LVDPLLCR_PLLDLYCNT_150M (0x1bf << 0)
36#define LVDPLLCR_PLLDLYCNT_121M (0x22c << 0)
37#define LVDPLLCR_PLLDLYCNT_60M (0x77b << 0)
38#define LVDPLLCR_PLLDLYCNT_38M (0x69a << 0)
39#define LVDPLLCR_PLLDLYCNT_MASK (0x7ff << 0)
40
41#define LVDCTRCR 0x000c
42#define LVDCTRCR_CTR3SEL_ZERO (0 << 12)
43#define LVDCTRCR_CTR3SEL_ODD (1 << 12)
44#define LVDCTRCR_CTR3SEL_CDE (2 << 12)
45#define LVDCTRCR_CTR3SEL_MASK (7 << 12)
46#define LVDCTRCR_CTR2SEL_DISP (0 << 8)
47#define LVDCTRCR_CTR2SEL_ODD (1 << 8)
48#define LVDCTRCR_CTR2SEL_CDE (2 << 8)
49#define LVDCTRCR_CTR2SEL_HSYNC (3 << 8)
50#define LVDCTRCR_CTR2SEL_VSYNC (4 << 8)
51#define LVDCTRCR_CTR2SEL_MASK (7 << 8)
52#define LVDCTRCR_CTR1SEL_VSYNC (0 << 4)
53#define LVDCTRCR_CTR1SEL_DISP (1 << 4)
54#define LVDCTRCR_CTR1SEL_ODD (2 << 4)
55#define LVDCTRCR_CTR1SEL_CDE (3 << 4)
56#define LVDCTRCR_CTR1SEL_HSYNC (4 << 4)
57#define LVDCTRCR_CTR1SEL_MASK (7 << 4)
58#define LVDCTRCR_CTR0SEL_HSYNC (0 << 0)
59#define LVDCTRCR_CTR0SEL_VSYNC (1 << 0)
60#define LVDCTRCR_CTR0SEL_DISP (2 << 0)
61#define LVDCTRCR_CTR0SEL_ODD (3 << 0)
62#define LVDCTRCR_CTR0SEL_CDE (4 << 0)
63#define LVDCTRCR_CTR0SEL_MASK (7 << 0)
64
65#define LVDCHCR 0x0010
66#define LVDCHCR_CHSEL_CH(n, c) ((((c) - (n)) & 3) << ((n) * 4))
67#define LVDCHCR_CHSEL_MASK(n) (3 << ((n) * 4))
68
69#endif /* __RCAR_LVDS_REGS_H__ */
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
index bd6b2cf508d5..b17d0710871a 100644
--- a/drivers/gpu/drm/savage/savage_bci.c
+++ b/drivers/gpu/drm/savage/savage_bci.c
@@ -1072,7 +1072,7 @@ void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
1072 drm_idlelock_release(&file_priv->master->lock); 1072 drm_idlelock_release(&file_priv->master->lock);
1073} 1073}
1074 1074
1075struct drm_ioctl_desc savage_ioctls[] = { 1075const struct drm_ioctl_desc savage_ioctls[] = {
1076 DRM_IOCTL_DEF_DRV(SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1076 DRM_IOCTL_DEF_DRV(SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1077 DRM_IOCTL_DEF_DRV(SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH), 1077 DRM_IOCTL_DEF_DRV(SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH),
1078 DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH), 1078 DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH),
diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c
index 71b2081e7835..3c030216e888 100644
--- a/drivers/gpu/drm/savage/savage_drv.c
+++ b/drivers/gpu/drm/savage/savage_drv.c
@@ -42,7 +42,6 @@ static const struct file_operations savage_driver_fops = {
42 .unlocked_ioctl = drm_ioctl, 42 .unlocked_ioctl = drm_ioctl,
43 .mmap = drm_mmap, 43 .mmap = drm_mmap,
44 .poll = drm_poll, 44 .poll = drm_poll,
45 .fasync = drm_fasync,
46#ifdef CONFIG_COMPAT 45#ifdef CONFIG_COMPAT
47 .compat_ioctl = drm_compat_ioctl, 46 .compat_ioctl = drm_compat_ioctl,
48#endif 47#endif
@@ -51,7 +50,7 @@ static const struct file_operations savage_driver_fops = {
51 50
52static struct drm_driver driver = { 51static struct drm_driver driver = {
53 .driver_features = 52 .driver_features =
54 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_DMA | DRIVER_PCI_DMA, 53 DRIVER_USE_AGP | DRIVER_HAVE_DMA | DRIVER_PCI_DMA,
55 .dev_priv_size = sizeof(drm_savage_buf_priv_t), 54 .dev_priv_size = sizeof(drm_savage_buf_priv_t),
56 .load = savage_driver_load, 55 .load = savage_driver_load,
57 .firstopen = savage_driver_firstopen, 56 .firstopen = savage_driver_firstopen,
diff --git a/drivers/gpu/drm/savage/savage_drv.h b/drivers/gpu/drm/savage/savage_drv.h
index c05082a59f6f..335f8fcf1041 100644
--- a/drivers/gpu/drm/savage/savage_drv.h
+++ b/drivers/gpu/drm/savage/savage_drv.h
@@ -104,7 +104,7 @@ enum savage_family {
104 S3_LAST 104 S3_LAST
105}; 105};
106 106
107extern struct drm_ioctl_desc savage_ioctls[]; 107extern const struct drm_ioctl_desc savage_ioctls[];
108extern int savage_max_ioctl; 108extern int savage_max_ioctl;
109 109
110#define S3_SAVAGE3D_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE_MX)) 110#define S3_SAVAGE3D_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE_MX))
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index 99e2034e49cc..54bad98e9477 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -465,7 +465,8 @@ void shmob_drm_crtc_finish_page_flip(struct shmob_drm_crtc *scrtc)
465 465
466static int shmob_drm_crtc_page_flip(struct drm_crtc *crtc, 466static int shmob_drm_crtc_page_flip(struct drm_crtc *crtc,
467 struct drm_framebuffer *fb, 467 struct drm_framebuffer *fb,
468 struct drm_pending_vblank_event *event) 468 struct drm_pending_vblank_event *event,
469 uint32_t page_flip_flags)
469{ 470{
470 struct shmob_drm_crtc *scrtc = to_shmob_crtc(crtc); 471 struct shmob_drm_crtc *scrtc = to_shmob_crtc(crtc);
471 struct drm_device *dev = scrtc->crtc.dev; 472 struct drm_device *dev = scrtc->crtc.dev;
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index 5f83f9a3ef59..015551866b4a 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -257,7 +257,6 @@ static const struct file_operations shmob_drm_fops = {
257#endif 257#endif
258 .poll = drm_poll, 258 .poll = drm_poll,
259 .read = drm_read, 259 .read = drm_read,
260 .fasync = drm_fasync,
261 .llseek = no_llseek, 260 .llseek = no_llseek,
262 .mmap = drm_gem_cma_mmap, 261 .mmap = drm_gem_cma_mmap,
263}; 262};
@@ -285,7 +284,7 @@ static struct drm_driver shmob_drm_driver = {
285 .gem_prime_mmap = drm_gem_cma_prime_mmap, 284 .gem_prime_mmap = drm_gem_cma_prime_mmap,
286 .dumb_create = drm_gem_cma_dumb_create, 285 .dumb_create = drm_gem_cma_dumb_create,
287 .dumb_map_offset = drm_gem_cma_dumb_map_offset, 286 .dumb_map_offset = drm_gem_cma_dumb_map_offset,
288 .dumb_destroy = drm_gem_cma_dumb_destroy, 287 .dumb_destroy = drm_gem_dumb_destroy,
289 .fops = &shmob_drm_fops, 288 .fops = &shmob_drm_fops,
290 .name = "shmob-drm", 289 .name = "shmob-drm",
291 .desc = "Renesas SH Mobile DRM", 290 .desc = "Renesas SH Mobile DRM",
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index 5a5325e6b759..4383b74a3aa4 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -72,7 +72,6 @@ static const struct file_operations sis_driver_fops = {
72 .unlocked_ioctl = drm_ioctl, 72 .unlocked_ioctl = drm_ioctl,
73 .mmap = drm_mmap, 73 .mmap = drm_mmap,
74 .poll = drm_poll, 74 .poll = drm_poll,
75 .fasync = drm_fasync,
76#ifdef CONFIG_COMPAT 75#ifdef CONFIG_COMPAT
77 .compat_ioctl = drm_compat_ioctl, 76 .compat_ioctl = drm_compat_ioctl,
78#endif 77#endif
@@ -103,7 +102,7 @@ void sis_driver_postclose(struct drm_device *dev, struct drm_file *file)
103} 102}
104 103
105static struct drm_driver driver = { 104static struct drm_driver driver = {
106 .driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR, 105 .driver_features = DRIVER_USE_AGP,
107 .load = sis_driver_load, 106 .load = sis_driver_load,
108 .unload = sis_driver_unload, 107 .unload = sis_driver_unload,
109 .open = sis_driver_open, 108 .open = sis_driver_open,
diff --git a/drivers/gpu/drm/sis/sis_drv.h b/drivers/gpu/drm/sis/sis_drv.h
index 13b527bb83be..c31c0253054d 100644
--- a/drivers/gpu/drm/sis/sis_drv.h
+++ b/drivers/gpu/drm/sis/sis_drv.h
@@ -70,7 +70,7 @@ extern void sis_reclaim_buffers_locked(struct drm_device *dev,
70 struct drm_file *file_priv); 70 struct drm_file *file_priv);
71extern void sis_lastclose(struct drm_device *dev); 71extern void sis_lastclose(struct drm_device *dev);
72 72
73extern struct drm_ioctl_desc sis_ioctls[]; 73extern const struct drm_ioctl_desc sis_ioctls[];
74extern int sis_max_ioctl; 74extern int sis_max_ioctl;
75 75
76#endif 76#endif
diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c
index 9a43d98e5003..01857d836350 100644
--- a/drivers/gpu/drm/sis/sis_mm.c
+++ b/drivers/gpu/drm/sis/sis_mm.c
@@ -109,7 +109,8 @@ static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file,
109 if (pool == AGP_TYPE) { 109 if (pool == AGP_TYPE) {
110 retval = drm_mm_insert_node(&dev_priv->agp_mm, 110 retval = drm_mm_insert_node(&dev_priv->agp_mm,
111 &item->mm_node, 111 &item->mm_node,
112 mem->size, 0); 112 mem->size, 0,
113 DRM_MM_SEARCH_DEFAULT);
113 offset = item->mm_node.start; 114 offset = item->mm_node.start;
114 } else { 115 } else {
115#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE) 116#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
@@ -121,7 +122,8 @@ static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file,
121#else 122#else
122 retval = drm_mm_insert_node(&dev_priv->vram_mm, 123 retval = drm_mm_insert_node(&dev_priv->vram_mm,
123 &item->mm_node, 124 &item->mm_node,
124 mem->size, 0); 125 mem->size, 0,
126 DRM_MM_SEARCH_DEFAULT);
125 offset = item->mm_node.start; 127 offset = item->mm_node.start;
126#endif 128#endif
127 } 129 }
@@ -348,7 +350,7 @@ void sis_reclaim_buffers_locked(struct drm_device *dev,
348 return; 350 return;
349} 351}
350 352
351struct drm_ioctl_desc sis_ioctls[] = { 353const struct drm_ioctl_desc sis_ioctls[] = {
352 DRM_IOCTL_DEF_DRV(SIS_FB_ALLOC, sis_fb_alloc, DRM_AUTH), 354 DRM_IOCTL_DEF_DRV(SIS_FB_ALLOC, sis_fb_alloc, DRM_AUTH),
353 DRM_IOCTL_DEF_DRV(SIS_FB_FREE, sis_drm_free, DRM_AUTH), 355 DRM_IOCTL_DEF_DRV(SIS_FB_FREE, sis_drm_free, DRM_AUTH),
354 DRM_IOCTL_DEF_DRV(SIS_AGP_INIT, sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY), 356 DRM_IOCTL_DEF_DRV(SIS_AGP_INIT, sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c
index ddfa743459d0..3492ca5c46d3 100644
--- a/drivers/gpu/drm/tdfx/tdfx_drv.c
+++ b/drivers/gpu/drm/tdfx/tdfx_drv.c
@@ -48,7 +48,6 @@ static const struct file_operations tdfx_driver_fops = {
48 .unlocked_ioctl = drm_ioctl, 48 .unlocked_ioctl = drm_ioctl,
49 .mmap = drm_mmap, 49 .mmap = drm_mmap,
50 .poll = drm_poll, 50 .poll = drm_poll,
51 .fasync = drm_fasync,
52#ifdef CONFIG_COMPAT 51#ifdef CONFIG_COMPAT
53 .compat_ioctl = drm_compat_ioctl, 52 .compat_ioctl = drm_compat_ioctl,
54#endif 53#endif
@@ -56,7 +55,6 @@ static const struct file_operations tdfx_driver_fops = {
56}; 55};
57 56
58static struct drm_driver driver = { 57static struct drm_driver driver = {
59 .driver_features = DRIVER_USE_MTRR,
60 .fops = &tdfx_driver_fops, 58 .fops = &tdfx_driver_fops,
61 .name = DRIVER_NAME, 59 .name = DRIVER_NAME,
62 .desc = DRIVER_DESC, 60 .desc = DRIVER_DESC,
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index 7418dcd986d3..d36efc13b16f 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -15,7 +15,7 @@
15 * this program. If not, see <http://www.gnu.org/licenses/>. 15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */ 16 */
17 17
18#include <linux/kfifo.h> 18#include "drm_flip_work.h"
19 19
20#include "tilcdc_drv.h" 20#include "tilcdc_drv.h"
21#include "tilcdc_regs.h" 21#include "tilcdc_regs.h"
@@ -35,21 +35,18 @@ struct tilcdc_crtc {
35 struct drm_framebuffer *scanout[2]; 35 struct drm_framebuffer *scanout[2];
36 36
37 /* for deferred fb unref's: */ 37 /* for deferred fb unref's: */
38 DECLARE_KFIFO_PTR(unref_fifo, struct drm_framebuffer *); 38 struct drm_flip_work unref_work;
39 struct work_struct work;
40}; 39};
41#define to_tilcdc_crtc(x) container_of(x, struct tilcdc_crtc, base) 40#define to_tilcdc_crtc(x) container_of(x, struct tilcdc_crtc, base)
42 41
43static void unref_worker(struct work_struct *work) 42static void unref_worker(struct drm_flip_work *work, void *val)
44{ 43{
45 struct tilcdc_crtc *tilcdc_crtc = 44 struct tilcdc_crtc *tilcdc_crtc =
46 container_of(work, struct tilcdc_crtc, work); 45 container_of(work, struct tilcdc_crtc, unref_work);
47 struct drm_device *dev = tilcdc_crtc->base.dev; 46 struct drm_device *dev = tilcdc_crtc->base.dev;
48 struct drm_framebuffer *fb;
49 47
50 mutex_lock(&dev->mode_config.mutex); 48 mutex_lock(&dev->mode_config.mutex);
51 while (kfifo_get(&tilcdc_crtc->unref_fifo, &fb)) 49 drm_framebuffer_unreference(val);
52 drm_framebuffer_unreference(fb);
53 mutex_unlock(&dev->mode_config.mutex); 50 mutex_unlock(&dev->mode_config.mutex);
54} 51}
55 52
@@ -68,19 +65,14 @@ static void set_scanout(struct drm_crtc *crtc, int n)
68 }; 65 };
69 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); 66 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
70 struct drm_device *dev = crtc->dev; 67 struct drm_device *dev = crtc->dev;
68 struct tilcdc_drm_private *priv = dev->dev_private;
71 69
72 pm_runtime_get_sync(dev->dev); 70 pm_runtime_get_sync(dev->dev);
73 tilcdc_write(dev, base_reg[n], tilcdc_crtc->start); 71 tilcdc_write(dev, base_reg[n], tilcdc_crtc->start);
74 tilcdc_write(dev, ceil_reg[n], tilcdc_crtc->end); 72 tilcdc_write(dev, ceil_reg[n], tilcdc_crtc->end);
75 if (tilcdc_crtc->scanout[n]) { 73 if (tilcdc_crtc->scanout[n]) {
76 if (kfifo_put(&tilcdc_crtc->unref_fifo, 74 drm_flip_work_queue(&tilcdc_crtc->unref_work, tilcdc_crtc->scanout[n]);
77 (const struct drm_framebuffer **)&tilcdc_crtc->scanout[n])) { 75 drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);
78 struct tilcdc_drm_private *priv = dev->dev_private;
79 queue_work(priv->wq, &tilcdc_crtc->work);
80 } else {
81 dev_err(dev->dev, "unref fifo full!\n");
82 drm_framebuffer_unreference(tilcdc_crtc->scanout[n]);
83 }
84 } 76 }
85 tilcdc_crtc->scanout[n] = crtc->fb; 77 tilcdc_crtc->scanout[n] = crtc->fb;
86 drm_framebuffer_reference(tilcdc_crtc->scanout[n]); 78 drm_framebuffer_reference(tilcdc_crtc->scanout[n]);
@@ -149,14 +141,15 @@ static void tilcdc_crtc_destroy(struct drm_crtc *crtc)
149 WARN_ON(tilcdc_crtc->dpms == DRM_MODE_DPMS_ON); 141 WARN_ON(tilcdc_crtc->dpms == DRM_MODE_DPMS_ON);
150 142
151 drm_crtc_cleanup(crtc); 143 drm_crtc_cleanup(crtc);
152 WARN_ON(!kfifo_is_empty(&tilcdc_crtc->unref_fifo)); 144 drm_flip_work_cleanup(&tilcdc_crtc->unref_work);
153 kfifo_free(&tilcdc_crtc->unref_fifo); 145
154 kfree(tilcdc_crtc); 146 kfree(tilcdc_crtc);
155} 147}
156 148
157static int tilcdc_crtc_page_flip(struct drm_crtc *crtc, 149static int tilcdc_crtc_page_flip(struct drm_crtc *crtc,
158 struct drm_framebuffer *fb, 150 struct drm_framebuffer *fb,
159 struct drm_pending_vblank_event *event) 151 struct drm_pending_vblank_event *event,
152 uint32_t page_flip_flags)
160{ 153{
161 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); 154 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
162 struct drm_device *dev = crtc->dev; 155 struct drm_device *dev = crtc->dev;
@@ -379,7 +372,12 @@ static int tilcdc_crtc_mode_set(struct drm_crtc *crtc,
379 else 372 else
380 tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_EDGE); 373 tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_EDGE);
381 374
382 if (mode->flags & DRM_MODE_FLAG_NHSYNC) 375 /*
376 * use value from adjusted_mode here as this might have been
377 * changed as part of the fixup for slave encoders to solve the
378 * issue where tilcdc timings are not VESA compliant
379 */
380 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
383 tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC); 381 tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC);
384 else 382 else
385 tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC); 383 tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC);
@@ -666,14 +664,13 @@ struct drm_crtc *tilcdc_crtc_create(struct drm_device *dev)
666 tilcdc_crtc->dpms = DRM_MODE_DPMS_OFF; 664 tilcdc_crtc->dpms = DRM_MODE_DPMS_OFF;
667 init_waitqueue_head(&tilcdc_crtc->frame_done_wq); 665 init_waitqueue_head(&tilcdc_crtc->frame_done_wq);
668 666
669 ret = kfifo_alloc(&tilcdc_crtc->unref_fifo, 16, GFP_KERNEL); 667 ret = drm_flip_work_init(&tilcdc_crtc->unref_work, 16,
668 "unref", unref_worker);
670 if (ret) { 669 if (ret) {
671 dev_err(dev->dev, "could not allocate unref FIFO\n"); 670 dev_err(dev->dev, "could not allocate unref FIFO\n");
672 goto fail; 671 goto fail;
673 } 672 }
674 673
675 INIT_WORK(&tilcdc_crtc->work, unref_worker);
676
677 ret = drm_crtc_init(dev, crtc, &tilcdc_crtc_funcs); 674 ret = drm_crtc_init(dev, crtc, &tilcdc_crtc_funcs);
678 if (ret < 0) 675 if (ret < 0)
679 goto fail; 676 goto fail;
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 40b71da5a214..116da199b942 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -497,7 +497,6 @@ static const struct file_operations fops = {
497#endif 497#endif
498 .poll = drm_poll, 498 .poll = drm_poll,
499 .read = drm_read, 499 .read = drm_read,
500 .fasync = drm_fasync,
501 .llseek = no_llseek, 500 .llseek = no_llseek,
502 .mmap = drm_gem_cma_mmap, 501 .mmap = drm_gem_cma_mmap,
503}; 502};
@@ -519,7 +518,7 @@ static struct drm_driver tilcdc_driver = {
519 .gem_vm_ops = &drm_gem_cma_vm_ops, 518 .gem_vm_ops = &drm_gem_cma_vm_ops,
520 .dumb_create = drm_gem_cma_dumb_create, 519 .dumb_create = drm_gem_cma_dumb_create,
521 .dumb_map_offset = drm_gem_cma_dumb_map_offset, 520 .dumb_map_offset = drm_gem_cma_dumb_map_offset,
522 .dumb_destroy = drm_gem_cma_dumb_destroy, 521 .dumb_destroy = drm_gem_dumb_destroy,
523#ifdef CONFIG_DEBUG_FS 522#ifdef CONFIG_DEBUG_FS
524 .debugfs_init = tilcdc_debugfs_init, 523 .debugfs_init = tilcdc_debugfs_init,
525 .debugfs_cleanup = tilcdc_debugfs_cleanup, 524 .debugfs_cleanup = tilcdc_debugfs_cleanup,
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_slave.c b/drivers/gpu/drm/tilcdc/tilcdc_slave.c
index dfffaf014022..23b3203d8241 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_slave.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_slave.c
@@ -73,13 +73,38 @@ static void slave_encoder_prepare(struct drm_encoder *encoder)
73 tilcdc_crtc_set_panel_info(encoder->crtc, &slave_info); 73 tilcdc_crtc_set_panel_info(encoder->crtc, &slave_info);
74} 74}
75 75
76static bool slave_encoder_fixup(struct drm_encoder *encoder,
77 const struct drm_display_mode *mode,
78 struct drm_display_mode *adjusted_mode)
79{
80 /*
81 * tilcdc does not generate VESA-complient sync but aligns
82 * VS on the second edge of HS instead of first edge.
83 * We use adjusted_mode, to fixup sync by aligning both rising
84 * edges and add HSKEW offset to let the slave encoder fix it up.
85 */
86 adjusted_mode->hskew = mode->hsync_end - mode->hsync_start;
87 adjusted_mode->flags |= DRM_MODE_FLAG_HSKEW;
88
89 if (mode->flags & DRM_MODE_FLAG_NHSYNC) {
90 adjusted_mode->flags |= DRM_MODE_FLAG_PHSYNC;
91 adjusted_mode->flags &= ~DRM_MODE_FLAG_NHSYNC;
92 } else {
93 adjusted_mode->flags |= DRM_MODE_FLAG_NHSYNC;
94 adjusted_mode->flags &= ~DRM_MODE_FLAG_PHSYNC;
95 }
96
97 return drm_i2c_encoder_mode_fixup(encoder, mode, adjusted_mode);
98}
99
100
76static const struct drm_encoder_funcs slave_encoder_funcs = { 101static const struct drm_encoder_funcs slave_encoder_funcs = {
77 .destroy = slave_encoder_destroy, 102 .destroy = slave_encoder_destroy,
78}; 103};
79 104
80static const struct drm_encoder_helper_funcs slave_encoder_helper_funcs = { 105static const struct drm_encoder_helper_funcs slave_encoder_helper_funcs = {
81 .dpms = drm_i2c_encoder_dpms, 106 .dpms = drm_i2c_encoder_dpms,
82 .mode_fixup = drm_i2c_encoder_mode_fixup, 107 .mode_fixup = slave_encoder_fixup,
83 .prepare = slave_encoder_prepare, 108 .prepare = slave_encoder_prepare,
84 .commit = drm_i2c_encoder_commit, 109 .commit = drm_i2c_encoder_commit,
85 .mode_set = drm_i2c_encoder_mode_set, 110 .mode_set = drm_i2c_encoder_mode_set,
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index cb9dd674670c..f1a857ec1021 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -45,7 +45,6 @@
45#define TTM_DEBUG(fmt, arg...) 45#define TTM_DEBUG(fmt, arg...)
46#define TTM_BO_HASH_ORDER 13 46#define TTM_BO_HASH_ORDER 13
47 47
48static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
49static int ttm_bo_swapout(struct ttm_mem_shrink *shrink); 48static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
50static void ttm_bo_global_kobj_release(struct kobject *kobj); 49static void ttm_bo_global_kobj_release(struct kobject *kobj);
51 50
@@ -615,13 +614,7 @@ static void ttm_bo_release(struct kref *kref)
615 struct ttm_bo_device *bdev = bo->bdev; 614 struct ttm_bo_device *bdev = bo->bdev;
616 struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type]; 615 struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
617 616
618 write_lock(&bdev->vm_lock); 617 drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node);
619 if (likely(bo->vm_node != NULL)) {
620 rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
621 drm_mm_put_block(bo->vm_node);
622 bo->vm_node = NULL;
623 }
624 write_unlock(&bdev->vm_lock);
625 ttm_mem_io_lock(man, false); 618 ttm_mem_io_lock(man, false);
626 ttm_mem_io_free_vm(bo); 619 ttm_mem_io_free_vm(bo);
627 ttm_mem_io_unlock(man); 620 ttm_mem_io_unlock(man);
@@ -1129,6 +1122,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
1129 bo->resv = &bo->ttm_resv; 1122 bo->resv = &bo->ttm_resv;
1130 reservation_object_init(bo->resv); 1123 reservation_object_init(bo->resv);
1131 atomic_inc(&bo->glob->bo_count); 1124 atomic_inc(&bo->glob->bo_count);
1125 drm_vma_node_reset(&bo->vma_node);
1132 1126
1133 ret = ttm_bo_check_placement(bo, placement); 1127 ret = ttm_bo_check_placement(bo, placement);
1134 1128
@@ -1139,7 +1133,8 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
1139 if (likely(!ret) && 1133 if (likely(!ret) &&
1140 (bo->type == ttm_bo_type_device || 1134 (bo->type == ttm_bo_type_device ||
1141 bo->type == ttm_bo_type_sg)) 1135 bo->type == ttm_bo_type_sg))
1142 ret = ttm_bo_setup_vm(bo); 1136 ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
1137 bo->mem.num_pages);
1143 1138
1144 locked = ww_mutex_trylock(&bo->resv->lock); 1139 locked = ww_mutex_trylock(&bo->resv->lock);
1145 WARN_ON(!locked); 1140 WARN_ON(!locked);
@@ -1424,10 +1419,7 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
1424 TTM_DEBUG("Swap list was clean\n"); 1419 TTM_DEBUG("Swap list was clean\n");
1425 spin_unlock(&glob->lru_lock); 1420 spin_unlock(&glob->lru_lock);
1426 1421
1427 BUG_ON(!drm_mm_clean(&bdev->addr_space_mm)); 1422 drm_vma_offset_manager_destroy(&bdev->vma_manager);
1428 write_lock(&bdev->vm_lock);
1429 drm_mm_takedown(&bdev->addr_space_mm);
1430 write_unlock(&bdev->vm_lock);
1431 1423
1432 return ret; 1424 return ret;
1433} 1425}
@@ -1441,7 +1433,6 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
1441{ 1433{
1442 int ret = -EINVAL; 1434 int ret = -EINVAL;
1443 1435
1444 rwlock_init(&bdev->vm_lock);
1445 bdev->driver = driver; 1436 bdev->driver = driver;
1446 1437
1447 memset(bdev->man, 0, sizeof(bdev->man)); 1438 memset(bdev->man, 0, sizeof(bdev->man));
@@ -1454,9 +1445,8 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
1454 if (unlikely(ret != 0)) 1445 if (unlikely(ret != 0))
1455 goto out_no_sys; 1446 goto out_no_sys;
1456 1447
1457 bdev->addr_space_rb = RB_ROOT; 1448 drm_vma_offset_manager_init(&bdev->vma_manager, file_page_offset,
1458 drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000); 1449 0x10000000);
1459
1460 INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue); 1450 INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
1461 INIT_LIST_HEAD(&bdev->ddestroy); 1451 INIT_LIST_HEAD(&bdev->ddestroy);
1462 bdev->dev_mapping = NULL; 1452 bdev->dev_mapping = NULL;
@@ -1498,12 +1488,8 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1498void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo) 1488void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
1499{ 1489{
1500 struct ttm_bo_device *bdev = bo->bdev; 1490 struct ttm_bo_device *bdev = bo->bdev;
1501 loff_t offset = (loff_t) bo->addr_space_offset;
1502 loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
1503 1491
1504 if (!bdev->dev_mapping) 1492 drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping);
1505 return;
1506 unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
1507 ttm_mem_io_free_vm(bo); 1493 ttm_mem_io_free_vm(bo);
1508} 1494}
1509 1495
@@ -1520,78 +1506,6 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
1520 1506
1521EXPORT_SYMBOL(ttm_bo_unmap_virtual); 1507EXPORT_SYMBOL(ttm_bo_unmap_virtual);
1522 1508
1523static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
1524{
1525 struct ttm_bo_device *bdev = bo->bdev;
1526 struct rb_node **cur = &bdev->addr_space_rb.rb_node;
1527 struct rb_node *parent = NULL;
1528 struct ttm_buffer_object *cur_bo;
1529 unsigned long offset = bo->vm_node->start;
1530 unsigned long cur_offset;
1531
1532 while (*cur) {
1533 parent = *cur;
1534 cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
1535 cur_offset = cur_bo->vm_node->start;
1536 if (offset < cur_offset)
1537 cur = &parent->rb_left;
1538 else if (offset > cur_offset)
1539 cur = &parent->rb_right;
1540 else
1541 BUG();
1542 }
1543
1544 rb_link_node(&bo->vm_rb, parent, cur);
1545 rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
1546}
1547
1548/**
1549 * ttm_bo_setup_vm:
1550 *
1551 * @bo: the buffer to allocate address space for
1552 *
1553 * Allocate address space in the drm device so that applications
1554 * can mmap the buffer and access the contents. This only
1555 * applies to ttm_bo_type_device objects as others are not
1556 * placed in the drm device address space.
1557 */
1558
1559static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
1560{
1561 struct ttm_bo_device *bdev = bo->bdev;
1562 int ret;
1563
1564retry_pre_get:
1565 ret = drm_mm_pre_get(&bdev->addr_space_mm);
1566 if (unlikely(ret != 0))
1567 return ret;
1568
1569 write_lock(&bdev->vm_lock);
1570 bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
1571 bo->mem.num_pages, 0, 0);
1572
1573 if (unlikely(bo->vm_node == NULL)) {
1574 ret = -ENOMEM;
1575 goto out_unlock;
1576 }
1577
1578 bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
1579 bo->mem.num_pages, 0);
1580
1581 if (unlikely(bo->vm_node == NULL)) {
1582 write_unlock(&bdev->vm_lock);
1583 goto retry_pre_get;
1584 }
1585
1586 ttm_bo_vm_insert_rb(bo);
1587 write_unlock(&bdev->vm_lock);
1588 bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;
1589
1590 return 0;
1591out_unlock:
1592 write_unlock(&bdev->vm_lock);
1593 return ret;
1594}
1595 1509
1596int ttm_bo_wait(struct ttm_buffer_object *bo, 1510int ttm_bo_wait(struct ttm_buffer_object *bo,
1597 bool lazy, bool interruptible, bool no_wait) 1511 bool lazy, bool interruptible, bool no_wait)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
index e4367f91472a..c58eba33bd5f 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -61,28 +61,25 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
61 lpfn = placement->lpfn; 61 lpfn = placement->lpfn;
62 if (!lpfn) 62 if (!lpfn)
63 lpfn = man->size; 63 lpfn = man->size;
64 do {
65 ret = drm_mm_pre_get(mm);
66 if (unlikely(ret))
67 return ret;
68 64
69 spin_lock(&rman->lock); 65 node = kzalloc(sizeof(*node), GFP_KERNEL);
70 node = drm_mm_search_free_in_range(mm, 66 if (!node)
71 mem->num_pages, mem->page_alignment, 67 return -ENOMEM;
72 placement->fpfn, lpfn, 1); 68
73 if (unlikely(node == NULL)) { 69 spin_lock(&rman->lock);
74 spin_unlock(&rman->lock); 70 ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
75 return 0; 71 mem->page_alignment,
76 } 72 placement->fpfn, lpfn,
77 node = drm_mm_get_block_atomic_range(node, mem->num_pages, 73 DRM_MM_SEARCH_BEST);
78 mem->page_alignment, 74 spin_unlock(&rman->lock);
79 placement->fpfn, 75
80 lpfn); 76 if (unlikely(ret)) {
81 spin_unlock(&rman->lock); 77 kfree(node);
82 } while (node == NULL); 78 } else {
79 mem->mm_node = node;
80 mem->start = node->start;
81 }
83 82
84 mem->mm_node = node;
85 mem->start = node->start;
86 return 0; 83 return 0;
87} 84}
88 85
@@ -93,8 +90,10 @@ static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
93 90
94 if (mem->mm_node) { 91 if (mem->mm_node) {
95 spin_lock(&rman->lock); 92 spin_lock(&rman->lock);
96 drm_mm_put_block(mem->mm_node); 93 drm_mm_remove_node(mem->mm_node);
97 spin_unlock(&rman->lock); 94 spin_unlock(&rman->lock);
95
96 kfree(mem->mm_node);
98 mem->mm_node = NULL; 97 mem->mm_node = NULL;
99 } 98 }
100} 99}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 319cf4127c5b..7cc904d3a4d1 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -30,6 +30,7 @@
30 30
31#include <drm/ttm/ttm_bo_driver.h> 31#include <drm/ttm/ttm_bo_driver.h>
32#include <drm/ttm/ttm_placement.h> 32#include <drm/ttm/ttm_placement.h>
33#include <drm/drm_vma_manager.h>
33#include <linux/io.h> 34#include <linux/io.h>
34#include <linux/highmem.h> 35#include <linux/highmem.h>
35#include <linux/wait.h> 36#include <linux/wait.h>
@@ -450,7 +451,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
450 INIT_LIST_HEAD(&fbo->lru); 451 INIT_LIST_HEAD(&fbo->lru);
451 INIT_LIST_HEAD(&fbo->swap); 452 INIT_LIST_HEAD(&fbo->swap);
452 INIT_LIST_HEAD(&fbo->io_reserve_lru); 453 INIT_LIST_HEAD(&fbo->io_reserve_lru);
453 fbo->vm_node = NULL; 454 drm_vma_node_reset(&fbo->vma_node);
454 atomic_set(&fbo->cpu_writers, 0); 455 atomic_set(&fbo->cpu_writers, 0);
455 456
456 spin_lock(&bdev->fence_lock); 457 spin_lock(&bdev->fence_lock);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 3df9f16b041c..1006c15445e9 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -33,6 +33,7 @@
33#include <ttm/ttm_module.h> 33#include <ttm/ttm_module.h>
34#include <ttm/ttm_bo_driver.h> 34#include <ttm/ttm_bo_driver.h>
35#include <ttm/ttm_placement.h> 35#include <ttm/ttm_placement.h>
36#include <drm/drm_vma_manager.h>
36#include <linux/mm.h> 37#include <linux/mm.h>
37#include <linux/rbtree.h> 38#include <linux/rbtree.h>
38#include <linux/module.h> 39#include <linux/module.h>
@@ -40,37 +41,6 @@
40 41
41#define TTM_BO_VM_NUM_PREFAULT 16 42#define TTM_BO_VM_NUM_PREFAULT 16
42 43
43static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
44 unsigned long page_start,
45 unsigned long num_pages)
46{
47 struct rb_node *cur = bdev->addr_space_rb.rb_node;
48 unsigned long cur_offset;
49 struct ttm_buffer_object *bo;
50 struct ttm_buffer_object *best_bo = NULL;
51
52 while (likely(cur != NULL)) {
53 bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
54 cur_offset = bo->vm_node->start;
55 if (page_start >= cur_offset) {
56 cur = cur->rb_right;
57 best_bo = bo;
58 if (page_start == cur_offset)
59 break;
60 } else
61 cur = cur->rb_left;
62 }
63
64 if (unlikely(best_bo == NULL))
65 return NULL;
66
67 if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
68 (page_start + num_pages)))
69 return NULL;
70
71 return best_bo;
72}
73
74static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 44static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
75{ 45{
76 struct ttm_buffer_object *bo = (struct ttm_buffer_object *) 46 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
@@ -146,9 +116,9 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
146 } 116 }
147 117
148 page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) + 118 page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
149 bo->vm_node->start - vma->vm_pgoff; 119 drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;
150 page_last = vma_pages(vma) + 120 page_last = vma_pages(vma) +
151 bo->vm_node->start - vma->vm_pgoff; 121 drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;
152 122
153 if (unlikely(page_offset >= bo->num_pages)) { 123 if (unlikely(page_offset >= bo->num_pages)) {
154 retval = VM_FAULT_SIGBUS; 124 retval = VM_FAULT_SIGBUS;
@@ -249,6 +219,30 @@ static const struct vm_operations_struct ttm_bo_vm_ops = {
249 .close = ttm_bo_vm_close 219 .close = ttm_bo_vm_close
250}; 220};
251 221
222static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
223 unsigned long offset,
224 unsigned long pages)
225{
226 struct drm_vma_offset_node *node;
227 struct ttm_buffer_object *bo = NULL;
228
229 drm_vma_offset_lock_lookup(&bdev->vma_manager);
230
231 node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
232 if (likely(node)) {
233 bo = container_of(node, struct ttm_buffer_object, vma_node);
234 if (!kref_get_unless_zero(&bo->kref))
235 bo = NULL;
236 }
237
238 drm_vma_offset_unlock_lookup(&bdev->vma_manager);
239
240 if (!bo)
241 pr_err("Could not find buffer object to map\n");
242
243 return bo;
244}
245
252int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, 246int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
253 struct ttm_bo_device *bdev) 247 struct ttm_bo_device *bdev)
254{ 248{
@@ -256,17 +250,9 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
256 struct ttm_buffer_object *bo; 250 struct ttm_buffer_object *bo;
257 int ret; 251 int ret;
258 252
259 read_lock(&bdev->vm_lock); 253 bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
260 bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff, 254 if (unlikely(!bo))
261 vma_pages(vma));
262 if (likely(bo != NULL) && !kref_get_unless_zero(&bo->kref))
263 bo = NULL;
264 read_unlock(&bdev->vm_lock);
265
266 if (unlikely(bo == NULL)) {
267 pr_err("Could not find buffer object to map\n");
268 return -EINVAL; 255 return -EINVAL;
269 }
270 256
271 driver = bo->bdev->driver; 257 driver = bo->bdev->driver;
272 if (unlikely(!driver->verify_access)) { 258 if (unlikely(!driver->verify_access)) {
@@ -304,162 +290,3 @@ int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
304 return 0; 290 return 0;
305} 291}
306EXPORT_SYMBOL(ttm_fbdev_mmap); 292EXPORT_SYMBOL(ttm_fbdev_mmap);
307
308
309ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
310 const char __user *wbuf, char __user *rbuf, size_t count,
311 loff_t *f_pos, bool write)
312{
313 struct ttm_buffer_object *bo;
314 struct ttm_bo_driver *driver;
315 struct ttm_bo_kmap_obj map;
316 unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
317 unsigned long kmap_offset;
318 unsigned long kmap_end;
319 unsigned long kmap_num;
320 size_t io_size;
321 unsigned int page_offset;
322 char *virtual;
323 int ret;
324 bool no_wait = false;
325 bool dummy;
326
327 read_lock(&bdev->vm_lock);
328 bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
329 if (likely(bo != NULL))
330 ttm_bo_reference(bo);
331 read_unlock(&bdev->vm_lock);
332
333 if (unlikely(bo == NULL))
334 return -EFAULT;
335
336 driver = bo->bdev->driver;
337 if (unlikely(!driver->verify_access)) {
338 ret = -EPERM;
339 goto out_unref;
340 }
341
342 ret = driver->verify_access(bo, filp);
343 if (unlikely(ret != 0))
344 goto out_unref;
345
346 kmap_offset = dev_offset - bo->vm_node->start;
347 if (unlikely(kmap_offset >= bo->num_pages)) {
348 ret = -EFBIG;
349 goto out_unref;
350 }
351
352 page_offset = *f_pos & ~PAGE_MASK;
353 io_size = bo->num_pages - kmap_offset;
354 io_size = (io_size << PAGE_SHIFT) - page_offset;
355 if (count < io_size)
356 io_size = count;
357
358 kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
359 kmap_num = kmap_end - kmap_offset + 1;
360
361 ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
362
363 switch (ret) {
364 case 0:
365 break;
366 case -EBUSY:
367 ret = -EAGAIN;
368 goto out_unref;
369 default:
370 goto out_unref;
371 }
372
373 ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
374 if (unlikely(ret != 0)) {
375 ttm_bo_unreserve(bo);
376 goto out_unref;
377 }
378
379 virtual = ttm_kmap_obj_virtual(&map, &dummy);
380 virtual += page_offset;
381
382 if (write)
383 ret = copy_from_user(virtual, wbuf, io_size);
384 else
385 ret = copy_to_user(rbuf, virtual, io_size);
386
387 ttm_bo_kunmap(&map);
388 ttm_bo_unreserve(bo);
389 ttm_bo_unref(&bo);
390
391 if (unlikely(ret != 0))
392 return -EFBIG;
393
394 *f_pos += io_size;
395
396 return io_size;
397out_unref:
398 ttm_bo_unref(&bo);
399 return ret;
400}
401
402ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
403 char __user *rbuf, size_t count, loff_t *f_pos,
404 bool write)
405{
406 struct ttm_bo_kmap_obj map;
407 unsigned long kmap_offset;
408 unsigned long kmap_end;
409 unsigned long kmap_num;
410 size_t io_size;
411 unsigned int page_offset;
412 char *virtual;
413 int ret;
414 bool no_wait = false;
415 bool dummy;
416
417 kmap_offset = (*f_pos >> PAGE_SHIFT);
418 if (unlikely(kmap_offset >= bo->num_pages))
419 return -EFBIG;
420
421 page_offset = *f_pos & ~PAGE_MASK;
422 io_size = bo->num_pages - kmap_offset;
423 io_size = (io_size << PAGE_SHIFT) - page_offset;
424 if (count < io_size)
425 io_size = count;
426
427 kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
428 kmap_num = kmap_end - kmap_offset + 1;
429
430 ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
431
432 switch (ret) {
433 case 0:
434 break;
435 case -EBUSY:
436 return -EAGAIN;
437 default:
438 return ret;
439 }
440
441 ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
442 if (unlikely(ret != 0)) {
443 ttm_bo_unreserve(bo);
444 return ret;
445 }
446
447 virtual = ttm_kmap_obj_virtual(&map, &dummy);
448 virtual += page_offset;
449
450 if (write)
451 ret = copy_from_user(virtual, wbuf, io_size);
452 else
453 ret = copy_to_user(rbuf, virtual, io_size);
454
455 ttm_bo_kunmap(&map);
456 ttm_bo_unreserve(bo);
457 ttm_bo_unref(&bo);
458
459 if (unlikely(ret != 0))
460 return ret;
461
462 *f_pos += io_size;
463
464 return io_size;
465}
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index c0770dbba74a..7650dc0d78ce 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -65,7 +65,6 @@ static const struct file_operations udl_driver_fops = {
65 .read = drm_read, 65 .read = drm_read,
66 .unlocked_ioctl = drm_ioctl, 66 .unlocked_ioctl = drm_ioctl,
67 .release = drm_release, 67 .release = drm_release,
68 .fasync = drm_fasync,
69#ifdef CONFIG_COMPAT 68#ifdef CONFIG_COMPAT
70 .compat_ioctl = drm_compat_ioctl, 69 .compat_ioctl = drm_compat_ioctl,
71#endif 70#endif
@@ -84,7 +83,7 @@ static struct drm_driver driver = {
84 83
85 .dumb_create = udl_dumb_create, 84 .dumb_create = udl_dumb_create,
86 .dumb_map_offset = udl_gem_mmap, 85 .dumb_map_offset = udl_gem_mmap,
87 .dumb_destroy = udl_dumb_destroy, 86 .dumb_destroy = drm_gem_dumb_destroy,
88 .fops = &udl_driver_fops, 87 .fops = &udl_driver_fops,
89 88
90 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 89 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index cc6d90f28c71..56aec9409fa3 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -114,8 +114,6 @@ int udl_dumb_create(struct drm_file *file_priv,
114 struct drm_mode_create_dumb *args); 114 struct drm_mode_create_dumb *args);
115int udl_gem_mmap(struct drm_file *file_priv, struct drm_device *dev, 115int udl_gem_mmap(struct drm_file *file_priv, struct drm_device *dev,
116 uint32_t handle, uint64_t *offset); 116 uint32_t handle, uint64_t *offset);
117int udl_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
118 uint32_t handle);
119 117
120int udl_gem_init_object(struct drm_gem_object *obj); 118int udl_gem_init_object(struct drm_gem_object *obj);
121void udl_gem_free_object(struct drm_gem_object *gem_obj); 119void udl_gem_free_object(struct drm_gem_object *gem_obj);
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index ef034fa3e6f5..8dbe9d0ae9a7 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -66,12 +66,6 @@ int udl_dumb_create(struct drm_file *file,
66 args->size, &args->handle); 66 args->size, &args->handle);
67} 67}
68 68
69int udl_dumb_destroy(struct drm_file *file, struct drm_device *dev,
70 uint32_t handle)
71{
72 return drm_gem_handle_delete(file, handle);
73}
74
75int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) 69int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
76{ 70{
77 int ret; 71 int ret;
@@ -123,55 +117,23 @@ int udl_gem_init_object(struct drm_gem_object *obj)
123 117
124static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask) 118static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask)
125{ 119{
126 int page_count, i; 120 struct page **pages;
127 struct page *page;
128 struct inode *inode;
129 struct address_space *mapping;
130 121
131 if (obj->pages) 122 if (obj->pages)
132 return 0; 123 return 0;
133 124
134 page_count = obj->base.size / PAGE_SIZE; 125 pages = drm_gem_get_pages(&obj->base, gfpmask);
135 BUG_ON(obj->pages != NULL); 126 if (IS_ERR(pages))
136 obj->pages = drm_malloc_ab(page_count, sizeof(struct page *)); 127 return PTR_ERR(pages);
137 if (obj->pages == NULL)
138 return -ENOMEM;
139 128
140 inode = file_inode(obj->base.filp); 129 obj->pages = pages;
141 mapping = inode->i_mapping;
142 gfpmask |= mapping_gfp_mask(mapping);
143
144 for (i = 0; i < page_count; i++) {
145 page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
146 if (IS_ERR(page))
147 goto err_pages;
148 obj->pages[i] = page;
149 }
150 130
151 return 0; 131 return 0;
152err_pages:
153 while (i--)
154 page_cache_release(obj->pages[i]);
155 drm_free_large(obj->pages);
156 obj->pages = NULL;
157 return PTR_ERR(page);
158} 132}
159 133
160static void udl_gem_put_pages(struct udl_gem_object *obj) 134static void udl_gem_put_pages(struct udl_gem_object *obj)
161{ 135{
162 int page_count = obj->base.size / PAGE_SIZE; 136 drm_gem_put_pages(&obj->base, obj->pages, false, false);
163 int i;
164
165 if (obj->base.import_attach) {
166 drm_free_large(obj->pages);
167 obj->pages = NULL;
168 return;
169 }
170
171 for (i = 0; i < page_count; i++)
172 page_cache_release(obj->pages[i]);
173
174 drm_free_large(obj->pages);
175 obj->pages = NULL; 137 obj->pages = NULL;
176} 138}
177 139
@@ -223,8 +185,7 @@ void udl_gem_free_object(struct drm_gem_object *gem_obj)
223 if (obj->pages) 185 if (obj->pages)
224 udl_gem_put_pages(obj); 186 udl_gem_put_pages(obj);
225 187
226 if (gem_obj->map_list.map) 188 drm_gem_free_mmap_offset(gem_obj);
227 drm_gem_free_mmap_offset(gem_obj);
228} 189}
229 190
230/* the dumb interface doesn't work with the GEM straight MMAP 191/* the dumb interface doesn't work with the GEM straight MMAP
@@ -247,13 +208,11 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
247 ret = udl_gem_get_pages(gobj, GFP_KERNEL); 208 ret = udl_gem_get_pages(gobj, GFP_KERNEL);
248 if (ret) 209 if (ret)
249 goto out; 210 goto out;
250 if (!gobj->base.map_list.map) { 211 ret = drm_gem_create_mmap_offset(obj);
251 ret = drm_gem_create_mmap_offset(obj); 212 if (ret)
252 if (ret) 213 goto out;
253 goto out;
254 }
255 214
256 *offset = (u64)gobj->base.map_list.hash.key << PAGE_SHIFT; 215 *offset = drm_vma_node_offset_addr(&gobj->base.vma_node);
257 216
258out: 217out:
259 drm_gem_object_unreference(&gobj->base); 218 drm_gem_object_unreference(&gobj->base);
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index 0ce2d7195256..f5ae57406f34 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -41,8 +41,8 @@ static int udl_parse_vendor_descriptor(struct drm_device *dev,
41 total_len = usb_get_descriptor(usbdev, 0x5f, /* vendor specific */ 41 total_len = usb_get_descriptor(usbdev, 0x5f, /* vendor specific */
42 0, desc, MAX_VENDOR_DESCRIPTOR_SIZE); 42 0, desc, MAX_VENDOR_DESCRIPTOR_SIZE);
43 if (total_len > 5) { 43 if (total_len > 5) {
44 DRM_INFO("vendor descriptor length:%x data:%*ph\n", 44 DRM_INFO("vendor descriptor length:%x data:%11ph\n",
45 total_len, 11, desc); 45 total_len, desc);
46 46
47 if ((desc[0] != total_len) || /* descriptor length */ 47 if ((desc[0] != total_len) || /* descriptor length */
48 (desc[1] != 0x5f) || /* vendor descriptor type */ 48 (desc[1] != 0x5f) || /* vendor descriptor type */
diff --git a/drivers/gpu/drm/via/via_dma.c b/drivers/gpu/drm/via/via_dma.c
index 13558f5a2422..652f9b43ec9d 100644
--- a/drivers/gpu/drm/via/via_dma.c
+++ b/drivers/gpu/drm/via/via_dma.c
@@ -720,7 +720,7 @@ static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *
720 return ret; 720 return ret;
721} 721}
722 722
723struct drm_ioctl_desc via_ioctls[] = { 723const struct drm_ioctl_desc via_ioctls[] = {
724 DRM_IOCTL_DEF_DRV(VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH), 724 DRM_IOCTL_DEF_DRV(VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH),
725 DRM_IOCTL_DEF_DRV(VIA_FREEMEM, via_mem_free, DRM_AUTH), 725 DRM_IOCTL_DEF_DRV(VIA_FREEMEM, via_mem_free, DRM_AUTH),
726 DRM_IOCTL_DEF_DRV(VIA_AGP_INIT, via_agp_init, DRM_AUTH|DRM_MASTER), 726 DRM_IOCTL_DEF_DRV(VIA_AGP_INIT, via_agp_init, DRM_AUTH|DRM_MASTER),
diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c
index f4ae20327941..92684a9b7e34 100644
--- a/drivers/gpu/drm/via/via_drv.c
+++ b/drivers/gpu/drm/via/via_drv.c
@@ -64,7 +64,6 @@ static const struct file_operations via_driver_fops = {
64 .unlocked_ioctl = drm_ioctl, 64 .unlocked_ioctl = drm_ioctl,
65 .mmap = drm_mmap, 65 .mmap = drm_mmap,
66 .poll = drm_poll, 66 .poll = drm_poll,
67 .fasync = drm_fasync,
68#ifdef CONFIG_COMPAT 67#ifdef CONFIG_COMPAT
69 .compat_ioctl = drm_compat_ioctl, 68 .compat_ioctl = drm_compat_ioctl,
70#endif 69#endif
@@ -73,7 +72,7 @@ static const struct file_operations via_driver_fops = {
73 72
74static struct drm_driver driver = { 73static struct drm_driver driver = {
75 .driver_features = 74 .driver_features =
76 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_IRQ | 75 DRIVER_USE_AGP | DRIVER_HAVE_IRQ |
77 DRIVER_IRQ_SHARED, 76 DRIVER_IRQ_SHARED,
78 .load = via_driver_load, 77 .load = via_driver_load,
79 .unload = via_driver_unload, 78 .unload = via_driver_unload,
diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
index 893a65090c36..a811ef2b505f 100644
--- a/drivers/gpu/drm/via/via_drv.h
+++ b/drivers/gpu/drm/via/via_drv.h
@@ -114,7 +114,7 @@ enum via_family {
114#define VIA_READ8(reg) DRM_READ8(VIA_BASE, reg) 114#define VIA_READ8(reg) DRM_READ8(VIA_BASE, reg)
115#define VIA_WRITE8(reg, val) DRM_WRITE8(VIA_BASE, reg, val) 115#define VIA_WRITE8(reg, val) DRM_WRITE8(VIA_BASE, reg, val)
116 116
117extern struct drm_ioctl_desc via_ioctls[]; 117extern const struct drm_ioctl_desc via_ioctls[];
118extern int via_max_ioctl; 118extern int via_max_ioctl;
119 119
120extern int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv); 120extern int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c
index 0ab93ff09873..7e3ad87c366c 100644
--- a/drivers/gpu/drm/via/via_mm.c
+++ b/drivers/gpu/drm/via/via_mm.c
@@ -140,11 +140,11 @@ int via_mem_alloc(struct drm_device *dev, void *data,
140 if (mem->type == VIA_MEM_AGP) 140 if (mem->type == VIA_MEM_AGP)
141 retval = drm_mm_insert_node(&dev_priv->agp_mm, 141 retval = drm_mm_insert_node(&dev_priv->agp_mm,
142 &item->mm_node, 142 &item->mm_node,
143 tmpSize, 0); 143 tmpSize, 0, DRM_MM_SEARCH_DEFAULT);
144 else 144 else
145 retval = drm_mm_insert_node(&dev_priv->vram_mm, 145 retval = drm_mm_insert_node(&dev_priv->vram_mm,
146 &item->mm_node, 146 &item->mm_node,
147 tmpSize, 0); 147 tmpSize, 0, DRM_MM_SEARCH_DEFAULT);
148 if (retval) 148 if (retval)
149 goto fail_alloc; 149 goto fail_alloc;
150 150
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 78e21649d48a..1a90f0a2f7e5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -124,7 +124,7 @@
124 * Ioctl definitions. 124 * Ioctl definitions.
125 */ 125 */
126 126
127static struct drm_ioctl_desc vmw_ioctls[] = { 127static const struct drm_ioctl_desc vmw_ioctls[] = {
128 VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl, 128 VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
129 DRM_AUTH | DRM_UNLOCKED), 129 DRM_AUTH | DRM_UNLOCKED),
130 VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl, 130 VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
@@ -622,8 +622,10 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
622 } 622 }
623 623
624 dev_priv->fman = vmw_fence_manager_init(dev_priv); 624 dev_priv->fman = vmw_fence_manager_init(dev_priv);
625 if (unlikely(dev_priv->fman == NULL)) 625 if (unlikely(dev_priv->fman == NULL)) {
626 ret = -ENOMEM;
626 goto out_no_fman; 627 goto out_no_fman;
628 }
627 629
628 vmw_kms_save_vga(dev_priv); 630 vmw_kms_save_vga(dev_priv);
629 631
@@ -782,7 +784,7 @@ static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
782 784
783 if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) 785 if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
784 && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) { 786 && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
785 struct drm_ioctl_desc *ioctl = 787 const struct drm_ioctl_desc *ioctl =
786 &vmw_ioctls[nr - DRM_COMMAND_BASE]; 788 &vmw_ioctls[nr - DRM_COMMAND_BASE];
787 789
788 if (unlikely(ioctl->cmd_drv != cmd)) { 790 if (unlikely(ioctl->cmd_drv != cmd)) {
@@ -795,29 +797,12 @@ static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
795 return drm_ioctl(filp, cmd, arg); 797 return drm_ioctl(filp, cmd, arg);
796} 798}
797 799
798static int vmw_firstopen(struct drm_device *dev)
799{
800 struct vmw_private *dev_priv = vmw_priv(dev);
801 dev_priv->is_opened = true;
802
803 return 0;
804}
805
806static void vmw_lastclose(struct drm_device *dev) 800static void vmw_lastclose(struct drm_device *dev)
807{ 801{
808 struct vmw_private *dev_priv = vmw_priv(dev);
809 struct drm_crtc *crtc; 802 struct drm_crtc *crtc;
810 struct drm_mode_set set; 803 struct drm_mode_set set;
811 int ret; 804 int ret;
812 805
813 /**
814 * Do nothing on the lastclose call from drm_unload.
815 */
816
817 if (!dev_priv->is_opened)
818 return;
819
820 dev_priv->is_opened = false;
821 set.x = 0; 806 set.x = 0;
822 set.y = 0; 807 set.y = 0;
823 set.fb = NULL; 808 set.fb = NULL;
@@ -1120,7 +1105,6 @@ static const struct file_operations vmwgfx_driver_fops = {
1120 .mmap = vmw_mmap, 1105 .mmap = vmw_mmap,
1121 .poll = vmw_fops_poll, 1106 .poll = vmw_fops_poll,
1122 .read = vmw_fops_read, 1107 .read = vmw_fops_read,
1123 .fasync = drm_fasync,
1124#if defined(CONFIG_COMPAT) 1108#if defined(CONFIG_COMPAT)
1125 .compat_ioctl = drm_compat_ioctl, 1109 .compat_ioctl = drm_compat_ioctl,
1126#endif 1110#endif
@@ -1132,7 +1116,6 @@ static struct drm_driver driver = {
1132 DRIVER_MODESET, 1116 DRIVER_MODESET,
1133 .load = vmw_driver_load, 1117 .load = vmw_driver_load,
1134 .unload = vmw_driver_unload, 1118 .unload = vmw_driver_unload,
1135 .firstopen = vmw_firstopen,
1136 .lastclose = vmw_lastclose, 1119 .lastclose = vmw_lastclose,
1137 .irq_preinstall = vmw_irq_preinstall, 1120 .irq_preinstall = vmw_irq_preinstall,
1138 .irq_postinstall = vmw_irq_postinstall, 1121 .irq_postinstall = vmw_irq_postinstall,
@@ -1143,7 +1126,6 @@ static struct drm_driver driver = {
1143 .disable_vblank = vmw_disable_vblank, 1126 .disable_vblank = vmw_disable_vblank,
1144 .ioctls = vmw_ioctls, 1127 .ioctls = vmw_ioctls,
1145 .num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls), 1128 .num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
1146 .dma_quiescent = NULL, /*vmw_dma_quiescent, */
1147 .master_create = vmw_master_create, 1129 .master_create = vmw_master_create,
1148 .master_destroy = vmw_master_destroy, 1130 .master_destroy = vmw_master_destroy,
1149 .master_set = vmw_master_set, 1131 .master_set = vmw_master_set,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 13aeda71280e..150ec64af617 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -324,7 +324,6 @@ struct vmw_private {
324 */ 324 */
325 325
326 bool stealth; 326 bool stealth;
327 bool is_opened;
328 bool enable_fb; 327 bool enable_fb;
329 328
330 /** 329 /**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index d4607b2530d6..fc43c0601236 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1706,7 +1706,8 @@ int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
1706 1706
1707int vmw_du_page_flip(struct drm_crtc *crtc, 1707int vmw_du_page_flip(struct drm_crtc *crtc,
1708 struct drm_framebuffer *fb, 1708 struct drm_framebuffer *fb,
1709 struct drm_pending_vblank_event *event) 1709 struct drm_pending_vblank_event *event,
1710 uint32_t page_flip_flags)
1710{ 1711{
1711 struct vmw_private *dev_priv = vmw_priv(crtc->dev); 1712 struct vmw_private *dev_priv = vmw_priv(crtc->dev);
1712 struct drm_framebuffer *old_fb = crtc->fb; 1713 struct drm_framebuffer *old_fb = crtc->fb;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 6fa89c9d6214..8d038c36bd57 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -123,7 +123,8 @@ struct vmw_display_unit {
123void vmw_display_unit_cleanup(struct vmw_display_unit *du); 123void vmw_display_unit_cleanup(struct vmw_display_unit *du);
124int vmw_du_page_flip(struct drm_crtc *crtc, 124int vmw_du_page_flip(struct drm_crtc *crtc,
125 struct drm_framebuffer *fb, 125 struct drm_framebuffer *fb,
126 struct drm_pending_vblank_event *event); 126 struct drm_pending_vblank_event *event,
127 uint32_t page_flip_flags);
127void vmw_du_crtc_save(struct drm_crtc *crtc); 128void vmw_du_crtc_save(struct drm_crtc *crtc);
128void vmw_du_crtc_restore(struct drm_crtc *crtc); 129void vmw_du_crtc_restore(struct drm_crtc *crtc);
129void vmw_du_crtc_gamma_set(struct drm_crtc *crtc, 130void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 7953d1f90b63..0e67cf41065d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -500,7 +500,7 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
500 goto out_no_dmabuf; 500 goto out_no_dmabuf;
501 501
502 rep->handle = handle; 502 rep->handle = handle;
503 rep->map_handle = dma_buf->base.addr_space_offset; 503 rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node);
504 rep->cur_gmr_id = handle; 504 rep->cur_gmr_id = handle;
505 rep->cur_gmr_offset = 0; 505 rep->cur_gmr_offset = 0;
506 506
@@ -834,7 +834,7 @@ int vmw_dumb_map_offset(struct drm_file *file_priv,
834 if (ret != 0) 834 if (ret != 0)
835 return -EINVAL; 835 return -EINVAL;
836 836
837 *offset = out_buf->base.addr_space_offset; 837 *offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
838 vmw_dmabuf_unreference(&out_buf); 838 vmw_dmabuf_unreference(&out_buf);
839 return 0; 839 return 0;
840} 840}
diff --git a/drivers/gpu/host1x/drm/drm.c b/drivers/gpu/host1x/drm/drm.c
index e184b00faacd..8c61ceeaa12d 100644
--- a/drivers/gpu/host1x/drm/drm.c
+++ b/drivers/gpu/host1x/drm/drm.c
@@ -356,7 +356,7 @@ static int tegra_gem_mmap(struct drm_device *drm, void *data,
356 356
357 bo = to_tegra_bo(gem); 357 bo = to_tegra_bo(gem);
358 358
359 args->offset = tegra_bo_get_mmap_offset(bo); 359 args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node);
360 360
361 drm_gem_object_unreference(gem); 361 drm_gem_object_unreference(gem);
362 362
@@ -487,7 +487,7 @@ static int tegra_submit(struct drm_device *drm, void *data,
487} 487}
488#endif 488#endif
489 489
490static struct drm_ioctl_desc tegra_drm_ioctls[] = { 490static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
491#ifdef CONFIG_DRM_TEGRA_STAGING 491#ifdef CONFIG_DRM_TEGRA_STAGING
492 DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, DRM_UNLOCKED | DRM_AUTH), 492 DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, DRM_UNLOCKED | DRM_AUTH),
493 DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, DRM_UNLOCKED), 493 DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, DRM_UNLOCKED),
@@ -508,7 +508,6 @@ static const struct file_operations tegra_drm_fops = {
508 .unlocked_ioctl = drm_ioctl, 508 .unlocked_ioctl = drm_ioctl,
509 .mmap = tegra_drm_mmap, 509 .mmap = tegra_drm_mmap,
510 .poll = drm_poll, 510 .poll = drm_poll,
511 .fasync = drm_fasync,
512 .read = drm_read, 511 .read = drm_read,
513#ifdef CONFIG_COMPAT 512#ifdef CONFIG_COMPAT
514 .compat_ioctl = drm_compat_ioctl, 513 .compat_ioctl = drm_compat_ioctl,
@@ -633,7 +632,7 @@ struct drm_driver tegra_drm_driver = {
633 .gem_vm_ops = &tegra_bo_vm_ops, 632 .gem_vm_ops = &tegra_bo_vm_ops,
634 .dumb_create = tegra_bo_dumb_create, 633 .dumb_create = tegra_bo_dumb_create,
635 .dumb_map_offset = tegra_bo_dumb_map_offset, 634 .dumb_map_offset = tegra_bo_dumb_map_offset,
636 .dumb_destroy = tegra_bo_dumb_destroy, 635 .dumb_destroy = drm_gem_dumb_destroy,
637 636
638 .ioctls = tegra_drm_ioctls, 637 .ioctls = tegra_drm_ioctls,
639 .num_ioctls = ARRAY_SIZE(tegra_drm_ioctls), 638 .num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
diff --git a/drivers/gpu/host1x/drm/gem.c b/drivers/gpu/host1x/drm/gem.c
index c5e9a9b494c2..59623de4ee15 100644
--- a/drivers/gpu/host1x/drm/gem.c
+++ b/drivers/gpu/host1x/drm/gem.c
@@ -106,11 +106,6 @@ static void tegra_bo_destroy(struct drm_device *drm, struct tegra_bo *bo)
106 dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr, bo->paddr); 106 dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
107} 107}
108 108
109unsigned int tegra_bo_get_mmap_offset(struct tegra_bo *bo)
110{
111 return (unsigned int)bo->gem.map_list.hash.key << PAGE_SHIFT;
112}
113
114struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size) 109struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size)
115{ 110{
116 struct tegra_bo *bo; 111 struct tegra_bo *bo;
@@ -182,8 +177,7 @@ void tegra_bo_free_object(struct drm_gem_object *gem)
182{ 177{
183 struct tegra_bo *bo = to_tegra_bo(gem); 178 struct tegra_bo *bo = to_tegra_bo(gem);
184 179
185 if (gem->map_list.map) 180 drm_gem_free_mmap_offset(gem);
186 drm_gem_free_mmap_offset(gem);
187 181
188 drm_gem_object_release(gem); 182 drm_gem_object_release(gem);
189 tegra_bo_destroy(gem->dev, bo); 183 tegra_bo_destroy(gem->dev, bo);
@@ -228,7 +222,7 @@ int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
228 222
229 bo = to_tegra_bo(gem); 223 bo = to_tegra_bo(gem);
230 224
231 *offset = tegra_bo_get_mmap_offset(bo); 225 *offset = drm_vma_node_offset_addr(&bo->gem.vma_node);
232 226
233 drm_gem_object_unreference(gem); 227 drm_gem_object_unreference(gem);
234 228
@@ -262,9 +256,3 @@ int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
262 256
263 return ret; 257 return ret;
264} 258}
265
266int tegra_bo_dumb_destroy(struct drm_file *file, struct drm_device *drm,
267 unsigned int handle)
268{
269 return drm_gem_handle_delete(file, handle);
270}
diff --git a/drivers/gpu/host1x/drm/gem.h b/drivers/gpu/host1x/drm/gem.h
index 34de2b486eb7..492533a2dacb 100644
--- a/drivers/gpu/host1x/drm/gem.h
+++ b/drivers/gpu/host1x/drm/gem.h
@@ -44,13 +44,10 @@ struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
44 unsigned int size, 44 unsigned int size,
45 unsigned int *handle); 45 unsigned int *handle);
46void tegra_bo_free_object(struct drm_gem_object *gem); 46void tegra_bo_free_object(struct drm_gem_object *gem);
47unsigned int tegra_bo_get_mmap_offset(struct tegra_bo *bo);
48int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm, 47int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
49 struct drm_mode_create_dumb *args); 48 struct drm_mode_create_dumb *args);
50int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm, 49int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
51 uint32_t handle, uint64_t *offset); 50 uint32_t handle, uint64_t *offset);
52int tegra_bo_dumb_destroy(struct drm_file *file, struct drm_device *drm,
53 unsigned int handle);
54 51
55int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma); 52int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma);
56 53
diff --git a/drivers/gpu/host1x/drm/hdmi.c b/drivers/gpu/host1x/drm/hdmi.c
index 01097da09f7f..52e3c9641a0f 100644
--- a/drivers/gpu/host1x/drm/hdmi.c
+++ b/drivers/gpu/host1x/drm/hdmi.c
@@ -551,24 +551,8 @@ static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi)
551 return; 551 return;
552 } 552 }
553 553
554 memset(&frame, 0, sizeof(frame)); 554 hdmi_vendor_infoframe_init(&frame);
555 555 frame.s3d_struct = HDMI_3D_STRUCTURE_FRAME_PACKING;
556 frame.type = HDMI_INFOFRAME_TYPE_VENDOR;
557 frame.version = 0x01;
558 frame.length = 6;
559
560 frame.data[0] = 0x03; /* regid0 */
561 frame.data[1] = 0x0c; /* regid1 */
562 frame.data[2] = 0x00; /* regid2 */
563 frame.data[3] = 0x02 << 5; /* video format */
564
565 /* TODO: 74 MHz limit? */
566 if (1) {
567 frame.data[4] = 0x00 << 4; /* 3D structure */
568 } else {
569 frame.data[4] = 0x08 << 4; /* 3D structure */
570 frame.data[5] = 0x00 << 4; /* 3D ext. data */
571 }
572 556
573 err = hdmi_vendor_infoframe_pack(&frame, buffer, sizeof(buffer)); 557 err = hdmi_vendor_infoframe_pack(&frame, buffer, sizeof(buffer));
574 if (err < 0) { 558 if (err < 0) {
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index cf787e1d9322..ec0ae2d1686a 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -27,6 +27,7 @@
27#include <linux/pci.h> 27#include <linux/pci.h>
28#include <linux/console.h> 28#include <linux/console.h>
29#include <linux/vga_switcheroo.h> 29#include <linux/vga_switcheroo.h>
30#include <linux/pm_runtime.h>
30 31
31#include <linux/vgaarb.h> 32#include <linux/vgaarb.h>
32 33
@@ -37,6 +38,7 @@ struct vga_switcheroo_client {
37 const struct vga_switcheroo_client_ops *ops; 38 const struct vga_switcheroo_client_ops *ops;
38 int id; 39 int id;
39 bool active; 40 bool active;
41 bool driver_power_control;
40 struct list_head list; 42 struct list_head list;
41}; 43};
42 44
@@ -132,7 +134,7 @@ EXPORT_SYMBOL(vga_switcheroo_unregister_handler);
132 134
133static int register_client(struct pci_dev *pdev, 135static int register_client(struct pci_dev *pdev,
134 const struct vga_switcheroo_client_ops *ops, 136 const struct vga_switcheroo_client_ops *ops,
135 int id, bool active) 137 int id, bool active, bool driver_power_control)
136{ 138{
137 struct vga_switcheroo_client *client; 139 struct vga_switcheroo_client *client;
138 140
@@ -145,6 +147,7 @@ static int register_client(struct pci_dev *pdev,
145 client->ops = ops; 147 client->ops = ops;
146 client->id = id; 148 client->id = id;
147 client->active = active; 149 client->active = active;
150 client->driver_power_control = driver_power_control;
148 151
149 mutex_lock(&vgasr_mutex); 152 mutex_lock(&vgasr_mutex);
150 list_add_tail(&client->list, &vgasr_priv.clients); 153 list_add_tail(&client->list, &vgasr_priv.clients);
@@ -160,10 +163,11 @@ static int register_client(struct pci_dev *pdev,
160} 163}
161 164
162int vga_switcheroo_register_client(struct pci_dev *pdev, 165int vga_switcheroo_register_client(struct pci_dev *pdev,
163 const struct vga_switcheroo_client_ops *ops) 166 const struct vga_switcheroo_client_ops *ops,
167 bool driver_power_control)
164{ 168{
165 return register_client(pdev, ops, -1, 169 return register_client(pdev, ops, -1,
166 pdev == vga_default_device()); 170 pdev == vga_default_device(), driver_power_control);
167} 171}
168EXPORT_SYMBOL(vga_switcheroo_register_client); 172EXPORT_SYMBOL(vga_switcheroo_register_client);
169 173
@@ -171,7 +175,7 @@ int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
171 const struct vga_switcheroo_client_ops *ops, 175 const struct vga_switcheroo_client_ops *ops,
172 int id, bool active) 176 int id, bool active)
173{ 177{
174 return register_client(pdev, ops, id | ID_BIT_AUDIO, active); 178 return register_client(pdev, ops, id | ID_BIT_AUDIO, active, false);
175} 179}
176EXPORT_SYMBOL(vga_switcheroo_register_audio_client); 180EXPORT_SYMBOL(vga_switcheroo_register_audio_client);
177 181
@@ -258,10 +262,11 @@ static int vga_switcheroo_show(struct seq_file *m, void *v)
258 int i = 0; 262 int i = 0;
259 mutex_lock(&vgasr_mutex); 263 mutex_lock(&vgasr_mutex);
260 list_for_each_entry(client, &vgasr_priv.clients, list) { 264 list_for_each_entry(client, &vgasr_priv.clients, list) {
261 seq_printf(m, "%d:%s%s:%c:%s:%s\n", i, 265 seq_printf(m, "%d:%s%s:%c:%s%s:%s\n", i,
262 client_id(client) == VGA_SWITCHEROO_DIS ? "DIS" : "IGD", 266 client_id(client) == VGA_SWITCHEROO_DIS ? "DIS" : "IGD",
263 client_is_vga(client) ? "" : "-Audio", 267 client_is_vga(client) ? "" : "-Audio",
264 client->active ? '+' : ' ', 268 client->active ? '+' : ' ',
269 client->driver_power_control ? "Dyn" : "",
265 client->pwr_state ? "Pwr" : "Off", 270 client->pwr_state ? "Pwr" : "Off",
266 pci_name(client->pdev)); 271 pci_name(client->pdev));
267 i++; 272 i++;
@@ -277,6 +282,8 @@ static int vga_switcheroo_debugfs_open(struct inode *inode, struct file *file)
277 282
278static int vga_switchon(struct vga_switcheroo_client *client) 283static int vga_switchon(struct vga_switcheroo_client *client)
279{ 284{
285 if (client->driver_power_control)
286 return 0;
280 if (vgasr_priv.handler->power_state) 287 if (vgasr_priv.handler->power_state)
281 vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_ON); 288 vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_ON);
282 /* call the driver callback to turn on device */ 289 /* call the driver callback to turn on device */
@@ -287,6 +294,8 @@ static int vga_switchon(struct vga_switcheroo_client *client)
287 294
288static int vga_switchoff(struct vga_switcheroo_client *client) 295static int vga_switchoff(struct vga_switcheroo_client *client)
289{ 296{
297 if (client->driver_power_control)
298 return 0;
290 /* call the driver callback to turn off device */ 299 /* call the driver callback to turn off device */
291 client->ops->set_gpu_state(client->pdev, VGA_SWITCHEROO_OFF); 300 client->ops->set_gpu_state(client->pdev, VGA_SWITCHEROO_OFF);
292 if (vgasr_priv.handler->power_state) 301 if (vgasr_priv.handler->power_state)
@@ -402,6 +411,8 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
402 list_for_each_entry(client, &vgasr_priv.clients, list) { 411 list_for_each_entry(client, &vgasr_priv.clients, list) {
403 if (client->active || client_is_audio(client)) 412 if (client->active || client_is_audio(client))
404 continue; 413 continue;
414 if (client->driver_power_control)
415 continue;
405 set_audio_state(client->id, VGA_SWITCHEROO_OFF); 416 set_audio_state(client->id, VGA_SWITCHEROO_OFF);
406 if (client->pwr_state == VGA_SWITCHEROO_ON) 417 if (client->pwr_state == VGA_SWITCHEROO_ON)
407 vga_switchoff(client); 418 vga_switchoff(client);
@@ -413,6 +424,8 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
413 list_for_each_entry(client, &vgasr_priv.clients, list) { 424 list_for_each_entry(client, &vgasr_priv.clients, list) {
414 if (client->active || client_is_audio(client)) 425 if (client->active || client_is_audio(client))
415 continue; 426 continue;
427 if (client->driver_power_control)
428 continue;
416 if (client->pwr_state == VGA_SWITCHEROO_OFF) 429 if (client->pwr_state == VGA_SWITCHEROO_OFF)
417 vga_switchon(client); 430 vga_switchon(client);
418 set_audio_state(client->id, VGA_SWITCHEROO_ON); 431 set_audio_state(client->id, VGA_SWITCHEROO_ON);
@@ -565,3 +578,127 @@ err:
565 return err; 578 return err;
566} 579}
567EXPORT_SYMBOL(vga_switcheroo_process_delayed_switch); 580EXPORT_SYMBOL(vga_switcheroo_process_delayed_switch);
581
582static void vga_switcheroo_power_switch(struct pci_dev *pdev, enum vga_switcheroo_state state)
583{
584 struct vga_switcheroo_client *client;
585
586 if (!vgasr_priv.handler->power_state)
587 return;
588
589 client = find_client_from_pci(&vgasr_priv.clients, pdev);
590 if (!client)
591 return;
592
593 if (!client->driver_power_control)
594 return;
595
596 vgasr_priv.handler->power_state(client->id, state);
597}
598
599/* force a PCI device to a certain state - mainly to turn off audio clients */
600
601void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic)
602{
603 struct vga_switcheroo_client *client;
604
605 client = find_client_from_pci(&vgasr_priv.clients, pdev);
606 if (!client)
607 return;
608
609 if (!client->driver_power_control)
610 return;
611
612 client->pwr_state = dynamic;
613 set_audio_state(client->id, dynamic);
614}
615EXPORT_SYMBOL(vga_switcheroo_set_dynamic_switch);
616
617/* switcheroo power domain */
618static int vga_switcheroo_runtime_suspend(struct device *dev)
619{
620 struct pci_dev *pdev = to_pci_dev(dev);
621 int ret;
622
623 ret = dev->bus->pm->runtime_suspend(dev);
624 if (ret)
625 return ret;
626
627 vga_switcheroo_power_switch(pdev, VGA_SWITCHEROO_OFF);
628 return 0;
629}
630
631static int vga_switcheroo_runtime_resume(struct device *dev)
632{
633 struct pci_dev *pdev = to_pci_dev(dev);
634 int ret;
635
636 vga_switcheroo_power_switch(pdev, VGA_SWITCHEROO_ON);
637 ret = dev->bus->pm->runtime_resume(dev);
638 if (ret)
639 return ret;
640
641 return 0;
642}
643
644/* this version is for the case where the power switch is separate
645 to the device being powered down. */
646int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain)
647{
648 /* copy over all the bus versions */
649 if (dev->bus && dev->bus->pm) {
650 domain->ops = *dev->bus->pm;
651 domain->ops.runtime_suspend = vga_switcheroo_runtime_suspend;
652 domain->ops.runtime_resume = vga_switcheroo_runtime_resume;
653
654 dev->pm_domain = domain;
655 return 0;
656 }
657 dev->pm_domain = NULL;
658 return -EINVAL;
659}
660EXPORT_SYMBOL(vga_switcheroo_init_domain_pm_ops);
661
662static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
663{
664 struct pci_dev *pdev = to_pci_dev(dev);
665 int ret;
666 struct vga_switcheroo_client *client, *found = NULL;
667
668 /* we need to check if we have to switch back on the video
669 device so the audio device can come back */
670 list_for_each_entry(client, &vgasr_priv.clients, list) {
671 if (PCI_SLOT(client->pdev->devfn) == PCI_SLOT(pdev->devfn) && client_is_vga(client)) {
672 found = client;
673 ret = pm_runtime_get_sync(&client->pdev->dev);
674 if (ret) {
675 if (ret != 1)
676 return ret;
677 }
678 break;
679 }
680 }
681 ret = dev->bus->pm->runtime_resume(dev);
682
683 /* put the reference for the gpu */
684 if (found) {
685 pm_runtime_mark_last_busy(&found->pdev->dev);
686 pm_runtime_put_autosuspend(&found->pdev->dev);
687 }
688 return ret;
689}
690
691int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain)
692{
693 /* copy over all the bus versions */
694 if (dev->bus && dev->bus->pm) {
695 domain->ops = *dev->bus->pm;
696 domain->ops.runtime_resume = vga_switcheroo_runtime_resume_hdmi_audio;
697
698 dev->pm_domain = domain;
699 return 0;
700 }
701 dev->pm_domain = NULL;
702 return -EINVAL;
703}
704EXPORT_SYMBOL(vga_switcheroo_init_domain_pm_optimus_hdmi_audio);
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index 4c605c70ebf9..deb5c25305af 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -562,7 +562,7 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
562 struct hv_hotadd_state *has) 562 struct hv_hotadd_state *has)
563{ 563{
564 int ret = 0; 564 int ret = 0;
565 int i, nid, t; 565 int i, nid;
566 unsigned long start_pfn; 566 unsigned long start_pfn;
567 unsigned long processed_pfn; 567 unsigned long processed_pfn;
568 unsigned long total_pfn = pfn_count; 568 unsigned long total_pfn = pfn_count;
@@ -607,14 +607,11 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
607 607
608 /* 608 /*
609 * Wait for the memory block to be onlined. 609 * Wait for the memory block to be onlined.
610 * Since the hot add has succeeded, it is ok to
611 * proceed even if the pages in the hot added region
612 * have not been "onlined" within the allowed time.
610 */ 613 */
611 t = wait_for_completion_timeout(&dm_device.ol_waitevent, 5*HZ); 614 wait_for_completion_timeout(&dm_device.ol_waitevent, 5*HZ);
612 if (t == 0) {
613 pr_info("hot_add memory timedout\n");
614 has->ha_end_pfn -= HA_CHUNK;
615 has->covered_end_pfn -= processed_pfn;
616 break;
617 }
618 615
619 } 616 }
620 617
@@ -978,6 +975,14 @@ static void post_status(struct hv_dynmem_device *dm)
978 dm->num_pages_ballooned + 975 dm->num_pages_ballooned +
979 compute_balloon_floor(); 976 compute_balloon_floor();
980 977
978 /*
979 * If our transaction ID is no longer current, just don't
980 * send the status. This can happen if we were interrupted
981 * after we picked our transaction ID.
982 */
983 if (status.hdr.trans_id != atomic_read(&trans_id))
984 return;
985
981 vmbus_sendpacket(dm->dev->channel, &status, 986 vmbus_sendpacket(dm->dev->channel, &status,
982 sizeof(struct dm_status), 987 sizeof(struct dm_status),
983 (unsigned long)NULL, 988 (unsigned long)NULL,
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index a2464bf07c49..e8e071fc1d6d 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -690,7 +690,7 @@ int vmbus_device_register(struct hv_device *child_device_obj)
690 if (ret) 690 if (ret)
691 pr_err("Unable to register child device\n"); 691 pr_err("Unable to register child device\n");
692 else 692 else
693 pr_info("child device %s registered\n", 693 pr_debug("child device %s registered\n",
694 dev_name(&child_device_obj->device)); 694 dev_name(&child_device_obj->device));
695 695
696 return ret; 696 return ret;
@@ -702,14 +702,14 @@ int vmbus_device_register(struct hv_device *child_device_obj)
702 */ 702 */
703void vmbus_device_unregister(struct hv_device *device_obj) 703void vmbus_device_unregister(struct hv_device *device_obj)
704{ 704{
705 pr_debug("child device %s unregistered\n",
706 dev_name(&device_obj->device));
707
705 /* 708 /*
706 * Kick off the process of unregistering the device. 709 * Kick off the process of unregistering the device.
707 * This will call vmbus_remove() and eventually vmbus_device_release() 710 * This will call vmbus_remove() and eventually vmbus_device_release()
708 */ 711 */
709 device_unregister(&device_obj->device); 712 device_unregister(&device_obj->device);
710
711 pr_info("child device %s unregistered\n",
712 dev_name(&device_obj->device));
713} 713}
714 714
715 715
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 957a719e8c2f..df7b0a06b0ea 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -2290,12 +2290,18 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2290 d = r10_bio->devs[1].devnum; 2290 d = r10_bio->devs[1].devnum;
2291 wbio = r10_bio->devs[1].bio; 2291 wbio = r10_bio->devs[1].bio;
2292 wbio2 = r10_bio->devs[1].repl_bio; 2292 wbio2 = r10_bio->devs[1].repl_bio;
2293 /* Need to test wbio2->bi_end_io before we call
2294 * generic_make_request as if the former is NULL,
2295 * the latter is free to free wbio2.
2296 */
2297 if (wbio2 && !wbio2->bi_end_io)
2298 wbio2 = NULL;
2293 if (wbio->bi_end_io) { 2299 if (wbio->bi_end_io) {
2294 atomic_inc(&conf->mirrors[d].rdev->nr_pending); 2300 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2295 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio)); 2301 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
2296 generic_make_request(wbio); 2302 generic_make_request(wbio);
2297 } 2303 }
2298 if (wbio2 && wbio2->bi_end_io) { 2304 if (wbio2) {
2299 atomic_inc(&conf->mirrors[d].replacement->nr_pending); 2305 atomic_inc(&conf->mirrors[d].replacement->nr_pending);
2300 md_sync_acct(conf->mirrors[d].replacement->bdev, 2306 md_sync_acct(conf->mirrors[d].replacement->bdev,
2301 bio_sectors(wbio2)); 2307 bio_sectors(wbio2));
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 2bf094a587cb..78ea44336e75 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3462,6 +3462,7 @@ static void handle_stripe(struct stripe_head *sh)
3462 test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { 3462 test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
3463 set_bit(STRIPE_SYNCING, &sh->state); 3463 set_bit(STRIPE_SYNCING, &sh->state);
3464 clear_bit(STRIPE_INSYNC, &sh->state); 3464 clear_bit(STRIPE_INSYNC, &sh->state);
3465 clear_bit(STRIPE_REPLACED, &sh->state);
3465 } 3466 }
3466 spin_unlock(&sh->stripe_lock); 3467 spin_unlock(&sh->stripe_lock);
3467 } 3468 }
@@ -3607,19 +3608,23 @@ static void handle_stripe(struct stripe_head *sh)
3607 handle_parity_checks5(conf, sh, &s, disks); 3608 handle_parity_checks5(conf, sh, &s, disks);
3608 } 3609 }
3609 3610
3610 if (s.replacing && s.locked == 0 3611 if ((s.replacing || s.syncing) && s.locked == 0
3611 && !test_bit(STRIPE_INSYNC, &sh->state)) { 3612 && !test_bit(STRIPE_COMPUTE_RUN, &sh->state)
3613 && !test_bit(STRIPE_REPLACED, &sh->state)) {
3612 /* Write out to replacement devices where possible */ 3614 /* Write out to replacement devices where possible */
3613 for (i = 0; i < conf->raid_disks; i++) 3615 for (i = 0; i < conf->raid_disks; i++)
3614 if (test_bit(R5_UPTODATE, &sh->dev[i].flags) && 3616 if (test_bit(R5_NeedReplace, &sh->dev[i].flags)) {
3615 test_bit(R5_NeedReplace, &sh->dev[i].flags)) { 3617 WARN_ON(!test_bit(R5_UPTODATE, &sh->dev[i].flags));
3616 set_bit(R5_WantReplace, &sh->dev[i].flags); 3618 set_bit(R5_WantReplace, &sh->dev[i].flags);
3617 set_bit(R5_LOCKED, &sh->dev[i].flags); 3619 set_bit(R5_LOCKED, &sh->dev[i].flags);
3618 s.locked++; 3620 s.locked++;
3619 } 3621 }
3620 set_bit(STRIPE_INSYNC, &sh->state); 3622 if (s.replacing)
3623 set_bit(STRIPE_INSYNC, &sh->state);
3624 set_bit(STRIPE_REPLACED, &sh->state);
3621 } 3625 }
3622 if ((s.syncing || s.replacing) && s.locked == 0 && 3626 if ((s.syncing || s.replacing) && s.locked == 0 &&
3627 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
3623 test_bit(STRIPE_INSYNC, &sh->state)) { 3628 test_bit(STRIPE_INSYNC, &sh->state)) {
3624 md_done_sync(conf->mddev, STRIPE_SECTORS, 1); 3629 md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
3625 clear_bit(STRIPE_SYNCING, &sh->state); 3630 clear_bit(STRIPE_SYNCING, &sh->state);
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index b0b663b119a8..70c49329ca9a 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -306,6 +306,7 @@ enum {
306 STRIPE_SYNC_REQUESTED, 306 STRIPE_SYNC_REQUESTED,
307 STRIPE_SYNCING, 307 STRIPE_SYNCING,
308 STRIPE_INSYNC, 308 STRIPE_INSYNC,
309 STRIPE_REPLACED,
309 STRIPE_PREREAD_ACTIVE, 310 STRIPE_PREREAD_ACTIVE,
310 STRIPE_DELAYED, 311 STRIPE_DELAYED,
311 STRIPE_DEGRADED, 312 STRIPE_DEGRADED,
diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c
index f7b90661e321..e068a76a5f6f 100644
--- a/drivers/misc/atmel-ssc.c
+++ b/drivers/misc/atmel-ssc.c
@@ -66,14 +66,19 @@ EXPORT_SYMBOL(ssc_request);
66 66
67void ssc_free(struct ssc_device *ssc) 67void ssc_free(struct ssc_device *ssc)
68{ 68{
69 bool disable_clk = true;
70
69 spin_lock(&user_lock); 71 spin_lock(&user_lock);
70 if (ssc->user) { 72 if (ssc->user)
71 ssc->user--; 73 ssc->user--;
72 clk_disable_unprepare(ssc->clk); 74 else {
73 } else { 75 disable_clk = false;
74 dev_dbg(&ssc->pdev->dev, "device already free\n"); 76 dev_dbg(&ssc->pdev->dev, "device already free\n");
75 } 77 }
76 spin_unlock(&user_lock); 78 spin_unlock(&user_lock);
79
80 if (disable_clk)
81 clk_disable_unprepare(ssc->clk);
77} 82}
78EXPORT_SYMBOL(ssc_free); 83EXPORT_SYMBOL(ssc_free);
79 84
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index f9296abcf02a..6127ab64bb39 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -167,7 +167,7 @@ int mei_hbm_start_req(struct mei_device *dev)
167 167
168 dev->hbm_state = MEI_HBM_IDLE; 168 dev->hbm_state = MEI_HBM_IDLE;
169 if (mei_write_message(dev, mei_hdr, dev->wr_msg.data)) { 169 if (mei_write_message(dev, mei_hdr, dev->wr_msg.data)) {
170 dev_err(&dev->pdev->dev, "version message writet failed\n"); 170 dev_err(&dev->pdev->dev, "version message write failed\n");
171 dev->dev_state = MEI_DEV_RESETTING; 171 dev->dev_state = MEI_DEV_RESETTING;
172 mei_reset(dev, 1); 172 mei_reset(dev, 1);
173 return -ENODEV; 173 return -ENODEV;
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index e4f8dec4dc3c..b22c7e247225 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -239,14 +239,18 @@ static int mei_me_hw_ready_wait(struct mei_device *dev)
239 if (mei_me_hw_is_ready(dev)) 239 if (mei_me_hw_is_ready(dev))
240 return 0; 240 return 0;
241 241
242 dev->recvd_hw_ready = false;
242 mutex_unlock(&dev->device_lock); 243 mutex_unlock(&dev->device_lock);
243 err = wait_event_interruptible_timeout(dev->wait_hw_ready, 244 err = wait_event_interruptible_timeout(dev->wait_hw_ready,
244 dev->recvd_hw_ready, MEI_INTEROP_TIMEOUT); 245 dev->recvd_hw_ready,
246 mei_secs_to_jiffies(MEI_INTEROP_TIMEOUT));
245 mutex_lock(&dev->device_lock); 247 mutex_lock(&dev->device_lock);
246 if (!err && !dev->recvd_hw_ready) { 248 if (!err && !dev->recvd_hw_ready) {
249 if (!err)
250 err = -ETIMEDOUT;
247 dev_err(&dev->pdev->dev, 251 dev_err(&dev->pdev->dev,
248 "wait hw ready failed. status = 0x%x\n", err); 252 "wait hw ready failed. status = %d\n", err);
249 return -ETIMEDOUT; 253 return err;
250 } 254 }
251 255
252 dev->recvd_hw_ready = false; 256 dev->recvd_hw_ready = false;
@@ -483,7 +487,9 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
483 /* check if ME wants a reset */ 487 /* check if ME wants a reset */
484 if (!mei_hw_is_ready(dev) && 488 if (!mei_hw_is_ready(dev) &&
485 dev->dev_state != MEI_DEV_RESETTING && 489 dev->dev_state != MEI_DEV_RESETTING &&
486 dev->dev_state != MEI_DEV_INITIALIZING) { 490 dev->dev_state != MEI_DEV_INITIALIZING &&
491 dev->dev_state != MEI_DEV_POWER_DOWN &&
492 dev->dev_state != MEI_DEV_POWER_UP) {
487 dev_dbg(&dev->pdev->dev, "FW not ready.\n"); 493 dev_dbg(&dev->pdev->dev, "FW not ready.\n");
488 mei_reset(dev, 1); 494 mei_reset(dev, 1);
489 mutex_unlock(&dev->device_lock); 495 mutex_unlock(&dev->device_lock);
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index ed1d75203af6..e6f16f83ecde 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -148,7 +148,8 @@ void mei_reset(struct mei_device *dev, int interrupts_enabled)
148 148
149 dev->hbm_state = MEI_HBM_IDLE; 149 dev->hbm_state = MEI_HBM_IDLE;
150 150
151 if (dev->dev_state != MEI_DEV_INITIALIZING) { 151 if (dev->dev_state != MEI_DEV_INITIALIZING &&
152 dev->dev_state != MEI_DEV_POWER_UP) {
152 if (dev->dev_state != MEI_DEV_DISABLED && 153 if (dev->dev_state != MEI_DEV_DISABLED &&
153 dev->dev_state != MEI_DEV_POWER_DOWN) 154 dev->dev_state != MEI_DEV_POWER_DOWN)
154 dev->dev_state = MEI_DEV_RESETTING; 155 dev->dev_state = MEI_DEV_RESETTING;
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 847b1996ce8e..2c5a91bb8ec3 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -128,7 +128,7 @@ static inline int pxamci_set_power(struct pxamci_host *host,
128 !!on ^ host->pdata->gpio_power_invert); 128 !!on ^ host->pdata->gpio_power_invert);
129 } 129 }
130 if (!host->vcc && host->pdata && host->pdata->setpower) 130 if (!host->vcc && host->pdata && host->pdata->setpower)
131 host->pdata->setpower(mmc_dev(host->mmc), vdd); 131 return host->pdata->setpower(mmc_dev(host->mmc), vdd);
132 132
133 return 0; 133 return 0;
134} 134}
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index a3c1c5aae6a9..1264923ade0f 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -345,6 +345,7 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
345 if (r && irq) { 345 if (r && irq) {
346 const char *name = NULL; 346 const char *name = NULL;
347 347
348 memset(r, 0, sizeof(*r));
348 /* 349 /*
349 * Get optional "interrupts-names" property to add a name 350 * Get optional "interrupts-names" property to add a name
350 * to the resource. 351 * to the resource.
@@ -482,8 +483,9 @@ void __init of_irq_init(const struct of_device_id *matches)
482 } 483 }
483 484
484 /* Get the next pending parent that might have children */ 485 /* Get the next pending parent that might have children */
485 desc = list_first_entry(&intc_parent_list, typeof(*desc), list); 486 desc = list_first_entry_or_null(&intc_parent_list,
486 if (list_empty(&intc_parent_list) || !desc) { 487 typeof(*desc), list);
488 if (!desc) {
487 pr_err("of_irq_init: children remain, but no parents\n"); 489 pr_err("of_irq_init: children remain, but no parents\n");
488 break; 490 break;
489 } 491 }
diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c
index b29e20b7862f..bb7af78e4eed 100644
--- a/drivers/pci/hotplug/rpadlpar_core.c
+++ b/drivers/pci/hotplug/rpadlpar_core.c
@@ -388,7 +388,6 @@ int dlpar_remove_pci_slot(char *drc_name, struct device_node *dn)
388 /* Remove the EADS bridge device itself */ 388 /* Remove the EADS bridge device itself */
389 BUG_ON(!bus->self); 389 BUG_ON(!bus->self);
390 pr_debug("PCI: Now removing bridge device %s\n", pci_name(bus->self)); 390 pr_debug("PCI: Now removing bridge device %s\n", pci_name(bus->self));
391 eeh_remove_bus_device(bus->self, true);
392 pci_stop_and_remove_bus_device(bus->self); 391 pci_stop_and_remove_bus_device(bus->self);
393 392
394 return 0; 393 return 0;
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 5b272bfd261d..2a00239661b3 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -1193,6 +1193,7 @@ void pinctrl_unregister_map(struct pinctrl_map const *map)
1193 list_for_each_entry(maps_node, &pinctrl_maps, node) { 1193 list_for_each_entry(maps_node, &pinctrl_maps, node) {
1194 if (maps_node->maps == map) { 1194 if (maps_node->maps == map) {
1195 list_del(&maps_node->node); 1195 list_del(&maps_node->node);
1196 kfree(maps_node);
1196 mutex_unlock(&pinctrl_maps_mutex); 1197 mutex_unlock(&pinctrl_maps_mutex);
1197 return; 1198 return;
1198 } 1199 }
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index 6866548fab31..7323cca440b5 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -1483,6 +1483,7 @@ static int pcs_add_gpio_func(struct device_node *node, struct pcs_device *pcs)
1483 return ret; 1483 return ret;
1484} 1484}
1485 1485
1486#ifdef CONFIG_PM
1486static int pinctrl_single_suspend(struct platform_device *pdev, 1487static int pinctrl_single_suspend(struct platform_device *pdev,
1487 pm_message_t state) 1488 pm_message_t state)
1488{ 1489{
@@ -1505,6 +1506,7 @@ static int pinctrl_single_resume(struct platform_device *pdev)
1505 1506
1506 return pinctrl_force_default(pcs->pctl); 1507 return pinctrl_force_default(pcs->pctl);
1507} 1508}
1509#endif
1508 1510
1509static int pcs_probe(struct platform_device *pdev) 1511static int pcs_probe(struct platform_device *pdev)
1510{ 1512{
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh73a0.c b/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
index 7956df58d751..31f7d0e04aaa 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
@@ -3785,6 +3785,7 @@ static const struct regulator_desc sh73a0_vccq_mc0_desc = {
3785 3785
3786static struct regulator_consumer_supply sh73a0_vccq_mc0_consumers[] = { 3786static struct regulator_consumer_supply sh73a0_vccq_mc0_consumers[] = {
3787 REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.0"), 3787 REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.0"),
3788 REGULATOR_SUPPLY("vqmmc", "ee100000.sdhi"),
3788}; 3789};
3789 3790
3790static const struct regulator_init_data sh73a0_vccq_mc0_init_data = { 3791static const struct regulator_init_data sh73a0_vccq_mc0_init_data = {
diff --git a/drivers/pinctrl/sirf/pinctrl-atlas6.c b/drivers/pinctrl/sirf/pinctrl-atlas6.c
index 1fa39a444171..867c9681763c 100644
--- a/drivers/pinctrl/sirf/pinctrl-atlas6.c
+++ b/drivers/pinctrl/sirf/pinctrl-atlas6.c
@@ -496,7 +496,7 @@ static const unsigned sdmmc5_pins[] = { 24, 25, 26 };
496static const struct sirfsoc_muxmask usp0_muxmask[] = { 496static const struct sirfsoc_muxmask usp0_muxmask[] = {
497 { 497 {
498 .group = 1, 498 .group = 1,
499 .mask = BIT(19) | BIT(20) | BIT(21) | BIT(22), 499 .mask = BIT(19) | BIT(20) | BIT(21) | BIT(22) | BIT(23),
500 }, 500 },
501}; 501};
502 502
@@ -507,8 +507,21 @@ static const struct sirfsoc_padmux usp0_padmux = {
507 .funcval = 0, 507 .funcval = 0,
508}; 508};
509 509
510static const unsigned usp0_pins[] = { 51, 52, 53, 54 }; 510static const unsigned usp0_pins[] = { 51, 52, 53, 54, 55 };
511 511
512static const struct sirfsoc_muxmask usp0_uart_nostreamctrl_muxmask[] = {
513 {
514 .group = 1,
515 .mask = BIT(20) | BIT(21),
516 },
517};
518
519static const struct sirfsoc_padmux usp0_uart_nostreamctrl_padmux = {
520 .muxmask_counts = ARRAY_SIZE(usp0_uart_nostreamctrl_muxmask),
521 .muxmask = usp0_uart_nostreamctrl_muxmask,
522};
523
524static const unsigned usp0_uart_nostreamctrl_pins[] = { 52, 53 };
512static const struct sirfsoc_muxmask usp1_muxmask[] = { 525static const struct sirfsoc_muxmask usp1_muxmask[] = {
513 { 526 {
514 .group = 0, 527 .group = 0,
@@ -822,6 +835,8 @@ static const struct sirfsoc_pin_group sirfsoc_pin_groups[] = {
822 SIRFSOC_PIN_GROUP("uart2grp", uart2_pins), 835 SIRFSOC_PIN_GROUP("uart2grp", uart2_pins),
823 SIRFSOC_PIN_GROUP("uart2_nostreamctrlgrp", uart2_nostreamctrl_pins), 836 SIRFSOC_PIN_GROUP("uart2_nostreamctrlgrp", uart2_nostreamctrl_pins),
824 SIRFSOC_PIN_GROUP("usp0grp", usp0_pins), 837 SIRFSOC_PIN_GROUP("usp0grp", usp0_pins),
838 SIRFSOC_PIN_GROUP("usp0_uart_nostreamctrl_grp",
839 usp0_uart_nostreamctrl_pins),
825 SIRFSOC_PIN_GROUP("usp1grp", usp1_pins), 840 SIRFSOC_PIN_GROUP("usp1grp", usp1_pins),
826 SIRFSOC_PIN_GROUP("i2c0grp", i2c0_pins), 841 SIRFSOC_PIN_GROUP("i2c0grp", i2c0_pins),
827 SIRFSOC_PIN_GROUP("i2c1grp", i2c1_pins), 842 SIRFSOC_PIN_GROUP("i2c1grp", i2c1_pins),
@@ -862,6 +877,8 @@ static const char * const uart0grp[] = { "uart0grp" };
862static const char * const uart1grp[] = { "uart1grp" }; 877static const char * const uart1grp[] = { "uart1grp" };
863static const char * const uart2grp[] = { "uart2grp" }; 878static const char * const uart2grp[] = { "uart2grp" };
864static const char * const uart2_nostreamctrlgrp[] = { "uart2_nostreamctrlgrp" }; 879static const char * const uart2_nostreamctrlgrp[] = { "uart2_nostreamctrlgrp" };
880static const char * const usp0_uart_nostreamctrl_grp[] = {
881 "usp0_uart_nostreamctrl_grp" };
865static const char * const usp0grp[] = { "usp0grp" }; 882static const char * const usp0grp[] = { "usp0grp" };
866static const char * const usp1grp[] = { "usp1grp" }; 883static const char * const usp1grp[] = { "usp1grp" };
867static const char * const i2c0grp[] = { "i2c0grp" }; 884static const char * const i2c0grp[] = { "i2c0grp" };
@@ -904,6 +921,9 @@ static const struct sirfsoc_pmx_func sirfsoc_pmx_functions[] = {
904 SIRFSOC_PMX_FUNCTION("uart2", uart2grp, uart2_padmux), 921 SIRFSOC_PMX_FUNCTION("uart2", uart2grp, uart2_padmux),
905 SIRFSOC_PMX_FUNCTION("uart2_nostreamctrl", uart2_nostreamctrlgrp, uart2_nostreamctrl_padmux), 922 SIRFSOC_PMX_FUNCTION("uart2_nostreamctrl", uart2_nostreamctrlgrp, uart2_nostreamctrl_padmux),
906 SIRFSOC_PMX_FUNCTION("usp0", usp0grp, usp0_padmux), 923 SIRFSOC_PMX_FUNCTION("usp0", usp0grp, usp0_padmux),
924 SIRFSOC_PMX_FUNCTION("usp0_uart_nostreamctrl",
925 usp0_uart_nostreamctrl_grp,
926 usp0_uart_nostreamctrl_padmux),
907 SIRFSOC_PMX_FUNCTION("usp1", usp1grp, usp1_padmux), 927 SIRFSOC_PMX_FUNCTION("usp1", usp1grp, usp1_padmux),
908 SIRFSOC_PMX_FUNCTION("i2c0", i2c0grp, i2c0_padmux), 928 SIRFSOC_PMX_FUNCTION("i2c0", i2c0grp, i2c0_padmux),
909 SIRFSOC_PMX_FUNCTION("i2c1", i2c1grp, i2c1_padmux), 929 SIRFSOC_PMX_FUNCTION("i2c1", i2c1grp, i2c1_padmux),
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index 7b082157eb79..99d2930b18c8 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -185,7 +185,7 @@ static void sci_io_request_build_ssp_command_iu(struct isci_request *ireq)
185 cmd_iu->_r_c = 0; 185 cmd_iu->_r_c = 0;
186 186
187 sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cmd->cmnd, 187 sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cmd->cmnd,
188 task->ssp_task.cmd->cmd_len / sizeof(u32)); 188 (task->ssp_task.cmd->cmd_len+3) / sizeof(u32));
189} 189}
190 190
191static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq) 191static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq)
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index 9bb020ac089c..0d30ca849e8f 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -491,6 +491,7 @@ int isci_task_abort_task(struct sas_task *task)
491 struct isci_tmf tmf; 491 struct isci_tmf tmf;
492 int ret = TMF_RESP_FUNC_FAILED; 492 int ret = TMF_RESP_FUNC_FAILED;
493 unsigned long flags; 493 unsigned long flags;
494 int target_done_already = 0;
494 495
495 /* Get the isci_request reference from the task. Note that 496 /* Get the isci_request reference from the task. Note that
496 * this check does not depend on the pending request list 497 * this check does not depend on the pending request list
@@ -505,9 +506,11 @@ int isci_task_abort_task(struct sas_task *task)
505 /* If task is already done, the request isn't valid */ 506 /* If task is already done, the request isn't valid */
506 if (!(task->task_state_flags & SAS_TASK_STATE_DONE) && 507 if (!(task->task_state_flags & SAS_TASK_STATE_DONE) &&
507 (task->task_state_flags & SAS_TASK_AT_INITIATOR) && 508 (task->task_state_flags & SAS_TASK_AT_INITIATOR) &&
508 old_request) 509 old_request) {
509 idev = isci_get_device(task->dev->lldd_dev); 510 idev = isci_get_device(task->dev->lldd_dev);
510 511 target_done_already = test_bit(IREQ_COMPLETE_IN_TARGET,
512 &old_request->flags);
513 }
511 spin_unlock(&task->task_state_lock); 514 spin_unlock(&task->task_state_lock);
512 spin_unlock_irqrestore(&ihost->scic_lock, flags); 515 spin_unlock_irqrestore(&ihost->scic_lock, flags);
513 516
@@ -561,7 +564,7 @@ int isci_task_abort_task(struct sas_task *task)
561 564
562 if (task->task_proto == SAS_PROTOCOL_SMP || 565 if (task->task_proto == SAS_PROTOCOL_SMP ||
563 sas_protocol_ata(task->task_proto) || 566 sas_protocol_ata(task->task_proto) ||
564 test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags) || 567 target_done_already ||
565 test_bit(IDEV_GONE, &idev->flags)) { 568 test_bit(IDEV_GONE, &idev->flags)) {
566 569
567 spin_unlock_irqrestore(&ihost->scic_lock, flags); 570 spin_unlock_irqrestore(&ihost->scic_lock, flags);
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index f14665a6293d..6b1b4e91e53f 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -1857,11 +1857,16 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
1857 goto out; 1857 goto out;
1858 } 1858 }
1859 1859
1860 /* error info record present */ 1860 /*
1861 if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) { 1861 * error info record present; slot->response is 32 bit aligned but may
1862 * not be 64 bit aligned, so check for zero in two 32 bit reads
1863 */
1864 if (unlikely((rx_desc & RXQ_ERR)
1865 && (*((u32 *)slot->response)
1866 || *(((u32 *)slot->response) + 1)))) {
1862 mv_dprintk("port %d slot %d rx_desc %X has error info" 1867 mv_dprintk("port %d slot %d rx_desc %X has error info"
1863 "%016llX.\n", slot->port->sas_port.id, slot_idx, 1868 "%016llX.\n", slot->port->sas_port.id, slot_idx,
1864 rx_desc, (u64)(*(u64 *)slot->response)); 1869 rx_desc, get_unaligned_le64(slot->response));
1865 tstat->stat = mvs_slot_err(mvi, task, slot_idx); 1870 tstat->stat = mvs_slot_err(mvi, task, slot_idx);
1866 tstat->resp = SAS_TASK_COMPLETE; 1871 tstat->resp = SAS_TASK_COMPLETE;
1867 goto out; 1872 goto out;
diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
index 60e2fb7f2dca..d6b19dc80bee 100644
--- a/drivers/scsi/mvsas/mv_sas.h
+++ b/drivers/scsi/mvsas/mv_sas.h
@@ -39,6 +39,7 @@
39#include <linux/irq.h> 39#include <linux/irq.h>
40#include <linux/slab.h> 40#include <linux/slab.h>
41#include <linux/vmalloc.h> 41#include <linux/vmalloc.h>
42#include <asm/unaligned.h>
42#include <scsi/libsas.h> 43#include <scsi/libsas.h>
43#include <scsi/scsi.h> 44#include <scsi/scsi.h>
44#include <scsi/scsi_tcq.h> 45#include <scsi/scsi_tcq.h>
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 42ef481db942..ef0a5481b9dd 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -419,6 +419,8 @@ qla2x00_start_scsi(srb_t *sp)
419 __constant_cpu_to_le16(CF_SIMPLE_TAG); 419 __constant_cpu_to_le16(CF_SIMPLE_TAG);
420 break; 420 break;
421 } 421 }
422 } else {
423 cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);
422 } 424 }
423 425
424 /* Load SCSI command packet. */ 426 /* Load SCSI command packet. */
@@ -1307,11 +1309,11 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1307 fcp_cmnd->task_attribute = TSK_ORDERED; 1309 fcp_cmnd->task_attribute = TSK_ORDERED;
1308 break; 1310 break;
1309 default: 1311 default:
1310 fcp_cmnd->task_attribute = 0; 1312 fcp_cmnd->task_attribute = TSK_SIMPLE;
1311 break; 1313 break;
1312 } 1314 }
1313 } else { 1315 } else {
1314 fcp_cmnd->task_attribute = 0; 1316 fcp_cmnd->task_attribute = TSK_SIMPLE;
1315 } 1317 }
1316 1318
1317 cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */ 1319 cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
@@ -1525,7 +1527,12 @@ qla24xx_start_scsi(srb_t *sp)
1525 case ORDERED_QUEUE_TAG: 1527 case ORDERED_QUEUE_TAG:
1526 cmd_pkt->task = TSK_ORDERED; 1528 cmd_pkt->task = TSK_ORDERED;
1527 break; 1529 break;
1530 default:
1531 cmd_pkt->task = TSK_SIMPLE;
1532 break;
1528 } 1533 }
1534 } else {
1535 cmd_pkt->task = TSK_SIMPLE;
1529 } 1536 }
1530 1537
1531 /* Load SCSI command packet. */ 1538 /* Load SCSI command packet. */
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 80f39b8b0223..86fcf2c313ad 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -838,10 +838,17 @@ static int scsi_setup_flush_cmnd(struct scsi_device *sdp, struct request *rq)
838 838
839static void sd_unprep_fn(struct request_queue *q, struct request *rq) 839static void sd_unprep_fn(struct request_queue *q, struct request *rq)
840{ 840{
841 struct scsi_cmnd *SCpnt = rq->special;
842
841 if (rq->cmd_flags & REQ_DISCARD) { 843 if (rq->cmd_flags & REQ_DISCARD) {
842 free_page((unsigned long)rq->buffer); 844 free_page((unsigned long)rq->buffer);
843 rq->buffer = NULL; 845 rq->buffer = NULL;
844 } 846 }
847 if (SCpnt->cmnd != rq->cmd) {
848 mempool_free(SCpnt->cmnd, sd_cdb_pool);
849 SCpnt->cmnd = NULL;
850 SCpnt->cmd_len = 0;
851 }
845} 852}
846 853
847/** 854/**
@@ -1720,21 +1727,6 @@ static int sd_done(struct scsi_cmnd *SCpnt)
1720 if (rq_data_dir(SCpnt->request) == READ && scsi_prot_sg_count(SCpnt)) 1727 if (rq_data_dir(SCpnt->request) == READ && scsi_prot_sg_count(SCpnt))
1721 sd_dif_complete(SCpnt, good_bytes); 1728 sd_dif_complete(SCpnt, good_bytes);
1722 1729
1723 if (scsi_host_dif_capable(sdkp->device->host, sdkp->protection_type)
1724 == SD_DIF_TYPE2_PROTECTION && SCpnt->cmnd != SCpnt->request->cmd) {
1725
1726 /* We have to print a failed command here as the
1727 * extended CDB gets freed before scsi_io_completion()
1728 * is called.
1729 */
1730 if (result)
1731 scsi_print_command(SCpnt);
1732
1733 mempool_free(SCpnt->cmnd, sd_cdb_pool);
1734 SCpnt->cmnd = NULL;
1735 SCpnt->cmd_len = 0;
1736 }
1737
1738 return good_bytes; 1730 return good_bytes;
1739} 1731}
1740 1732
diff --git a/drivers/staging/android/logger.c b/drivers/staging/android/logger.c
index 080abf2faf97..a8c344422a77 100644
--- a/drivers/staging/android/logger.c
+++ b/drivers/staging/android/logger.c
@@ -469,7 +469,7 @@ static ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov,
469 unsigned long nr_segs, loff_t ppos) 469 unsigned long nr_segs, loff_t ppos)
470{ 470{
471 struct logger_log *log = file_get_log(iocb->ki_filp); 471 struct logger_log *log = file_get_log(iocb->ki_filp);
472 size_t orig = log->w_off; 472 size_t orig;
473 struct logger_entry header; 473 struct logger_entry header;
474 struct timespec now; 474 struct timespec now;
475 ssize_t ret = 0; 475 ssize_t ret = 0;
@@ -490,6 +490,8 @@ static ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov,
490 490
491 mutex_lock(&log->mutex); 491 mutex_lock(&log->mutex);
492 492
493 orig = log->w_off;
494
493 /* 495 /*
494 * Fix up any readers, pulling them forward to the first readable 496 * Fix up any readers, pulling them forward to the first readable
495 * entry after (what will be) the new write offset. We do this now 497 * entry after (what will be) the new write offset. We do this now
diff --git a/drivers/staging/comedi/TODO b/drivers/staging/comedi/TODO
index b10f739b7e3e..fa8da9aada30 100644
--- a/drivers/staging/comedi/TODO
+++ b/drivers/staging/comedi/TODO
@@ -9,4 +9,4 @@ TODO:
9Please send patches to Greg Kroah-Hartman <greg@kroah.com> and 9Please send patches to Greg Kroah-Hartman <greg@kroah.com> and
10copy: 10copy:
11 Ian Abbott <abbotti@mev.co.uk> 11 Ian Abbott <abbotti@mev.co.uk>
12 Frank Mori Hess <fmhess@users.sourceforge.net> 12 H Hartley Sweeten <hsweeten@visionengravers.com>
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index 8647518259f6..f4a197b2d1fd 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -1413,22 +1413,19 @@ static int do_cmd_ioctl(struct comedi_device *dev,
1413 DPRINTK("subdevice busy\n"); 1413 DPRINTK("subdevice busy\n");
1414 return -EBUSY; 1414 return -EBUSY;
1415 } 1415 }
1416 s->busy = file;
1417 1416
1418 /* make sure channel/gain list isn't too long */ 1417 /* make sure channel/gain list isn't too long */
1419 if (cmd.chanlist_len > s->len_chanlist) { 1418 if (cmd.chanlist_len > s->len_chanlist) {
1420 DPRINTK("channel/gain list too long %u > %d\n", 1419 DPRINTK("channel/gain list too long %u > %d\n",
1421 cmd.chanlist_len, s->len_chanlist); 1420 cmd.chanlist_len, s->len_chanlist);
1422 ret = -EINVAL; 1421 return -EINVAL;
1423 goto cleanup;
1424 } 1422 }
1425 1423
1426 /* make sure channel/gain list isn't too short */ 1424 /* make sure channel/gain list isn't too short */
1427 if (cmd.chanlist_len < 1) { 1425 if (cmd.chanlist_len < 1) {
1428 DPRINTK("channel/gain list too short %u < 1\n", 1426 DPRINTK("channel/gain list too short %u < 1\n",
1429 cmd.chanlist_len); 1427 cmd.chanlist_len);
1430 ret = -EINVAL; 1428 return -EINVAL;
1431 goto cleanup;
1432 } 1429 }
1433 1430
1434 async->cmd = cmd; 1431 async->cmd = cmd;
@@ -1438,8 +1435,7 @@ static int do_cmd_ioctl(struct comedi_device *dev,
1438 kmalloc(async->cmd.chanlist_len * sizeof(int), GFP_KERNEL); 1435 kmalloc(async->cmd.chanlist_len * sizeof(int), GFP_KERNEL);
1439 if (!async->cmd.chanlist) { 1436 if (!async->cmd.chanlist) {
1440 DPRINTK("allocation failed\n"); 1437 DPRINTK("allocation failed\n");
1441 ret = -ENOMEM; 1438 return -ENOMEM;
1442 goto cleanup;
1443 } 1439 }
1444 1440
1445 if (copy_from_user(async->cmd.chanlist, user_chanlist, 1441 if (copy_from_user(async->cmd.chanlist, user_chanlist,
@@ -1491,6 +1487,9 @@ static int do_cmd_ioctl(struct comedi_device *dev,
1491 1487
1492 comedi_set_subdevice_runflags(s, ~0, SRF_USER | SRF_RUNNING); 1488 comedi_set_subdevice_runflags(s, ~0, SRF_USER | SRF_RUNNING);
1493 1489
1490 /* set s->busy _after_ setting SRF_RUNNING flag to avoid race with
1491 * comedi_read() or comedi_write() */
1492 s->busy = file;
1494 ret = s->do_cmd(dev, s); 1493 ret = s->do_cmd(dev, s);
1495 if (ret == 0) 1494 if (ret == 0)
1496 return 0; 1495 return 0;
@@ -1705,6 +1704,7 @@ static int do_cancel_ioctl(struct comedi_device *dev, unsigned int arg,
1705 void *file) 1704 void *file)
1706{ 1705{
1707 struct comedi_subdevice *s; 1706 struct comedi_subdevice *s;
1707 int ret;
1708 1708
1709 if (arg >= dev->n_subdevices) 1709 if (arg >= dev->n_subdevices)
1710 return -EINVAL; 1710 return -EINVAL;
@@ -1721,7 +1721,11 @@ static int do_cancel_ioctl(struct comedi_device *dev, unsigned int arg,
1721 if (s->busy != file) 1721 if (s->busy != file)
1722 return -EBUSY; 1722 return -EBUSY;
1723 1723
1724 return do_cancel(dev, s); 1724 ret = do_cancel(dev, s);
1725 if (comedi_get_subdevice_runflags(s) & SRF_USER)
1726 wake_up_interruptible(&s->async->wait_head);
1727
1728 return ret;
1725} 1729}
1726 1730
1727/* 1731/*
@@ -2053,11 +2057,13 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
2053 2057
2054 if (!comedi_is_subdevice_running(s)) { 2058 if (!comedi_is_subdevice_running(s)) {
2055 if (count == 0) { 2059 if (count == 0) {
2060 mutex_lock(&dev->mutex);
2056 if (comedi_is_subdevice_in_error(s)) 2061 if (comedi_is_subdevice_in_error(s))
2057 retval = -EPIPE; 2062 retval = -EPIPE;
2058 else 2063 else
2059 retval = 0; 2064 retval = 0;
2060 do_become_nonbusy(dev, s); 2065 do_become_nonbusy(dev, s);
2066 mutex_unlock(&dev->mutex);
2061 } 2067 }
2062 break; 2068 break;
2063 } 2069 }
@@ -2156,11 +2162,13 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
2156 2162
2157 if (n == 0) { 2163 if (n == 0) {
2158 if (!comedi_is_subdevice_running(s)) { 2164 if (!comedi_is_subdevice_running(s)) {
2165 mutex_lock(&dev->mutex);
2159 do_become_nonbusy(dev, s); 2166 do_become_nonbusy(dev, s);
2160 if (comedi_is_subdevice_in_error(s)) 2167 if (comedi_is_subdevice_in_error(s))
2161 retval = -EPIPE; 2168 retval = -EPIPE;
2162 else 2169 else
2163 retval = 0; 2170 retval = 0;
2171 mutex_unlock(&dev->mutex);
2164 break; 2172 break;
2165 } 2173 }
2166 if (file->f_flags & O_NONBLOCK) { 2174 if (file->f_flags & O_NONBLOCK) {
@@ -2198,9 +2206,11 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
2198 buf += n; 2206 buf += n;
2199 break; /* makes device work like a pipe */ 2207 break; /* makes device work like a pipe */
2200 } 2208 }
2201 if (comedi_is_subdevice_idle(s) && 2209 if (comedi_is_subdevice_idle(s)) {
2202 async->buf_read_count - async->buf_write_count == 0) { 2210 mutex_lock(&dev->mutex);
2203 do_become_nonbusy(dev, s); 2211 if (async->buf_read_count - async->buf_write_count == 0)
2212 do_become_nonbusy(dev, s);
2213 mutex_unlock(&dev->mutex);
2204 } 2214 }
2205 set_current_state(TASK_RUNNING); 2215 set_current_state(TASK_RUNNING);
2206 remove_wait_queue(&async->wait_head, &wait); 2216 remove_wait_queue(&async->wait_head, &wait);
diff --git a/drivers/staging/frontier/alphatrack.c b/drivers/staging/frontier/alphatrack.c
index 5590ebf1da15..817f837b240d 100644
--- a/drivers/staging/frontier/alphatrack.c
+++ b/drivers/staging/frontier/alphatrack.c
@@ -827,11 +827,11 @@ static void usb_alphatrack_disconnect(struct usb_interface *intf)
827 mutex_unlock(&dev->mtx); 827 mutex_unlock(&dev->mtx);
828 usb_alphatrack_delete(dev); 828 usb_alphatrack_delete(dev);
829 } else { 829 } else {
830 atomic_set(&dev->writes_pending, 0);
830 dev->intf = NULL; 831 dev->intf = NULL;
831 mutex_unlock(&dev->mtx); 832 mutex_unlock(&dev->mtx);
832 } 833 }
833 834
834 atomic_set(&dev->writes_pending, 0);
835 mutex_unlock(&disconnect_mutex); 835 mutex_unlock(&disconnect_mutex);
836 836
837 dev_info(&intf->dev, "Alphatrack Surface #%d now disconnected\n", 837 dev_info(&intf->dev, "Alphatrack Surface #%d now disconnected\n",
diff --git a/drivers/staging/gdm72xx/gdm_qos.c b/drivers/staging/gdm72xx/gdm_qos.c
index b795353e8348..cc3692439a5c 100644
--- a/drivers/staging/gdm72xx/gdm_qos.c
+++ b/drivers/staging/gdm72xx/gdm_qos.c
@@ -250,8 +250,8 @@ static void send_qos_list(struct nic *nic, struct list_head *head)
250 250
251 list_for_each_entry_safe(entry, n, head, list) { 251 list_for_each_entry_safe(entry, n, head, list) {
252 list_del(&entry->list); 252 list_del(&entry->list);
253 free_qos_entry(entry);
254 gdm_wimax_send_tx(entry->skb, entry->dev); 253 gdm_wimax_send_tx(entry->skb, entry->dev);
254 free_qos_entry(entry);
255 } 255 }
256} 256}
257 257
diff --git a/drivers/staging/imx-drm/Kconfig b/drivers/staging/imx-drm/Kconfig
index 22339059837f..bd0f2fd01db4 100644
--- a/drivers/staging/imx-drm/Kconfig
+++ b/drivers/staging/imx-drm/Kconfig
@@ -33,7 +33,6 @@ config DRM_IMX_TVE
33config DRM_IMX_LDB 33config DRM_IMX_LDB
34 tristate "Support for LVDS displays" 34 tristate "Support for LVDS displays"
35 depends on DRM_IMX 35 depends on DRM_IMX
36 select OF_VIDEOMODE
37 help 36 help
38 Choose this to enable the internal LVDS Display Bridge (LDB) 37 Choose this to enable the internal LVDS Display Bridge (LDB)
39 found on i.MX53 and i.MX6 processors. 38 found on i.MX53 and i.MX6 processors.
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
index 9854a1daf606..e826086ec308 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/staging/imx-drm/imx-drm-core.c
@@ -69,28 +69,20 @@ struct imx_drm_connector {
69 struct module *owner; 69 struct module *owner;
70}; 70};
71 71
72static int imx_drm_driver_firstopen(struct drm_device *drm)
73{
74 if (!imx_drm_device_get())
75 return -EINVAL;
76
77 return 0;
78}
79
80static void imx_drm_driver_lastclose(struct drm_device *drm) 72static void imx_drm_driver_lastclose(struct drm_device *drm)
81{ 73{
82 struct imx_drm_device *imxdrm = drm->dev_private; 74 struct imx_drm_device *imxdrm = drm->dev_private;
83 75
84 if (imxdrm->fbhelper) 76 if (imxdrm->fbhelper)
85 drm_fbdev_cma_restore_mode(imxdrm->fbhelper); 77 drm_fbdev_cma_restore_mode(imxdrm->fbhelper);
86
87 imx_drm_device_put();
88} 78}
89 79
90static int imx_drm_driver_unload(struct drm_device *drm) 80static int imx_drm_driver_unload(struct drm_device *drm)
91{ 81{
92 struct imx_drm_device *imxdrm = drm->dev_private; 82 struct imx_drm_device *imxdrm = drm->dev_private;
93 83
84 imx_drm_device_put();
85
94 drm_mode_config_cleanup(imxdrm->drm); 86 drm_mode_config_cleanup(imxdrm->drm);
95 drm_kms_helper_poll_fini(imxdrm->drm); 87 drm_kms_helper_poll_fini(imxdrm->drm);
96 88
@@ -207,7 +199,6 @@ static const struct file_operations imx_drm_driver_fops = {
207 .unlocked_ioctl = drm_ioctl, 199 .unlocked_ioctl = drm_ioctl,
208 .mmap = drm_gem_cma_mmap, 200 .mmap = drm_gem_cma_mmap,
209 .poll = drm_poll, 201 .poll = drm_poll,
210 .fasync = drm_fasync,
211 .read = drm_read, 202 .read = drm_read,
212 .llseek = noop_llseek, 203 .llseek = noop_llseek,
213}; 204};
@@ -226,8 +217,6 @@ struct drm_device *imx_drm_device_get(void)
226 struct imx_drm_connector *con; 217 struct imx_drm_connector *con;
227 struct imx_drm_crtc *crtc; 218 struct imx_drm_crtc *crtc;
228 219
229 mutex_lock(&imxdrm->mutex);
230
231 list_for_each_entry(enc, &imxdrm->encoder_list, list) { 220 list_for_each_entry(enc, &imxdrm->encoder_list, list) {
232 if (!try_module_get(enc->owner)) { 221 if (!try_module_get(enc->owner)) {
233 dev_err(imxdrm->dev, "could not get module %s\n", 222 dev_err(imxdrm->dev, "could not get module %s\n",
@@ -254,8 +243,6 @@ struct drm_device *imx_drm_device_get(void)
254 243
255 imxdrm->references++; 244 imxdrm->references++;
256 245
257 mutex_unlock(&imxdrm->mutex);
258
259 return imxdrm->drm; 246 return imxdrm->drm;
260 247
261unwind_crtc: 248unwind_crtc:
@@ -447,6 +434,9 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
447 */ 434 */
448 imxdrm->drm->vblank_disable_allowed = 1; 435 imxdrm->drm->vblank_disable_allowed = 1;
449 436
437 if (!imx_drm_device_get())
438 ret = -EINVAL;
439
450 ret = 0; 440 ret = 0;
451 441
452err_init: 442err_init:
@@ -783,7 +773,7 @@ int imx_drm_remove_connector(struct imx_drm_connector *imx_drm_connector)
783} 773}
784EXPORT_SYMBOL_GPL(imx_drm_remove_connector); 774EXPORT_SYMBOL_GPL(imx_drm_remove_connector);
785 775
786static struct drm_ioctl_desc imx_drm_ioctls[] = { 776static const struct drm_ioctl_desc imx_drm_ioctls[] = {
787 /* none so far */ 777 /* none so far */
788}; 778};
789 779
@@ -791,13 +781,12 @@ static struct drm_driver imx_drm_driver = {
791 .driver_features = DRIVER_MODESET | DRIVER_GEM, 781 .driver_features = DRIVER_MODESET | DRIVER_GEM,
792 .load = imx_drm_driver_load, 782 .load = imx_drm_driver_load,
793 .unload = imx_drm_driver_unload, 783 .unload = imx_drm_driver_unload,
794 .firstopen = imx_drm_driver_firstopen,
795 .lastclose = imx_drm_driver_lastclose, 784 .lastclose = imx_drm_driver_lastclose,
796 .gem_free_object = drm_gem_cma_free_object, 785 .gem_free_object = drm_gem_cma_free_object,
797 .gem_vm_ops = &drm_gem_cma_vm_ops, 786 .gem_vm_ops = &drm_gem_cma_vm_ops,
798 .dumb_create = drm_gem_cma_dumb_create, 787 .dumb_create = drm_gem_cma_dumb_create,
799 .dumb_map_offset = drm_gem_cma_dumb_map_offset, 788 .dumb_map_offset = drm_gem_cma_dumb_map_offset,
800 .dumb_destroy = drm_gem_cma_dumb_destroy, 789 .dumb_destroy = drm_gem_dumb_destroy,
801 790
802 .get_vblank_counter = drm_vblank_count, 791 .get_vblank_counter = drm_vblank_count,
803 .enable_vblank = imx_drm_enable_vblank, 792 .enable_vblank = imx_drm_enable_vblank,
diff --git a/drivers/staging/imx-drm/ipuv3-crtc.c b/drivers/staging/imx-drm/ipuv3-crtc.c
index 9176a8171e6f..e39690a03e38 100644
--- a/drivers/staging/imx-drm/ipuv3-crtc.c
+++ b/drivers/staging/imx-drm/ipuv3-crtc.c
@@ -129,7 +129,8 @@ static void ipu_crtc_dpms(struct drm_crtc *crtc, int mode)
129 129
130static int ipu_page_flip(struct drm_crtc *crtc, 130static int ipu_page_flip(struct drm_crtc *crtc,
131 struct drm_framebuffer *fb, 131 struct drm_framebuffer *fb,
132 struct drm_pending_vblank_event *event) 132 struct drm_pending_vblank_event *event,
133 uint32_t page_flip_flags)
133{ 134{
134 struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); 135 struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
135 int ret; 136 int ret;
diff --git a/drivers/staging/tidspbridge/pmgr/dbll.c b/drivers/staging/tidspbridge/pmgr/dbll.c
index c191ae203565..41e88abe47af 100644
--- a/drivers/staging/tidspbridge/pmgr/dbll.c
+++ b/drivers/staging/tidspbridge/pmgr/dbll.c
@@ -1120,8 +1120,11 @@ static int dbll_rmm_alloc(struct dynamic_loader_allocate *this,
1120 or DYN_EXTERNAL, then mem granularity information is present 1120 or DYN_EXTERNAL, then mem granularity information is present
1121 within the section name - only process if there are at least three 1121 within the section name - only process if there are at least three
1122 tokens within the section name (just a minor optimization) */ 1122 tokens within the section name (just a minor optimization) */
1123 if (count >= 3) 1123 if (count >= 3) {
1124 strict_strtol(sz_last_token, 10, (long *)&req); 1124 status = kstrtos32(sz_last_token, 10, &req);
1125 if (status)
1126 goto func_cont;
1127 }
1125 1128
1126 if ((req == 0) || (req == 1)) { 1129 if ((req == 0) || (req == 1)) {
1127 if (strcmp(sz_sec_last_token, "DYN_DARAM") == 0) { 1130 if (strcmp(sz_sec_last_token, "DYN_DARAM") == 0) {
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index 82c7202fd5cc..e77fb6ea40c9 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -527,8 +527,11 @@ static void zram_reset_device(struct zram *zram)
527 size_t index; 527 size_t index;
528 struct zram_meta *meta; 528 struct zram_meta *meta;
529 529
530 if (!zram->init_done) 530 down_write(&zram->init_lock);
531 if (!zram->init_done) {
532 up_write(&zram->init_lock);
531 return; 533 return;
534 }
532 535
533 meta = zram->meta; 536 meta = zram->meta;
534 zram->init_done = 0; 537 zram->init_done = 0;
@@ -549,6 +552,7 @@ static void zram_reset_device(struct zram *zram)
549 552
550 zram->disksize = 0; 553 zram->disksize = 0;
551 set_capacity(zram->disk, 0); 554 set_capacity(zram->disk, 0);
555 up_write(&zram->init_lock);
552} 556}
553 557
554static void zram_init_device(struct zram *zram, struct zram_meta *meta) 558static void zram_init_device(struct zram *zram, struct zram_meta *meta)
diff --git a/drivers/tty/serial/8250/8250_early.c b/drivers/tty/serial/8250/8250_early.c
index 721904f8efa9..946ddd2b3a54 100644
--- a/drivers/tty/serial/8250/8250_early.c
+++ b/drivers/tty/serial/8250/8250_early.c
@@ -193,7 +193,8 @@ static int __init parse_options(struct early_serial8250_device *device,
193 if (options) { 193 if (options) {
194 options++; 194 options++;
195 device->baud = simple_strtoul(options, NULL, 0); 195 device->baud = simple_strtoul(options, NULL, 0);
196 length = min(strcspn(options, " "), sizeof(device->options)); 196 length = min(strcspn(options, " ") + 1,
197 sizeof(device->options));
197 strlcpy(device->options, options, length); 198 strlcpy(device->options, options, length);
198 } else { 199 } else {
199 device->baud = probe_baud(port); 200 device->baud = probe_baud(port);
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 5e3d68917ffe..1456673bcca0 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -277,7 +277,7 @@ config SERIAL_TEGRA
277 select SERIAL_CORE 277 select SERIAL_CORE
278 help 278 help
279 Support for the on-chip UARTs on the NVIDIA Tegra series SOCs 279 Support for the on-chip UARTs on the NVIDIA Tegra series SOCs
280 providing /dev/ttyHS0, 1, 2, 3 and 4 (note, some machines may not 280 providing /dev/ttyTHS0, 1, 2, 3 and 4 (note, some machines may not
281 provide all of these ports, depending on how the serial port 281 provide all of these ports, depending on how the serial port
282 are enabled). This driver uses the APB DMA to achieve higher baudrate 282 are enabled). This driver uses the APB DMA to achieve higher baudrate
283 and better performance. 283 and better performance.
diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
index ff171384ea52..dc6e96996ead 100644
--- a/drivers/tty/synclinkmp.c
+++ b/drivers/tty/synclinkmp.c
@@ -3478,7 +3478,7 @@ static int alloc_buf_list(SLMP_INFO *info)
3478 for ( i = 0; i < info->rx_buf_count; i++ ) { 3478 for ( i = 0; i < info->rx_buf_count; i++ ) {
3479 /* calculate and store physical address of this buffer entry */ 3479 /* calculate and store physical address of this buffer entry */
3480 info->rx_buf_list_ex[i].phys_entry = 3480 info->rx_buf_list_ex[i].phys_entry =
3481 info->buffer_list_phys + (i * sizeof(SCABUFSIZE)); 3481 info->buffer_list_phys + (i * SCABUFSIZE);
3482 3482
3483 /* calculate and store physical address of */ 3483 /* calculate and store physical address of */
3484 /* next entry in cirular list of entries */ 3484 /* next entry in cirular list of entries */
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 4191db32f12c..4a8a1d68002c 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -668,6 +668,15 @@ resubmit:
668static inline int 668static inline int
669hub_clear_tt_buffer (struct usb_device *hdev, u16 devinfo, u16 tt) 669hub_clear_tt_buffer (struct usb_device *hdev, u16 devinfo, u16 tt)
670{ 670{
671 /* Need to clear both directions for control ep */
672 if (((devinfo >> 11) & USB_ENDPOINT_XFERTYPE_MASK) ==
673 USB_ENDPOINT_XFER_CONTROL) {
674 int status = usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
675 HUB_CLEAR_TT_BUFFER, USB_RT_PORT,
676 devinfo ^ 0x8000, tt, NULL, 0, 1000);
677 if (status)
678 return status;
679 }
671 return usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0), 680 return usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
672 HUB_CLEAR_TT_BUFFER, USB_RT_PORT, devinfo, 681 HUB_CLEAR_TT_BUFFER, USB_RT_PORT, devinfo,
673 tt, NULL, 0, 1000); 682 tt, NULL, 0, 1000);
@@ -2848,6 +2857,15 @@ static int usb_disable_function_remotewakeup(struct usb_device *udev)
2848 USB_CTRL_SET_TIMEOUT); 2857 USB_CTRL_SET_TIMEOUT);
2849} 2858}
2850 2859
2860/* Count of wakeup-enabled devices at or below udev */
2861static unsigned wakeup_enabled_descendants(struct usb_device *udev)
2862{
2863 struct usb_hub *hub = usb_hub_to_struct_hub(udev);
2864
2865 return udev->do_remote_wakeup +
2866 (hub ? hub->wakeup_enabled_descendants : 0);
2867}
2868
2851/* 2869/*
2852 * usb_port_suspend - suspend a usb device's upstream port 2870 * usb_port_suspend - suspend a usb device's upstream port
2853 * @udev: device that's no longer in active use, not a root hub 2871 * @udev: device that's no longer in active use, not a root hub
@@ -2888,8 +2906,8 @@ static int usb_disable_function_remotewakeup(struct usb_device *udev)
2888 * Linux (2.6) currently has NO mechanisms to initiate that: no khubd 2906 * Linux (2.6) currently has NO mechanisms to initiate that: no khubd
2889 * timer, no SRP, no requests through sysfs. 2907 * timer, no SRP, no requests through sysfs.
2890 * 2908 *
2891 * If Runtime PM isn't enabled or used, non-SuperSpeed devices really get 2909 * If Runtime PM isn't enabled or used, non-SuperSpeed devices may not get
2892 * suspended only when their bus goes into global suspend (i.e., the root 2910 * suspended until their bus goes into global suspend (i.e., the root
2893 * hub is suspended). Nevertheless, we change @udev->state to 2911 * hub is suspended). Nevertheless, we change @udev->state to
2894 * USB_STATE_SUSPENDED as this is the device's "logical" state. The actual 2912 * USB_STATE_SUSPENDED as this is the device's "logical" state. The actual
2895 * upstream port setting is stored in @udev->port_is_suspended. 2913 * upstream port setting is stored in @udev->port_is_suspended.
@@ -2960,15 +2978,21 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
2960 /* see 7.1.7.6 */ 2978 /* see 7.1.7.6 */
2961 if (hub_is_superspeed(hub->hdev)) 2979 if (hub_is_superspeed(hub->hdev))
2962 status = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_U3); 2980 status = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_U3);
2963 else if (PMSG_IS_AUTO(msg)) 2981
2964 status = set_port_feature(hub->hdev, port1,
2965 USB_PORT_FEAT_SUSPEND);
2966 /* 2982 /*
2967 * For system suspend, we do not need to enable the suspend feature 2983 * For system suspend, we do not need to enable the suspend feature
2968 * on individual USB-2 ports. The devices will automatically go 2984 * on individual USB-2 ports. The devices will automatically go
2969 * into suspend a few ms after the root hub stops sending packets. 2985 * into suspend a few ms after the root hub stops sending packets.
2970 * The USB 2.0 spec calls this "global suspend". 2986 * The USB 2.0 spec calls this "global suspend".
2987 *
2988 * However, many USB hubs have a bug: They don't relay wakeup requests
2989 * from a downstream port if the port's suspend feature isn't on.
2990 * Therefore we will turn on the suspend feature if udev or any of its
2991 * descendants is enabled for remote wakeup.
2971 */ 2992 */
2993 else if (PMSG_IS_AUTO(msg) || wakeup_enabled_descendants(udev) > 0)
2994 status = set_port_feature(hub->hdev, port1,
2995 USB_PORT_FEAT_SUSPEND);
2972 else { 2996 else {
2973 really_suspend = false; 2997 really_suspend = false;
2974 status = 0; 2998 status = 0;
@@ -3003,15 +3027,16 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
3003 if (!PMSG_IS_AUTO(msg)) 3027 if (!PMSG_IS_AUTO(msg))
3004 status = 0; 3028 status = 0;
3005 } else { 3029 } else {
3006 /* device has up to 10 msec to fully suspend */
3007 dev_dbg(&udev->dev, "usb %ssuspend, wakeup %d\n", 3030 dev_dbg(&udev->dev, "usb %ssuspend, wakeup %d\n",
3008 (PMSG_IS_AUTO(msg) ? "auto-" : ""), 3031 (PMSG_IS_AUTO(msg) ? "auto-" : ""),
3009 udev->do_remote_wakeup); 3032 udev->do_remote_wakeup);
3010 usb_set_device_state(udev, USB_STATE_SUSPENDED);
3011 if (really_suspend) { 3033 if (really_suspend) {
3012 udev->port_is_suspended = 1; 3034 udev->port_is_suspended = 1;
3035
3036 /* device has up to 10 msec to fully suspend */
3013 msleep(10); 3037 msleep(10);
3014 } 3038 }
3039 usb_set_device_state(udev, USB_STATE_SUSPENDED);
3015 } 3040 }
3016 3041
3017 /* 3042 /*
@@ -3293,7 +3318,11 @@ static int hub_suspend(struct usb_interface *intf, pm_message_t msg)
3293 unsigned port1; 3318 unsigned port1;
3294 int status; 3319 int status;
3295 3320
3296 /* Warn if children aren't already suspended */ 3321 /*
3322 * Warn if children aren't already suspended.
3323 * Also, add up the number of wakeup-enabled descendants.
3324 */
3325 hub->wakeup_enabled_descendants = 0;
3297 for (port1 = 1; port1 <= hdev->maxchild; port1++) { 3326 for (port1 = 1; port1 <= hdev->maxchild; port1++) {
3298 struct usb_device *udev; 3327 struct usb_device *udev;
3299 3328
@@ -3303,6 +3332,9 @@ static int hub_suspend(struct usb_interface *intf, pm_message_t msg)
3303 if (PMSG_IS_AUTO(msg)) 3332 if (PMSG_IS_AUTO(msg))
3304 return -EBUSY; 3333 return -EBUSY;
3305 } 3334 }
3335 if (udev)
3336 hub->wakeup_enabled_descendants +=
3337 wakeup_enabled_descendants(udev);
3306 } 3338 }
3307 3339
3308 if (hdev->do_remote_wakeup && hub->quirk_check_port_auto_suspend) { 3340 if (hdev->do_remote_wakeup && hub->quirk_check_port_auto_suspend) {
diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h
index 6508e02b3dac..4e4790dea343 100644
--- a/drivers/usb/core/hub.h
+++ b/drivers/usb/core/hub.h
@@ -59,6 +59,9 @@ struct usb_hub {
59 struct usb_tt tt; /* Transaction Translator */ 59 struct usb_tt tt; /* Transaction Translator */
60 60
61 unsigned mA_per_port; /* current for each child */ 61 unsigned mA_per_port; /* current for each child */
62#ifdef CONFIG_PM
63 unsigned wakeup_enabled_descendants;
64#endif
62 65
63 unsigned limited_power:1; 66 unsigned limited_power:1;
64 unsigned quiescing:1; 67 unsigned quiescing:1;
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index 757aa18027d0..2378958ea63e 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -1,6 +1,6 @@
1config USB_DWC3 1config USB_DWC3
2 tristate "DesignWare USB3 DRD Core Support" 2 tristate "DesignWare USB3 DRD Core Support"
3 depends on (USB || USB_GADGET) && GENERIC_HARDIRQS 3 depends on (USB || USB_GADGET) && GENERIC_HARDIRQS && HAS_DMA
4 select USB_XHCI_PLATFORM if USB_SUPPORT && USB_XHCI_HCD 4 select USB_XHCI_PLATFORM if USB_SUPPORT && USB_XHCI_HCD
5 help 5 help
6 Say Y or M here if your system has a Dual Role SuperSpeed 6 Say Y or M here if your system has a Dual Role SuperSpeed
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index c35d49d39b76..358375e0b291 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -450,7 +450,7 @@ static int dwc3_probe(struct platform_device *pdev)
450 } 450 }
451 451
452 if (IS_ERR(dwc->usb3_phy)) { 452 if (IS_ERR(dwc->usb3_phy)) {
453 ret = PTR_ERR(dwc->usb2_phy); 453 ret = PTR_ERR(dwc->usb3_phy);
454 454
455 /* 455 /*
456 * if -ENXIO is returned, it means PHY layer wasn't 456 * if -ENXIO is returned, it means PHY layer wasn't
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index b69d322e3cab..27dad993b007 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -759,8 +759,8 @@ struct dwc3 {
759 759
760struct dwc3_event_type { 760struct dwc3_event_type {
761 u32 is_devspec:1; 761 u32 is_devspec:1;
762 u32 type:6; 762 u32 type:7;
763 u32 reserved8_31:25; 763 u32 reserved8_31:24;
764} __packed; 764} __packed;
765 765
766#define DWC3_DEPEVT_XFERCOMPLETE 0x01 766#define DWC3_DEPEVT_XFERCOMPLETE 0x01
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index b5e5b35df49c..f77083fedc68 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1584,6 +1584,7 @@ err1:
1584 __dwc3_gadget_ep_disable(dwc->eps[0]); 1584 __dwc3_gadget_ep_disable(dwc->eps[0]);
1585 1585
1586err0: 1586err0:
1587 dwc->gadget_driver = NULL;
1587 spin_unlock_irqrestore(&dwc->lock, flags); 1588 spin_unlock_irqrestore(&dwc->lock, flags);
1588 1589
1589 return ret; 1590 return ret;
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 62f6802f6e0f..8e9368330b10 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -193,6 +193,7 @@ config USB_FUSB300
193 Faraday usb device controller FUSB300 driver 193 Faraday usb device controller FUSB300 driver
194 194
195config USB_FOTG210_UDC 195config USB_FOTG210_UDC
196 depends on HAS_DMA
196 tristate "Faraday FOTG210 USB Peripheral Controller" 197 tristate "Faraday FOTG210 USB Peripheral Controller"
197 help 198 help
198 Faraday USB2.0 OTG controller which can be configured as 199 Faraday USB2.0 OTG controller which can be configured as
@@ -328,13 +329,14 @@ config USB_S3C_HSUDC
328 329
329config USB_MV_UDC 330config USB_MV_UDC
330 tristate "Marvell USB2.0 Device Controller" 331 tristate "Marvell USB2.0 Device Controller"
331 depends on GENERIC_HARDIRQS 332 depends on GENERIC_HARDIRQS && HAS_DMA
332 help 333 help
333 Marvell Socs (including PXA and MMP series) include a high speed 334 Marvell Socs (including PXA and MMP series) include a high speed
334 USB2.0 OTG controller, which can be configured as high speed or 335 USB2.0 OTG controller, which can be configured as high speed or
335 full speed USB peripheral. 336 full speed USB peripheral.
336 337
337config USB_MV_U3D 338config USB_MV_U3D
339 depends on HAS_DMA
338 tristate "MARVELL PXA2128 USB 3.0 controller" 340 tristate "MARVELL PXA2128 USB 3.0 controller"
339 help 341 help
340 MARVELL PXA2128 Processor series include a super speed USB3.0 device 342 MARVELL PXA2128 Processor series include a super speed USB3.0 device
@@ -639,6 +641,7 @@ config USB_CONFIGFS_RNDIS
639 depends on USB_CONFIGFS 641 depends on USB_CONFIGFS
640 depends on NET 642 depends on NET
641 select USB_U_ETHER 643 select USB_U_ETHER
644 select USB_U_RNDIS
642 select USB_F_RNDIS 645 select USB_F_RNDIS
643 help 646 help
644 Microsoft Windows XP bundles the "Remote NDIS" (RNDIS) protocol, 647 Microsoft Windows XP bundles the "Remote NDIS" (RNDIS) protocol,
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index 073b938f9135..d9a6add0c852 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -870,8 +870,8 @@ static void clk_on(struct at91_udc *udc)
870 if (udc->clocked) 870 if (udc->clocked)
871 return; 871 return;
872 udc->clocked = 1; 872 udc->clocked = 1;
873 clk_enable(udc->iclk); 873 clk_prepare_enable(udc->iclk);
874 clk_enable(udc->fclk); 874 clk_prepare_enable(udc->fclk);
875} 875}
876 876
877static void clk_off(struct at91_udc *udc) 877static void clk_off(struct at91_udc *udc)
@@ -880,8 +880,8 @@ static void clk_off(struct at91_udc *udc)
880 return; 880 return;
881 udc->clocked = 0; 881 udc->clocked = 0;
882 udc->gadget.speed = USB_SPEED_UNKNOWN; 882 udc->gadget.speed = USB_SPEED_UNKNOWN;
883 clk_disable(udc->fclk); 883 clk_disable_unprepare(udc->fclk);
884 clk_disable(udc->iclk); 884 clk_disable_unprepare(udc->iclk);
885} 885}
886 886
887/* 887/*
@@ -1725,7 +1725,7 @@ static int at91udc_probe(struct platform_device *pdev)
1725 /* init software state */ 1725 /* init software state */
1726 udc = &controller; 1726 udc = &controller;
1727 udc->gadget.dev.parent = dev; 1727 udc->gadget.dev.parent = dev;
1728 if (pdev->dev.of_node) 1728 if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node)
1729 at91udc_of_init(udc, pdev->dev.of_node); 1729 at91udc_of_init(udc, pdev->dev.of_node);
1730 else 1730 else
1731 memcpy(&udc->board, dev->platform_data, 1731 memcpy(&udc->board, dev->platform_data,
@@ -1782,12 +1782,14 @@ static int at91udc_probe(struct platform_device *pdev)
1782 } 1782 }
1783 1783
1784 /* don't do anything until we have both gadget driver and VBUS */ 1784 /* don't do anything until we have both gadget driver and VBUS */
1785 clk_enable(udc->iclk); 1785 retval = clk_prepare_enable(udc->iclk);
1786 if (retval)
1787 goto fail1;
1786 at91_udp_write(udc, AT91_UDP_TXVC, AT91_UDP_TXVC_TXVDIS); 1788 at91_udp_write(udc, AT91_UDP_TXVC, AT91_UDP_TXVC_TXVDIS);
1787 at91_udp_write(udc, AT91_UDP_IDR, 0xffffffff); 1789 at91_udp_write(udc, AT91_UDP_IDR, 0xffffffff);
1788 /* Clear all pending interrupts - UDP may be used by bootloader. */ 1790 /* Clear all pending interrupts - UDP may be used by bootloader. */
1789 at91_udp_write(udc, AT91_UDP_ICR, 0xffffffff); 1791 at91_udp_write(udc, AT91_UDP_ICR, 0xffffffff);
1790 clk_disable(udc->iclk); 1792 clk_disable_unprepare(udc->iclk);
1791 1793
1792 /* request UDC and maybe VBUS irqs */ 1794 /* request UDC and maybe VBUS irqs */
1793 udc->udp_irq = platform_get_irq(pdev, 0); 1795 udc->udp_irq = platform_get_irq(pdev, 0);
diff --git a/drivers/usb/gadget/f_ecm.c b/drivers/usb/gadget/f_ecm.c
index 5d3561ea1c15..edab45da3741 100644
--- a/drivers/usb/gadget/f_ecm.c
+++ b/drivers/usb/gadget/f_ecm.c
@@ -959,8 +959,11 @@ static struct usb_function_instance *ecm_alloc_inst(void)
959 mutex_init(&opts->lock); 959 mutex_init(&opts->lock);
960 opts->func_inst.free_func_inst = ecm_free_inst; 960 opts->func_inst.free_func_inst = ecm_free_inst;
961 opts->net = gether_setup_default(); 961 opts->net = gether_setup_default();
962 if (IS_ERR(opts->net)) 962 if (IS_ERR(opts->net)) {
963 return ERR_PTR(PTR_ERR(opts->net)); 963 struct net_device *net = opts->net;
964 kfree(opts);
965 return ERR_CAST(net);
966 }
964 967
965 config_group_init_type_name(&opts->func_inst.group, "", &ecm_func_type); 968 config_group_init_type_name(&opts->func_inst.group, "", &ecm_func_type);
966 969
diff --git a/drivers/usb/gadget/f_eem.c b/drivers/usb/gadget/f_eem.c
index 90ee8022e8d8..d00392d879db 100644
--- a/drivers/usb/gadget/f_eem.c
+++ b/drivers/usb/gadget/f_eem.c
@@ -593,8 +593,11 @@ static struct usb_function_instance *eem_alloc_inst(void)
593 mutex_init(&opts->lock); 593 mutex_init(&opts->lock);
594 opts->func_inst.free_func_inst = eem_free_inst; 594 opts->func_inst.free_func_inst = eem_free_inst;
595 opts->net = gether_setup_default(); 595 opts->net = gether_setup_default();
596 if (IS_ERR(opts->net)) 596 if (IS_ERR(opts->net)) {
597 return ERR_CAST(opts->net); 597 struct net_device *net = opts->net;
598 kfree(opts);
599 return ERR_CAST(net);
600 }
598 601
599 config_group_init_type_name(&opts->func_inst.group, "", &eem_func_type); 602 config_group_init_type_name(&opts->func_inst.group, "", &eem_func_type);
600 603
diff --git a/drivers/usb/gadget/f_ncm.c b/drivers/usb/gadget/f_ncm.c
index 952177f7eb9b..1c28fe13328a 100644
--- a/drivers/usb/gadget/f_ncm.c
+++ b/drivers/usb/gadget/f_ncm.c
@@ -1350,8 +1350,11 @@ static struct usb_function_instance *ncm_alloc_inst(void)
1350 mutex_init(&opts->lock); 1350 mutex_init(&opts->lock);
1351 opts->func_inst.free_func_inst = ncm_free_inst; 1351 opts->func_inst.free_func_inst = ncm_free_inst;
1352 opts->net = gether_setup_default(); 1352 opts->net = gether_setup_default();
1353 if (IS_ERR(opts->net)) 1353 if (IS_ERR(opts->net)) {
1354 return ERR_PTR(PTR_ERR(opts->net)); 1354 struct net_device *net = opts->net;
1355 kfree(opts);
1356 return ERR_CAST(net);
1357 }
1355 1358
1356 config_group_init_type_name(&opts->func_inst.group, "", &ncm_func_type); 1359 config_group_init_type_name(&opts->func_inst.group, "", &ncm_func_type);
1357 1360
diff --git a/drivers/usb/gadget/f_phonet.c b/drivers/usb/gadget/f_phonet.c
index 7944fb0efe3b..1bf26e9f38cd 100644
--- a/drivers/usb/gadget/f_phonet.c
+++ b/drivers/usb/gadget/f_phonet.c
@@ -656,8 +656,11 @@ static struct usb_function_instance *phonet_alloc_inst(void)
656 656
657 opts->func_inst.free_func_inst = phonet_free_inst; 657 opts->func_inst.free_func_inst = phonet_free_inst;
658 opts->net = gphonet_setup_default(); 658 opts->net = gphonet_setup_default();
659 if (IS_ERR(opts->net)) 659 if (IS_ERR(opts->net)) {
660 return ERR_PTR(PTR_ERR(opts->net)); 660 struct net_device *net = opts->net;
661 kfree(opts);
662 return ERR_CAST(net);
663 }
661 664
662 config_group_init_type_name(&opts->func_inst.group, "", 665 config_group_init_type_name(&opts->func_inst.group, "",
663 &phonet_func_type); 666 &phonet_func_type);
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
index 191df35ae69d..717ed7f95639 100644
--- a/drivers/usb/gadget/f_rndis.c
+++ b/drivers/usb/gadget/f_rndis.c
@@ -963,8 +963,11 @@ static struct usb_function_instance *rndis_alloc_inst(void)
963 mutex_init(&opts->lock); 963 mutex_init(&opts->lock);
964 opts->func_inst.free_func_inst = rndis_free_inst; 964 opts->func_inst.free_func_inst = rndis_free_inst;
965 opts->net = gether_setup_default(); 965 opts->net = gether_setup_default();
966 if (IS_ERR(opts->net)) 966 if (IS_ERR(opts->net)) {
967 return ERR_CAST(opts->net); 967 struct net_device *net = opts->net;
968 kfree(opts);
969 return ERR_CAST(net);
970 }
968 971
969 config_group_init_type_name(&opts->func_inst.group, "", 972 config_group_init_type_name(&opts->func_inst.group, "",
970 &rndis_func_type); 973 &rndis_func_type);
diff --git a/drivers/usb/gadget/f_subset.c b/drivers/usb/gadget/f_subset.c
index 5601e1d96c4f..7c8674fa7e80 100644
--- a/drivers/usb/gadget/f_subset.c
+++ b/drivers/usb/gadget/f_subset.c
@@ -505,8 +505,11 @@ static struct usb_function_instance *geth_alloc_inst(void)
505 mutex_init(&opts->lock); 505 mutex_init(&opts->lock);
506 opts->func_inst.free_func_inst = geth_free_inst; 506 opts->func_inst.free_func_inst = geth_free_inst;
507 opts->net = gether_setup_default(); 507 opts->net = gether_setup_default();
508 if (IS_ERR(opts->net)) 508 if (IS_ERR(opts->net)) {
509 return ERR_CAST(opts->net); 509 struct net_device *net = opts->net;
510 kfree(opts);
511 return ERR_CAST(net);
512 }
510 513
511 config_group_init_type_name(&opts->func_inst.group, "", 514 config_group_init_type_name(&opts->func_inst.group, "",
512 &gether_func_type); 515 &gether_func_type);
diff --git a/drivers/usb/gadget/fotg210-udc.c b/drivers/usb/gadget/fotg210-udc.c
index cce5535b1dc6..10cd18ddd0d4 100644
--- a/drivers/usb/gadget/fotg210-udc.c
+++ b/drivers/usb/gadget/fotg210-udc.c
@@ -1074,7 +1074,7 @@ static struct usb_gadget_ops fotg210_gadget_ops = {
1074 .udc_stop = fotg210_udc_stop, 1074 .udc_stop = fotg210_udc_stop,
1075}; 1075};
1076 1076
1077static int __exit fotg210_udc_remove(struct platform_device *pdev) 1077static int fotg210_udc_remove(struct platform_device *pdev)
1078{ 1078{
1079 struct fotg210_udc *fotg210 = dev_get_drvdata(&pdev->dev); 1079 struct fotg210_udc *fotg210 = dev_get_drvdata(&pdev->dev);
1080 1080
@@ -1088,7 +1088,7 @@ static int __exit fotg210_udc_remove(struct platform_device *pdev)
1088 return 0; 1088 return 0;
1089} 1089}
1090 1090
1091static int __init fotg210_udc_probe(struct platform_device *pdev) 1091static int fotg210_udc_probe(struct platform_device *pdev)
1092{ 1092{
1093 struct resource *res, *ires; 1093 struct resource *res, *ires;
1094 struct fotg210_udc *fotg210 = NULL; 1094 struct fotg210_udc *fotg210 = NULL;
diff --git a/drivers/usb/gadget/mv_u3d_core.c b/drivers/usb/gadget/mv_u3d_core.c
index 07fdb3eaf48a..ec6a2d290398 100644
--- a/drivers/usb/gadget/mv_u3d_core.c
+++ b/drivers/usb/gadget/mv_u3d_core.c
@@ -1776,7 +1776,7 @@ static int mv_u3d_remove(struct platform_device *dev)
1776 kfree(u3d->eps); 1776 kfree(u3d->eps);
1777 1777
1778 if (u3d->irq) 1778 if (u3d->irq)
1779 free_irq(u3d->irq, &dev->dev); 1779 free_irq(u3d->irq, u3d);
1780 1780
1781 if (u3d->cap_regs) 1781 if (u3d->cap_regs)
1782 iounmap(u3d->cap_regs); 1782 iounmap(u3d->cap_regs);
@@ -1974,7 +1974,7 @@ static int mv_u3d_probe(struct platform_device *dev)
1974 return 0; 1974 return 0;
1975 1975
1976err_unregister: 1976err_unregister:
1977 free_irq(u3d->irq, &dev->dev); 1977 free_irq(u3d->irq, u3d);
1978err_request_irq: 1978err_request_irq:
1979err_get_irq: 1979err_get_irq:
1980 kfree(u3d->status_req); 1980 kfree(u3d->status_req);
diff --git a/drivers/usb/gadget/udc-core.c b/drivers/usb/gadget/udc-core.c
index ffd8fa541101..c28ac9872030 100644
--- a/drivers/usb/gadget/udc-core.c
+++ b/drivers/usb/gadget/udc-core.c
@@ -50,6 +50,8 @@ static DEFINE_MUTEX(udc_lock);
50 50
51/* ------------------------------------------------------------------------- */ 51/* ------------------------------------------------------------------------- */
52 52
53#ifdef CONFIG_HAS_DMA
54
53int usb_gadget_map_request(struct usb_gadget *gadget, 55int usb_gadget_map_request(struct usb_gadget *gadget,
54 struct usb_request *req, int is_in) 56 struct usb_request *req, int is_in)
55{ 57{
@@ -99,6 +101,8 @@ void usb_gadget_unmap_request(struct usb_gadget *gadget,
99} 101}
100EXPORT_SYMBOL_GPL(usb_gadget_unmap_request); 102EXPORT_SYMBOL_GPL(usb_gadget_unmap_request);
101 103
104#endif /* CONFIG_HAS_DMA */
105
102/* ------------------------------------------------------------------------- */ 106/* ------------------------------------------------------------------------- */
103 107
104void usb_gadget_set_state(struct usb_gadget *gadget, 108void usb_gadget_set_state(struct usb_gadget *gadget,
@@ -194,9 +198,11 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
194 dev_set_name(&gadget->dev, "gadget"); 198 dev_set_name(&gadget->dev, "gadget");
195 gadget->dev.parent = parent; 199 gadget->dev.parent = parent;
196 200
201#ifdef CONFIG_HAS_DMA
197 dma_set_coherent_mask(&gadget->dev, parent->coherent_dma_mask); 202 dma_set_coherent_mask(&gadget->dev, parent->coherent_dma_mask);
198 gadget->dev.dma_parms = parent->dma_parms; 203 gadget->dev.dma_parms = parent->dma_parms;
199 gadget->dev.dma_mask = parent->dma_mask; 204 gadget->dev.dma_mask = parent->dma_mask;
205#endif
200 206
201 if (release) 207 if (release)
202 gadget->dev.release = release; 208 gadget->dev.release = release;
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 2b702772d04d..6dce37555c4f 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -874,6 +874,7 @@ static int ehci_hub_control (
874 ehci->reset_done[wIndex] = jiffies 874 ehci->reset_done[wIndex] = jiffies
875 + msecs_to_jiffies(20); 875 + msecs_to_jiffies(20);
876 usb_hcd_start_port_resume(&hcd->self, wIndex); 876 usb_hcd_start_port_resume(&hcd->self, wIndex);
877 set_bit(wIndex, &ehci->resuming_ports);
877 /* check the port again */ 878 /* check the port again */
878 mod_timer(&ehci_to_hcd(ehci)->rh_timer, 879 mod_timer(&ehci_to_hcd(ehci)->rh_timer,
879 ehci->reset_done[wIndex]); 880 ehci->reset_done[wIndex]);
diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h
index 4b8a2092432f..978c849f9c9a 100644
--- a/drivers/usb/host/pci-quirks.h
+++ b/drivers/usb/host/pci-quirks.h
@@ -13,6 +13,7 @@ void usb_enable_xhci_ports(struct pci_dev *xhci_pdev);
13void usb_disable_xhci_ports(struct pci_dev *xhci_pdev); 13void usb_disable_xhci_ports(struct pci_dev *xhci_pdev);
14void sb800_prefetch(struct device *dev, int on); 14void sb800_prefetch(struct device *dev, int on);
15#else 15#else
16struct pci_dev;
16static inline void usb_amd_quirk_pll_disable(void) {} 17static inline void usb_amd_quirk_pll_disable(void) {}
17static inline void usb_amd_quirk_pll_enable(void) {} 18static inline void usb_amd_quirk_pll_enable(void) {}
18static inline void usb_amd_dev_put(void) {} 19static inline void usb_amd_dev_put(void) {}
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index cc24e39b97d5..f00cb203faea 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -93,7 +93,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
93 } 93 }
94 if (pdev->vendor == PCI_VENDOR_ID_INTEL && 94 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
95 pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI) { 95 pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI) {
96 xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
97 xhci->quirks |= XHCI_EP_LIMIT_QUIRK; 96 xhci->quirks |= XHCI_EP_LIMIT_QUIRK;
98 xhci->limit_active_eps = 64; 97 xhci->limit_active_eps = 64;
99 xhci->quirks |= XHCI_SW_BW_CHECKING; 98 xhci->quirks |= XHCI_SW_BW_CHECKING;
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 1e57eafa6910..5b08cd85f8e7 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -434,7 +434,7 @@ static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
434 434
435 /* A ring has pending URBs if its TD list is not empty */ 435 /* A ring has pending URBs if its TD list is not empty */
436 if (!(ep->ep_state & EP_HAS_STREAMS)) { 436 if (!(ep->ep_state & EP_HAS_STREAMS)) {
437 if (!(list_empty(&ep->ring->td_list))) 437 if (ep->ring && !(list_empty(&ep->ring->td_list)))
438 xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0); 438 xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
439 return; 439 return;
440 } 440 }
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 2c49f00260ca..41eb4fc33453 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -329,7 +329,7 @@ static void xhci_cleanup_msix(struct xhci_hcd *xhci)
329 return; 329 return;
330} 330}
331 331
332static void xhci_msix_sync_irqs(struct xhci_hcd *xhci) 332static void __maybe_unused xhci_msix_sync_irqs(struct xhci_hcd *xhci)
333{ 333{
334 int i; 334 int i;
335 335
@@ -1181,9 +1181,6 @@ static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
1181 } 1181 }
1182 1182
1183 xhci = hcd_to_xhci(hcd); 1183 xhci = hcd_to_xhci(hcd);
1184 if (xhci->xhc_state & XHCI_STATE_HALTED)
1185 return -ENODEV;
1186
1187 if (check_virt_dev) { 1184 if (check_virt_dev) {
1188 if (!udev->slot_id || !xhci->devs[udev->slot_id]) { 1185 if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
1189 printk(KERN_DEBUG "xHCI %s called with unaddressed " 1186 printk(KERN_DEBUG "xHCI %s called with unaddressed "
@@ -1199,6 +1196,9 @@ static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
1199 } 1196 }
1200 } 1197 }
1201 1198
1199 if (xhci->xhc_state & XHCI_STATE_HALTED)
1200 return -ENODEV;
1201
1202 return 1; 1202 return 1;
1203} 1203}
1204 1204
@@ -3898,7 +3898,7 @@ int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1)
3898 * Issue an Evaluate Context command to change the Maximum Exit Latency in the 3898 * Issue an Evaluate Context command to change the Maximum Exit Latency in the
3899 * slot context. If that succeeds, store the new MEL in the xhci_virt_device. 3899 * slot context. If that succeeds, store the new MEL in the xhci_virt_device.
3900 */ 3900 */
3901static int xhci_change_max_exit_latency(struct xhci_hcd *xhci, 3901static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
3902 struct usb_device *udev, u16 max_exit_latency) 3902 struct usb_device *udev, u16 max_exit_latency)
3903{ 3903{
3904 struct xhci_virt_device *virt_dev; 3904 struct xhci_virt_device *virt_dev;
@@ -4892,6 +4892,13 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
4892 4892
4893 get_quirks(dev, xhci); 4893 get_quirks(dev, xhci);
4894 4894
4895 /* In xhci controllers which follow xhci 1.0 spec gives a spurious
4896 * success event after a short transfer. This quirk will ignore such
4897 * spurious event.
4898 */
4899 if (xhci->hci_version > 0x96)
4900 xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
4901
4895 /* Make sure the HC is halted. */ 4902 /* Make sure the HC is halted. */
4896 retval = xhci_halt(xhci); 4903 retval = xhci_halt(xhci);
4897 if (retval) 4904 if (retval)
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
index c21386ec5d35..de98906f786d 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.c
+++ b/drivers/usb/misc/sisusbvga/sisusb.c
@@ -3247,6 +3247,7 @@ static const struct usb_device_id sisusb_table[] = {
3247 { USB_DEVICE(0x0711, 0x0903) }, 3247 { USB_DEVICE(0x0711, 0x0903) },
3248 { USB_DEVICE(0x0711, 0x0918) }, 3248 { USB_DEVICE(0x0711, 0x0918) },
3249 { USB_DEVICE(0x0711, 0x0920) }, 3249 { USB_DEVICE(0x0711, 0x0920) },
3250 { USB_DEVICE(0x0711, 0x0950) },
3250 { USB_DEVICE(0x182d, 0x021c) }, 3251 { USB_DEVICE(0x182d, 0x021c) },
3251 { USB_DEVICE(0x182d, 0x0269) }, 3252 { USB_DEVICE(0x182d, 0x0269) },
3252 { } 3253 { }
diff --git a/drivers/usb/phy/phy-omap-usb3.c b/drivers/usb/phy/phy-omap-usb3.c
index efe6e1464f45..a2fb30bbb971 100644
--- a/drivers/usb/phy/phy-omap-usb3.c
+++ b/drivers/usb/phy/phy-omap-usb3.c
@@ -71,9 +71,9 @@ static struct usb_dpll_params omap_usb3_dpll_params[NUM_SYS_CLKS] = {
71 {1250, 5, 4, 20, 0}, /* 12 MHz */ 71 {1250, 5, 4, 20, 0}, /* 12 MHz */
72 {3125, 20, 4, 20, 0}, /* 16.8 MHz */ 72 {3125, 20, 4, 20, 0}, /* 16.8 MHz */
73 {1172, 8, 4, 20, 65537}, /* 19.2 MHz */ 73 {1172, 8, 4, 20, 65537}, /* 19.2 MHz */
74 {1000, 7, 4, 10, 0}, /* 20 MHz */
74 {1250, 12, 4, 20, 0}, /* 26 MHz */ 75 {1250, 12, 4, 20, 0}, /* 26 MHz */
75 {3125, 47, 4, 20, 92843}, /* 38.4 MHz */ 76 {3125, 47, 4, 20, 92843}, /* 38.4 MHz */
76 {1000, 7, 4, 10, 0}, /* 20 MHz */
77 77
78}; 78};
79 79
diff --git a/drivers/usb/phy/phy-samsung-usb2.c b/drivers/usb/phy/phy-samsung-usb2.c
index 1011c16ade7e..758b86d0fcb3 100644
--- a/drivers/usb/phy/phy-samsung-usb2.c
+++ b/drivers/usb/phy/phy-samsung-usb2.c
@@ -388,7 +388,7 @@ static int samsung_usb2phy_probe(struct platform_device *pdev)
388 clk = devm_clk_get(dev, "otg"); 388 clk = devm_clk_get(dev, "otg");
389 389
390 if (IS_ERR(clk)) { 390 if (IS_ERR(clk)) {
391 dev_err(dev, "Failed to get otg clock\n"); 391 dev_err(dev, "Failed to get usbhost/otg clock\n");
392 return PTR_ERR(clk); 392 return PTR_ERR(clk);
393 } 393 }
394 394
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index ed4949faa70d..805940c37353 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -855,10 +855,6 @@ static int usbhsg_gadget_stop(struct usb_gadget *gadget,
855 struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget); 855 struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget);
856 struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv); 856 struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
857 857
858 if (!driver ||
859 !driver->unbind)
860 return -EINVAL;
861
862 usbhsg_try_stop(priv, USBHSG_STATUS_REGISTERD); 858 usbhsg_try_stop(priv, USBHSG_STATUS_REGISTERD);
863 gpriv->driver = NULL; 859 gpriv->driver = NULL;
864 860
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index d6ef2f8da37d..0eae4ba3760e 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -53,6 +53,7 @@ static const struct usb_device_id id_table[] = {
53 { USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */ 53 { USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
54 { USB_DEVICE(0x0489, 0xE003) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */ 54 { USB_DEVICE(0x0489, 0xE003) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
55 { USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */ 55 { USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */
56 { USB_DEVICE(0x0846, 0x1100) }, /* NetGear Managed Switch M4100 series, M5300 series, M7100 series */
56 { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */ 57 { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
57 { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */ 58 { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
58 { USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */ 59 { USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */
@@ -118,6 +119,8 @@ static const struct usb_device_id id_table[] = {
118 { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */ 119 { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
119 { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */ 120 { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */
120 { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */ 121 { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
122 { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
123 { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
121 { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ 124 { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
122 { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ 125 { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
123 { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */ 126 { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
@@ -148,6 +151,7 @@ static const struct usb_device_id id_table[] = {
148 { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */ 151 { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
149 { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ 152 { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
150 { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ 153 { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
154 { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
151 { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */ 155 { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
152 { USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */ 156 { USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */
153 { USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */ 157 { USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 0a818b238508..603fb70dde80 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -905,20 +905,20 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
905 status = mos7840_get_reg_sync(port, mos7840_port->SpRegOffset, &Data); 905 status = mos7840_get_reg_sync(port, mos7840_port->SpRegOffset, &Data);
906 if (status < 0) { 906 if (status < 0) {
907 dev_dbg(&port->dev, "Reading Spreg failed\n"); 907 dev_dbg(&port->dev, "Reading Spreg failed\n");
908 return -1; 908 goto err;
909 } 909 }
910 Data |= 0x80; 910 Data |= 0x80;
911 status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data); 911 status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data);
912 if (status < 0) { 912 if (status < 0) {
913 dev_dbg(&port->dev, "writing Spreg failed\n"); 913 dev_dbg(&port->dev, "writing Spreg failed\n");
914 return -1; 914 goto err;
915 } 915 }
916 916
917 Data &= ~0x80; 917 Data &= ~0x80;
918 status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data); 918 status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data);
919 if (status < 0) { 919 if (status < 0) {
920 dev_dbg(&port->dev, "writing Spreg failed\n"); 920 dev_dbg(&port->dev, "writing Spreg failed\n");
921 return -1; 921 goto err;
922 } 922 }
923 /* End of block to be checked */ 923 /* End of block to be checked */
924 924
@@ -927,7 +927,7 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
927 &Data); 927 &Data);
928 if (status < 0) { 928 if (status < 0) {
929 dev_dbg(&port->dev, "Reading Controlreg failed\n"); 929 dev_dbg(&port->dev, "Reading Controlreg failed\n");
930 return -1; 930 goto err;
931 } 931 }
932 Data |= 0x08; /* Driver done bit */ 932 Data |= 0x08; /* Driver done bit */
933 Data |= 0x20; /* rx_disable */ 933 Data |= 0x20; /* rx_disable */
@@ -935,7 +935,7 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
935 mos7840_port->ControlRegOffset, Data); 935 mos7840_port->ControlRegOffset, Data);
936 if (status < 0) { 936 if (status < 0) {
937 dev_dbg(&port->dev, "writing Controlreg failed\n"); 937 dev_dbg(&port->dev, "writing Controlreg failed\n");
938 return -1; 938 goto err;
939 } 939 }
940 /* do register settings here */ 940 /* do register settings here */
941 /* Set all regs to the device default values. */ 941 /* Set all regs to the device default values. */
@@ -946,21 +946,21 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
946 status = mos7840_set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data); 946 status = mos7840_set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data);
947 if (status < 0) { 947 if (status < 0) {
948 dev_dbg(&port->dev, "disabling interrupts failed\n"); 948 dev_dbg(&port->dev, "disabling interrupts failed\n");
949 return -1; 949 goto err;
950 } 950 }
951 /* Set FIFO_CONTROL_REGISTER to the default value */ 951 /* Set FIFO_CONTROL_REGISTER to the default value */
952 Data = 0x00; 952 Data = 0x00;
953 status = mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data); 953 status = mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data);
954 if (status < 0) { 954 if (status < 0) {
955 dev_dbg(&port->dev, "Writing FIFO_CONTROL_REGISTER failed\n"); 955 dev_dbg(&port->dev, "Writing FIFO_CONTROL_REGISTER failed\n");
956 return -1; 956 goto err;
957 } 957 }
958 958
959 Data = 0xcf; 959 Data = 0xcf;
960 status = mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data); 960 status = mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data);
961 if (status < 0) { 961 if (status < 0) {
962 dev_dbg(&port->dev, "Writing FIFO_CONTROL_REGISTER failed\n"); 962 dev_dbg(&port->dev, "Writing FIFO_CONTROL_REGISTER failed\n");
963 return -1; 963 goto err;
964 } 964 }
965 965
966 Data = 0x03; 966 Data = 0x03;
@@ -1103,6 +1103,15 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
1103 /* mos7840_change_port_settings(mos7840_port,old_termios); */ 1103 /* mos7840_change_port_settings(mos7840_port,old_termios); */
1104 1104
1105 return 0; 1105 return 0;
1106err:
1107 for (j = 0; j < NUM_URBS; ++j) {
1108 urb = mos7840_port->write_urb_pool[j];
1109 if (!urb)
1110 continue;
1111 kfree(urb->transfer_buffer);
1112 usb_free_urb(urb);
1113 }
1114 return status;
1106} 1115}
1107 1116
1108/***************************************************************************** 1117/*****************************************************************************
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 5dd857de05b0..1cf6f125f5f0 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -341,17 +341,12 @@ static void option_instat_callback(struct urb *urb);
341#define OLIVETTI_VENDOR_ID 0x0b3c 341#define OLIVETTI_VENDOR_ID 0x0b3c
342#define OLIVETTI_PRODUCT_OLICARD100 0xc000 342#define OLIVETTI_PRODUCT_OLICARD100 0xc000
343#define OLIVETTI_PRODUCT_OLICARD145 0xc003 343#define OLIVETTI_PRODUCT_OLICARD145 0xc003
344#define OLIVETTI_PRODUCT_OLICARD200 0xc005
344 345
345/* Celot products */ 346/* Celot products */
346#define CELOT_VENDOR_ID 0x211f 347#define CELOT_VENDOR_ID 0x211f
347#define CELOT_PRODUCT_CT680M 0x6801 348#define CELOT_PRODUCT_CT680M 0x6801
348 349
349/* ONDA Communication vendor id */
350#define ONDA_VENDOR_ID 0x1ee8
351
352/* ONDA MT825UP HSDPA 14.2 modem */
353#define ONDA_MT825UP 0x000b
354
355/* Samsung products */ 350/* Samsung products */
356#define SAMSUNG_VENDOR_ID 0x04e8 351#define SAMSUNG_VENDOR_ID 0x04e8
357#define SAMSUNG_PRODUCT_GT_B3730 0x6889 352#define SAMSUNG_PRODUCT_GT_B3730 0x6889
@@ -444,7 +439,8 @@ static void option_instat_callback(struct urb *urb);
444 439
445/* Hyundai Petatel Inc. products */ 440/* Hyundai Petatel Inc. products */
446#define PETATEL_VENDOR_ID 0x1ff4 441#define PETATEL_VENDOR_ID 0x1ff4
447#define PETATEL_PRODUCT_NP10T 0x600e 442#define PETATEL_PRODUCT_NP10T_600A 0x600a
443#define PETATEL_PRODUCT_NP10T_600E 0x600e
448 444
449/* TP-LINK Incorporated products */ 445/* TP-LINK Incorporated products */
450#define TPLINK_VENDOR_ID 0x2357 446#define TPLINK_VENDOR_ID 0x2357
@@ -782,6 +778,7 @@ static const struct usb_device_id option_ids[] = {
782 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) }, 778 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) },
783 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) }, 779 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
784 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ 780 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
781 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
785 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ 782 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
786 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */ 783 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */
787 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) }, 784 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) },
@@ -817,7 +814,8 @@ static const struct usb_device_id option_ids[] = {
817 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff), 814 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff),
818 .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, 815 .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
819 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0018, 0xff, 0xff, 0xff) }, 816 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0018, 0xff, 0xff, 0xff) },
820 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0019, 0xff, 0xff, 0xff) }, 817 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0019, 0xff, 0xff, 0xff),
818 .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
821 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0020, 0xff, 0xff, 0xff) }, 819 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0020, 0xff, 0xff, 0xff) },
822 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0021, 0xff, 0xff, 0xff), 820 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0021, 0xff, 0xff, 0xff),
823 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 821 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
@@ -1256,8 +1254,8 @@ static const struct usb_device_id option_ids[] = {
1256 1254
1257 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) }, 1255 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
1258 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) }, 1256 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) },
1257 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200) },
1259 { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */ 1258 { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
1260 { USB_DEVICE(ONDA_VENDOR_ID, ONDA_MT825UP) }, /* ONDA MT825UP modem */
1261 { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/ 1259 { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
1262 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) }, 1260 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) },
1263 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM610) }, 1261 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM610) },
@@ -1329,9 +1327,12 @@ static const struct usb_device_id option_ids[] = {
1329 { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x02, 0x01) }, 1327 { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x02, 0x01) },
1330 { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x00, 0x00) }, 1328 { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x00, 0x00) },
1331 { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) }, 1329 { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
1332 { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T) }, 1330 { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600A) },
1331 { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600E) },
1333 { USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180), 1332 { USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180),
1334 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1333 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1334 { USB_DEVICE(TPLINK_VENDOR_ID, 0x9000), /* TP-Link MA260 */
1335 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1335 { USB_DEVICE(CHANGHONG_VENDOR_ID, CHANGHONG_PRODUCT_CH690) }, 1336 { USB_DEVICE(CHANGHONG_VENDOR_ID, CHANGHONG_PRODUCT_CH690) },
1336 { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x02, 0x01) }, /* D-Link DWM-156 (variant) */ 1337 { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x02, 0x01) }, /* D-Link DWM-156 (variant) */
1337 { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x00, 0x00) }, /* D-Link DWM-156 (variant) */ 1338 { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x00, 0x00) }, /* D-Link DWM-156 (variant) */
@@ -1339,6 +1340,8 @@ static const struct usb_device_id option_ids[] = {
1339 { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x00, 0x00) }, 1340 { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x00, 0x00) },
1340 { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) }, 1341 { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) },
1341 { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) }, 1342 { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
1343 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
1344 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
1342 { } /* Terminating entry */ 1345 { } /* Terminating entry */
1343}; 1346};
1344MODULE_DEVICE_TABLE(usb, option_ids); 1347MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 7182bb774b79..375b5a400b6f 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -371,7 +371,7 @@ static int ti_startup(struct usb_serial *serial)
371 usb_set_serial_data(serial, tdev); 371 usb_set_serial_data(serial, tdev);
372 372
373 /* determine device type */ 373 /* determine device type */
374 if (usb_match_id(serial->interface, ti_id_table_3410)) 374 if (serial->type == &ti_1port_device)
375 tdev->td_is_3410 = 1; 375 tdev->td_is_3410 = 1;
376 dev_dbg(&dev->dev, "%s - device type is %s\n", __func__, 376 dev_dbg(&dev->dev, "%s - device type is %s\n", __func__,
377 tdev->td_is_3410 ? "3410" : "5052"); 377 tdev->td_is_3410 ? "3410" : "5052");
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 1799335288bd..c015f2c16729 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -665,6 +665,13 @@ UNUSUAL_DEV( 0x054c, 0x016a, 0x0000, 0x9999,
665 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 665 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
666 US_FL_FIX_INQUIRY ), 666 US_FL_FIX_INQUIRY ),
667 667
668/* Submitted by Ren Bigcren <bigcren.ren@sonymobile.com> */
669UNUSUAL_DEV( 0x054c, 0x02a5, 0x0100, 0x0100,
670 "Sony Corp.",
671 "MicroVault Flash Drive",
672 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
673 US_FL_NO_READ_CAPACITY_16 ),
674
668/* floppy reports multiple luns */ 675/* floppy reports multiple luns */
669UNUSUAL_DEV( 0x055d, 0x2020, 0x0000, 0x0210, 676UNUSUAL_DEV( 0x055d, 0x2020, 0x0000, 0x0210,
670 "SAMSUNG", 677 "SAMSUNG",
diff --git a/drivers/video/backlight/max8925_bl.c b/drivers/video/backlight/max8925_bl.c
index 5ca11b066b7e..886e797f75f9 100644
--- a/drivers/video/backlight/max8925_bl.c
+++ b/drivers/video/backlight/max8925_bl.c
@@ -101,33 +101,37 @@ static const struct backlight_ops max8925_backlight_ops = {
101 .get_brightness = max8925_backlight_get_brightness, 101 .get_brightness = max8925_backlight_get_brightness,
102}; 102};
103 103
104#ifdef CONFIG_OF 104static void max8925_backlight_dt_init(struct platform_device *pdev)
105static int max8925_backlight_dt_init(struct platform_device *pdev,
106 struct max8925_backlight_pdata *pdata)
107{ 105{
108 struct device_node *nproot = pdev->dev.parent->of_node, *np; 106 struct device_node *nproot = pdev->dev.parent->of_node, *np;
109 int dual_string; 107 struct max8925_backlight_pdata *pdata;
108 u32 val;
109
110 if (!nproot || !IS_ENABLED(CONFIG_OF))
111 return;
112
113 pdata = devm_kzalloc(&pdev->dev,
114 sizeof(struct max8925_backlight_pdata),
115 GFP_KERNEL);
116 if (!pdata)
117 return;
110 118
111 if (!nproot)
112 return -ENODEV;
113 np = of_find_node_by_name(nproot, "backlight"); 119 np = of_find_node_by_name(nproot, "backlight");
114 if (!np) { 120 if (!np) {
115 dev_err(&pdev->dev, "failed to find backlight node\n"); 121 dev_err(&pdev->dev, "failed to find backlight node\n");
116 return -ENODEV; 122 return;
117 } 123 }
118 124
119 of_property_read_u32(np, "maxim,max8925-dual-string", &dual_string); 125 if (!of_property_read_u32(np, "maxim,max8925-dual-string", &val))
120 pdata->dual_string = dual_string; 126 pdata->dual_string = val;
121 return 0; 127
128 pdev->dev.platform_data = pdata;
122} 129}
123#else
124#define max8925_backlight_dt_init(x, y) (-1)
125#endif
126 130
127static int max8925_backlight_probe(struct platform_device *pdev) 131static int max8925_backlight_probe(struct platform_device *pdev)
128{ 132{
129 struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent); 133 struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent);
130 struct max8925_backlight_pdata *pdata = pdev->dev.platform_data; 134 struct max8925_backlight_pdata *pdata;
131 struct max8925_backlight_data *data; 135 struct max8925_backlight_data *data;
132 struct backlight_device *bl; 136 struct backlight_device *bl;
133 struct backlight_properties props; 137 struct backlight_properties props;
@@ -170,13 +174,10 @@ static int max8925_backlight_probe(struct platform_device *pdev)
170 platform_set_drvdata(pdev, bl); 174 platform_set_drvdata(pdev, bl);
171 175
172 value = 0; 176 value = 0;
173 if (pdev->dev.parent->of_node && !pdata) { 177 if (!pdev->dev.platform_data)
174 pdata = devm_kzalloc(&pdev->dev, 178 max8925_backlight_dt_init(pdev);
175 sizeof(struct max8925_backlight_pdata),
176 GFP_KERNEL);
177 max8925_backlight_dt_init(pdev, pdata);
178 }
179 179
180 pdata = pdev->dev.platform_data;
180 if (pdata) { 181 if (pdata) {
181 if (pdata->lxw_scl) 182 if (pdata->lxw_scl)
182 value |= (1 << 7); 183 value |= (1 << 7);
diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c
index 635d5690dd5a..9e758a8f890d 100644
--- a/drivers/video/hdmi.c
+++ b/drivers/video/hdmi.c
@@ -96,13 +96,18 @@ ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer,
96 96
97 ptr[0] = ((frame->colorspace & 0x3) << 5) | (frame->scan_mode & 0x3); 97 ptr[0] = ((frame->colorspace & 0x3) << 5) | (frame->scan_mode & 0x3);
98 98
99 if (frame->active_info_valid) 99 /*
100 * Data byte 1, bit 4 has to be set if we provide the active format
101 * aspect ratio
102 */
103 if (frame->active_aspect & 0xf)
100 ptr[0] |= BIT(4); 104 ptr[0] |= BIT(4);
101 105
102 if (frame->horizontal_bar_valid) 106 /* Bit 3 and 2 indicate if we transmit horizontal/vertical bar data */
107 if (frame->top_bar || frame->bottom_bar)
103 ptr[0] |= BIT(3); 108 ptr[0] |= BIT(3);
104 109
105 if (frame->vertical_bar_valid) 110 if (frame->left_bar || frame->right_bar)
106 ptr[0] |= BIT(2); 111 ptr[0] |= BIT(2);
107 112
108 ptr[1] = ((frame->colorimetry & 0x3) << 6) | 113 ptr[1] = ((frame->colorimetry & 0x3) << 6) |
@@ -283,9 +288,33 @@ ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame,
283EXPORT_SYMBOL(hdmi_audio_infoframe_pack); 288EXPORT_SYMBOL(hdmi_audio_infoframe_pack);
284 289
285/** 290/**
286 * hdmi_vendor_infoframe_pack() - write a HDMI vendor infoframe to binary 291 * hdmi_vendor_infoframe_init() - initialize an HDMI vendor infoframe
287 * buffer
288 * @frame: HDMI vendor infoframe 292 * @frame: HDMI vendor infoframe
293 *
294 * Returns 0 on success or a negative error code on failure.
295 */
296int hdmi_vendor_infoframe_init(struct hdmi_vendor_infoframe *frame)
297{
298 memset(frame, 0, sizeof(*frame));
299
300 frame->type = HDMI_INFOFRAME_TYPE_VENDOR;
301 frame->version = 1;
302
303 frame->oui = HDMI_IEEE_OUI;
304
305 /*
306 * 0 is a valid value for s3d_struct, so we use a special "not set"
307 * value
308 */
309 frame->s3d_struct = HDMI_3D_STRUCTURE_INVALID;
310
311 return 0;
312}
313EXPORT_SYMBOL(hdmi_vendor_infoframe_init);
314
315/**
316 * hdmi_vendor_infoframe_pack() - write a HDMI vendor infoframe to binary buffer
317 * @frame: HDMI infoframe
289 * @buffer: destination buffer 318 * @buffer: destination buffer
290 * @size: size of buffer 319 * @size: size of buffer
291 * 320 *
@@ -298,11 +327,25 @@ EXPORT_SYMBOL(hdmi_audio_infoframe_pack);
298 * error code on failure. 327 * error code on failure.
299 */ 328 */
300ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame, 329ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
301 void *buffer, size_t size) 330 void *buffer, size_t size)
302{ 331{
303 u8 *ptr = buffer; 332 u8 *ptr = buffer;
304 size_t length; 333 size_t length;
305 334
335 /* empty info frame */
336 if (frame->vic == 0 && frame->s3d_struct == HDMI_3D_STRUCTURE_INVALID)
337 return -EINVAL;
338
339 /* only one of those can be supplied */
340 if (frame->vic != 0 && frame->s3d_struct != HDMI_3D_STRUCTURE_INVALID)
341 return -EINVAL;
342
343 /* for side by side (half) we also need to provide 3D_Ext_Data */
344 if (frame->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF)
345 frame->length = 6;
346 else
347 frame->length = 5;
348
306 length = HDMI_INFOFRAME_HEADER_SIZE + frame->length; 349 length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;
307 350
308 if (size < length) 351 if (size < length)
@@ -315,7 +358,20 @@ ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
315 ptr[2] = frame->length; 358 ptr[2] = frame->length;
316 ptr[3] = 0; /* checksum */ 359 ptr[3] = 0; /* checksum */
317 360
318 memcpy(&ptr[HDMI_INFOFRAME_HEADER_SIZE], frame->data, frame->length); 361 /* HDMI OUI */
362 ptr[4] = 0x03;
363 ptr[5] = 0x0c;
364 ptr[6] = 0x00;
365
366 if (frame->vic) {
367 ptr[7] = 0x1 << 5; /* video format */
368 ptr[8] = frame->vic;
369 } else {
370 ptr[7] = 0x2 << 5; /* video format */
371 ptr[8] = (frame->s3d_struct & 0xf) << 4;
372 if (frame->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF)
373 ptr[9] = (frame->s3d_ext_data & 0xf) << 4;
374 }
319 375
320 hdmi_infoframe_checksum(buffer, length); 376 hdmi_infoframe_checksum(buffer, length);
321 377
@@ -323,6 +379,20 @@ ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
323} 379}
324EXPORT_SYMBOL(hdmi_vendor_infoframe_pack); 380EXPORT_SYMBOL(hdmi_vendor_infoframe_pack);
325 381
382/*
383 * hdmi_vendor_any_infoframe_pack() - write a vendor infoframe to binary buffer
384 */
385static ssize_t
386hdmi_vendor_any_infoframe_pack(union hdmi_vendor_any_infoframe *frame,
387 void *buffer, size_t size)
388{
389 /* we only know about HDMI vendor infoframes */
390 if (frame->any.oui != HDMI_IEEE_OUI)
391 return -EINVAL;
392
393 return hdmi_vendor_infoframe_pack(&frame->hdmi, buffer, size);
394}
395
326/** 396/**
327 * hdmi_infoframe_pack() - write a HDMI infoframe to binary buffer 397 * hdmi_infoframe_pack() - write a HDMI infoframe to binary buffer
328 * @frame: HDMI infoframe 398 * @frame: HDMI infoframe
@@ -353,8 +423,8 @@ hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer, size_t size)
353 length = hdmi_audio_infoframe_pack(&frame->audio, buffer, size); 423 length = hdmi_audio_infoframe_pack(&frame->audio, buffer, size);
354 break; 424 break;
355 case HDMI_INFOFRAME_TYPE_VENDOR: 425 case HDMI_INFOFRAME_TYPE_VENDOR:
356 length = hdmi_vendor_infoframe_pack(&frame->vendor, 426 length = hdmi_vendor_any_infoframe_pack(&frame->vendor,
357 buffer, size); 427 buffer, size);
358 break; 428 break;
359 default: 429 default:
360 WARN(1, "Bad infoframe type %d\n", frame->any.type); 430 WARN(1, "Bad infoframe type %d\n", frame->any.type);